Add latest changes from gitlab-org/gitlab@master

This commit is contained in:
GitLab Bot 2021-06-18 15:10:16 +00:00
parent 38a1a6cb91
commit c8cc2fe990
218 changed files with 2093 additions and 1731 deletions

View File

@ -1640,9 +1640,6 @@ Gitlab/NamespacedClass:
- 'app/models/project_pages_metadatum.rb'
- 'app/models/project_repository.rb'
- 'app/models/project_repository_storage_move.rb'
- 'app/models/project_services/mock_monitoring_service.rb'
- 'app/models/project_services/monitoring_service.rb'
- 'app/models/project_services/prometheus_service.rb'
- 'app/models/project_setting.rb'
- 'app/models/project_snippet.rb'
- 'app/models/project_statistics.rb'

View File

@ -0,0 +1,58 @@
<script>
import { GlDropdown, GlDropdownItem } from '@gitlab/ui';
import { __ } from '~/locale';
import { ISSUE_STATUS_SELECT_OPTIONS } from '../constants';
export default {
name: 'StatusSelect',
components: {
GlDropdown,
GlDropdownItem,
},
data() {
return {
status: null,
};
},
computed: {
dropdownText() {
return this.status?.text ?? this.$options.i18n.defaultDropdownText;
},
selectedValue() {
return this.status?.value;
},
},
methods: {
onDropdownItemClick(statusOption) {
// clear status if the currently checked status is clicked again
if (this.status?.value === statusOption.value) {
this.status = null;
} else {
this.status = statusOption;
}
},
},
i18n: {
dropdownTitle: __('Change status'),
defaultDropdownText: __('Select status'),
},
ISSUE_STATUS_SELECT_OPTIONS,
};
</script>
<template>
<div>
<input type="hidden" name="update[state_event]" :value="selectedValue" />
<gl-dropdown :text="dropdownText" :title="$options.i18n.dropdownTitle" class="gl-w-full">
<gl-dropdown-item
v-for="statusOption in $options.ISSUE_STATUS_SELECT_OPTIONS"
:key="statusOption.value"
:is-checked="selectedValue === statusOption.value"
is-check-item
:title="statusOption.text"
@click="onDropdownItemClick(statusOption)"
>
{{ statusOption.text }}
</gl-dropdown-item>
</gl-dropdown>
</div>
</template>

View File

@ -0,0 +1,17 @@
import { __ } from '~/locale';
export const ISSUE_STATUS_MODIFIERS = {
REOPEN: 'reopen',
CLOSE: 'close',
};
export const ISSUE_STATUS_SELECT_OPTIONS = [
{
value: ISSUE_STATUS_MODIFIERS.REOPEN,
text: __('Open'),
},
{
value: ISSUE_STATUS_MODIFIERS.CLOSE,
text: __('Closed'),
},
];
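The `value` strings above are what the new `StatusSelect` component submits through its hidden `update[state_event]` input when the bulk-update form is posted. A minimal sketch of how such a modifier could be mapped to a state change on each selected issue — the method and the issue API below are illustrative, not the actual GitLab bulk-update code:

```ruby
# Illustrative only: maps the posted "reopen"/"close" modifier to an action.
STATE_EVENTS = {
  'reopen' => :reopen,
  'close'  => :close
}.freeze

def apply_state_event(issues, state_event)
  action = STATE_EVENTS[state_event]
  return if action.nil? # no status selected, leave the issues untouched

  issues.each { |issue| issue.public_send(action) }
end
```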

View File

@ -0,0 +1,17 @@
import Vue from 'vue';
import StatusSelect from './components/status_select.vue';
export default function initIssueStatusSelect() {
const el = document.querySelector('.js-issue-status');
if (!el) {
return null;
}
return new Vue({
el,
render(h) {
return h(StatusSelect);
},
});
}

View File

@ -1,8 +1,8 @@
import $ from 'jquery';
import { difference, intersection, union } from 'lodash';
import createFlash from './flash';
import axios from './lib/utils/axios_utils';
import { __ } from './locale';
import createFlash from '~/flash';
import axios from '~/lib/utils/axios_utils';
import { __ } from '~/locale';
export default {
init({ form, issues, prefixId } = {}) {

View File

@ -2,11 +2,12 @@
import $ from 'jquery';
import { property } from 'lodash';
import issueableEventHub from '~/issues_list/eventhub';
import LabelsSelect from '~/labels_select';
import MilestoneSelect from '~/milestone_select';
import initIssueStatusSelect from './init_issue_status_select';
import IssuableBulkUpdateActions from './issuable_bulk_update_actions';
import issueStatusSelect from './issue_status_select';
import issueableEventHub from './issues_list/eventhub';
import LabelsSelect from './labels_select';
import MilestoneSelect from './milestone_select';
import subscriptionSelect from './subscription_select';
const HIDDEN_CLASS = 'hidden';
@ -29,7 +30,7 @@ export default class IssuableBulkUpdateSidebar {
this.$sidebar = $('.right-sidebar');
this.$sidebarInnerContainer = this.$sidebar.find('.issuable-sidebar');
this.$bulkEditCancelBtn = $('.js-bulk-update-menu-hide');
this.$bulkEditSubmitBtn = $('.update-selected-issues');
this.$bulkEditSubmitBtn = $('.js-update-selected-issues');
this.$bulkUpdateEnableBtn = $('.js-bulk-update-toggle');
this.$otherFilters = $('.issues-other-filters');
this.$checkAllContainer = $('.check-all-holder');
@ -56,7 +57,7 @@ export default class IssuableBulkUpdateSidebar {
initDropdowns() {
new LabelsSelect();
new MilestoneSelect();
issueStatusSelect();
initIssueStatusSelect();
subscriptionSelect();
if (IS_EE) {

View File

@ -1,6 +1,6 @@
import $ from 'jquery';
import initDeprecatedJQueryDropdown from '~/deprecated_jquery_dropdown';
import { __ } from './locale';
import { __ } from '~/locale';
export default function subscriptionSelect() {
$('.js-subscription-event').each((i, element) => {

View File

@ -1,4 +1,4 @@
import issuableInitBulkUpdateSidebar from './issuable_init_bulk_update_sidebar';
import issuableInitBulkUpdateSidebar from '~/issuable_bulk_update_sidebar/issuable_init_bulk_update_sidebar';
export default class IssuableIndex {
constructor(pagePrefix = 'issuable_') {

View File

@ -1,27 +0,0 @@
import $ from 'jquery';
import initDeprecatedJQueryDropdown from '~/deprecated_jquery_dropdown';
import { __ } from './locale';
export default function issueStatusSelect() {
$('.js-issue-status').each((i, el) => {
const fieldName = $(el).data('fieldName');
initDeprecatedJQueryDropdown($(el), {
selectable: true,
fieldName,
toggleLabel(selected, element, instance) {
let label = __('Author');
const $item = instance.dropdown.find('.is-active');
if ($item.length) {
label = $item.text();
}
return label;
},
clicked(options) {
return options.e.preventDefault();
},
id(obj, element) {
return $(element).data('id');
},
});
});
}

View File

@ -450,7 +450,9 @@ export default {
},
async handleBulkUpdateClick() {
if (!this.hasInitBulkEdit) {
const initBulkUpdateSidebar = await import('~/issuable_init_bulk_update_sidebar');
const initBulkUpdateSidebar = await import(
'~/issuable_bulk_update_sidebar/issuable_init_bulk_update_sidebar'
);
initBulkUpdateSidebar.default.init('issuable_');
const usersSelect = await import('~/users_select');

View File

@ -5,11 +5,11 @@
import $ from 'jquery';
import { difference, isEqual, escape, sortBy, template, union } from 'lodash';
import initDeprecatedJQueryDropdown from '~/deprecated_jquery_dropdown';
import IssuableBulkUpdateActions from '~/issuable_bulk_update_sidebar/issuable_bulk_update_actions';
import { isScopedLabel } from '~/lib/utils/common_utils';
import boardsStore from './boards/stores/boards_store';
import CreateLabelDropdown from './create_label';
import createFlash from './flash';
import IssuableBulkUpdateActions from './issuable_bulk_update_actions';
import axios from './lib/utils/axios_utils';
import { sprintf, __ } from './locale';

View File

@ -1,5 +1,5 @@
import IssuableFilteredSearchTokenKeys from 'ee_else_ce/filtered_search/issuable_filtered_search_token_keys';
import issuableInitBulkUpdateSidebar from '~/issuable_init_bulk_update_sidebar';
import issuableInitBulkUpdateSidebar from '~/issuable_bulk_update_sidebar/issuable_init_bulk_update_sidebar';
import { mountIssuablesListApp } from '~/issues_list';
import initManualOrdering from '~/manual_ordering';
import { FILTERED_SEARCH } from '~/pages/constants';

View File

@ -1,6 +1,6 @@
import addExtraTokensForMergeRequests from 'ee_else_ce/filtered_search/add_extra_tokens_for_merge_requests';
import IssuableFilteredSearchTokenKeys from '~/filtered_search/issuable_filtered_search_token_keys';
import issuableInitBulkUpdateSidebar from '~/issuable_init_bulk_update_sidebar';
import issuableInitBulkUpdateSidebar from '~/issuable_bulk_update_sidebar/issuable_init_bulk_update_sidebar';
import { FILTERED_SEARCH } from '~/pages/constants';
import initFilteredSearch from '~/pages/search/init_filtered_search';
import projectSelect from '~/project_select';

View File

@ -76,6 +76,7 @@ export default {
<template>
<security-report-download-dropdown
:title="s__('SecurityReports|Download results')"
:artifacts="reportArtifacts"
:loading="isLoadingReportArtifacts"
/>

View File

@ -21,6 +21,16 @@ export default {
required: false,
default: false,
},
text: {
type: String,
required: false,
default: '',
},
title: {
type: String,
required: false,
default: '',
},
},
methods: {
artifactText({ name }) {
@ -35,7 +45,8 @@ export default {
<template>
<gl-dropdown
v-gl-tooltip
:text="s__('SecurityReports|Download results')"
:text="text"
:title="title"
:loading="loading"
icon="download"
size="small"

View File

@ -200,6 +200,7 @@ export default {
<template #action-buttons>
<security-report-download-dropdown
:text="s__('SecurityReports|Download results')"
:artifacts="reportArtifacts"
:loading="isLoadingReportArtifacts"
/>
@ -228,6 +229,7 @@ export default {
<template #action-buttons>
<security-report-download-dropdown
:text="s__('SecurityReports|Download results')"
:artifacts="reportArtifacts"
:loading="isLoadingReportArtifacts"
/>

View File

@ -31,7 +31,7 @@ class ChaosController < ActionController::Base
gc_stat = Gitlab::Chaos.run_gc
render json: {
worker_id: Prometheus::PidProvider.worker_id,
worker_id: ::Prometheus::PidProvider.worker_id,
gc_stat: gc_stat
}
end
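The leading `::` added in this and the following hunks matters once a nested `Integrations::Prometheus` class exists: in code that is lexically nested under a module that also defines a `Prometheus` constant, an unqualified `Prometheus::PidProvider` can resolve against the wrong namespace. A self-contained sketch of that lookup behavior, with illustrative class bodies:

```ruby
module Prometheus                # top-level namespace (for example, the client gem)
  class PidProvider
    def self.worker_id
      'puma_0'
    end
  end
end

module Integrations
  class Prometheus; end          # nested class with the same short name

  class Example
    def self.worker_id
      # An unqualified Prometheus::PidProvider here would resolve Prometheus to
      # Integrations::Prometheus and then fail to find PidProvider; the leading
      # :: forces lookup from the top-level namespace instead.
      ::Prometheus::PidProvider.worker_id
    end
  end
end

Integrations::Example.worker_id  # => "puma_0"
```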

View File

@ -16,7 +16,7 @@ module Metrics::Dashboard::PrometheusApiProxy
return error_response(variable_substitution_result)
end
prometheus_result = Prometheus::ProxyService.new(
prometheus_result = ::Prometheus::ProxyService.new(
proxyable,
proxy_method,
proxy_path,

View File

@ -30,7 +30,7 @@ class MetricsController < ActionController::Base
def system_metrics
Gitlab::Metrics::System.summary.merge(
worker_id: Prometheus::PidProvider.worker_id
worker_id: ::Prometheus::PidProvider.worker_id
)
end
end

View File

@ -14,6 +14,6 @@ class Projects::Environments::PrometheusApiController < Projects::ApplicationCon
end
def proxy_variable_substitution_service
Prometheus::ProxyVariableSubstitutionService
::Prometheus::ProxyVariableSubstitutionService
end
end

View File

@ -25,9 +25,9 @@ module Projects
false
end
def jira_service
strong_memoize(:jira_service) do
@project.jira_service
def jira_integration
strong_memoize(:jira_integration) do
@project.jira_integration
end
end

View File

@ -66,7 +66,7 @@ module Projects
)
if @metric.persisted?
redirect_to edit_project_service_path(project, ::PrometheusService),
redirect_to edit_project_service_path(project, ::Integrations::Prometheus),
notice: _('Metric was successfully added.')
else
render 'new'
@ -77,7 +77,7 @@ module Projects
@metric = update_metrics_service(prometheus_metric).execute
if @metric.persisted?
redirect_to edit_project_service_path(project, ::PrometheusService),
redirect_to edit_project_service_path(project, ::Integrations::Prometheus),
notice: _('Metric was successfully updated.')
else
render 'edit'
@ -93,7 +93,7 @@ module Projects
respond_to do |format|
format.html do
redirect_to edit_project_service_path(project, ::PrometheusService), status: :see_other
redirect_to edit_project_service_path(project, ::Integrations::Prometheus), status: :see_other
end
format.json do
head :ok

View File

@ -51,14 +51,14 @@ class Projects::RunnersController < Projects::ApplicationController
end
def toggle_shared_runners
if !project.shared_runners_enabled && project.group && project.group.shared_runners_setting == 'disabled_and_unoverridable'
render json: { error: _('Cannot enable shared runners because parent group does not allow it') }, status: :unauthorized
return
update_params = { shared_runners_enabled: !project.shared_runners_enabled }
result = Projects::UpdateService.new(project, current_user, update_params).execute
if result[:status] == :success
render json: {}, status: :ok
else
render json: { error: result[:message] }, status: :unauthorized
end
project.toggle!(:shared_runners_enabled)
render json: {}, status: :ok
end
def toggle_group_runners

View File

@ -105,11 +105,11 @@ class Projects::ServicesController < Projects::ApplicationController
end
def redirect_deprecated_prometheus_service
redirect_to edit_project_service_path(project, integration) if integration.is_a?(::PrometheusService) && Feature.enabled?(:settings_operations_prometheus_service, project)
redirect_to edit_project_service_path(project, integration) if integration.is_a?(::Integrations::Prometheus) && Feature.enabled?(:settings_operations_prometheus_service, project)
end
def set_deprecation_notice_for_prometheus_service
return if !integration.is_a?(::PrometheusService) || !Feature.enabled?(:settings_operations_prometheus_service, project)
return if !integration.is_a?(::Integrations::Prometheus) || !Feature.enabled?(:settings_operations_prometheus_service, project)
operations_link_start = "<a href=\"#{project_settings_operations_path(project)}\">"
message = s_('PrometheusService|You can now manage your Prometheus settings on the %{operations_link_start}Operations%{operations_link_end} page. Fields on this page has been deprecated.') % { operations_link_start: operations_link_start, operations_link_end: "</a>" }

View File

@ -14,7 +14,7 @@ module Mutations
private
def find_object(id:)
GitlabSchema.object_from_id(id, expected_class: ::PrometheusService)
GitlabSchema.object_from_id(id, expected_class: ::Integrations::Prometheus)
end
def response(integration, result)

View File

@ -6,7 +6,7 @@ module Mutations
class ResetToken < PrometheusIntegrationBase
graphql_name 'PrometheusIntegrationResetToken'
argument :id, Types::GlobalIDType[::PrometheusService],
argument :id, Types::GlobalIDType[::Integrations::Prometheus],
required: true,
description: "The ID of the integration to mutate."

View File

@ -6,7 +6,7 @@ module Mutations
class Update < PrometheusIntegrationBase
graphql_name 'PrometheusIntegrationUpdate'
argument :id, Types::GlobalIDType[::PrometheusService],
argument :id, Types::GlobalIDType[::Integrations::Prometheus],
required: true,
description: "The ID of the integration to mutate."

View File

@ -54,7 +54,7 @@ module Resolvers
def expected_integration_types
[].tap do |types|
types << ::AlertManagement::HttpIntegration if http_integrations_allowed?
types << ::PrometheusService if prometheus_integrations_allowed?
types << ::Integrations::Prometheus if prometheus_integrations_allowed?
end
end
end

View File

@ -34,16 +34,16 @@ module Resolvers
private
alias_method :jira_service, :object
alias_method :jira_integration, :object
def project
jira_service&.project
jira_integration&.project
end
def jira_projects(name:)
args = { query: name }.compact
Jira::Requests::Projects::ListService.new(project.jira_service, args).execute
Jira::Requests::Projects::ListService.new(project.jira_integration, args).execute
end
end
end

View File

@ -43,7 +43,7 @@ module Types
definition_methods do
def resolve_type(object, context)
if object.is_a?(::PrometheusService)
if object.is_a?(::Integrations::Prometheus)
Types::AlertManagement::PrometheusIntegrationType
else
Types::AlertManagement::HttpIntegrationType

View File

@ -5,7 +5,7 @@ module CustomMetricsHelper
{
'custom-metrics-path' => url_for([project, metric]),
'metric-persisted' => metric.persisted?.to_s,
'edit-project-service-path' => edit_project_service_path(project, PrometheusService),
'edit-project-service-path' => edit_project_service_path(project, ::Integrations::Prometheus),
'validate-query-path' => validate_query_project_prometheus_metrics_path(project),
'title' => metric.title.to_s,
'query' => metric.query.to_s,

View File

@ -5,7 +5,7 @@ module OperationsHelper
def prometheus_service
strong_memoize(:prometheus_service) do
@project.find_or_initialize_service(::PrometheusService.to_param)
@project.find_or_initialize_service(::Integrations::Prometheus.to_param)
end
end

View File

@ -47,12 +47,12 @@ module Clusters
def activate_project_services
::Clusters::Applications::ActivateServiceWorker
.perform_async(cluster_id, ::PrometheusService.to_param) # rubocop:disable CodeReuse/ServiceClass
.perform_async(cluster_id, ::Integrations::Prometheus.to_param)
end
def deactivate_project_services
::Clusters::Applications::DeactivateServiceWorker
.perform_async(cluster_id, ::PrometheusService.to_param) # rubocop:disable CodeReuse/ServiceClass
.perform_async(cluster_id, ::Integrations::Prometheus.to_param)
end
end
end

View File

@ -48,8 +48,12 @@ class Integration < ApplicationRecord
flowdock
hangouts_chat
irker
jenkins jira
packagist pipelines_email pivotaltracker pushover
mattermost mattermost_slash_commands microsoft_teams mock_ci mock_monitoring
redmine
slack slack_slash_commands
teamcity
].to_set.freeze
def self.renamed?(name)

View File

@ -0,0 +1,23 @@
# frozen_string_literal: true
# Base class for monitoring services
#
# These services integrate with a deployment solution like Prometheus
# to provide additional features for environments.
module Integrations
class BaseMonitoring < Integration
default_value_for :category, 'monitoring'
def self.supported_events
%w()
end
def can_query?
raise NotImplementedError
end
def query(_, *_)
raise NotImplementedError
end
end
end
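`BaseMonitoring` only declares the contract; concrete integrations are expected to answer `can_query?` and `query`. A hypothetical minimal implementation of that contract, sketched as a plain Ruby class (a real integration would inherit from `Integrations::BaseMonitoring`, which is an ActiveRecord model):

```ruby
# Hypothetical class showing the shape of the BaseMonitoring contract.
class StaticMonitoring
  def can_query?
    true
  end

  # Mirrors BaseMonitoring#query(_, *_): the first argument selects the query
  # type, any remaining arguments would be passed through to the backend.
  def query(query_type, *_args)
    case query_type
    when :environment then { 'cpu' => 0.42, 'memory' => 0.61 }
    else raise NotImplementedError, "unsupported query: #{query_type}"
    end
  end
end

StaticMonitoring.new.query(:environment) # => {"cpu"=>0.42, "memory"=>0.61}
```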

View File

@ -272,6 +272,10 @@ module Integrations
test(nil)[:success]
end
def configured?
active? && valid_connection?
end
def test(_)
result = server_info
success = result.present?

View File

@ -0,0 +1,25 @@
# frozen_string_literal: true
module Integrations
class MockMonitoring < BaseMonitoring
def title
'Mock monitoring'
end
def description
'Mock monitoring service'
end
def self.to_param
'mock_monitoring'
end
def metrics(environment)
Gitlab::Json.parse(File.read(Rails.root + 'spec/fixtures/metrics.json'))
end
def can_test?
false
end
end
end

View File

@ -0,0 +1,205 @@
# frozen_string_literal: true
module Integrations
class Prometheus < BaseMonitoring
include PrometheusAdapter
# Access to prometheus is directly through the API
prop_accessor :api_url
prop_accessor :google_iap_service_account_json
prop_accessor :google_iap_audience_client_id
boolean_accessor :manual_configuration
# We need to allow the self-monitoring project to connect to the internal
# Prometheus instance.
# Since the internal Prometheus instance is usually a localhost URL, we need
# to allow localhost URLs when the following conditions are true:
# 1. project is the self-monitoring project.
# 2. api_url is the internal Prometheus URL.
with_options presence: true do
validates :api_url, public_url: true, if: ->(object) { object.manual_configuration? && !object.allow_local_api_url? }
validates :api_url, url: true, if: ->(object) { object.manual_configuration? && object.allow_local_api_url? }
end
before_save :synchronize_service_state
after_save :clear_reactive_cache!
after_commit :track_events
after_create_commit :create_default_alerts
scope :preload_project, -> { preload(:project) }
scope :with_clusters_with_cilium, -> { joins(project: [:clusters]).merge(Clusters::Cluster.with_available_cilium) }
def initialize_properties
if properties.nil?
self.properties = {}
end
end
def show_active_box?
false
end
def title
'Prometheus'
end
def description
s_('PrometheusService|Monitor application health with Prometheus metrics and dashboards')
end
def self.to_param
'prometheus'
end
def fields
[
{
type: 'checkbox',
name: 'manual_configuration',
title: s_('PrometheusService|Active'),
help: s_('PrometheusService|Select this checkbox to override the auto configuration settings with your own settings.'),
required: true
},
{
type: 'text',
name: 'api_url',
title: 'API URL',
placeholder: s_('PrometheusService|https://prometheus.example.com/'),
help: s_('PrometheusService|The Prometheus API base URL.'),
required: true
},
{
type: 'text',
name: 'google_iap_audience_client_id',
title: 'Google IAP Audience Client ID',
placeholder: s_('PrometheusService|IAP_CLIENT_ID.apps.googleusercontent.com'),
help: s_('PrometheusService|PrometheusService|The ID of the IAP-secured resource.'),
autocomplete: 'off',
required: false
},
{
type: 'textarea',
name: 'google_iap_service_account_json',
title: 'Google IAP Service Account JSON',
placeholder: s_('PrometheusService|{ "type": "service_account", "project_id": ... }'),
help: s_('PrometheusService|The contents of the credentials.json file of your service account.'),
required: false
}
]
end
# Check we can connect to the Prometheus API
def test(*args)
prometheus_client.ping
{ success: true, result: 'Checked API endpoint' }
rescue Gitlab::PrometheusClient::Error => err
{ success: false, result: err }
end
def prometheus_client
return unless should_return_client?
options = prometheus_client_default_options.merge(
allow_local_requests: allow_local_api_url?
)
if behind_iap?
# Adds the Authorization header
options[:headers] = iap_client.apply({})
end
Gitlab::PrometheusClient.new(api_url, options)
end
def prometheus_available?
return false if template?
return false unless project
project.all_clusters.enabled.eager_load(:integration_prometheus).any? do |cluster|
cluster.integration_prometheus_available?
end
end
def allow_local_api_url?
allow_local_requests_from_web_hooks_and_services? ||
(self_monitoring_project? && internal_prometheus_url?)
end
def configured?
should_return_client?
end
private
def self_monitoring_project?
project && project.id == current_settings.self_monitoring_project_id
end
def internal_prometheus_url?
api_url.present? && api_url == ::Gitlab::Prometheus::Internal.uri
end
def allow_local_requests_from_web_hooks_and_services?
current_settings.allow_local_requests_from_web_hooks_and_services?
end
def should_return_client?
api_url.present? && manual_configuration? && active? && valid?
end
def current_settings
Gitlab::CurrentSettings.current_application_settings
end
def synchronize_service_state
self.active = prometheus_available? || manual_configuration?
true
end
def track_events
if enabled_manual_prometheus?
Gitlab::Tracking.event('cluster:services:prometheus', 'enabled_manual_prometheus')
elsif disabled_manual_prometheus?
Gitlab::Tracking.event('cluster:services:prometheus', 'disabled_manual_prometheus')
end
true
end
def enabled_manual_prometheus?
manual_configuration_changed? && manual_configuration?
end
def disabled_manual_prometheus?
manual_configuration_changed? && !manual_configuration?
end
def create_default_alerts
return unless project_id
::Prometheus::CreateDefaultAlertsWorker.perform_async(project_id)
end
def behind_iap?
manual_configuration? && google_iap_audience_client_id.present? && google_iap_service_account_json.present?
end
def clean_google_iap_service_account
return unless google_iap_service_account_json
google_iap_service_account_json
.then { |json| Gitlab::Json.parse(json) }
.except('token_credential_uri')
end
def iap_client
@iap_client ||= Google::Auth::Credentials
.new(clean_google_iap_service_account, target_audience: google_iap_audience_client_id)
.client
end
end
end
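Several hunks in this commit look the integration up through `find_or_initialize_service(::Integrations::Prometheus.to_param)`. A hedged console sketch of how the relocated class could be exercised, assuming `project` is an existing `Project` record:

```ruby
::Integrations::Prometheus.to_param            # => "prometheus"

integration = project.find_or_initialize_service(::Integrations::Prometheus.to_param)
integration.manual_configuration?              # true when an api_url was configured by hand
integration.configured?                        # wraps should_return_client?
integration.prometheus_client&.ping            # client is nil unless configured
```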

View File

@ -172,25 +172,25 @@ class Project < ApplicationRecord
has_one :flowdock_integration, class_name: 'Integrations::Flowdock'
has_one :hangouts_chat_integration, class_name: 'Integrations::HangoutsChat'
has_one :irker_integration, class_name: 'Integrations::Irker'
has_one :jenkins_service, class_name: 'Integrations::Jenkins'
has_one :jira_service, class_name: 'Integrations::Jira'
has_one :jenkins_integration, class_name: 'Integrations::Jenkins'
has_one :jira_integration, class_name: 'Integrations::Jira'
has_one :mattermost_integration, class_name: 'Integrations::Mattermost'
has_one :mattermost_slash_commands_integration, class_name: 'Integrations::MattermostSlashCommands'
has_one :microsoft_teams_integration, class_name: 'Integrations::MicrosoftTeams'
has_one :mock_ci_integration, class_name: 'Integrations::MockCi'
has_one :mock_monitoring_integration, class_name: 'MockMonitoringService'
has_one :mock_monitoring_integration, class_name: 'Integrations::MockMonitoring'
has_one :packagist_integration, class_name: 'Integrations::Packagist'
has_one :pipelines_email_integration, class_name: 'Integrations::PipelinesEmail'
has_one :pivotaltracker_integration, class_name: 'Integrations::Pivotaltracker'
has_one :prometheus_service, class_name: 'Integrations::Prometheus', inverse_of: :project
has_one :pushover_integration, class_name: 'Integrations::Pushover'
has_one :redmine_service, class_name: 'Integrations::Redmine'
has_one :slack_service, class_name: 'Integrations::Slack'
has_one :slack_slash_commands_service, class_name: 'Integrations::SlackSlashCommands'
has_one :teamcity_service, class_name: 'Integrations::Teamcity'
has_one :redmine_integration, class_name: 'Integrations::Redmine'
has_one :slack_integration, class_name: 'Integrations::Slack'
has_one :slack_slash_commands_integration, class_name: 'Integrations::SlackSlashCommands'
has_one :teamcity_integration, class_name: 'Integrations::Teamcity'
has_one :unify_circuit_service, class_name: 'Integrations::UnifyCircuit'
has_one :webex_teams_service, class_name: 'Integrations::WebexTeams'
has_one :youtrack_service, class_name: 'Integrations::Youtrack'
has_one :prometheus_service, inverse_of: :project
has_one :root_of_fork_network,
foreign_key: 'root_project_id',
@ -542,7 +542,7 @@ class Project < ApplicationRecord
scope :for_milestones, ->(ids) { joins(:milestones).where('milestones.id' => ids).distinct }
scope :with_push, -> { joins(:events).merge(Event.pushed_action) }
scope :with_project_feature, -> { joins('LEFT JOIN project_features ON projects.id = project_features.project_id') }
scope :with_active_jira_services, -> { joins(:integrations).merge(::Integrations::Jira.active) }
scope :with_active_jira_integrations, -> { joins(:integrations).merge(::Integrations::Jira.active) }
scope :with_jira_dvcs_cloud, -> { joins(:feature_usage).merge(ProjectFeatureUsage.with_jira_dvcs_integration_enabled(cloud: true)) }
scope :with_jira_dvcs_server, -> { joins(:feature_usage).merge(ProjectFeatureUsage.with_jira_dvcs_integration_enabled(cloud: false)) }
scope :inc_routes, -> { includes(:route, namespace: :route) }
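The block above only renames which association names callers use; a short sketch, assuming `project` is an existing `Project` record and the underlying `Integrations::*` rows are untouched by the rename:

```ruby
# Renamed has_one associations (the names on the right are removed by this commit):
project.jenkins_integration    # was project.jenkins_service
project.jira_integration       # was project.jira_service
project.redmine_integration    # was project.redmine_service
project.slack_integration      # was project.slack_service
project.teamcity_integration   # was project.teamcity_service

# Still exposed under its old name in this commit:
project.prometheus_service     # Integrations::Prometheus record, or nil
```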

View File

@ -1,23 +0,0 @@
# frozen_string_literal: true
class MockMonitoringService < MonitoringService
def title
'Mock monitoring'
end
def description
'Mock monitoring service'
end
def self.to_param
'mock_monitoring'
end
def metrics(environment)
Gitlab::Json.parse(File.read(Rails.root + 'spec/fixtures/metrics.json'))
end
def can_test?
false
end
end

View File

@ -1,21 +0,0 @@
# frozen_string_literal: true
# Base class for monitoring services
#
# These services integrate with a deployment solution like Prometheus
# to provide additional features for environments.
class MonitoringService < Integration
default_value_for :category, 'monitoring'
def self.supported_events
%w()
end
def can_query?
raise NotImplementedError
end
def query(_, *_)
raise NotImplementedError
end
end

View File

@ -1,203 +0,0 @@
# frozen_string_literal: true
class PrometheusService < MonitoringService
include PrometheusAdapter
# Access to prometheus is directly through the API
prop_accessor :api_url
prop_accessor :google_iap_service_account_json
prop_accessor :google_iap_audience_client_id
boolean_accessor :manual_configuration
# We need to allow the self-monitoring project to connect to the internal
# Prometheus instance.
# Since the internal Prometheus instance is usually a localhost URL, we need
# to allow localhost URLs when the following conditions are true:
# 1. project is the self-monitoring project.
# 2. api_url is the internal Prometheus URL.
with_options presence: true do
validates :api_url, public_url: true, if: ->(object) { object.manual_configuration? && !object.allow_local_api_url? }
validates :api_url, url: true, if: ->(object) { object.manual_configuration? && object.allow_local_api_url? }
end
before_save :synchronize_service_state
after_save :clear_reactive_cache!
after_commit :track_events
after_create_commit :create_default_alerts
scope :preload_project, -> { preload(:project) }
scope :with_clusters_with_cilium, -> { joins(project: [:clusters]).merge(Clusters::Cluster.with_available_cilium) }
def initialize_properties
if properties.nil?
self.properties = {}
end
end
def show_active_box?
false
end
def title
'Prometheus'
end
def description
s_('PrometheusService|Monitor application health with Prometheus metrics and dashboards')
end
def self.to_param
'prometheus'
end
def fields
[
{
type: 'checkbox',
name: 'manual_configuration',
title: s_('PrometheusService|Active'),
help: s_('PrometheusService|Select this checkbox to override the auto configuration settings with your own settings.'),
required: true
},
{
type: 'text',
name: 'api_url',
title: 'API URL',
placeholder: s_('PrometheusService|https://prometheus.example.com/'),
help: s_('PrometheusService|The Prometheus API base URL.'),
required: true
},
{
type: 'text',
name: 'google_iap_audience_client_id',
title: 'Google IAP Audience Client ID',
placeholder: s_('PrometheusService|IAP_CLIENT_ID.apps.googleusercontent.com'),
help: s_('PrometheusService|PrometheusService|The ID of the IAP-secured resource.'),
autocomplete: 'off',
required: false
},
{
type: 'textarea',
name: 'google_iap_service_account_json',
title: 'Google IAP Service Account JSON',
placeholder: s_('PrometheusService|{ "type": "service_account", "project_id": ... }'),
help: s_('PrometheusService|The contents of the credentials.json file of your service account.'),
required: false
}
]
end
# Check we can connect to the Prometheus API
def test(*args)
prometheus_client.ping
{ success: true, result: 'Checked API endpoint' }
rescue Gitlab::PrometheusClient::Error => err
{ success: false, result: err }
end
def prometheus_client
return unless should_return_client?
options = prometheus_client_default_options.merge(
allow_local_requests: allow_local_api_url?
)
if behind_iap?
# Adds the Authorization header
options[:headers] = iap_client.apply({})
end
Gitlab::PrometheusClient.new(api_url, options)
end
def prometheus_available?
return false if template?
return false unless project
project.all_clusters.enabled.eager_load(:integration_prometheus).any? do |cluster|
cluster.integration_prometheus_available?
end
end
def allow_local_api_url?
allow_local_requests_from_web_hooks_and_services? ||
(self_monitoring_project? && internal_prometheus_url?)
end
def configured?
should_return_client?
end
private
def self_monitoring_project?
project && project.id == current_settings.self_monitoring_project_id
end
def internal_prometheus_url?
api_url.present? && api_url == ::Gitlab::Prometheus::Internal.uri
end
def allow_local_requests_from_web_hooks_and_services?
current_settings.allow_local_requests_from_web_hooks_and_services?
end
def should_return_client?
api_url.present? && manual_configuration? && active? && valid?
end
def current_settings
Gitlab::CurrentSettings.current_application_settings
end
def synchronize_service_state
self.active = prometheus_available? || manual_configuration?
true
end
def track_events
if enabled_manual_prometheus?
Gitlab::Tracking.event('cluster:services:prometheus', 'enabled_manual_prometheus')
elsif disabled_manual_prometheus?
Gitlab::Tracking.event('cluster:services:prometheus', 'disabled_manual_prometheus')
end
true
end
def enabled_manual_prometheus?
manual_configuration_changed? && manual_configuration?
end
def disabled_manual_prometheus?
manual_configuration_changed? && !manual_configuration?
end
def create_default_alerts
return unless project_id
Prometheus::CreateDefaultAlertsWorker.perform_async(project_id)
end
def behind_iap?
manual_configuration? && google_iap_audience_client_id.present? && google_iap_service_account_json.present?
end
def clean_google_iap_service_account
return unless google_iap_service_account_json
google_iap_service_account_json
.then { |json| Gitlab::Json.parse(json) }
.except('token_credential_uri')
end
def iap_client
@iap_client ||= Google::Auth::Credentials
.new(clean_google_iap_service_account, target_audience: google_iap_audience_client_id)
.client
end
end

View File

@ -16,6 +16,7 @@ class MergeRequestDiffEntity < Grape::Entity
end
expose :created_at
expose :state
expose :commits_count
expose :latest?, as: :latest

View File

@ -12,15 +12,16 @@ module Ci
return fallback_method.call unless plan_cron&.cron_valid?
now = Time.zone.now
plan_min_run = plan_cron.next_time_from(now)
schedule_next_run = schedule_cron.next_time_from(now)
return schedule_next_run if worker_cron.match?(schedule_next_run) && plan_cron.match?(schedule_next_run)
return schedule_next_run if worker_cron.match?(schedule_next_run) && plan_min_run <= schedule_next_run
plan_next_run = plan_cron.next_time_from(now)
plan_next_run = plan_cron.next_time_from(schedule_next_run)
return plan_next_run if worker_cron.match?(plan_next_run)
worker_next_run = worker_cron.next_time_from(now)
return worker_next_run if plan_cron.match?(worker_next_run)
worker_next_run = worker_cron.next_time_from(schedule_next_run)
return worker_next_run if plan_min_run <= worker_next_run
worker_cron.next_time_from(plan_next_run)
end

View File

@ -7,20 +7,20 @@ module Jira
JIRA_API_VERSION = 2
def initialize(jira_service, params = {})
@project = jira_service&.project
@jira_service = jira_service
def initialize(jira_integration, params = {})
@project = jira_integration&.project
@jira_integration = jira_integration
end
def execute
return ServiceResponse.error(message: _('Jira service not configured.')) unless jira_service&.active?
return ServiceResponse.error(message: _('Jira service not configured.')) unless jira_integration&.active?
request
end
private
attr_reader :jira_service, :project
attr_reader :jira_integration, :project
# We have to add the context_path here because the Jira client is not taking it into account
def base_api_url
@ -37,7 +37,7 @@ module Jira
end
def client
@client ||= jira_service.client
@client ||= jira_integration.client
end
def request

View File

@ -6,8 +6,8 @@ module Jira
class ListService < Base
extend ::Gitlab::Utils::Override
def initialize(jira_service, params = {})
super(jira_service, params)
def initialize(jira_integration, params = {})
super(jira_integration, params)
@query = params[:query]
end

View File

@ -32,9 +32,9 @@ module JiraImport
end
def user_mapper_service_factory
if project.jira_service.data_fields.deployment_server?
if project.jira_integration.data_fields.deployment_server?
ServerUsersMapperService.new(user, project, start_at)
elsif project.jira_service.data_fields.deployment_cloud?
elsif project.jira_integration.data_fields.deployment_cloud?
CloudUsersMapperService.new(user, project, start_at)
else
raise ArgumentError

View File

@ -13,7 +13,7 @@ module JiraImport
def initialize(current_user, project, start_at)
@current_user = current_user
@project = project
@jira_service = project.jira_service
@jira_integration = project.jira_integration
@start_at = start_at
end
@ -29,14 +29,14 @@ module JiraImport
private
attr_reader :current_user, :project, :jira_service, :start_at
attr_reader :current_user, :project, :jira_integration, :start_at
def jira_users
@jira_users ||= client.get(url)
end
def client
@client ||= jira_service.client
@client ||= jira_integration.client
end
def url

View File

@ -4,7 +4,7 @@ require 'prometheus/client/formats/text'
class MetricsService
def prometheus_metrics_text
Prometheus::Client::Formats::Text.marshal_multiprocess(multiprocess_metrics_path)
::Prometheus::Client::Formats::Text.marshal_multiprocess(multiprocess_metrics_path)
end
def metrics_text

View File

@ -193,7 +193,7 @@ module Projects
# Deprecated: https://gitlab.com/gitlab-org/gitlab/-/issues/326665
def create_prometheus_service
service = @project.find_or_initialize_service(::PrometheusService.to_param)
service = @project.find_or_initialize_service(::Integrations::Prometheus.to_param)
# If the service has already been inserted in the database, that
# means it came from a template, and there's nothing more to do.

View File

@ -102,7 +102,7 @@ module Projects
def prometheus_integration_params
return {} unless attrs = params[:prometheus_integration_attributes]
service = project.find_or_initialize_service(::PrometheusService.to_param)
service = project.find_or_initialize_service(::Integrations::Prometheus.to_param)
service.assign_attributes(attrs)
{ prometheus_service_attributes: service.attributes.except(*%w(id project_id created_at updated_at)) }

View File

@ -3,15 +3,14 @@
- billable_users_url = help_page_path('subscriptions/self_managed/index', anchor: 'billable-users')
- billable_users_link_start = '<a href="%{url}" target="_blank" rel="noopener noreferrer nofollow">'.html_safe % { url: billable_users_url }
= render_if_exists 'shared/qrtly_reconciliation_alert'
- if @notices
- @notices.each do |notice|
.js-vue-alert{ 'v-cloak': true, data: { variant: notice[:type],
dismissible: true.to_s } }
= notice[:message].html_safe
- if Gitlab.ee? && display_upcoming_reconciliation_alert?
#js-qrtly-reconciliation-alert{ data: upcoming_reconciliation_hash }
- if @license.present?
.license-panel.gl-mt-5
= render_if_exists 'admin/licenses/summary'

View File

@ -12,6 +12,6 @@
'role-arn' => @aws_role.role_arn,
'instance-types' => @instance_types,
'kubernetes-integration-help-path' => help_page_path('user/project/clusters/index'),
'account-and-external-ids-help-path' => help_page_path('user/project/clusters/add_eks_clusters.md', anchor: 'new-eks-cluster'),
'create-role-arn-help-path' => help_page_path('user/project/clusters/add_eks_clusters.md', anchor: 'new-eks-cluster'),
'account-and-external-ids-help-path' => help_page_path('user/project/clusters/add_eks_clusters.md', anchor: 'create-a-new-certificate-based-eks-cluster'),
'create-role-arn-help-path' => help_page_path('user/project/clusters/add_eks_clusters.md', anchor: 'create-a-new-certificate-based-eks-cluster'),
'external-link-icon' => sprite_icon('external-link') } }

View File

@ -6,6 +6,8 @@
- if show_thanks_for_purchase_banner?
= render_if_exists 'shared/thanks_for_purchase_banner', plan_title: plan_title, quantity: params[:purchased_quantity].to_i
= render_if_exists 'shared/qrtly_reconciliation_alert', group: @group
- if show_invite_banner?(@group)
= content_for :group_invite_members_banner do
.container-fluid.container-limited{ class: "gl-pb-2! gl-pt-6! #{@content_class}" }

View File

@ -19,6 +19,7 @@
= render_if_exists "layouts/header/ee_subscribable_banner"
= render_if_exists "shared/namespace_storage_limit_alert"
= render_if_exists "shared/new_user_signups_cap_reached_alert"
= yield :page_level_alert
= yield :customize_homepage_banner
- unless @hide_breadcrumbs
= render "layouts/nav/breadcrumbs"

View File

@ -1,7 +1,7 @@
.js-jira-import-root{ data: { project_path: @project.full_path,
issues_path: project_issues_path(@project),
jira_integration_path: edit_project_service_path(@project, :jira),
is_jira_configured: @project.jira_service&.active? && @project.jira_service&.valid_connection?.to_s,
is_jira_configured: @project.jira_integration&.configured?.to_s,
in_progress_illustration: image_path('illustrations/export-import.svg'),
project_id: @project.id,
setup_illustration: image_path('illustrations/manual_action.svg') } }

View File

@ -9,7 +9,7 @@
= auto_discovery_link_tag(:atom, safe_params.merge(rss_url_options).to_h, title: "#{@project.name} issues")
.js-jira-issues-import-status{ data: { can_edit: can?(current_user, :admin_project, @project).to_s,
is_jira_configured: @project.jira_service.present?.to_s,
is_jira_configured: @project.jira_integration.present?.to_s,
issues_path: project_issues_path(@project),
project_path: @project.full_path } }

View File

@ -1,6 +1,6 @@
- add_to_breadcrumbs _("Settings"), edit_project_path(@project)
- add_to_breadcrumbs _("Integrations"), project_settings_integrations_path(@project)
- add_to_breadcrumbs "Prometheus", edit_project_service_path(@project, PrometheusService)
- add_to_breadcrumbs "Prometheus", edit_project_service_path(@project, ::Integrations::Prometheus)
- breadcrumb_title s_('Metrics|Edit metric')
- page_title @metric.title, s_('Metrics|Edit metric')
= render 'form', project: @project, metric: @metric

View File

@ -1,6 +1,6 @@
- add_to_breadcrumbs _("Settings"), edit_project_path(@project)
- add_to_breadcrumbs _("Integrations"), project_settings_integrations_path(@project)
- add_to_breadcrumbs "Prometheus", edit_project_service_path(@project, PrometheusService)
- add_to_breadcrumbs "Prometheus", edit_project_service_path(@project, ::Integrations::Prometheus)
- breadcrumb_title s_('Metrics|New metric')
- page_title s_('Metrics|New metric')
= render 'form', project: @project, metric: @metric

View File

@ -6,21 +6,13 @@
= form_tag [:bulk_update, @project, type], method: :post, class: "bulk-update" do
.block.issuable-sidebar-header
.filter-item.inline.update-issues-btn.float-left
= button_tag _('Update all'), class: "gl-button btn update-selected-issues btn-confirm", disabled: true
= button_tag _('Update all'), class: "gl-button btn js-update-selected-issues btn-confirm", disabled: true
= button_tag _('Cancel'), class: "gl-button btn btn-default js-bulk-update-menu-hide float-right"
- if params[:state] != 'merged'
.block
.title
= _('Status')
.filter-item
= dropdown_tag(_("Select status"), options: { toggle_class: "js-issue-status", title: _("Change status"), dropdown_class: "dropdown-menu-status dropdown-menu-selectable", data: { field_name: "update[state_event]", default_label: _("Status") } } ) do
%ul
%li
%a{ href: "#", data: { id: "reopen" } }
= _('Open')
%li
%a{ href: "#", data: { id: "close" } }
= _('Closed')
.js-issue-status
.block
.title
= _('Assignee')

View File

@ -21,7 +21,7 @@ module Projects
private
def create_prometheus_service(project)
service = project.find_or_initialize_service(::PrometheusService.to_param)
service = project.find_or_initialize_service(::Integrations::Prometheus.to_param)
# If the service has already been inserted in the database, that
# means it came from a template, and there's nothing more to do.

View File

@ -15,7 +15,7 @@ module Prometheus
return unless project
result = Prometheus::CreateDefaultAlertsService.new(project: project).execute
result = ::Prometheus::CreateDefaultAlertsService.new(project: project).execute
log_info(result.message) if result.error?
end

View File

@ -1,8 +0,0 @@
---
name: security_ci_lint_authorization
introduced_by_url: https://gitlab.com/gitlab-org/security/gitlab/-/merge_requests/1279
rollout_issue_url: https://gitlab.com/gitlab-org/gitlab/-/issues/326708
milestone: '14.0'
type: development
group: group::pipeline authoring
default_enabled: false

View File

@ -15,14 +15,14 @@ def prometheus_default_multiproc_dir
end
end
Prometheus::Client.configure do |config|
::Prometheus::Client.configure do |config|
config.logger = Gitlab::AppLogger
config.initial_mmap_file_size = 4 * 1024
config.multiprocess_files_dir = ENV['prometheus_multiproc_dir'] || prometheus_default_multiproc_dir
config.pid_provider = Prometheus::PidProvider.method(:worker_id)
config.pid_provider = ::Prometheus::PidProvider.method(:worker_id)
end
Gitlab::Application.configure do |config|
@ -43,7 +43,7 @@ if !Rails.env.test? && Gitlab::Metrics.prometheus_metrics_enabled?
# Thus, we order these events to run `reinitialize_on_pid_change` with `force: true` first.
Gitlab::Cluster::LifecycleEvents.on_master_start do
# Ensure that stale Prometheus metrics don't accumulate over time
Prometheus::CleanupMultiprocDirService.new.execute
::Prometheus::CleanupMultiprocDirService.new.execute
::Prometheus::Client.reinitialize_on_pid_change(force: true)
@ -64,7 +64,7 @@ if !Rails.env.test? && Gitlab::Metrics.prometheus_metrics_enabled?
end
Gitlab::Cluster::LifecycleEvents.on_worker_start do
defined?(::Prometheus::Client.reinitialize_on_pid_change) && Prometheus::Client.reinitialize_on_pid_change
defined?(::Prometheus::Client.reinitialize_on_pid_change) && ::Prometheus::Client.reinitialize_on_pid_change
Gitlab::Metrics::Samplers::RubySampler.initialize_instance.start
Gitlab::Metrics::Samplers::DatabaseSampler.initialize_instance.start

View File

@ -53,17 +53,16 @@ helpful:
you can create an Auditor user and then share the credentials with the users
to whom you want to grant access.
## Adding an Auditor user
## Add an Auditor user
To create a new Auditor user:
To create an Auditor user:
1. Create a new user or edit an existing one by navigating to
**Admin Area > Users**. The option of the access level is located in
the 'Access' section.
![Admin Area Form](img/auditor_access_form.png)
1. Select **Save changes** or **Create user** for the changes to take effect.
1. On the top bar, select **Menu >** **{admin}** **Admin**.
1. On the left sidebar, select **Overview > Users**.
1. Create a new user or edit an existing one, and in the **Access** section
select Auditor.
1. Select **Create user** or **Save changes**, depending on whether you created
a new user or edited an existing one.
To revoke Auditor permissions from a user, make them a regular user by
following the previous steps.

View File

@ -58,19 +58,25 @@ Feature.enable('geo_repository_verification')
## Repository verification
Go to the **Admin Area > Geo** dashboard on the **primary** node and expand
the **Verification information** section for that node to view automatic checksumming
status for each data type. Successes are shown in green, pending work
in gray, and failures in red.
On the **primary** node:
![Verification status](img/verification_status_primary_v14_0.png)
1. On the top bar, select **Menu >** **{admin}** **Admin**.
1. On the left sidebar, select **Geo > Nodes**.
1. Expand the **Verification information** tab for that node to view automatic checksumming
status for repositories and wikis. Successes are shown in green, pending work
in gray, and failures in red.
Go to the **Admin Area > Geo** dashboard on the **secondary** node and expand
the **Verification information** section for that node to view automatic verification
status for each data type. As with checksumming, successes are shown in
green, pending work in gray, and failures in red.
![Verification status](img/verification_status_primary_v14_0.png)
![Verification status](img/verification_status_secondary_v14_0.png)
On the **secondary** node:
1. On the top bar, select **Menu >** **{admin}** **Admin**.
1. On the left sidebar, select **Geo > Nodes**.
1. Expand the **Verification information** tab for that node to view automatic checksumming
status for repositories and wikis. Successes are shown in green, pending work
in gray, and failures in red.
![Verification status](img/verification_status_secondary_v14_0.png)
## Using checksums to compare Geo nodes
@ -92,11 +98,14 @@ data. The default and recommended re-verification interval is 7 days, though
an interval as short as 1 day can be set. Shorter intervals reduce risk but
increase load and vice versa.
Go to the **Admin Area > Geo** dashboard on the **primary** node, and
click the **Edit** button for the **primary** node to customize the minimum
re-verification interval:
On the **primary** node:
![Re-verification interval](img/reverification-interval.png)
1. On the top bar, select **Menu >** **{admin}** **Admin**.
1. On the left sidebar, select **Geo > Nodes**.
1. Select **Edit** for the **primary** node to customize the minimum
re-verification interval:
![Re-verification interval](img/reverification-interval.png)
The automatic background re-verification is enabled by default, but you can
disable if you need. Run the following commands in a Rails console on the
@ -141,17 +150,19 @@ sudo gitlab-rake geo:verification:wiki:reset
If the **primary** and **secondary** nodes have a checksum verification mismatch, the cause may not be apparent. To find the cause of a checksum mismatch:
1. Go to the **Admin Area > Overview > Projects** dashboard on the **primary** node, find the
project that you want to check the checksum differences and click on the
**Edit** button:
![Projects dashboard](img/checksum-differences-admin-projects.png)
1. On the **primary** node:
1. On the top bar, select **Menu >** **{admin}** **Admin**.
1. On the left sidebar, select **Overview > Projects**.
1. Find the project whose checksum differences you want to check and
select its name.
1. On the project administration page get the **Gitaly storage name**,
and **Gitaly relative path**.
1. On the project administration page get the **Gitaly storage name**, and **Gitaly relative path**:
![Project administration page](img/checksum-differences-admin-project-page.png)
![Project administration page](img/checksum-differences-admin-project-page.png)
1. Go to the project's repository directory on both **primary** and **secondary** nodes
(the path is usually `/var/opt/gitlab/git-data/repositories`). Note that if `git_data_dirs`
is customized, check the directory layout on your server to be sure.
is customized, check the directory layout on your server to be sure:
```shell
cd /var/opt/gitlab/git-data/repositories

Binary file not shown.


View File

@ -109,13 +109,16 @@ The maintenance window won't end until Geo replication and verification is
completely finished. To keep the window as short as possible, you should
ensure these processes are as close to 100% as possible during active use.
Go to the **Admin Area > Geo** dashboard on the **secondary** node to
review status. Replicated objects (shown in green) should be close to 100%,
and there should be no failures (shown in red). If a large proportion of
objects aren't yet replicated (shown in gray), consider giving the node more
time to complete.
On the **secondary** node:
![Replication status](../replication/img/geo_node_dashboard_v14_0.png)
1. On the top bar, select **Menu >** **{admin}** **Admin**.
1. On the left sidebar, select **Geo > Nodes**.
Replicated objects (shown in green) should be close to 100%,
and there should be no failures (shown in red). If a large proportion of
objects aren't yet replicated (shown in gray), consider giving the node more
time to complete.
![Replication status](../replication/img/geo_node_dashboard_v14_0.png)
If any objects are failing to replicate, this should be investigated before
scheduling the maintenance window. Following a planned failover, anything that
@ -134,23 +137,26 @@ This [content was moved to another location](background_verification.md).
### Notify users of scheduled maintenance
On the **primary** node, navigate to **Admin Area > Messages**, add a broadcast
message. You can check under **Admin Area > Geo** to estimate how long it
takes to finish syncing. An example message would be:
On the **primary** node:
> A scheduled maintenance takes place at XX:XX UTC. We expect it to take
> less than 1 hour.
1. On the top bar, select **Menu >** **{admin}** **Admin**.
1. On the left sidebar, select **Messages**.
1. Add a message notifying users of the maintenance window.
You can check under **Geo > Nodes** to estimate how long it
takes to finish syncing.
1. Select **Add broadcast message**.
## Prevent updates to the **primary** node
To ensure that all data is replicated to a secondary site, updates (write requests) need to
be disabled on the primary site:
be disabled on the **primary** site:
1. Enable [maintenance mode](../../maintenance_mode/index.md).
1. Disable non-Geo periodic background jobs on the **primary** node by navigating
to **Admin Area > Monitoring > Background Jobs > Cron**, pressing `Disable All`,
and then pressing `Enable` for the `geo_sidekiq_cron_config_worker` cron job.
1. Enable [maintenance mode](../../maintenance_mode/index.md) on the **primary** node.
1. On the top bar, select **Menu >** **{admin}** **Admin**.
1. On the left sidebar, select **Monitoring > Background Jobs**.
1. On the Sidekiq dashboard, select **Cron**.
1. Select `Disable All` to disable non-Geo periodic background jobs.
1. Select `Enable` for the `geo_sidekiq_cron_config_worker` cron job.
This job re-enables several other cron jobs that are essential for planned
failover to complete successfully.
@ -158,23 +164,28 @@ be disabled on the primary site:
1. If you are manually replicating any data not managed by Geo, trigger the
final replication process now.
1. On the **primary** node, navigate to **Admin Area > Monitoring > Background Jobs > Queues**
and wait for all queues except those with `geo` in the name to drop to 0.
These queues contain work that has been submitted by your users; failing over
before it is completed causes the work to be lost.
1. On the **primary** node, navigate to **Admin Area > Geo** and wait for the
following conditions to be true of the **secondary** node you are failing over to:
1. On the **primary** node:
1. On the top bar, select **Menu >** **{admin}** **Admin**.
1. On the left sidebar, select **Monitoring > Background Jobs**.
1. On the Sidekiq dashboard, select **Queues**, and wait for all queues except
those with `geo` in the name to drop to 0.
These queues contain work that has been submitted by your users; failing over
before it is completed causes the work to be lost.
1. On the left sidebar, select **Geo > Nodes** and wait for the
following conditions to be true of the **secondary** node you are failing over to:
- All replication meters to each 100% replicated, 0% failures.
- All verification meters reach 100% verified, 0% failures.
- Database replication lag is 0ms.
- The Geo log cursor is up to date (0 events behind).
- All replication meters reach 100% replicated, 0% failures.
- All verification meters reach 100% verified, 0% failures.
- Database replication lag is 0ms.
- The Geo log cursor is up to date (0 events behind).
1. On the **secondary** node, navigate to **Admin Area > Monitoring > Background Jobs > Queues**
and wait for all the `geo` queues to drop to 0 queued and 0 running jobs.
1. On the **secondary** node, use [these instructions](../../raketasks/check.md)
to verify the integrity of CI artifacts, LFS objects, and uploads in file
storage.
1. On the **secondary** node:
1. On the top bar, select **Menu >** **{admin}** **Admin**.
1. On the left sidebar, select **Monitoring > Background Jobs**.
1. On the Sidekiq dashboard, select **Queues**, and wait for all the `geo`
queues to drop to 0 queued and 0 running jobs.
1. [Run an integrity check](../../raketasks/check.md) to verify the integrity
of CI artifacts, LFS objects, and uploads in file storage.
At this point, your **secondary** node contains an up-to-date copy of everything the
**primary** node has, meaning nothing was lost when you fail over.

View File

@ -63,13 +63,16 @@ Before following any of those steps, make sure you have `root` access to the
**secondary** to promote it, since there isn't provided an automated way to
promote a Geo replica and perform a failover.
On the **secondary** node, navigate to the **Admin Area > Geo** dashboard to
review its status. Replicated objects (shown in green) should be close to 100%,
and there should be no failures (shown in red). If a large proportion of
objects aren't yet replicated (shown in gray), consider giving the node more
time to complete.
On the **secondary** node:
![Replication status](../../replication/img/geo_node_dashboard_v14_0.png)
1. On the top bar, select **Menu >** **{admin}** **Admin**.
1. On the left sidebar, select **Geo > Nodes** to see its status.
Replicated objects (shown in green) should be close to 100%,
and there should be no failures (shown in red). If a large proportion of
objects aren't yet replicated (shown in gray), consider giving the node more
time to complete.
![Replication status](../../replication/img/geo_node_dashboard_v14_0.png)
If any objects are failing to replicate, this should be investigated before
scheduling the maintenance window. After a planned failover, anything that
@ -126,11 +129,14 @@ follow these steps to avoid unnecessary data loss:
existing Git repository with an SSH remote URL. The server should refuse
connection.
1. On the **primary** node, disable non-Geo periodic background jobs by navigating
to **Admin Area > Monitoring > Background Jobs > Cron**, clicking `Disable All`,
and then clicking `Enable` for the `geo_sidekiq_cron_config_worker` cron job.
This job will re-enable several other cron jobs that are essential for planned
failover to complete successfully.
1. On the **primary** node:
1. On the top bar, select **Menu >** **{admin}** **Admin**.
1. On the left sidebar, select **Monitoring > Background Jobs**.
1. On the Sidekiq dashboard, select **Cron**.
1. Select `Disable All` to disable any non-Geo periodic background jobs.
1. Select `Enable` for the `geo_sidekiq_cron_config_worker` cron job.
This job will re-enable several other cron jobs that are essential for planned
failover to complete successfully.
1. Finish replicating and verifying all data:
@ -141,22 +147,28 @@ follow these steps to avoid unnecessary data loss:
1. If you are manually replicating any
[data not managed by Geo](../../replication/datatypes.md#limitations-on-replicationverification),
trigger the final replication process now.
1. On the **primary** node, navigate to **Admin Area > Monitoring > Background Jobs > Queues**
and wait for all queues except those with `geo` in the name to drop to 0.
These queues contain work that has been submitted by your users; failing over
before it is completed will cause the work to be lost.
1. On the **primary** node, navigate to **Admin Area > Geo** and wait for the
following conditions to be true of the **secondary** node you are failing over to:
- All replication meters to each 100% replicated, 0% failures.
- All verification meters reach 100% verified, 0% failures.
- Database replication lag is 0ms.
- The Geo log cursor is up to date (0 events behind).
1. On the **primary** node:
1. On the top bar, select **Menu >** **{admin}** **Admin**.
1. On the left sidebar, select **Monitoring > Background Jobs**.
1. On the Sidekiq dashboard, select **Queues**, and wait for all queues except
those with `geo` in the name to drop to 0.
These queues contain work that has been submitted by your users; failing over
before it is completed causes the work to be lost.
1. On the left sidebar, select **Geo > Nodes** and wait for the
following conditions to be true of the **secondary** node you are failing over to:
1. On the **secondary** node, navigate to **Admin Area > Monitoring > Background Jobs > Queues**
and wait for all the `geo` queues to drop to 0 queued and 0 running jobs.
1. On the **secondary** node, use [these instructions](../../../raketasks/check.md)
to verify the integrity of CI artifacts, LFS objects, and uploads in file
storage.
- All replication meters reach 100% replicated, 0% failures.
- All verification meters reach 100% verified, 0% failures.
- Database replication lag is 0ms.
- The Geo log cursor is up to date (0 events behind).
1. On the **secondary** node:
1. On the top bar, select **Menu >** **{admin}** **Admin**.
1. On the left sidebar, select **Monitoring > Background Jobs**.
1. On the Sidekiq dashboard, select **Queues**, and wait for all the `geo`
queues to drop to 0 queued and 0 running jobs.
1. [Run an integrity check](../../../raketasks/check.md) to verify the integrity
of CI artifacts, LFS objects, and uploads in file storage.
At this point, your **secondary** node will contain an up-to-date copy of everything the
**primary** node has, meaning nothing will be lost when you fail over.

View File

@ -114,11 +114,14 @@ follow these steps to avoid unnecessary data loss:
existing Git repository with an SSH remote URL. The server should refuse
connection.
1. On the **primary** node, disable non-Geo periodic background jobs by navigating
to **Admin Area > Monitoring > Background Jobs > Cron**, clicking `Disable All`,
and then clicking `Enable` for the `geo_sidekiq_cron_config_worker` cron job.
This job will re-enable several other cron jobs that are essential for planned
failover to complete successfully.
1. On the **primary** node:
1. On the top bar, select **Menu >** **{admin}** **Admin**.
1. On the left sidebar, select **Monitoring > Background Jobs**.
1. On the Sidekiq dashboard, select **Cron**.
1. Select `Disable All` to disable any non-Geo periodic background jobs.
1. Select `Enable` for the `geo_sidekiq_cron_config_worker` cron job.
This job will re-enable several other cron jobs that are essential for planned
failover to complete successfully.
1. Finish replicating and verifying all data:
@ -129,22 +132,28 @@ follow these steps to avoid unnecessary data loss:
1. If you are manually replicating any
[data not managed by Geo](../../replication/datatypes.md#limitations-on-replicationverification),
trigger the final replication process now.
1. On the **primary** node, navigate to **Admin Area > Monitoring > Background Jobs > Queues**
and wait for all queues except those with `geo` in the name to drop to 0.
These queues contain work that has been submitted by your users; failing over
before it is completed will cause the work to be lost.
1. On the **primary** node, navigate to **Admin Area > Geo** and wait for the
following conditions to be true of the **secondary** node you are failing over to:
- All replication meters to each 100% replicated, 0% failures.
- All verification meters reach 100% verified, 0% failures.
- Database replication lag is 0ms.
- The Geo log cursor is up to date (0 events behind).
1. On the **primary** node:
1. On the top bar, select **Menu >** **{admin}** **Admin**.
1. On the left sidebar, select **Monitoring > Background Jobs**.
1. On the Sidekiq dashboard, select **Queues**, and wait for all queues except
those with `geo` in the name to drop to 0.
These queues contain work that has been submitted by your users; failing over
before it is completed causes the work to be lost.
1. On the left sidebar, select **Geo > Nodes** and wait for the
following conditions to be true of the **secondary** node you are failing over to:
1. On the **secondary** node, navigate to **Admin Area > Monitoring > Background Jobs > Queues**
and wait for all the `geo` queues to drop to 0 queued and 0 running jobs.
1. On the **secondary** node, use [these instructions](../../../raketasks/check.md)
to verify the integrity of CI artifacts, LFS objects, and uploads in file
storage.
- All replication meters reach 100% replicated, 0% failures.
- All verification meters reach 100% verified, 0% failures.
- Database replication lag is 0ms.
- The Geo log cursor is up to date (0 events behind).
1. On the **secondary** node:
1. On the top bar, select **Menu >** **{admin}** **Admin**.
1. On the left sidebar, select **Monitoring > Background Jobs**.
1. On the Sidekiq dashboard, select **Queues**, and wait for all the `geo`
queues to drop to 0 queued and 0 running jobs.
1. [Run an integrity check](../../../raketasks/check.md) to verify the integrity
of CI artifacts, LFS objects, and uploads in file storage.
At this point, your **secondary** node will contain an up-to-date copy of everything the
**primary** node has, meaning nothing will be lost when you fail over.

View File

@ -196,9 +196,9 @@ keys must be manually replicated to the **secondary** node.
gitlab-ctl reconfigure
```
1. Visit the **primary** node's **Admin Area > Geo**
(`/admin/geo/nodes`) in your browser.
1. Click the **New node** button.
1. On the top bar, select **Menu >** **{admin}** **Admin**.
1. On the left sidebar, select **Geo > Nodes**.
1. Select **New node**.
![Add secondary node](img/adding_a_secondary_node_v13_3.png)
1. Fill in **Name** with the `gitlab_rails['geo_node_name']` in
`/etc/gitlab/gitlab.rb`. These values must always match *exactly*, character
@ -209,7 +209,7 @@ keys must be manually replicated to the **secondary** node.
1. Optionally, choose which groups or storage shards should be replicated by the
**secondary** node. Leave blank to replicate all. Read more in
[selective synchronization](#selective-synchronization).
1. Click the **Add node** button to add the **secondary** node.
1. Select **Add node** to add the **secondary** node.
1. SSH into your GitLab **secondary** server and restart the services:
```shell
@ -252,18 +252,22 @@ on the **secondary** node.
Geo synchronizes repositories over HTTP/HTTPS, and therefore requires this clone
method to be enabled. This is enabled by default, but if converting an existing node to Geo it should be checked:
1. Go to **Admin Area > Settings** (`/admin/application_settings/general`) on the **primary** node.
1. Expand "Visibility and access controls".
On the **primary** node:
1. On the top bar, select **Menu >** **{admin}** **Admin**.
1. On the left sidebar, select **Settings > General**.
1. Expand **Visibility and access controls**.
1. Ensure "Enabled Git access protocols" is set to either "Both SSH and HTTP(S)" or "Only HTTP(S)".
### Step 6. Verify proper functioning of the **secondary** node
Your **secondary** node is now configured!
You can sign in to the **secondary** node with the same credentials you used with
the **primary** node. After you sign in:
You can sign in to the _secondary_ node with the same credentials you used with
the _primary_ node. Visit the _secondary_ node's **Admin Area > Geo**
(`/admin/geo/nodes`) in your browser to determine if it's correctly identified
as a _secondary_ Geo node, and if Geo is enabled.
1. On the top bar, select **Menu >** **{admin}** **Admin**.
1. On the left sidebar, select **Geo > Nodes**.
1. Verify that it's correctly identified as a **secondary** Geo node, and that
Geo is enabled.
The initial replication, or 'backfill', is probably still in progress. You
can monitor the synchronization process on each Geo node from the **primary**

View File

@ -33,9 +33,12 @@ to do that.
## Remove the primary site from the UI
1. Go to **Admin Area > Geo** (`/admin/geo/nodes`).
1. Click the **Remove** button for the **primary** node.
1. Confirm by clicking **Remove** when the prompt appears.
To remove the **primary** site:
1. On the top bar, select **Menu >** **{admin}** **Admin**.
1. On the left sidebar, select **Geo > Nodes**.
1. Select **Remove** for the **primary** node.
1. Confirm by selecting **Remove** when the prompt appears.
## Remove secondary replication slots

View File

@ -127,7 +127,10 @@ For each application and Sidekiq node on the **secondary** site:
### Verify replication
To verify Container Registry replication is working, go to **Admin Area > Geo**
(`/admin/geo/nodes`) on the **secondary** site.
The initial replication, or "backfill", is probably still in progress.
To verify Container Registry replication is working, on the **secondary** site:
1. On the top bar, select **Menu >** **{admin}** **Admin**.
1. On the left sidebar, select **Geo > Nodes**.
The initial replication, or "backfill", is probably still in progress.
You can monitor the synchronization process on each Geo site from the **primary** site's **Geo Nodes** dashboard in your browser.

View File

@ -21,7 +21,7 @@ To have:
[Read more about using object storage with GitLab](../../object_storage.md).
## Enabling GitLab managed object storage replication
## Enabling GitLab-managed object storage replication
> [Introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/10586) in GitLab 12.4.
@ -31,10 +31,11 @@ This is a [**beta** feature](https://about.gitlab.com/handbook/product/#beta) an
**Secondary** sites can replicate files stored on the **primary** site regardless of
whether they are stored on the local file system or in object storage.
To enable GitLab replication, you must:
To enable GitLab replication:
1. Go to **Admin Area > Geo**.
1. Press **Edit** on the **secondary** site.
1. On the top bar, select **Menu >** **{admin}** **Admin**.
1. On the left sidebar, select **Geo > Nodes**.
1. Select **Edit** on the **secondary** site.
1. In the **Synchronization Settings** section, find the **Allow this secondary node to replicate content on Object Storage**
checkbox to enable it.

View File

@ -9,7 +9,8 @@ type: howto
**Secondary** sites can be removed from the Geo cluster using the Geo administration page of the **primary** site. To remove a **secondary** site:
1. Go to **Admin Area > Geo** (`/admin/geo/nodes`).
1. On the top bar, select **Menu >** **{admin}** **Admin**.
1. On the left sidebar, select **Geo > Nodes**.
1. Select the **Remove** button for the **secondary** site you want to remove.
1. Confirm by selecting **Remove** when the prompt appears.

View File

@ -25,8 +25,12 @@ Before attempting more advanced troubleshooting:
### Check the health of the **secondary** node
Visit the **primary** node's **Admin Area > Geo** (`/admin/geo/nodes`) in
your browser. We perform the following health checks on each **secondary** node
On the **primary** node:
1. On the top bar, select **Menu >** **{admin}** **Admin**.
1. On the left sidebar, select **Geo > Nodes**.
We perform the following health checks on each **secondary** node
to help identify if something is wrong:
- Is the node running?
@ -129,7 +133,8 @@ Geo finds the current machine's Geo node name in `/etc/gitlab/gitlab.rb` by:
- Using the `gitlab_rails['geo_node_name']` setting.
- If that is not defined, using the `external_url` setting.
This name is used to look up the node with the same **Name** in **Admin Area > Geo**.
This name is used to look up the node with the same **Name** in the **Geo Nodes**
dashboard.
To check if the current machine has a node name that matches a node in the
database, run the check task:
@ -739,8 +744,11 @@ If you are able to log in to the **primary** node, but you receive this error
when attempting to log into a **secondary**, you should check that the Geo
node's URL matches its external URL.
1. On the primary, visit **Admin Area > Geo**.
1. Find the affected **secondary** and click **Edit**.
On the **primary** node:
1. On the top bar, select **Menu >** **{admin}** **Admin**.
1. On the left sidebar, select **Geo > Nodes**.
1. Find the affected **secondary** site and select **Edit**.
1. Ensure the **URL** field matches the value found in `/etc/gitlab/gitlab.rb`
in `external_url "https://gitlab.example.com"` on the frontend server(s) of
the **secondary** node.

View File

@ -7,20 +7,28 @@ type: howto
# Tuning Geo **(PREMIUM SELF)**
## Changing the sync/verification capacity values
You can limit the number of concurrent operations the nodes can run
in the background.
In **Admin Area > Geo** (`/admin/geo/nodes`),
there are several variables that can be tuned to improve performance of Geo:
## Changing the sync/verification concurrency values
- Repository sync capacity
- File sync capacity
- Container repositories sync capacity
- Verification capacity
On the **primary** site:
Increasing capacity values will increase the number of jobs that are scheduled.
1. On the top bar, select **Menu >** **{admin}** **Admin**.
1. On the left sidebar, select **Geo > Nodes**.
1. Select **Edit** for the secondary node you want to tune.
1. Under **Tuning settings**, there are several variables that can be tuned to
improve the performance of Geo:
- Repository synchronization concurrency limit
- File synchronization concurrency limit
- Container repositories synchronization concurrency limit
- Verification concurrency limit
Increasing the concurrency values will increase the number of jobs that are scheduled.
However, this may not lead to more downloads in parallel unless the number of
available Sidekiq threads is also increased. For example, if repository sync
capacity is increased from 25 to 50, you may also want to increase the number
available Sidekiq threads is also increased. For example, if repository synchronization
concurrency is increased from 25 to 50, you may also want to increase the number
of Sidekiq threads from 25 to 50. See the
[Sidekiq concurrency documentation](../../operations/extra_sidekiq_processes.md#number-of-threads)
for more details.
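As a rough illustration of keeping the two values in step, here is a sketch for an
Omnibus-based **secondary** node; the `50` is only an example matching a synchronization
concurrency limit of 50, not a recommendation:

```shell
# Hypothetical example (Omnibus GitLab): raise Sidekiq threads to match a
# repository synchronization concurrency limit of 50.
# In /etc/gitlab/gitlab.rb, set:
#   sidekiq['max_concurrency'] = 50
# then apply the change:
sudo gitlab-ctl reconfigure
```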

View File

@ -9,25 +9,27 @@ info: To determine the technical writer assigned to the Stage/Group associated w
GitLab supports and automates housekeeping tasks within your current repository,
such as compressing file revisions and removing unreachable objects.
## Automatic housekeeping
## Configure housekeeping
GitLab automatically runs `git gc` and `git repack` on repositories
after Git pushes. You can change how often this happens or turn it off in
**Admin Area > Settings > Repository** (`/admin/application_settings/repository`).
after Git pushes.
## Manual housekeeping
You can change how often this happens or turn it off:
The housekeeping function runs `repack` or `gc` depending on the
**Housekeeping** settings configured in **Admin Area > Settings > Repository**.
1. On the top bar, select **Menu >** **{admin}** **Admin**.
1. On the left sidebar, select **Settings > Repository**.
1. Expand **Repository maintenance**.
1. Configure the Housekeeping options.
1. Select **Save changes**.
For example in the following scenario a `git repack -d` will be executed:
For example, in the following scenario a `git repack -d` will be executed:
- Project: pushes since GC counter (`pushes_since_gc`) = `10`
- Git GC period = `200`
- Full repack period = `50`
When the `pushes_since_gc` value is 50 a `repack -A -d --pack-kept-objects` runs, similarly when
the `pushes_since_gc` value is 200 a `git gc` runs.
the `pushes_since_gc` value is 200, a `git gc` runs:
- `git gc` ([man page](https://mirrors.edge.kernel.org/pub/software/scm/git/docs/git-gc.html)) runs a number of housekeeping tasks,
such as compressing file revisions (to reduce disk space and increase performance)
@ -38,12 +40,6 @@ the `pushes_since_gc` value is 200 a `git gc` runs.
Housekeeping also [removes unreferenced LFS files](../raketasks/cleanup.md#remove-unreferenced-lfs-files)
from your project on the same schedule as the `git gc` operation, freeing up storage space for your project.
To manually start the housekeeping process:
1. In your project, go to **Settings > General**.
1. Expand the **Advanced** section.
1. Select **Run housekeeping**.
## How housekeeping handles pool repositories
Housekeeping for pool repositories is handled differently from standard repositories.

Binary file not shown.

Before

Width:  |  Height:  |  Size: 12 KiB

View File

@ -21,10 +21,11 @@ Maintenance Mode allows most external actions that do not change internal state.
There are three ways to enable Maintenance Mode as an administrator:
- **Web UI**:
1. Go to **Admin Area > Settings > General**, expand **Maintenance Mode**, and toggle **Enable Maintenance Mode**.
1. On the top bar, select **Menu >** **{admin}** **Admin**.
1. On the left sidebar, select **Settings > General**.
1. Expand **Maintenance Mode**, and toggle **Enable Maintenance Mode**.
You can optionally add a message for the banner as well.
1. Click **Save** for the changes to take effect.
1. Select **Save changes**.
- **API**:
@ -44,9 +45,11 @@ There are three ways to enable Maintenance Mode as an administrator:
There are three ways to disable Maintenance Mode:
- **Web UI**:
1. Go to **Admin Area > Settings > General**, expand **Maintenance Mode**, and toggle **Enable Maintenance Mode**.
1. Click **Save** for the changes to take effect.
1. On the top bar, select **Menu >** **{admin}** **Admin**.
1. On the left sidebar, select **Settings > General**.
1. Expand **Maintenance Mode**, and toggle **Enable Maintenance Mode**.
You can optionally add a message for the banner as well.
1. Select **Save changes**.
- **API**:
@ -166,7 +169,10 @@ Background jobs (cron jobs, Sidekiq) continue running as is, because background
[During a planned Geo failover](../geo/disaster_recovery/planned_failover.md#prevent-updates-to-the-primary-node),
it is recommended that you disable all cron jobs except for those related to Geo.
You can monitor queues and disable jobs in **Admin Area > Monitoring > Background Jobs**.
To monitor queues and disable jobs:
1. On the top bar, select **Menu >** **{admin}** **Admin**.
1. On the left sidebar, select **Monitoring > Background Jobs**.
### Incident management

View File

@ -87,10 +87,10 @@ To start multiple processes:
sudo gitlab-ctl reconfigure
```
After the extra Sidekiq processes are added, navigate to
**Admin Area > Monitoring > Background Jobs** (`/admin/background_jobs`) in GitLab.
To view the Sidekiq processes in GitLab:
![Multiple Sidekiq processes](img/sidekiq-cluster.png)
1. On the top bar, select **Menu >** **{admin}** **Admin**.
1. On the left sidebar, select **Monitoring > Background Jobs**.
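You can also confirm the extra processes from the command line. A quick sketch, assuming
an Omnibus GitLab node:

```shell
# List the Sidekiq service status and the running worker processes.
sudo gitlab-ctl status sidekiq
ps -ef | grep '[s]idekiq'
```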
## Negate settings

View File

@ -104,11 +104,13 @@ In the case of lookup failures (which are common), the `authorized_keys`
file is still scanned. So Git SSH performance would still be slow for many
users as long as a large file exists.
You can disable any more writes to the `authorized_keys` file by unchecking
`Write to "authorized_keys" file` in the **Admin Area > Settings > Network > Performance optimization** of your GitLab
installation.
To disable further writes to the `authorized_keys` file:
![Write to authorized keys setting](img/write_to_authorized_keys_setting.png)
1. On the top bar, select **Menu >** **{admin}** **Admin**.
1. On the left sidebar, select **Settings > Network**.
1. Expand **Performance optimization**.
1. Clear the **Write to "authorized_keys" file** checkbox.
1. Select **Save changes**.
Again, confirm that SSH is working by removing your user's SSH key in the UI,
adding a new one, and attempting to pull a repository.
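A minimal sketch of that verification from a workstation, assuming `gitlab.example.com`
is your instance and `group/project` is any repository you can access:

```shell
# SSH should still authenticate via database lookup and print a welcome message.
ssh -T git@gitlab.example.com
# Cloning or pulling over SSH should also succeed.
git clone git@gitlab.example.com:group/project.git
```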

Binary file not shown.

Before

Width:  |  Height:  |  Size: 22 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 28 KiB

View File

@ -800,7 +800,7 @@ To explicitly enable API source:
1. [Reconfigure GitLab](../restart_gitlab.md#omnibus-gitlab-reconfigure) for the changes to take effect.
Or if you want to use legacy confiration source you can:
Or if you want to use legacy configuration source you can:
1. Add the following to your `/etc/gitlab/gitlab.rb` file:

View File

@ -9,23 +9,24 @@ info: To determine the technical writer assigned to the Stage/Group associated w
The GitLab UI polls for updates for different resources (issue notes, issue
titles, pipeline statuses, etc.) on a schedule appropriate to the resource.
In **[Admin Area](../user/admin_area/index.md) > Settings > Preferences > Real-time features**,
you can configure "Polling
interval multiplier". This multiplier is applied to all resources at once,
and decimal values are supported. For the sake of the examples below, we will
say that issue notes poll every 2 seconds, and issue titles poll every 5
seconds; these are _not_ the actual values.
To configure the polling interval multiplier:
- 1 is the default, and recommended for most installations. (Issue notes poll
every 2 seconds, and issue titles poll every 5 seconds.)
- 0 disables UI polling completely. (On the next poll, clients stop
polling for updates.)
- A value greater than 1 slows polling down. If you see issues with
database load from lots of clients polling for updates, increasing the
multiplier from 1 can be a good compromise, rather than disabling polling
completely. (For example: If this is set to 2, then issue notes poll every 4
seconds, and issue titles poll every 10 seconds.)
- A value between 0 and 1 makes the UI poll more frequently (so updates
show in other sessions faster), but is **not recommended**. 1 should be
fast enough. (For example, if this is set to 0.5, then issue notes poll every
1 second, and issue titles poll every 2.5 seconds.)
1. On the top bar, select **Menu >** **{admin}** **Admin**.
1. On the left sidebar, select **Settings > Preferences**.
1. Expand **Real-time features**.
1. Set a value for the polling interval multiplier. This multiplier is applied
to all resources at once, and decimal values are supported:
- `1.0` is the default, and recommended for most installations.
- `0` disables UI polling completely. On the next poll, clients stop
polling for updates.
- A value greater than `1` slows polling down. If you see issues with
database load from lots of clients polling for updates, increasing the
multiplier from 1 can be a good compromise, rather than disabling polling
completely. For example, if you set the value to `2`, all polling intervals
are multiplied by 2, which means that polling happens half as frequently.
- A value between `0` and `1` makes the UI poll more frequently (so updates
show in other sessions faster), but is **not recommended**. `1` should be
fast enough.
1. Select **Save changes**.
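If you manage settings through the API instead, the same multiplier can be set on the
application settings endpoint. A sketch, assuming an administrator token:

```shell
# Hypothetical example: double all polling intervals via the settings API.
curl --request PUT --header "PRIVATE-TOKEN: <your_access_token>" \
     "https://gitlab.example.com/api/v4/application/settings?polling_interval_multiplier=2"
```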

View File

@ -207,8 +207,7 @@ above.
### Dangling commits
`gitlab:git:fsck` can find dangling commits. To fix them, try
[manually triggering housekeeping](../housekeeping.md#manual-housekeeping)
for the affected project(s).
[enabling housekeeping](../housekeeping.md).
If the issue persists, try triggering `gc` via the
[Rails Console](../operations/rails_console.md#starting-a-rails-console-session):

View File

@ -50,8 +50,13 @@ Note the following:
- Importing is only possible if the version of the import and export GitLab instances are
compatible as described in the [Version history](../../user/project/settings/import_export.md#version-history).
- The project import option must be enabled in
application settings (`/admin/application_settings/general`) under **Import sources**, which is available
under **Admin Area > Settings > Visibility and access controls**.
- The project import option must be enabled:
1. On the top bar, select **Menu >** **{admin}** **Admin**.
1. On the left sidebar, select **Settings > General**.
1. Expand **Visibility and access controls**.
1. Under **Import sources**, select the **Project export enabled** checkbox (an API alternative is sketched after this list).
1. Select **Save changes**.
- The exports are stored in a temporary directory and are deleted every
24 hours by a specific worker.
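A sketch of the API alternative mentioned above, assuming an administrator token on
`gitlab.example.com`:

```shell
# Hypothetical example: enable project export through the application settings API.
curl --request PUT --header "PRIVATE-TOKEN: <your_access_token>" \
     "https://gitlab.example.com/api/v4/application/settings?project_export_enabled=true"
```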

View File

@ -107,12 +107,15 @@ to project IDs 50 to 100 in an Omnibus GitLab installation:
sudo gitlab-rake gitlab:storage:migrate_to_hashed ID_FROM=50 ID_TO=100
```
You can monitor the progress in the **Admin Area > Monitoring > Background Jobs** page.
There is a specific queue you can watch to see how long it will take to finish:
`hashed_storage:hashed_storage_project_migrate`.
To monitor the progress in GitLab:
After it reaches zero, you can confirm every project has been migrated by running the commands above.
If you find it necessary, you can run this migration script again to schedule missing projects.
1. On the top bar, select **Menu >** **{admin}** **Admin**.
1. On the left sidebar, select **Monitoring > Background Jobs**.
1. Watch the `hashed_storage:hashed_storage_project_migrate` queue until it
finishes. After it reaches zero, you can confirm every project
has been migrated by running the commands above.
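To check the queue depth without opening the UI, a sketch using the Rails runner on the
GitLab server (queue name taken from the step above):

```shell
# Print the number of jobs still waiting in the hashed storage migration queue.
sudo gitlab-rails runner \
  "puts Sidekiq::Queue.new('hashed_storage:hashed_storage_project_migrate').size"
```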
If you find it necessary, you can run the previous migration script again to schedule missing projects.
Any error or warning is logged in Sidekiq's log file.
@ -120,7 +123,7 @@ If [Geo](../geo/index.md) is enabled, each project that is successfully migrated
generates an event to replicate the changes on any **secondary** nodes.
You only need the `gitlab:storage:migrate_to_hashed` Rake task to migrate your repositories, but there are
[additional commands(#list-projects-and-attachments) to help you inspect projects and attachments in both legacy and hashed storage.
[additional commands](#list-projects-and-attachments) to help you inspect projects and attachments in both legacy and hashed storage.
## Rollback from hashed storage to legacy storage

View File

@ -275,7 +275,7 @@ integration active:
p = Project.find_by_sql("SELECT p.id FROM projects p LEFT JOIN services s ON p.id = s.project_id WHERE s.type = 'JiraService' AND s.active = true")
p.each do |project|
project.jira_service.update_attribute(:password, '<your-new-password>')
project.jira_integration.update_attribute(:password, '<your-new-password>')
end
```
@ -286,9 +286,9 @@ To change all Jira project to use the instance-level integration settings:
1. In a Rails console:
```ruby
jira_service_instance_id = JiraService.find_by(instance: true).id
JiraService.where(active: true, instance: false, template: false, inherit_from_id: nil).find_each do |service|
service.update_attribute(:inherit_from_id, jira_service_instance_id)
jira_integration_instance_id = Integrations::Jira.find_by(instance: true).id
Integrations::Jira.where(active: true, instance: false, template: false, inherit_from_id: nil).find_each do |integration|
integration.update_attribute(:inherit_from_id, jira_integration_instance_id)
end
```

View File

@ -3273,7 +3273,7 @@ Input type: `PrometheusIntegrationResetTokenInput`
| Name | Type | Description |
| ---- | ---- | ----------- |
| <a id="mutationprometheusintegrationresettokenclientmutationid"></a>`clientMutationId` | [`String`](#string) | A unique identifier for the client performing the mutation. |
| <a id="mutationprometheusintegrationresettokenid"></a>`id` | [`PrometheusServiceID!`](#prometheusserviceid) | The ID of the integration to mutate. |
| <a id="mutationprometheusintegrationresettokenid"></a>`id` | [`IntegrationsPrometheusID!`](#integrationsprometheusid) | The ID of the integration to mutate. |
#### Fields
@ -3294,7 +3294,7 @@ Input type: `PrometheusIntegrationUpdateInput`
| <a id="mutationprometheusintegrationupdateactive"></a>`active` | [`Boolean`](#boolean) | Whether the integration is receiving alerts. |
| <a id="mutationprometheusintegrationupdateapiurl"></a>`apiUrl` | [`String`](#string) | Endpoint at which Prometheus can be queried. |
| <a id="mutationprometheusintegrationupdateclientmutationid"></a>`clientMutationId` | [`String`](#string) | A unique identifier for the client performing the mutation. |
| <a id="mutationprometheusintegrationupdateid"></a>`id` | [`PrometheusServiceID!`](#prometheusserviceid) | The ID of the integration to mutate. |
| <a id="mutationprometheusintegrationupdateid"></a>`id` | [`IntegrationsPrometheusID!`](#integrationsprometheusid) | The ID of the integration to mutate. |
#### Fields
@ -15365,6 +15365,13 @@ An example `IncidentManagementOncallRotationID` is: `"gid://gitlab/IncidentManag
Represents non-fractional signed whole numeric values. Int can represent values between -(2^31) and 2^31 - 1.
### `IntegrationsPrometheusID`
A `IntegrationsPrometheusID` is a global ID. It is encoded as a string.
An example `IntegrationsPrometheusID` is: `"gid://gitlab/Integrations::Prometheus/1"`.
The older format `"gid://gitlab/PrometheusService/1"` was deprecated in 14.1.
### `IssuableID`
A `IssuableID` is a global ID. It is encoded as a string.
@ -15510,12 +15517,6 @@ A `ProjectID` is a global ID. It is encoded as a string.
An example `ProjectID` is: `"gid://gitlab/Project/1"`.
### `PrometheusServiceID`
A `PrometheusServiceID` is a global ID. It is encoded as a string.
An example `PrometheusServiceID` is: `"gid://gitlab/PrometheusService/1"`.
### `ReleasesLinkID`
A `ReleasesLinkID` is a global ID. It is encoded as a string.

View File

@ -258,7 +258,7 @@ Example request:
```shell
curl --header "Private-Token: <your_access_token>" "https://gitlab.example.com/api/v4/groups/26/clusters/24" \
-H "Content-Type:application/json" \
--request PUT --data '{"name":"new-cluster-name","domain":"new-domain.com","api_url":"https://new-api-url.com"}'
--request PUT --data '{"name":"new-cluster-name","domain":"new-domain.com","platform_kubernetes_attributes":{"api_url":"https://10.10.101.1:6433"}}'
```
Example response:

View File

@ -363,7 +363,7 @@ use `%{created_at}` in Ruby but `%{createdAt}` in JavaScript. Make sure to
// => When x == 2: 'Last 2 days'
```
The `n_` method should only be used to fetch pluralized translations of the same
The `n_` and `n__` methods should only be used to fetch pluralized translations of the same
string, not to control the logic of showing different strings for different
quantities. Some languages have different quantities of target plural forms.
For example, Chinese (simplified) has only one target plural form in our
@ -376,7 +376,7 @@ For example, use this:
if selected_projects.one?
selected_projects.first.name
else
n__("Project selected", "%d projects selected", selected_projects.count)
n_("Project selected", "%d projects selected", selected_projects.count)
end
```

View File

@ -238,9 +238,11 @@ in this section whenever you need to update GitLab.
### Check the current version
To determine the version of GitLab you're currently running,
go to the **{admin}** **Admin Area**, and find the version
under the **Components** table.
To determine the version of GitLab you're currently running:
1. On the top bar, select **Menu >** **{admin}** **Admin**.
1. On the left sidebar, select **Overview > Dashboard**.
1. Find the version under the **Components** table.
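You can also read the version without the UI. Two hedged alternatives, assuming an Omnibus
installation and a valid access token respectively:

```shell
# From the server itself:
sudo gitlab-rake gitlab:env:info
# Or through the REST API:
curl --header "PRIVATE-TOKEN: <your_access_token>" "https://gitlab.example.com/api/v4/version"
```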
If there's a newer available version of GitLab that contains one or more
security fixes, GitLab displays an **Update asap** notification message that

View File

@ -10,7 +10,10 @@ type: howto
You can configure various settings for GitLab Geo nodes. For more information, see
[Geo documentation](../../administration/geo/index.md).
On the primary node, go to **Admin Area > Geo**. On secondary nodes, go to **Admin Area > Geo > Nodes**.
On either the primary or secondary node:
1. On the top bar, select **Menu >** **{admin}** **Admin**.
1. On the left sidebar, select **Geo > Nodes**.
## Common settings
@ -61,8 +64,13 @@ The **primary** node's Internal URL is used by **secondary** nodes to contact it
[External URL](https://docs.gitlab.com/omnibus/settings/configuration.html#configuring-the-external-url-for-gitlab)
which is used by users. Internal URL does not need to be a private address.
Internal URL defaults to External URL, but you can customize it under
**Admin Area > Geo > Nodes**.
Internal URL defaults to external URL, but you can also customize it:
1. On the top bar, select **Menu >** **{admin}** **Admin**.
1. On the left sidebar, select **Geo > Nodes**.
1. Select **Edit** on the node you want to customize.
1. Edit the internal URL.
1. Select **Save changes**.
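If you prefer the API, the Geo Nodes endpoint accepts the same field. A sketch, assuming
the node ID is `2` and the URLs are placeholders:

```shell
# Hypothetical example: set the internal URL of Geo node 2 through the API.
curl --request PUT --header "PRIVATE-TOKEN: <your_access_token>" \
     "https://primary.example.com/api/v4/geo_nodes/2?internal_url=https://geo-internal.example.com"
```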
WARNING:
We recommend using an HTTPS connection while configuring the Geo nodes. To avoid

View File

@ -22,7 +22,7 @@ This can be useful for:
## Permissions
Only the management project receives `cluster-admin` privileges. All
other projects continue to receive [namespace scoped `edit` level privileges](../project/clusters/add_remove_clusters.md#rbac-cluster-resources).
other projects continue to receive [namespace scoped `edit` level privileges](../project/clusters/cluster_access.md#rbac-cluster-resources).
Management projects are restricted to the following:

View File

@ -163,7 +163,7 @@ are deployed to the Kubernetes cluster, see the documentation for
## Security of runners
For important information about securely configuring runners, see
[Security of runners](../../project/clusters/add_remove_clusters.md#security-of-runners)
[Security of runners](../../project/clusters/cluster_access.md#security-of-runners)
documentation for project-level clusters.
## More information

View File

@ -4,85 +4,57 @@ group: Configure
info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://about.gitlab.com/handbook/engineering/ux/technical-writing/#assignments
---
# Adding EKS clusters **(FREE)**
# EKS clusters (DEPRECATED) **(FREE)**
GitLab supports adding new and existing EKS clusters.
> - [Introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/22392) in GitLab 12.5.
> - [Deprecated](https://gitlab.com/gitlab-org/gitlab/-/issues/327908) in GitLab 14.0.
## EKS requirements
WARNING:
Use [Infrastructure as Code](../../infrastructure/index.md) to create new clusters. The method described in this document is deprecated as of GitLab 14.0.
Before creating your first cluster on Amazon EKS with the GitLab integration, make sure the following
requirements are met:
Through GitLab, you can create new clusters and add existing clusters hosted on Amazon Elastic
Kubernetes Service (EKS).
- An [Amazon Web Services](https://aws.amazon.com/) account is set up and you are able to log in.
- You have permissions to manage IAM resources.
- If you want to use an [existing EKS cluster](#existing-eks-cluster):
- An Amazon EKS cluster with worker nodes properly configured.
- `kubectl` [installed and configured](https://docs.aws.amazon.com/eks/latest/userguide/getting-started.html#get-started-kubectl)
for access to the EKS cluster.
## Add an existing EKS cluster
### Additional requirements for self-managed instances **(FREE SELF)**
If you already have an EKS cluster and want to integrate it with GitLab,
see how to [add an existing cluster](add_existing_cluster.md).
If you are using a self-managed GitLab instance, GitLab must first be configured with a set of
Amazon credentials. These credentials are used to assume an Amazon IAM role provided by the user
creating the cluster. Create an IAM user and ensure it has permissions to assume the role(s) that
your users need to create EKS clusters.
## Create a new certificate-based EKS cluster
For example, the following policy document allows assuming a role whose name starts with
`gitlab-eks-` in account `123456789012`:
Prerequisites:
```json
{
"Version": "2012-10-17",
"Statement": {
"Effect": "Allow",
"Action": "sts:AssumeRole",
"Resource": "arn:aws:iam::123456789012:role/gitlab-eks-*"
}
}
```
- An [Amazon Web Services](https://aws.amazon.com/) account.
- Permissions to manage IAM resources.
### Configure Amazon authentication
For instance-level clusters, see [additional requirements for self-managed instances](#additional-requirements-for-self-managed-instances). **(FREE SELF)**
To configure Amazon authentication in GitLab, generate an access key for the IAM user in the Amazon AWS console, and following the steps below.
To create new Kubernetes clusters for your project, group, or instance through the certificate-based method:
1. Navigate to **Admin Area > Settings > General** and expand the **Amazon EKS** section.
1. Check **Enable Amazon EKS integration**.
1. Enter your **Account ID**.
1. Depending on your configuration, enter your access key and ID:
1. [Define the access control (RBAC or ABAC) for your cluster](cluster_access.md).
1. [Create a cluster in GitLab](#create-a-new-eks-cluster-in-gitlab).
1. [Prepare the cluster in Amazon](#prepare-the-cluster-in-amazon).
1. [Configure your cluster's data in GitLab](#configure-your-clusters-data-in-gitlab).
- _GitLab 13.7 and later, and using an instance profile_: You may leave
**Access key ID** and **Secret access key** blank.
Read [Instance profiles](#instance-profiles) for more information.
- _All GitLab versions_: Enter your access key credentials into
**Access key ID** and **Secret access key**.
Further steps:
1. Click **Save changes**.
1. [Create a default Storage Class](#create-a-default-storage-class).
1. [Deploy the app to EKS](#deploy-the-app-to-eks).
#### Instance profiles
### Create a new EKS cluster in GitLab
> Introduced in [GitLab 13.7](https://gitlab.com/gitlab-org/gitlab/-/issues/291015).
To create a new EKS cluster:
You may leave `Access key ID` and `Secret access key` fields blank if
you are using an instance profile
[to pass an IAM role to an EC2 instance](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use_switch-role-ec2_instance-profiles.html).
Instance profiles dynamically retrieve temporary credentials from AWS when needed.
## New EKS cluster
> [Introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/22392) in GitLab 12.5.
To create and add a new Kubernetes cluster to your project, group, or instance:
1. Navigate to your:
1. Go to your:
- Project's **Infrastructure > Kubernetes clusters** page, for a project-level cluster.
- Group's **Kubernetes** page, for a group-level cluster.
- **Admin Area > Kubernetes**, for an instance-level cluster.
1. Click **Integrate with a cluster certificate**.
- **Menu >** **{admin}** **Admin > Kubernetes**, for an instance-level cluster.
1. Select **Integrate with a cluster certificate**.
1. Under the **Create new cluster** tab, click **Amazon EKS** to display an
`Account ID` and `External ID` needed for later steps.
1. In the [IAM Management Console](https://console.aws.amazon.com/iam/home), create an IAM policy:
1. From the left panel, select **Policies**.
1. Click **Create Policy**, which opens a new window.
1. Select **Create Policy**, which opens a new window.
1. Select the **JSON** tab, and paste the following snippet in place of the
existing content. These permissions give GitLab the ability to create
resources, but not delete them:
@ -133,132 +105,163 @@ To create and add a new Kubernetes cluster to your project, group, or instance:
}
```
If an error is encountered during the creation process, changes will
not be rolled back and you must remove resources manually. You can do this by deleting
the relevant [CloudFormation stack](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/cfn-console-delete-stack.html)
If you get an error during this process, GitLab does not roll back the changes. You must remove resources manually. You can do this by deleting
the relevant [CloudFormation stack](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/cfn-console-delete-stack.html).
1. Click **Review policy**.
1. Enter a suitable name for this policy, and click **Create Policy**. You can now close this window.
1. In the [IAM Management Console](https://console.aws.amazon.com/iam/home), create an **EKS IAM role** following the [Amazon EKS cluster IAM role instructions](https://docs.aws.amazon.com/eks/latest/userguide/service_IAM_role.html). This role should exist so that Kubernetes clusters managed by Amazon EKS can make calls to other AWS services on your behalf to manage the resources that you use with the service.
In addition to the policies that guide suggests, you must also include the `AmazonEKSClusterPolicy`
policy for this role in order for GitLab to manage the EKS cluster correctly.
1. In the [IAM Management Console](https://console.aws.amazon.com/iam/home), create another IAM role which will be used by GitLab to authenticate with AWS. Follow these steps to create it:
1. On the AWS IAM console, select **Roles** from the left panel.
1. Click **Create role**.
1. Under `Select type of trusted entity`, select **Another AWS account**.
1. Enter the Account ID from GitLab into the `Account ID` field.
1. Check **Require external ID**.
1. Enter the External ID from GitLab into the `External ID` field.
1. Click **Next: Permissions**, and select the policy you just created.
1. Click **Next: Tags**, and optionally enter any tags you wish to associate with this role.
1. Click **Next: Review**.
1. Enter a role name and optional description into the fields provided.
1. Click **Create role**, the new role name displays at the top. Click on its name and copy the `Role ARN` from the newly created role.
1. In GitLab, enter the copied role ARN into the `Role ARN` field.
1. In the **Cluster Region** field, enter the [region](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html) you plan to use for your new cluster. GitLab confirms you have access to this region when authenticating your role.
1. Click **Authenticate with AWS**.
1. Choose your cluster's settings:
- **Kubernetes cluster name** - The name you wish to give the cluster.
- **Environment scope** - The [associated environment](index.md#setting-the-environment-scope) to this cluster.
- **Kubernetes version** - The [Kubernetes version](index.md#supported-cluster-versions) to use.
- **Service role** - Select the **EKS IAM role** you created earlier to allow Amazon EKS
and the Kubernetes control plane to manage AWS resources on your behalf.
### Prepare the cluster in Amazon
NOTE:
This IAM role is _not_ the IAM role you created in the previous step. It should be
the one you created much earlier by following the
[Amazon EKS cluster IAM role](https://docs.aws.amazon.com/eks/latest/userguide/service_IAM_role.html)
guide.
- **Key pair name** - Select the [key pair](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html)
that you can use to connect to your worker nodes if required.
- **VPC** - Select a [VPC](https://docs.aws.amazon.com/vpc/latest/userguide/what-is-amazon-vpc.html)
to use for your EKS Cluster resources.
- **Subnets** - Choose the [subnets](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Subnets.html)
in your VPC where your worker nodes run. You must select at least two.
- **Security group** - Choose the [security group](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_SecurityGroups.html)
to apply to the EKS-managed Elastic Network Interfaces that are created in your worker node subnets.
- **Instance type** - The [instance type](https://aws.amazon.com/ec2/instance-types/) of your worker nodes.
- **Node count** - The number of worker nodes.
- **GitLab-managed cluster** - Leave this checked if you want GitLab to manage namespaces and service accounts for this cluster.
See the [Managed clusters section](index.md#gitlab-managed-clusters) for more information.
1. Finally, click the **Create Kubernetes cluster** button.
1. [Create an **EKS IAM role** for your cluster](#create-an-eks-iam-role-for-your-cluster) (**role A**).
1. [Create **another EKS IAM role** for GitLab authentication with Amazon](#create-another-eks-iam-role-for-gitlab-authentication-with-amazon) (**role B**).
#### Create an EKS IAM role for your cluster
In the [IAM Management Console](https://console.aws.amazon.com/iam/home),
create an **EKS IAM role** (**role A**) following the [Amazon EKS cluster IAM role instructions](https://docs.aws.amazon.com/eks/latest/userguide/service_IAM_role.html).
This role is necessary so that Kubernetes clusters managed by Amazon EKS can make calls to other AWS
services on your behalf to manage the resources that you use with the service.
For GitLab to manage the EKS cluster correctly, you must include `AmazonEKSClusterPolicy` in
addition to the policies the guide suggests.
#### Create another EKS IAM role for GitLab authentication with Amazon
In the [IAM Management Console](https://console.aws.amazon.com/iam/home),
create another IAM role (**role B**) for GitLab authentication with AWS:
1. On the AWS IAM console, select **Roles** from the left panel.
1. Click **Create role**.
1. Under **Select type of trusted entity**, select **Another AWS account**.
1. Enter the Account ID from GitLab into the **Account ID** field.
1. Check **Require external ID**.
1. Enter the External ID from GitLab into the **External ID** field.
1. Click **Next: Permissions**, and select the policy you just created.
1. Click **Next: Tags**, and optionally enter any tags you wish to associate with this role.
1. Click **Next: Review**.
1. Enter a role name and optional description into the fields provided.
1. Click **Create role**. The new role name displays at the top. Click on its name and copy the
`Role ARN` from the newly created role.
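If you prefer the AWS CLI over the console, the role creation above maps roughly to the
following commands. This is only a sketch: the role, policy, and file names are
placeholders, and the trust policy file must contain the Account ID and External ID that
GitLab displays:

```shell
# Hypothetical AWS CLI equivalent of creating role B (all names are placeholders).
aws iam create-policy --policy-name gitlab-eks-provision-policy \
    --policy-document file://gitlab-eks-policy.json
aws iam create-role --role-name gitlab-eks-provision \
    --assume-role-policy-document file://gitlab-trust-policy.json
aws iam attach-role-policy --role-name gitlab-eks-provision \
    --policy-arn arn:aws:iam::123456789012:policy/gitlab-eks-provision-policy
```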
### Configure your cluster's data in GitLab
1. Back in GitLab, enter the copied role ARN into the **Role ARN** field.
1. In the **Cluster Region** field, enter the [region](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html) you plan to use for your new cluster. GitLab confirms you have access to this region when authenticating your role.
1. Select **Authenticate with AWS**.
1. Adjust your [cluster's settings](#cluster-settings).
1. Select the **Create Kubernetes cluster** button.
After about 10 minutes, your cluster is ready to go.
NOTE:
If you have [installed and configured](https://docs.aws.amazon.com/eks/latest/userguide/getting-started.html#get-started-kubectl) `kubectl` and you would like to manage your cluster with it, you must add your AWS external ID in the AWS configuration. For more information on how to configure AWS CLI, see [using an IAM role in the AWS CLI](https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-role.html#cli-configure-role-xaccount).
If you have [installed and configured](https://docs.aws.amazon.com/eks/latest/userguide/getting-started.html#get-started-kubectl) `kubectl` and you would like to manage your cluster with it, you must add your AWS external ID in the AWS configuration. For more information on how to configure AWS CLI, see [using an IAM role in the AWS CLI](https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-role.html#cli-configure-role-xaccount).
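A sketch of what that configuration could look like in `~/.aws/config`; the profile name,
role ARN, and External ID are placeholders:

```shell
# Hypothetical AWS CLI profile that assumes the provision role with the
# External ID shown by GitLab. All values are placeholders.
cat >> ~/.aws/config <<'EOF'
[profile gitlab-eks]
role_arn = arn:aws:iam::123456789012:role/gitlab-eks-provision
source_profile = default
external_id = 00000000-aaaa-bbbb-cccc-111111111111
EOF
```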
### Cluster creation flow
#### Cluster settings
The following sequence illustrates how GitLab works with AWS to create an EKS cluster:
When you create a new cluster, you have the following settings:
```mermaid
sequenceDiagram
autonumber
participant G as GitLab
participant A as AWS
participant E as EKS cluster
alt static credentials
G->>G: Load AWS Access and secret key
end
alt IAM instance profile
G->>A: Fetch temporary credentials
A->>G: Temporary access credentials
end
G->>A: AssumeRole: EKS Provision Role
A->>A: Check account, external IDs
A->>A: Check permissions
A->>G: New access credentials
note over G: user selects EKS cluster options
note over G,A: Use Service Role credentials
G->>A: CreateStack (CloudFormation)
A->>G: Received
G->>G: Wait 5 minutes
loop Poll for cluster creation
G->>A: DescribeStacks
A->>G: CREATE_IN_PROGRESS
end
note over G,E: EKS Cluster Created
G->>A: DescribeStacks
A->>G: CREATE_COMPLETE
G->>E: kubectl create role (service account)
E->>G: OK
| Setting | Description |
| ----------------------- |------------ |
| Kubernetes cluster name | Your cluster's name. |
| Environment scope | The [associated environment](index.md#setting-the-environment-scope). |
| Service role | The **EKS IAM role** (**role A**). |
| Kubernetes version | The [Kubernetes version](index.md#supported-cluster-versions) for your cluster. |
| Key pair name | The [key pair](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html) that you can use to connect to your worker nodes. |
| VPC | The [VPC](https://docs.aws.amazon.com/vpc/latest/userguide/what-is-amazon-vpc.html) to use for your EKS Cluster resources. |
| Subnets | The [subnets](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Subnets.html) in your VPC where your worker nodes run. Two are required. |
| Security group | The [security group](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_SecurityGroups.html) to apply to the EKS-managed Elastic Network Interfaces that are created in your worker node subnets. |
| Instance type | The [instance type](https://aws.amazon.com/ec2/instance-types/) of your worker nodes. |
| Node count | The number of worker nodes. |
| GitLab-managed cluster | Check if you want GitLab to manage namespaces and service accounts for this cluster. |
## Create a default Storage Class
Amazon EKS doesn't have a default Storage Class out of the box, which means
requests for persistent volumes are not automatically fulfilled. As part
of Auto DevOps, the deployed PostgreSQL instance requests persistent storage,
and without a default storage class it cannot start.
If a default Storage Class doesn't already exist and is desired, follow Amazon's
[guide on storage classes](https://docs.aws.amazon.com/eks/latest/userguide/storage-classes.html)
to create one.
Alternatively, disable PostgreSQL by setting the project variable
[`POSTGRES_ENABLED`](../../../topics/autodevops/customize.md#cicd-variables) to `false`.
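For example, a sketch of promoting an existing EBS-backed class to the default with
`kubectl`; the `gp2` class name is an assumption and may differ in your cluster:

```shell
# Hypothetical example: mark the gp2 StorageClass as the cluster default.
kubectl patch storageclass gp2 \
  -p '{"metadata": {"annotations": {"storageclass.kubernetes.io/is-default-class": "true"}}}'
```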
## Deploy the app to EKS
With RBAC disabled and services deployed,
[Auto DevOps](../../../topics/autodevops/index.md) can now be leveraged
to build, test, and deploy the app.
[Enable Auto DevOps](../../../topics/autodevops/index.md#at-the-project-level)
if not already enabled. If a wildcard DNS entry was created resolving to the
Load Balancer, enter it in the `domain` field under the Auto DevOps settings.
Otherwise, the deployed app isn't externally available outside of the cluster.
![Deploy Pipeline](img/pipeline.png)
GitLab creates a new pipeline, which begins to build, test, and deploy the app.
After the pipeline has finished, your app runs in EKS, and is available
to users. Click on **CI/CD > Environments**.
![Deployed Environment](img/environment.png)
GitLab displays a list of the environments and their deploy status, as well as
options to browse to the app, view monitoring metrics, and even access a shell
on the running pod.
## Additional requirements for self-managed instances **(FREE SELF)**
If you are using a self-managed GitLab instance, you need to configure
Amazon credentials. GitLab uses these credentials to assume an Amazon IAM role to create your cluster.
Create an IAM user and ensure it has permissions to assume the role(s) that
your users need to create EKS clusters.
For example, the following policy document allows assuming a role whose name starts with
`gitlab-eks-` in account `123456789012`:
```json
{
"Version": "2012-10-17",
"Statement": {
"Effect": "Allow",
"Action": "sts:AssumeRole",
"Resource": "arn:aws:iam::123456789012:role/gitlab-eks-*"
}
}
```
First, GitLab must obtain an initial set of credentials to communicate with the AWS API.
These credentials can be retrieved in one of two ways:
### Configure Amazon authentication
- Statically through the [Configure Amazon authentication](#configure-amazon-authentication).
- Dynamically via an IAM instance profile ([introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/291015) in GitLab 13.7).
To configure Amazon authentication in GitLab, generate an access key for the
IAM user in the Amazon AWS console, and follow these steps:
After GitLab retrieves the AWS credentials, it makes an
[AssumeRole](https://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRole.html)
API call to obtain credentials for the Provision Role. AWS confirms
the request has the correct account ID, external ID, and permissions.
1. In GitLab, on the top bar, select **Menu >** **{admin}** **Admin > Settings > General** and expand the **Amazon EKS** section.
1. Check **Enable Amazon EKS integration**.
1. Enter your **Account ID**.
1. Enter your [access key and ID](#eks-access-key-and-id).
1. Click **Save changes**.
If the request is valid, AWS returns a new set of temporary credentials GitLab
uses to load the **Create cluster** options page.
#### EKS access key and ID
On the **Create cluster** page, the user must select a **Service Role**, which is
the IAM role that is actually used to create the cluster, and other options
such as the Kubernetes cluster name, Kubernetes version, and region.
After the user clicks the **Create Kubernetes cluster** button, GitLab
submits a CloudFormation API request to create an EKS cluster with the given parameters
from the user. GitLab waits 5 minutes before checking whether the cluster was created,
and polls once a minute for up to 30 minutes.
> Instance profiles were [introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/291015) in GitLab 13.7.
After GitLab receives a `CREATE_COMPLETE` message from AWS, GitLab talks
to the EKS cluster to create a Kubernetes service account with `cluster-admin`
privileges, and updates its internal database to reflect the newly-created
Kubernetes cluster. From this point forward, GitLab uses this service account to
interact with the cluster.
If you're using GitLab 13.7 or later, you can use instance profiles to
dynamically retrieve temporary credentials from AWS when needed.
In this case, leave the `Access key ID` and `Secret access key` fields blank
and [pass an IAM role to an EC2 instance](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use_switch-role-ec2_instance-profiles.html).
### Troubleshooting creating a new cluster
Otherwise, enter your access key credentials into **Access key ID** and **Secret access key**.
## Troubleshooting
The following errors are commonly encountered when creating a new cluster.
#### Validation failed: Role ARN must be a valid Amazon Resource Name
### Validation failed: Role ARN must be a valid Amazon Resource Name
Check that the `Provision Role ARN` is correct. An example of a valid ARN:
@ -266,7 +269,7 @@ Check that the `Provision Role ARN` is correct. An example of a valid ARN:
arn:aws:iam::123456789012:role/gitlab-eks-provision
```
#### Access denied: User `arn:aws:iam::x` is not authorized to perform: `sts:AssumeRole` on resource: `arn:aws:iam::y`
### Access denied: User `arn:aws:iam::x` is not authorized to perform: `sts:AssumeRole` on resource: `arn:aws:iam::y`
This error occurs when the credentials defined in the
[Configure Amazon authentication](#configure-amazon-authentication) cannot assume the role defined by the
@ -280,7 +283,7 @@ Provision Role ARN. Check that:
![AWS IAM Trust relationships](img/aws_iam_role_trust.png)
#### Could not load Security Groups for this VPC
### Could not load Security Groups for this VPC
When populating options in the configuration form, GitLab returns this error
because GitLab has successfully assumed your provided role, but the role has
@ -307,46 +310,3 @@ This role should be the role you created by following the
[EKS cluster IAM role](https://docs.aws.amazon.com/eks/latest/userguide/service_IAM_role.html) guide.
In addition to the policies that guide suggests, you must also include the
`AmazonEKSClusterPolicy` policy for this role in order for GitLab to manage the EKS cluster correctly.
## Existing EKS cluster
For information on adding an existing EKS cluster, see
[Existing Kubernetes cluster](add_remove_clusters.md#existing-kubernetes-cluster).
### Create a default Storage Class
Amazon EKS doesn't have a default Storage Class out of the box, which means
requests for persistent volumes are not automatically fulfilled. As part
of Auto DevOps, the deployed PostgreSQL instance requests persistent storage,
and without a default storage class it cannot start.
If a default Storage Class doesn't already exist and is desired, follow Amazon's
[guide on storage classes](https://docs.aws.amazon.com/eks/latest/userguide/storage-classes.html)
to create one.
Alternatively, disable PostgreSQL by setting the project variable
[`POSTGRES_ENABLED`](../../../topics/autodevops/customize.md#cicd-variables) to `false`.
### Deploy the app to EKS
With RBAC disabled and services deployed,
[Auto DevOps](../../../topics/autodevops/index.md) can now be leveraged
to build, test, and deploy the app.
[Enable Auto DevOps](../../../topics/autodevops/index.md#at-the-project-level)
if not already enabled. If a wildcard DNS entry was created resolving to the
Load Balancer, enter it in the `domain` field under the Auto DevOps settings.
Otherwise, the deployed app isn't externally available outside of the cluster.
![Deploy Pipeline](img/pipeline.png)
GitLab creates a new pipeline, which begins to build, test, and deploy the app.
After the pipeline has finished, your app runs in EKS, and is available
to users. Click on **CI/CD > Environments**.
![Deployed Environment](img/environment.png)
GitLab displays a list of the environments and their deploy status, as well as
options to browse to the app, view monitoring metrics, and even access a shell
on the running pod.

Some files were not shown because too many files have changed in this diff