diff --git a/app/assets/javascripts/issuable_bulk_update_sidebar/constants.js b/app/assets/javascripts/issuable_bulk_update_sidebar/constants.js
new file mode 100644
index 00000000000..ad15b25f9cf
--- /dev/null
+++ b/app/assets/javascripts/issuable_bulk_update_sidebar/constants.js
@@ -0,0 +1,17 @@
+import { __ } from '~/locale';
+
+export const ISSUE_STATUS_MODIFIERS = {
+ REOPEN: 'reopen',
+ CLOSE: 'close',
+};
+
+export const ISSUE_STATUS_SELECT_OPTIONS = [
+ {
+ value: ISSUE_STATUS_MODIFIERS.REOPEN,
+ text: __('Open'),
+ },
+ {
+ value: ISSUE_STATUS_MODIFIERS.CLOSE,
+ text: __('Closed'),
+ },
+];
diff --git a/app/assets/javascripts/issuable_bulk_update_sidebar/init_issue_status_select.js b/app/assets/javascripts/issuable_bulk_update_sidebar/init_issue_status_select.js
new file mode 100644
index 00000000000..43179a86d70
--- /dev/null
+++ b/app/assets/javascripts/issuable_bulk_update_sidebar/init_issue_status_select.js
@@ -0,0 +1,17 @@
+import Vue from 'vue';
+import StatusSelect from './components/status_select.vue';
+
+export default function initIssueStatusSelect() {
+ const el = document.querySelector('.js-issue-status');
+
+ if (!el) {
+ return null;
+ }
+
+ return new Vue({
+ el,
+ render(h) {
+ return h(StatusSelect);
+ },
+ });
+}
diff --git a/app/assets/javascripts/issuable_bulk_update_actions.js b/app/assets/javascripts/issuable_bulk_update_sidebar/issuable_bulk_update_actions.js
similarity index 97%
rename from app/assets/javascripts/issuable_bulk_update_actions.js
rename to app/assets/javascripts/issuable_bulk_update_sidebar/issuable_bulk_update_actions.js
index 911533457ac..463e0e5837e 100644
--- a/app/assets/javascripts/issuable_bulk_update_actions.js
+++ b/app/assets/javascripts/issuable_bulk_update_sidebar/issuable_bulk_update_actions.js
@@ -1,8 +1,8 @@
import $ from 'jquery';
import { difference, intersection, union } from 'lodash';
-import createFlash from './flash';
-import axios from './lib/utils/axios_utils';
-import { __ } from './locale';
+import createFlash from '~/flash';
+import axios from '~/lib/utils/axios_utils';
+import { __ } from '~/locale';
export default {
init({ form, issues, prefixId } = {}) {
diff --git a/app/assets/javascripts/issuable_bulk_update_sidebar.js b/app/assets/javascripts/issuable_bulk_update_sidebar/issuable_bulk_update_sidebar.js
similarity index 94%
rename from app/assets/javascripts/issuable_bulk_update_sidebar.js
rename to app/assets/javascripts/issuable_bulk_update_sidebar/issuable_bulk_update_sidebar.js
index 97d50dde9f7..a9d4548f8cf 100644
--- a/app/assets/javascripts/issuable_bulk_update_sidebar.js
+++ b/app/assets/javascripts/issuable_bulk_update_sidebar/issuable_bulk_update_sidebar.js
@@ -2,11 +2,12 @@
import $ from 'jquery';
import { property } from 'lodash';
+
+import issueableEventHub from '~/issues_list/eventhub';
+import LabelsSelect from '~/labels_select';
+import MilestoneSelect from '~/milestone_select';
+import initIssueStatusSelect from './init_issue_status_select';
import IssuableBulkUpdateActions from './issuable_bulk_update_actions';
-import issueStatusSelect from './issue_status_select';
-import issueableEventHub from './issues_list/eventhub';
-import LabelsSelect from './labels_select';
-import MilestoneSelect from './milestone_select';
import subscriptionSelect from './subscription_select';
const HIDDEN_CLASS = 'hidden';
@@ -29,7 +30,7 @@ export default class IssuableBulkUpdateSidebar {
this.$sidebar = $('.right-sidebar');
this.$sidebarInnerContainer = this.$sidebar.find('.issuable-sidebar');
this.$bulkEditCancelBtn = $('.js-bulk-update-menu-hide');
- this.$bulkEditSubmitBtn = $('.update-selected-issues');
+ this.$bulkEditSubmitBtn = $('.js-update-selected-issues');
this.$bulkUpdateEnableBtn = $('.js-bulk-update-toggle');
this.$otherFilters = $('.issues-other-filters');
this.$checkAllContainer = $('.check-all-holder');
@@ -56,7 +57,7 @@ export default class IssuableBulkUpdateSidebar {
initDropdowns() {
new LabelsSelect();
new MilestoneSelect();
- issueStatusSelect();
+ initIssueStatusSelect();
subscriptionSelect();
if (IS_EE) {
diff --git a/app/assets/javascripts/issuable_init_bulk_update_sidebar.js b/app/assets/javascripts/issuable_bulk_update_sidebar/issuable_init_bulk_update_sidebar.js
similarity index 100%
rename from app/assets/javascripts/issuable_init_bulk_update_sidebar.js
rename to app/assets/javascripts/issuable_bulk_update_sidebar/issuable_init_bulk_update_sidebar.js
diff --git a/app/assets/javascripts/subscription_select.js b/app/assets/javascripts/issuable_bulk_update_sidebar/subscription_select.js
similarity index 96%
rename from app/assets/javascripts/subscription_select.js
rename to app/assets/javascripts/issuable_bulk_update_sidebar/subscription_select.js
index 4a688d819b0..b12ac776b4f 100644
--- a/app/assets/javascripts/subscription_select.js
+++ b/app/assets/javascripts/issuable_bulk_update_sidebar/subscription_select.js
@@ -1,6 +1,6 @@
import $ from 'jquery';
import initDeprecatedJQueryDropdown from '~/deprecated_jquery_dropdown';
-import { __ } from './locale';
+import { __ } from '~/locale';
export default function subscriptionSelect() {
$('.js-subscription-event').each((i, element) => {
diff --git a/app/assets/javascripts/issuable_index.js b/app/assets/javascripts/issuable_index.js
index cdeee68b762..5a57da292a0 100644
--- a/app/assets/javascripts/issuable_index.js
+++ b/app/assets/javascripts/issuable_index.js
@@ -1,4 +1,4 @@
-import issuableInitBulkUpdateSidebar from './issuable_init_bulk_update_sidebar';
+import issuableInitBulkUpdateSidebar from '~/issuable_bulk_update_sidebar/issuable_init_bulk_update_sidebar';
export default class IssuableIndex {
constructor(pagePrefix = 'issuable_') {
diff --git a/app/assets/javascripts/issue_status_select.js b/app/assets/javascripts/issue_status_select.js
deleted file mode 100644
index 2ede0837930..00000000000
--- a/app/assets/javascripts/issue_status_select.js
+++ /dev/null
@@ -1,27 +0,0 @@
-import $ from 'jquery';
-import initDeprecatedJQueryDropdown from '~/deprecated_jquery_dropdown';
-import { __ } from './locale';
-
-export default function issueStatusSelect() {
- $('.js-issue-status').each((i, el) => {
- const fieldName = $(el).data('fieldName');
- initDeprecatedJQueryDropdown($(el), {
- selectable: true,
- fieldName,
- toggleLabel(selected, element, instance) {
- let label = __('Author');
- const $item = instance.dropdown.find('.is-active');
- if ($item.length) {
- label = $item.text();
- }
- return label;
- },
- clicked(options) {
- return options.e.preventDefault();
- },
- id(obj, element) {
- return $(element).data('id');
- },
- });
- });
-}
diff --git a/app/assets/javascripts/issues_list/components/issues_list_app.vue b/app/assets/javascripts/issues_list/components/issues_list_app.vue
index dbf7717b248..921af766796 100644
--- a/app/assets/javascripts/issues_list/components/issues_list_app.vue
+++ b/app/assets/javascripts/issues_list/components/issues_list_app.vue
@@ -450,7 +450,9 @@ export default {
},
async handleBulkUpdateClick() {
if (!this.hasInitBulkEdit) {
- const initBulkUpdateSidebar = await import('~/issuable_init_bulk_update_sidebar');
+ const initBulkUpdateSidebar = await import(
+ '~/issuable_bulk_update_sidebar/issuable_init_bulk_update_sidebar'
+ );
initBulkUpdateSidebar.default.init('issuable_');
const usersSelect = await import('~/users_select');
diff --git a/app/assets/javascripts/labels_select.js b/app/assets/javascripts/labels_select.js
index 3df806161f7..a62ab301227 100644
--- a/app/assets/javascripts/labels_select.js
+++ b/app/assets/javascripts/labels_select.js
@@ -5,11 +5,11 @@
import $ from 'jquery';
import { difference, isEqual, escape, sortBy, template, union } from 'lodash';
import initDeprecatedJQueryDropdown from '~/deprecated_jquery_dropdown';
+import IssuableBulkUpdateActions from '~/issuable_bulk_update_sidebar/issuable_bulk_update_actions';
import { isScopedLabel } from '~/lib/utils/common_utils';
import boardsStore from './boards/stores/boards_store';
import CreateLabelDropdown from './create_label';
import createFlash from './flash';
-import IssuableBulkUpdateActions from './issuable_bulk_update_actions';
import axios from './lib/utils/axios_utils';
import { sprintf, __ } from './locale';
diff --git a/app/assets/javascripts/pages/groups/issues/index.js b/app/assets/javascripts/pages/groups/issues/index.js
index 76db578f6f9..342c054471d 100644
--- a/app/assets/javascripts/pages/groups/issues/index.js
+++ b/app/assets/javascripts/pages/groups/issues/index.js
@@ -1,5 +1,5 @@
import IssuableFilteredSearchTokenKeys from 'ee_else_ce/filtered_search/issuable_filtered_search_token_keys';
-import issuableInitBulkUpdateSidebar from '~/issuable_init_bulk_update_sidebar';
+import issuableInitBulkUpdateSidebar from '~/issuable_bulk_update_sidebar/issuable_init_bulk_update_sidebar';
import { mountIssuablesListApp } from '~/issues_list';
import initManualOrdering from '~/manual_ordering';
import { FILTERED_SEARCH } from '~/pages/constants';
diff --git a/app/assets/javascripts/pages/groups/merge_requests/index.js b/app/assets/javascripts/pages/groups/merge_requests/index.js
index 2f6f9bb16e1..02a0a50f984 100644
--- a/app/assets/javascripts/pages/groups/merge_requests/index.js
+++ b/app/assets/javascripts/pages/groups/merge_requests/index.js
@@ -1,6 +1,6 @@
import addExtraTokensForMergeRequests from 'ee_else_ce/filtered_search/add_extra_tokens_for_merge_requests';
import IssuableFilteredSearchTokenKeys from '~/filtered_search/issuable_filtered_search_token_keys';
-import issuableInitBulkUpdateSidebar from '~/issuable_init_bulk_update_sidebar';
+import issuableInitBulkUpdateSidebar from '~/issuable_bulk_update_sidebar/issuable_init_bulk_update_sidebar';
import { FILTERED_SEARCH } from '~/pages/constants';
import initFilteredSearch from '~/pages/search/init_filtered_search';
import projectSelect from '~/project_select';
diff --git a/app/assets/javascripts/vue_shared/security_reports/components/artifact_downloads/merge_request_artifact_download.vue b/app/assets/javascripts/vue_shared/security_reports/components/artifact_downloads/merge_request_artifact_download.vue
index 8fdc5ca78db..f3dd26b02cb 100644
--- a/app/assets/javascripts/vue_shared/security_reports/components/artifact_downloads/merge_request_artifact_download.vue
+++ b/app/assets/javascripts/vue_shared/security_reports/components/artifact_downloads/merge_request_artifact_download.vue
@@ -76,6 +76,7 @@ export default {
diff --git a/app/assets/javascripts/vue_shared/security_reports/components/security_report_download_dropdown.vue b/app/assets/javascripts/vue_shared/security_reports/components/security_report_download_dropdown.vue
index 5d39d740c07..4178c5d1170 100644
--- a/app/assets/javascripts/vue_shared/security_reports/components/security_report_download_dropdown.vue
+++ b/app/assets/javascripts/vue_shared/security_reports/components/security_report_download_dropdown.vue
@@ -21,6 +21,16 @@ export default {
required: false,
default: false,
},
+ text: {
+ type: String,
+ required: false,
+ default: '',
+ },
+ title: {
+ type: String,
+ required: false,
+ default: '',
+ },
},
methods: {
artifactText({ name }) {
@@ -35,7 +45,8 @@ export default {
@@ -228,6 +229,7 @@ export default {
diff --git a/app/controllers/chaos_controller.rb b/app/controllers/chaos_controller.rb
index 1cfcd2905f2..4e5af1945a4 100644
--- a/app/controllers/chaos_controller.rb
+++ b/app/controllers/chaos_controller.rb
@@ -31,7 +31,7 @@ class ChaosController < ActionController::Base
gc_stat = Gitlab::Chaos.run_gc
render json: {
- worker_id: Prometheus::PidProvider.worker_id,
+ worker_id: ::Prometheus::PidProvider.worker_id,
gc_stat: gc_stat
}
end
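The repeated change from `Prometheus::…` to `::Prometheus::…` in this and the following hunks is presumably needed because the MR introduces an `Integrations::Prometheus` class: in some module nestings a bare `Prometheus` constant could now resolve to the integration class instead of the top-level `Prometheus` namespace (`PidProvider`, `ProxyService`, the prometheus-client gem, and so on). A minimal, self-contained sketch of that constant-resolution behaviour (all names below are illustrative, not GitLab code):

```ruby
# Illustrative only: Ruby prefers the lexically closer constant unless the
# reference is anchored to the root namespace with a leading `::`.
module Prometheus
  PID = 'top-level Prometheus namespace'
end

module Integrations
  class Prometheus
  end

  class Caller
    def self.resolve
      # The bare constant resolves to the sibling Integrations::Prometheus;
      # the `::`-anchored reference still reaches the top-level module.
      [Prometheus, ::Prometheus::PID]
    end
  end
end

p Integrations::Caller.resolve
# => [Integrations::Prometheus, "top-level Prometheus namespace"]
```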
diff --git a/app/controllers/concerns/metrics/dashboard/prometheus_api_proxy.rb b/app/controllers/concerns/metrics/dashboard/prometheus_api_proxy.rb
index e0e3f628cc5..65237b552ca 100644
--- a/app/controllers/concerns/metrics/dashboard/prometheus_api_proxy.rb
+++ b/app/controllers/concerns/metrics/dashboard/prometheus_api_proxy.rb
@@ -16,7 +16,7 @@ module Metrics::Dashboard::PrometheusApiProxy
return error_response(variable_substitution_result)
end
- prometheus_result = Prometheus::ProxyService.new(
+ prometheus_result = ::Prometheus::ProxyService.new(
proxyable,
proxy_method,
proxy_path,
diff --git a/app/controllers/metrics_controller.rb b/app/controllers/metrics_controller.rb
index 1ef1e12bb02..a0c307a0a03 100644
--- a/app/controllers/metrics_controller.rb
+++ b/app/controllers/metrics_controller.rb
@@ -30,7 +30,7 @@ class MetricsController < ActionController::Base
def system_metrics
Gitlab::Metrics::System.summary.merge(
- worker_id: Prometheus::PidProvider.worker_id
+ worker_id: ::Prometheus::PidProvider.worker_id
)
end
end
diff --git a/app/controllers/projects/environments/prometheus_api_controller.rb b/app/controllers/projects/environments/prometheus_api_controller.rb
index 97810d7d439..94fe67b5e85 100644
--- a/app/controllers/projects/environments/prometheus_api_controller.rb
+++ b/app/controllers/projects/environments/prometheus_api_controller.rb
@@ -14,6 +14,6 @@ class Projects::Environments::PrometheusApiController < Projects::ApplicationCon
end
def proxy_variable_substitution_service
- Prometheus::ProxyVariableSubstitutionService
+ ::Prometheus::ProxyVariableSubstitutionService
end
end
diff --git a/app/controllers/projects/import/jira_controller.rb b/app/controllers/projects/import/jira_controller.rb
index 8418a607659..46c4761b0ea 100644
--- a/app/controllers/projects/import/jira_controller.rb
+++ b/app/controllers/projects/import/jira_controller.rb
@@ -25,9 +25,9 @@ module Projects
false
end
- def jira_service
- strong_memoize(:jira_service) do
- @project.jira_service
+ def jira_integration
+ strong_memoize(:jira_integration) do
+ @project.jira_integration
end
end
diff --git a/app/controllers/projects/prometheus/metrics_controller.rb b/app/controllers/projects/prometheus/metrics_controller.rb
index d70d29a341f..f3a3d22244c 100644
--- a/app/controllers/projects/prometheus/metrics_controller.rb
+++ b/app/controllers/projects/prometheus/metrics_controller.rb
@@ -66,7 +66,7 @@ module Projects
)
if @metric.persisted?
- redirect_to edit_project_service_path(project, ::PrometheusService),
+ redirect_to edit_project_service_path(project, ::Integrations::Prometheus),
notice: _('Metric was successfully added.')
else
render 'new'
@@ -77,7 +77,7 @@ module Projects
@metric = update_metrics_service(prometheus_metric).execute
if @metric.persisted?
- redirect_to edit_project_service_path(project, ::PrometheusService),
+ redirect_to edit_project_service_path(project, ::Integrations::Prometheus),
notice: _('Metric was successfully updated.')
else
render 'edit'
@@ -93,7 +93,7 @@ module Projects
respond_to do |format|
format.html do
- redirect_to edit_project_service_path(project, ::PrometheusService), status: :see_other
+ redirect_to edit_project_service_path(project, ::Integrations::Prometheus), status: :see_other
end
format.json do
head :ok
diff --git a/app/controllers/projects/runners_controller.rb b/app/controllers/projects/runners_controller.rb
index ec1f57f090a..5e96cd4f888 100644
--- a/app/controllers/projects/runners_controller.rb
+++ b/app/controllers/projects/runners_controller.rb
@@ -51,14 +51,14 @@ class Projects::RunnersController < Projects::ApplicationController
end
def toggle_shared_runners
- if !project.shared_runners_enabled && project.group && project.group.shared_runners_setting == 'disabled_and_unoverridable'
- render json: { error: _('Cannot enable shared runners because parent group does not allow it') }, status: :unauthorized
- return
+ update_params = { shared_runners_enabled: !project.shared_runners_enabled }
+ result = Projects::UpdateService.new(project, current_user, update_params).execute
+
+ if result[:status] == :success
+ render json: {}, status: :ok
+ else
+ render json: { error: result[:message] }, status: :unauthorized
end
-
- project.toggle!(:shared_runners_enabled)
-
- render json: {}, status: :ok
end
def toggle_group_runners
diff --git a/app/controllers/projects/services_controller.rb b/app/controllers/projects/services_controller.rb
index cad13d7e708..7288cbd2a40 100644
--- a/app/controllers/projects/services_controller.rb
+++ b/app/controllers/projects/services_controller.rb
@@ -105,11 +105,11 @@ class Projects::ServicesController < Projects::ApplicationController
end
def redirect_deprecated_prometheus_service
- redirect_to edit_project_service_path(project, integration) if integration.is_a?(::PrometheusService) && Feature.enabled?(:settings_operations_prometheus_service, project)
+ redirect_to edit_project_service_path(project, integration) if integration.is_a?(::Integrations::Prometheus) && Feature.enabled?(:settings_operations_prometheus_service, project)
end
def set_deprecation_notice_for_prometheus_service
- return if !integration.is_a?(::PrometheusService) || !Feature.enabled?(:settings_operations_prometheus_service, project)
+ return if !integration.is_a?(::Integrations::Prometheus) || !Feature.enabled?(:settings_operations_prometheus_service, project)
operations_link_start = ""
message = s_('PrometheusService|You can now manage your Prometheus settings on the %{operations_link_start}Operations%{operations_link_end} page. Fields on this page has been deprecated.') % { operations_link_start: operations_link_start, operations_link_end: "" }
diff --git a/app/graphql/mutations/alert_management/prometheus_integration/prometheus_integration_base.rb b/app/graphql/mutations/alert_management/prometheus_integration/prometheus_integration_base.rb
index cb243f49b33..d8678ea4d61 100644
--- a/app/graphql/mutations/alert_management/prometheus_integration/prometheus_integration_base.rb
+++ b/app/graphql/mutations/alert_management/prometheus_integration/prometheus_integration_base.rb
@@ -14,7 +14,7 @@ module Mutations
private
def find_object(id:)
- GitlabSchema.object_from_id(id, expected_class: ::PrometheusService)
+ GitlabSchema.object_from_id(id, expected_class: ::Integrations::Prometheus)
end
def response(integration, result)
diff --git a/app/graphql/mutations/alert_management/prometheus_integration/reset_token.rb b/app/graphql/mutations/alert_management/prometheus_integration/reset_token.rb
index 428be091436..33a12405583 100644
--- a/app/graphql/mutations/alert_management/prometheus_integration/reset_token.rb
+++ b/app/graphql/mutations/alert_management/prometheus_integration/reset_token.rb
@@ -6,7 +6,7 @@ module Mutations
class ResetToken < PrometheusIntegrationBase
graphql_name 'PrometheusIntegrationResetToken'
- argument :id, Types::GlobalIDType[::PrometheusService],
+ argument :id, Types::GlobalIDType[::Integrations::Prometheus],
required: true,
description: "The ID of the integration to mutate."
diff --git a/app/graphql/mutations/alert_management/prometheus_integration/update.rb b/app/graphql/mutations/alert_management/prometheus_integration/update.rb
index 7594766176f..ddab1af908c 100644
--- a/app/graphql/mutations/alert_management/prometheus_integration/update.rb
+++ b/app/graphql/mutations/alert_management/prometheus_integration/update.rb
@@ -6,7 +6,7 @@ module Mutations
class Update < PrometheusIntegrationBase
graphql_name 'PrometheusIntegrationUpdate'
- argument :id, Types::GlobalIDType[::PrometheusService],
+ argument :id, Types::GlobalIDType[::Integrations::Prometheus],
required: true,
description: "The ID of the integration to mutate."
diff --git a/app/graphql/resolvers/alert_management/integrations_resolver.rb b/app/graphql/resolvers/alert_management/integrations_resolver.rb
index cb7e73c2d1a..38d0f9880ca 100644
--- a/app/graphql/resolvers/alert_management/integrations_resolver.rb
+++ b/app/graphql/resolvers/alert_management/integrations_resolver.rb
@@ -54,7 +54,7 @@ module Resolvers
def expected_integration_types
[].tap do |types|
types << ::AlertManagement::HttpIntegration if http_integrations_allowed?
- types << ::PrometheusService if prometheus_integrations_allowed?
+ types << ::Integrations::Prometheus if prometheus_integrations_allowed?
end
end
end
diff --git a/app/graphql/resolvers/projects/jira_projects_resolver.rb b/app/graphql/resolvers/projects/jira_projects_resolver.rb
index de85e8c42e6..864acb6d759 100644
--- a/app/graphql/resolvers/projects/jira_projects_resolver.rb
+++ b/app/graphql/resolvers/projects/jira_projects_resolver.rb
@@ -34,16 +34,16 @@ module Resolvers
private
- alias_method :jira_service, :object
+ alias_method :jira_integration, :object
def project
- jira_service&.project
+ jira_integration&.project
end
def jira_projects(name:)
args = { query: name }.compact
- Jira::Requests::Projects::ListService.new(project.jira_service, args).execute
+ Jira::Requests::Projects::ListService.new(project.jira_integration, args).execute
end
end
end
diff --git a/app/graphql/types/alert_management/integration_type.rb b/app/graphql/types/alert_management/integration_type.rb
index d26d7348765..6cbc17cdbfb 100644
--- a/app/graphql/types/alert_management/integration_type.rb
+++ b/app/graphql/types/alert_management/integration_type.rb
@@ -43,7 +43,7 @@ module Types
definition_methods do
def resolve_type(object, context)
- if object.is_a?(::PrometheusService)
+ if object.is_a?(::Integrations::Prometheus)
Types::AlertManagement::PrometheusIntegrationType
else
Types::AlertManagement::HttpIntegrationType
diff --git a/app/helpers/custom_metrics_helper.rb b/app/helpers/custom_metrics_helper.rb
index 5ea386e268d..9fbfe377c61 100644
--- a/app/helpers/custom_metrics_helper.rb
+++ b/app/helpers/custom_metrics_helper.rb
@@ -5,7 +5,7 @@ module CustomMetricsHelper
{
'custom-metrics-path' => url_for([project, metric]),
'metric-persisted' => metric.persisted?.to_s,
- 'edit-project-service-path' => edit_project_service_path(project, PrometheusService),
+ 'edit-project-service-path' => edit_project_service_path(project, ::Integrations::Prometheus),
'validate-query-path' => validate_query_project_prometheus_metrics_path(project),
'title' => metric.title.to_s,
'query' => metric.query.to_s,
diff --git a/app/helpers/operations_helper.rb b/app/helpers/operations_helper.rb
index fb410c46128..553bd0d2ebf 100644
--- a/app/helpers/operations_helper.rb
+++ b/app/helpers/operations_helper.rb
@@ -5,7 +5,7 @@ module OperationsHelper
def prometheus_service
strong_memoize(:prometheus_service) do
- @project.find_or_initialize_service(::PrometheusService.to_param)
+ @project.find_or_initialize_service(::Integrations::Prometheus.to_param)
end
end
diff --git a/app/models/clusters/integrations/prometheus.rb b/app/models/clusters/integrations/prometheus.rb
index 0a01ac5d1ce..be7ec72f785 100644
--- a/app/models/clusters/integrations/prometheus.rb
+++ b/app/models/clusters/integrations/prometheus.rb
@@ -47,12 +47,12 @@ module Clusters
def activate_project_services
::Clusters::Applications::ActivateServiceWorker
- .perform_async(cluster_id, ::PrometheusService.to_param) # rubocop:disable CodeReuse/ServiceClass
+ .perform_async(cluster_id, ::Integrations::Prometheus.to_param)
end
def deactivate_project_services
::Clusters::Applications::DeactivateServiceWorker
- .perform_async(cluster_id, ::PrometheusService.to_param) # rubocop:disable CodeReuse/ServiceClass
+ .perform_async(cluster_id, ::Integrations::Prometheus.to_param)
end
end
end
diff --git a/app/models/integration.rb b/app/models/integration.rb
index 333f3020736..363131dc03a 100644
--- a/app/models/integration.rb
+++ b/app/models/integration.rb
@@ -48,8 +48,12 @@ class Integration < ApplicationRecord
flowdock
hangouts_chat
irker
+ jenkins jira
packagist pipelines_email pivotaltracker pushover
mattermost mattermost_slash_commands microsoft_teams mock_ci mock_monitoring
+ redmine
+ slack slack_slash_commands
+ teamcity
].to_set.freeze
def self.renamed?(name)
diff --git a/app/models/integrations/base_monitoring.rb b/app/models/integrations/base_monitoring.rb
new file mode 100644
index 00000000000..280eeda7c6c
--- /dev/null
+++ b/app/models/integrations/base_monitoring.rb
@@ -0,0 +1,23 @@
+# frozen_string_literal: true
+
+# Base class for monitoring services
+#
+# These services integrate with a deployment solution like Prometheus
+# to provide additional features for environments.
+module Integrations
+ class BaseMonitoring < Integration
+ default_value_for :category, 'monitoring'
+
+ def self.supported_events
+ %w()
+ end
+
+ def can_query?
+ raise NotImplementedError
+ end
+
+ def query(_, *_)
+ raise NotImplementedError
+ end
+ end
+end
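`BaseMonitoring` only fixes the category and declares `can_query?`/`query` as the contract that concrete monitoring integrations supply (such as the `Integrations::Prometheus` class later in this diff). A hypothetical, standalone sketch of a conforming subclass; the stand-in base class exists only so the snippet runs outside Rails:

```ruby
# Stand-in for the Rails-backed Integration/BaseMonitoring hierarchy; only the
# can_query?/query contract from the diff above matters here.
class FakeBaseMonitoring
  def can_query?
    raise NotImplementedError
  end

  def query(_, *_)
    raise NotImplementedError
  end
end

class FakeUptimeMonitoring < FakeBaseMonitoring
  def can_query?
    true
  end

  def query(query_name, *args)
    # A real adapter would call out to its monitoring backend here.
    { query: query_name, args: args, up: true }
  end
end

p FakeUptimeMonitoring.new.query(:environment_uptime, 'production')
# => {:query=>:environment_uptime, :args=>["production"], :up=>true}
```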
diff --git a/app/models/integrations/jira.rb b/app/models/integrations/jira.rb
index aa143cc28e1..40734a29724 100644
--- a/app/models/integrations/jira.rb
+++ b/app/models/integrations/jira.rb
@@ -272,6 +272,10 @@ module Integrations
test(nil)[:success]
end
+ def configured?
+ active? && valid_connection?
+ end
+
def test(_)
result = server_info
success = result.present?
diff --git a/app/models/integrations/mock_monitoring.rb b/app/models/integrations/mock_monitoring.rb
new file mode 100644
index 00000000000..235c8e6fefc
--- /dev/null
+++ b/app/models/integrations/mock_monitoring.rb
@@ -0,0 +1,25 @@
+# frozen_string_literal: true
+
+module Integrations
+ class MockMonitoring < BaseMonitoring
+ def title
+ 'Mock monitoring'
+ end
+
+ def description
+ 'Mock monitoring service'
+ end
+
+ def self.to_param
+ 'mock_monitoring'
+ end
+
+ def metrics(environment)
+ Gitlab::Json.parse(File.read(Rails.root + 'spec/fixtures/metrics.json'))
+ end
+
+ def can_test?
+ false
+ end
+ end
+end
diff --git a/app/models/integrations/prometheus.rb b/app/models/integrations/prometheus.rb
new file mode 100644
index 00000000000..f395579df3a
--- /dev/null
+++ b/app/models/integrations/prometheus.rb
@@ -0,0 +1,205 @@
+# frozen_string_literal: true
+
+module Integrations
+ class Prometheus < BaseMonitoring
+ include PrometheusAdapter
+
+ # Access to prometheus is directly through the API
+ prop_accessor :api_url
+ prop_accessor :google_iap_service_account_json
+ prop_accessor :google_iap_audience_client_id
+ boolean_accessor :manual_configuration
+
+ # We need to allow the self-monitoring project to connect to the internal
+ # Prometheus instance.
+ # Since the internal Prometheus instance is usually a localhost URL, we need
+ # to allow localhost URLs when the following conditions are true:
+ # 1. project is the self-monitoring project.
+ # 2. api_url is the internal Prometheus URL.
+ with_options presence: true do
+ validates :api_url, public_url: true, if: ->(object) { object.manual_configuration? && !object.allow_local_api_url? }
+ validates :api_url, url: true, if: ->(object) { object.manual_configuration? && object.allow_local_api_url? }
+ end
+
+ before_save :synchronize_service_state
+
+ after_save :clear_reactive_cache!
+
+ after_commit :track_events
+
+ after_create_commit :create_default_alerts
+
+ scope :preload_project, -> { preload(:project) }
+ scope :with_clusters_with_cilium, -> { joins(project: [:clusters]).merge(Clusters::Cluster.with_available_cilium) }
+
+ def initialize_properties
+ if properties.nil?
+ self.properties = {}
+ end
+ end
+
+ def show_active_box?
+ false
+ end
+
+ def title
+ 'Prometheus'
+ end
+
+ def description
+ s_('PrometheusService|Monitor application health with Prometheus metrics and dashboards')
+ end
+
+ def self.to_param
+ 'prometheus'
+ end
+
+ def fields
+ [
+ {
+ type: 'checkbox',
+ name: 'manual_configuration',
+ title: s_('PrometheusService|Active'),
+ help: s_('PrometheusService|Select this checkbox to override the auto configuration settings with your own settings.'),
+ required: true
+ },
+ {
+ type: 'text',
+ name: 'api_url',
+ title: 'API URL',
+ placeholder: s_('PrometheusService|https://prometheus.example.com/'),
+ help: s_('PrometheusService|The Prometheus API base URL.'),
+ required: true
+ },
+ {
+ type: 'text',
+ name: 'google_iap_audience_client_id',
+ title: 'Google IAP Audience Client ID',
+ placeholder: s_('PrometheusService|IAP_CLIENT_ID.apps.googleusercontent.com'),
+ help: s_('PrometheusService|The ID of the IAP-secured resource.'),
+ autocomplete: 'off',
+ required: false
+ },
+ {
+ type: 'textarea',
+ name: 'google_iap_service_account_json',
+ title: 'Google IAP Service Account JSON',
+ placeholder: s_('PrometheusService|{ "type": "service_account", "project_id": ... }'),
+ help: s_('PrometheusService|The contents of the credentials.json file of your service account.'),
+ required: false
+ }
+ ]
+ end
+
+ # Check we can connect to the Prometheus API
+ def test(*args)
+ prometheus_client.ping
+ { success: true, result: 'Checked API endpoint' }
+ rescue Gitlab::PrometheusClient::Error => err
+ { success: false, result: err }
+ end
+
+ def prometheus_client
+ return unless should_return_client?
+
+ options = prometheus_client_default_options.merge(
+ allow_local_requests: allow_local_api_url?
+ )
+
+ if behind_iap?
+ # Adds the Authorization header
+ options[:headers] = iap_client.apply({})
+ end
+
+ Gitlab::PrometheusClient.new(api_url, options)
+ end
+
+ def prometheus_available?
+ return false if template?
+ return false unless project
+
+ project.all_clusters.enabled.eager_load(:integration_prometheus).any? do |cluster|
+ cluster.integration_prometheus_available?
+ end
+ end
+
+ def allow_local_api_url?
+ allow_local_requests_from_web_hooks_and_services? ||
+ (self_monitoring_project? && internal_prometheus_url?)
+ end
+
+ def configured?
+ should_return_client?
+ end
+
+ private
+
+ def self_monitoring_project?
+ project && project.id == current_settings.self_monitoring_project_id
+ end
+
+ def internal_prometheus_url?
+ api_url.present? && api_url == ::Gitlab::Prometheus::Internal.uri
+ end
+
+ def allow_local_requests_from_web_hooks_and_services?
+ current_settings.allow_local_requests_from_web_hooks_and_services?
+ end
+
+ def should_return_client?
+ api_url.present? && manual_configuration? && active? && valid?
+ end
+
+ def current_settings
+ Gitlab::CurrentSettings.current_application_settings
+ end
+
+ def synchronize_service_state
+ self.active = prometheus_available? || manual_configuration?
+
+ true
+ end
+
+ def track_events
+ if enabled_manual_prometheus?
+ Gitlab::Tracking.event('cluster:services:prometheus', 'enabled_manual_prometheus')
+ elsif disabled_manual_prometheus?
+ Gitlab::Tracking.event('cluster:services:prometheus', 'disabled_manual_prometheus')
+ end
+
+ true
+ end
+
+ def enabled_manual_prometheus?
+ manual_configuration_changed? && manual_configuration?
+ end
+
+ def disabled_manual_prometheus?
+ manual_configuration_changed? && !manual_configuration?
+ end
+
+ def create_default_alerts
+ return unless project_id
+
+ ::Prometheus::CreateDefaultAlertsWorker.perform_async(project_id)
+ end
+
+ def behind_iap?
+ manual_configuration? && google_iap_audience_client_id.present? && google_iap_service_account_json.present?
+ end
+
+ def clean_google_iap_service_account
+ return unless google_iap_service_account_json
+
+ google_iap_service_account_json
+ .then { |json| Gitlab::Json.parse(json) }
+ .except('token_credential_uri')
+ end
+
+ def iap_client
+ @iap_client ||= Google::Auth::Credentials
+ .new(clean_google_iap_service_account, target_audience: google_iap_audience_client_id)
+ .client
+ end
+ end
+end
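In the moved class, `manual_configuration`, `api_url` and the URL validations feed `should_return_client?`, which gates `configured?`, `prometheus_client` and therefore `test`. A hedged, Rails-console-style sketch of exercising it (assumes `project` is a persisted Project and the API URL is reachable):

```ruby
# Sketch only: mirrors the find_or_initialize_service pattern used elsewhere
# in this MR (create_service.rb, post_creation_worker.rb).
integration = project.find_or_initialize_service(::Integrations::Prometheus.to_param)
integration.update!(manual_configuration: true, api_url: 'https://prometheus.example.com/')

integration.configured?  # => true once manually configured, active and valid
integration.test         # => { success: true, result: 'Checked API endpoint' } if the API answers ping
integration.prometheus_client # => a Gitlab::PrometheusClient, or nil when should_return_client? is false
```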
diff --git a/app/models/project.rb b/app/models/project.rb
index 5d71318dbef..ea88eda0959 100644
--- a/app/models/project.rb
+++ b/app/models/project.rb
@@ -172,25 +172,25 @@ class Project < ApplicationRecord
has_one :flowdock_integration, class_name: 'Integrations::Flowdock'
has_one :hangouts_chat_integration, class_name: 'Integrations::HangoutsChat'
has_one :irker_integration, class_name: 'Integrations::Irker'
- has_one :jenkins_service, class_name: 'Integrations::Jenkins'
- has_one :jira_service, class_name: 'Integrations::Jira'
+ has_one :jenkins_integration, class_name: 'Integrations::Jenkins'
+ has_one :jira_integration, class_name: 'Integrations::Jira'
has_one :mattermost_integration, class_name: 'Integrations::Mattermost'
has_one :mattermost_slash_commands_integration, class_name: 'Integrations::MattermostSlashCommands'
has_one :microsoft_teams_integration, class_name: 'Integrations::MicrosoftTeams'
has_one :mock_ci_integration, class_name: 'Integrations::MockCi'
- has_one :mock_monitoring_integration, class_name: 'MockMonitoringService'
+ has_one :mock_monitoring_integration, class_name: 'Integrations::MockMonitoring'
has_one :packagist_integration, class_name: 'Integrations::Packagist'
has_one :pipelines_email_integration, class_name: 'Integrations::PipelinesEmail'
has_one :pivotaltracker_integration, class_name: 'Integrations::Pivotaltracker'
+ has_one :prometheus_service, class_name: 'Integrations::Prometheus', inverse_of: :project
has_one :pushover_integration, class_name: 'Integrations::Pushover'
- has_one :redmine_service, class_name: 'Integrations::Redmine'
- has_one :slack_service, class_name: 'Integrations::Slack'
- has_one :slack_slash_commands_service, class_name: 'Integrations::SlackSlashCommands'
- has_one :teamcity_service, class_name: 'Integrations::Teamcity'
+ has_one :redmine_integration, class_name: 'Integrations::Redmine'
+ has_one :slack_integration, class_name: 'Integrations::Slack'
+ has_one :slack_slash_commands_integration, class_name: 'Integrations::SlackSlashCommands'
+ has_one :teamcity_integration, class_name: 'Integrations::Teamcity'
has_one :unify_circuit_service, class_name: 'Integrations::UnifyCircuit'
has_one :webex_teams_service, class_name: 'Integrations::WebexTeams'
has_one :youtrack_service, class_name: 'Integrations::Youtrack'
- has_one :prometheus_service, inverse_of: :project
has_one :root_of_fork_network,
foreign_key: 'root_project_id',
@@ -542,7 +542,7 @@ class Project < ApplicationRecord
scope :for_milestones, ->(ids) { joins(:milestones).where('milestones.id' => ids).distinct }
scope :with_push, -> { joins(:events).merge(Event.pushed_action) }
scope :with_project_feature, -> { joins('LEFT JOIN project_features ON projects.id = project_features.project_id') }
- scope :with_active_jira_services, -> { joins(:integrations).merge(::Integrations::Jira.active) }
+ scope :with_active_jira_integrations, -> { joins(:integrations).merge(::Integrations::Jira.active) }
scope :with_jira_dvcs_cloud, -> { joins(:feature_usage).merge(ProjectFeatureUsage.with_jira_dvcs_integration_enabled(cloud: true)) }
scope :with_jira_dvcs_server, -> { joins(:feature_usage).merge(ProjectFeatureUsage.with_jira_dvcs_integration_enabled(cloud: false)) }
scope :inc_routes, -> { includes(:route, namespace: :route) }
diff --git a/app/models/project_services/mock_monitoring_service.rb b/app/models/project_services/mock_monitoring_service.rb
deleted file mode 100644
index 25ae0f6b60d..00000000000
--- a/app/models/project_services/mock_monitoring_service.rb
+++ /dev/null
@@ -1,23 +0,0 @@
-# frozen_string_literal: true
-
-class MockMonitoringService < MonitoringService
- def title
- 'Mock monitoring'
- end
-
- def description
- 'Mock monitoring service'
- end
-
- def self.to_param
- 'mock_monitoring'
- end
-
- def metrics(environment)
- Gitlab::Json.parse(File.read(Rails.root + 'spec/fixtures/metrics.json'))
- end
-
- def can_test?
- false
- end
-end
diff --git a/app/models/project_services/monitoring_service.rb b/app/models/project_services/monitoring_service.rb
deleted file mode 100644
index ea65a200027..00000000000
--- a/app/models/project_services/monitoring_service.rb
+++ /dev/null
@@ -1,21 +0,0 @@
-# frozen_string_literal: true
-
-# Base class for monitoring services
-#
-# These services integrate with a deployment solution like Prometheus
-# to provide additional features for environments.
-class MonitoringService < Integration
- default_value_for :category, 'monitoring'
-
- def self.supported_events
- %w()
- end
-
- def can_query?
- raise NotImplementedError
- end
-
- def query(_, *_)
- raise NotImplementedError
- end
-end
diff --git a/app/models/project_services/prometheus_service.rb b/app/models/project_services/prometheus_service.rb
deleted file mode 100644
index a289c1c2afb..00000000000
--- a/app/models/project_services/prometheus_service.rb
+++ /dev/null
@@ -1,203 +0,0 @@
-# frozen_string_literal: true
-
-class PrometheusService < MonitoringService
- include PrometheusAdapter
-
- # Access to prometheus is directly through the API
- prop_accessor :api_url
- prop_accessor :google_iap_service_account_json
- prop_accessor :google_iap_audience_client_id
- boolean_accessor :manual_configuration
-
- # We need to allow the self-monitoring project to connect to the internal
- # Prometheus instance.
- # Since the internal Prometheus instance is usually a localhost URL, we need
- # to allow localhost URLs when the following conditions are true:
- # 1. project is the self-monitoring project.
- # 2. api_url is the internal Prometheus URL.
- with_options presence: true do
- validates :api_url, public_url: true, if: ->(object) { object.manual_configuration? && !object.allow_local_api_url? }
- validates :api_url, url: true, if: ->(object) { object.manual_configuration? && object.allow_local_api_url? }
- end
-
- before_save :synchronize_service_state
-
- after_save :clear_reactive_cache!
-
- after_commit :track_events
-
- after_create_commit :create_default_alerts
-
- scope :preload_project, -> { preload(:project) }
- scope :with_clusters_with_cilium, -> { joins(project: [:clusters]).merge(Clusters::Cluster.with_available_cilium) }
-
- def initialize_properties
- if properties.nil?
- self.properties = {}
- end
- end
-
- def show_active_box?
- false
- end
-
- def title
- 'Prometheus'
- end
-
- def description
- s_('PrometheusService|Monitor application health with Prometheus metrics and dashboards')
- end
-
- def self.to_param
- 'prometheus'
- end
-
- def fields
- [
- {
- type: 'checkbox',
- name: 'manual_configuration',
- title: s_('PrometheusService|Active'),
- help: s_('PrometheusService|Select this checkbox to override the auto configuration settings with your own settings.'),
- required: true
- },
- {
- type: 'text',
- name: 'api_url',
- title: 'API URL',
- placeholder: s_('PrometheusService|https://prometheus.example.com/'),
- help: s_('PrometheusService|The Prometheus API base URL.'),
- required: true
- },
- {
- type: 'text',
- name: 'google_iap_audience_client_id',
- title: 'Google IAP Audience Client ID',
- placeholder: s_('PrometheusService|IAP_CLIENT_ID.apps.googleusercontent.com'),
- help: s_('PrometheusService|PrometheusService|The ID of the IAP-secured resource.'),
- autocomplete: 'off',
- required: false
- },
- {
- type: 'textarea',
- name: 'google_iap_service_account_json',
- title: 'Google IAP Service Account JSON',
- placeholder: s_('PrometheusService|{ "type": "service_account", "project_id": ... }'),
- help: s_('PrometheusService|The contents of the credentials.json file of your service account.'),
- required: false
- }
- ]
- end
-
- # Check we can connect to the Prometheus API
- def test(*args)
- prometheus_client.ping
- { success: true, result: 'Checked API endpoint' }
- rescue Gitlab::PrometheusClient::Error => err
- { success: false, result: err }
- end
-
- def prometheus_client
- return unless should_return_client?
-
- options = prometheus_client_default_options.merge(
- allow_local_requests: allow_local_api_url?
- )
-
- if behind_iap?
- # Adds the Authorization header
- options[:headers] = iap_client.apply({})
- end
-
- Gitlab::PrometheusClient.new(api_url, options)
- end
-
- def prometheus_available?
- return false if template?
- return false unless project
-
- project.all_clusters.enabled.eager_load(:integration_prometheus).any? do |cluster|
- cluster.integration_prometheus_available?
- end
- end
-
- def allow_local_api_url?
- allow_local_requests_from_web_hooks_and_services? ||
- (self_monitoring_project? && internal_prometheus_url?)
- end
-
- def configured?
- should_return_client?
- end
-
- private
-
- def self_monitoring_project?
- project && project.id == current_settings.self_monitoring_project_id
- end
-
- def internal_prometheus_url?
- api_url.present? && api_url == ::Gitlab::Prometheus::Internal.uri
- end
-
- def allow_local_requests_from_web_hooks_and_services?
- current_settings.allow_local_requests_from_web_hooks_and_services?
- end
-
- def should_return_client?
- api_url.present? && manual_configuration? && active? && valid?
- end
-
- def current_settings
- Gitlab::CurrentSettings.current_application_settings
- end
-
- def synchronize_service_state
- self.active = prometheus_available? || manual_configuration?
-
- true
- end
-
- def track_events
- if enabled_manual_prometheus?
- Gitlab::Tracking.event('cluster:services:prometheus', 'enabled_manual_prometheus')
- elsif disabled_manual_prometheus?
- Gitlab::Tracking.event('cluster:services:prometheus', 'disabled_manual_prometheus')
- end
-
- true
- end
-
- def enabled_manual_prometheus?
- manual_configuration_changed? && manual_configuration?
- end
-
- def disabled_manual_prometheus?
- manual_configuration_changed? && !manual_configuration?
- end
-
- def create_default_alerts
- return unless project_id
-
- Prometheus::CreateDefaultAlertsWorker.perform_async(project_id)
- end
-
- def behind_iap?
- manual_configuration? && google_iap_audience_client_id.present? && google_iap_service_account_json.present?
- end
-
- def clean_google_iap_service_account
- return unless google_iap_service_account_json
-
- google_iap_service_account_json
- .then { |json| Gitlab::Json.parse(json) }
- .except('token_credential_uri')
- end
-
- def iap_client
- @iap_client ||= Google::Auth::Credentials
- .new(clean_google_iap_service_account, target_audience: google_iap_audience_client_id)
- .client
- end
-end
diff --git a/app/serializers/merge_request_diff_entity.rb b/app/serializers/merge_request_diff_entity.rb
index e197c3d0fbb..02b81b707b9 100644
--- a/app/serializers/merge_request_diff_entity.rb
+++ b/app/serializers/merge_request_diff_entity.rb
@@ -16,6 +16,7 @@ class MergeRequestDiffEntity < Grape::Entity
end
expose :created_at
+ expose :state
expose :commits_count
expose :latest?, as: :latest
diff --git a/app/services/ci/pipeline_schedules/calculate_next_run_service.rb b/app/services/ci/pipeline_schedules/calculate_next_run_service.rb
index 9978b2d4775..9c8f6b47288 100644
--- a/app/services/ci/pipeline_schedules/calculate_next_run_service.rb
+++ b/app/services/ci/pipeline_schedules/calculate_next_run_service.rb
@@ -12,15 +12,16 @@ module Ci
return fallback_method.call unless plan_cron&.cron_valid?
now = Time.zone.now
+ plan_min_run = plan_cron.next_time_from(now)
schedule_next_run = schedule_cron.next_time_from(now)
- return schedule_next_run if worker_cron.match?(schedule_next_run) && plan_cron.match?(schedule_next_run)
+ return schedule_next_run if worker_cron.match?(schedule_next_run) && plan_min_run <= schedule_next_run
- plan_next_run = plan_cron.next_time_from(now)
+ plan_next_run = plan_cron.next_time_from(schedule_next_run)
return plan_next_run if worker_cron.match?(plan_next_run)
- worker_next_run = worker_cron.next_time_from(now)
- return worker_next_run if plan_cron.match?(worker_next_run)
+ worker_next_run = worker_cron.next_time_from(schedule_next_run)
+ return worker_next_run if plan_min_run <= worker_next_run
worker_cron.next_time_from(plan_next_run)
end
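The hunk above replaces the exact `plan_cron.match?(schedule_next_run)` check with a lower-bound comparison against `plan_min_run`, and advances the later fallbacks from `schedule_next_run` instead of `now`. A hypothetical worked example of the new comparison (plain `Time` objects stand in for the cron parsers the real service uses):

```ruby
require 'time'

now               = Time.parse('2021-06-01 10:10 UTC')
plan_min_run      = Time.parse('2021-06-01 11:00 UTC') # earliest run the plan limit allows
schedule_next_run = Time.parse('2021-06-01 12:00 UTC') # what the user's schedule cron asks for

# Previously 12:00 also had to *match* the plan cron exactly, so a schedule
# that respected the plan's frequency limit but was off-phase with it fell
# through to later fallbacks. Now any time at or after the plan minimum is
# accepted, still subject to the worker cron match.
puts schedule_next_run if plan_min_run <= schedule_next_run
# => 2021-06-01 12:00:00 UTC
```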
diff --git a/app/services/jira/requests/base.rb b/app/services/jira/requests/base.rb
index bae8298d5c8..e4e2736ca2f 100644
--- a/app/services/jira/requests/base.rb
+++ b/app/services/jira/requests/base.rb
@@ -7,20 +7,20 @@ module Jira
JIRA_API_VERSION = 2
- def initialize(jira_service, params = {})
- @project = jira_service&.project
- @jira_service = jira_service
+ def initialize(jira_integration, params = {})
+ @project = jira_integration&.project
+ @jira_integration = jira_integration
end
def execute
- return ServiceResponse.error(message: _('Jira service not configured.')) unless jira_service&.active?
+ return ServiceResponse.error(message: _('Jira service not configured.')) unless jira_integration&.active?
request
end
private
- attr_reader :jira_service, :project
+ attr_reader :jira_integration, :project
# We have to add the context_path here because the Jira client is not taking it into account
def base_api_url
@@ -37,7 +37,7 @@ module Jira
end
def client
- @client ||= jira_service.client
+ @client ||= jira_integration.client
end
def request
diff --git a/app/services/jira/requests/projects/list_service.rb b/app/services/jira/requests/projects/list_service.rb
index 373c536974a..ac9e9bf0be9 100644
--- a/app/services/jira/requests/projects/list_service.rb
+++ b/app/services/jira/requests/projects/list_service.rb
@@ -6,8 +6,8 @@ module Jira
class ListService < Base
extend ::Gitlab::Utils::Override
- def initialize(jira_service, params = {})
- super(jira_service, params)
+ def initialize(jira_integration, params = {})
+ super(jira_integration, params)
@query = params[:query]
end
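With the `jira_service` → `jira_integration` rename threaded through `Jira::Requests::Base` and this subclass, callers pass `project.jira_integration`, as the GraphQL resolver earlier in the diff already does. A hedged usage sketch:

```ruby
# Assumes `project` has a Jira integration configured; mirrors the call in
# app/graphql/resolvers/projects/jira_projects_resolver.rb above.
response = Jira::Requests::Projects::ListService
  .new(project.jira_integration, query: 'backend')
  .execute

response.error? # => true (message: 'Jira service not configured.') when the integration is inactive
```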
diff --git a/app/services/jira_import/users_importer.rb b/app/services/jira_import/users_importer.rb
index 5b2f91efc38..667a2836acc 100644
--- a/app/services/jira_import/users_importer.rb
+++ b/app/services/jira_import/users_importer.rb
@@ -32,9 +32,9 @@ module JiraImport
end
def user_mapper_service_factory
- if project.jira_service.data_fields.deployment_server?
+ if project.jira_integration.data_fields.deployment_server?
ServerUsersMapperService.new(user, project, start_at)
- elsif project.jira_service.data_fields.deployment_cloud?
+ elsif project.jira_integration.data_fields.deployment_cloud?
CloudUsersMapperService.new(user, project, start_at)
else
raise ArgumentError
diff --git a/app/services/jira_import/users_mapper_service.rb b/app/services/jira_import/users_mapper_service.rb
index 6c8610bfbf3..760f06a1cfb 100644
--- a/app/services/jira_import/users_mapper_service.rb
+++ b/app/services/jira_import/users_mapper_service.rb
@@ -13,7 +13,7 @@ module JiraImport
def initialize(current_user, project, start_at)
@current_user = current_user
@project = project
- @jira_service = project.jira_service
+ @jira_integration = project.jira_integration
@start_at = start_at
end
@@ -29,14 +29,14 @@ module JiraImport
private
- attr_reader :current_user, :project, :jira_service, :start_at
+ attr_reader :current_user, :project, :jira_integration, :start_at
def jira_users
@jira_users ||= client.get(url)
end
def client
- @client ||= jira_service.client
+ @client ||= jira_integration.client
end
def url
diff --git a/app/services/metrics_service.rb b/app/services/metrics_service.rb
index 222a5c8c79c..d27328f89cd 100644
--- a/app/services/metrics_service.rb
+++ b/app/services/metrics_service.rb
@@ -4,7 +4,7 @@ require 'prometheus/client/formats/text'
class MetricsService
def prometheus_metrics_text
- Prometheus::Client::Formats::Text.marshal_multiprocess(multiprocess_metrics_path)
+ ::Prometheus::Client::Formats::Text.marshal_multiprocess(multiprocess_metrics_path)
end
def metrics_text
diff --git a/app/services/projects/create_service.rb b/app/services/projects/create_service.rb
index 7dd9280e5b1..e2bde27da63 100644
--- a/app/services/projects/create_service.rb
+++ b/app/services/projects/create_service.rb
@@ -193,7 +193,7 @@ module Projects
# Deprecated: https://gitlab.com/gitlab-org/gitlab/-/issues/326665
def create_prometheus_service
- service = @project.find_or_initialize_service(::PrometheusService.to_param)
+ service = @project.find_or_initialize_service(::Integrations::Prometheus.to_param)
# If the service has already been inserted in the database, that
# means it came from a template, and there's nothing more to do.
diff --git a/app/services/projects/operations/update_service.rb b/app/services/projects/operations/update_service.rb
index c0734171ee5..4fb2cede7ce 100644
--- a/app/services/projects/operations/update_service.rb
+++ b/app/services/projects/operations/update_service.rb
@@ -102,7 +102,7 @@ module Projects
def prometheus_integration_params
return {} unless attrs = params[:prometheus_integration_attributes]
- service = project.find_or_initialize_service(::PrometheusService.to_param)
+ service = project.find_or_initialize_service(::Integrations::Prometheus.to_param)
service.assign_attributes(attrs)
{ prometheus_service_attributes: service.attributes.except(*%w(id project_id created_at updated_at)) }
diff --git a/app/views/admin/dashboard/index.html.haml b/app/views/admin/dashboard/index.html.haml
index 58c65bdc8c7..ec3daf6c494 100644
--- a/app/views/admin/dashboard/index.html.haml
+++ b/app/views/admin/dashboard/index.html.haml
@@ -3,15 +3,14 @@
- billable_users_url = help_page_path('subscriptions/self_managed/index', anchor: 'billable-users')
- billable_users_link_start = ''.html_safe % { url: billable_users_url }
+= render_if_exists 'shared/qrtly_reconciliation_alert'
+
- if @notices
- @notices.each do |notice|
.js-vue-alert{ 'v-cloak': true, data: { variant: notice[:type],
dismissible: true.to_s } }
= notice[:message].html_safe
-- if Gitlab.ee? && display_upcoming_reconciliation_alert?
- #js-qrtly-reconciliation-alert{ data: upcoming_reconciliation_hash }
-
- if @license.present?
.license-panel.gl-mt-5
= render_if_exists 'admin/licenses/summary'
diff --git a/app/views/clusters/clusters/aws/_new.html.haml b/app/views/clusters/clusters/aws/_new.html.haml
index 93e8b1241a8..bdd4b76bba0 100644
--- a/app/views/clusters/clusters/aws/_new.html.haml
+++ b/app/views/clusters/clusters/aws/_new.html.haml
@@ -12,6 +12,6 @@
'role-arn' => @aws_role.role_arn,
'instance-types' => @instance_types,
'kubernetes-integration-help-path' => help_page_path('user/project/clusters/index'),
- 'account-and-external-ids-help-path' => help_page_path('user/project/clusters/add_eks_clusters.md', anchor: 'new-eks-cluster'),
- 'create-role-arn-help-path' => help_page_path('user/project/clusters/add_eks_clusters.md', anchor: 'new-eks-cluster'),
+ 'account-and-external-ids-help-path' => help_page_path('user/project/clusters/add_eks_clusters.md', anchor: 'create-a-new-certificate-based-eks-cluster'),
+ 'create-role-arn-help-path' => help_page_path('user/project/clusters/add_eks_clusters.md', anchor: 'create-a-new-certificate-based-eks-cluster'),
'external-link-icon' => sprite_icon('external-link') } }
diff --git a/app/views/groups/show.html.haml b/app/views/groups/show.html.haml
index 628425bf463..76850f0a884 100644
--- a/app/views/groups/show.html.haml
+++ b/app/views/groups/show.html.haml
@@ -6,6 +6,8 @@
- if show_thanks_for_purchase_banner?
= render_if_exists 'shared/thanks_for_purchase_banner', plan_title: plan_title, quantity: params[:purchased_quantity].to_i
+= render_if_exists 'shared/qrtly_reconciliation_alert', group: @group
+
- if show_invite_banner?(@group)
= content_for :group_invite_members_banner do
.container-fluid.container-limited{ class: "gl-pb-2! gl-pt-6! #{@content_class}" }
diff --git a/app/views/layouts/_page.html.haml b/app/views/layouts/_page.html.haml
index 2b63e2c647c..7eaa6190a05 100644
--- a/app/views/layouts/_page.html.haml
+++ b/app/views/layouts/_page.html.haml
@@ -19,6 +19,7 @@
= render_if_exists "layouts/header/ee_subscribable_banner"
= render_if_exists "shared/namespace_storage_limit_alert"
= render_if_exists "shared/new_user_signups_cap_reached_alert"
+ = yield :page_level_alert
= yield :customize_homepage_banner
- unless @hide_breadcrumbs
= render "layouts/nav/breadcrumbs"
diff --git a/app/views/projects/import/jira/show.html.haml b/app/views/projects/import/jira/show.html.haml
index 3c0664e4d5f..29296ce23c9 100644
--- a/app/views/projects/import/jira/show.html.haml
+++ b/app/views/projects/import/jira/show.html.haml
@@ -1,7 +1,7 @@
.js-jira-import-root{ data: { project_path: @project.full_path,
issues_path: project_issues_path(@project),
jira_integration_path: edit_project_service_path(@project, :jira),
- is_jira_configured: @project.jira_service&.active? && @project.jira_service&.valid_connection?.to_s,
+ is_jira_configured: @project.jira_integration&.configured?.to_s,
in_progress_illustration: image_path('illustrations/export-import.svg'),
project_id: @project.id,
setup_illustration: image_path('illustrations/manual_action.svg') } }
diff --git a/app/views/projects/issues/index.html.haml b/app/views/projects/issues/index.html.haml
index 3e8442eee86..ecf10cd4821 100644
--- a/app/views/projects/issues/index.html.haml
+++ b/app/views/projects/issues/index.html.haml
@@ -9,7 +9,7 @@
= auto_discovery_link_tag(:atom, safe_params.merge(rss_url_options).to_h, title: "#{@project.name} issues")
.js-jira-issues-import-status{ data: { can_edit: can?(current_user, :admin_project, @project).to_s,
- is_jira_configured: @project.jira_service.present?.to_s,
+ is_jira_configured: @project.jira_integration.present?.to_s,
issues_path: project_issues_path(@project),
project_path: @project.full_path } }
diff --git a/app/views/projects/prometheus/metrics/edit.html.haml b/app/views/projects/prometheus/metrics/edit.html.haml
index 15a9c922ca6..d308824571e 100644
--- a/app/views/projects/prometheus/metrics/edit.html.haml
+++ b/app/views/projects/prometheus/metrics/edit.html.haml
@@ -1,6 +1,6 @@
- add_to_breadcrumbs _("Settings"), edit_project_path(@project)
- add_to_breadcrumbs _("Integrations"), project_settings_integrations_path(@project)
-- add_to_breadcrumbs "Prometheus", edit_project_service_path(@project, PrometheusService)
+- add_to_breadcrumbs "Prometheus", edit_project_service_path(@project, ::Integrations::Prometheus)
- breadcrumb_title s_('Metrics|Edit metric')
- page_title @metric.title, s_('Metrics|Edit metric')
= render 'form', project: @project, metric: @metric
diff --git a/app/views/projects/prometheus/metrics/new.html.haml b/app/views/projects/prometheus/metrics/new.html.haml
index fa925d090cb..8415ec9ee41 100644
--- a/app/views/projects/prometheus/metrics/new.html.haml
+++ b/app/views/projects/prometheus/metrics/new.html.haml
@@ -1,6 +1,6 @@
- add_to_breadcrumbs _("Settings"), edit_project_path(@project)
- add_to_breadcrumbs _("Integrations"), project_settings_integrations_path(@project)
-- add_to_breadcrumbs "Prometheus", edit_project_service_path(@project, PrometheusService)
+- add_to_breadcrumbs "Prometheus", edit_project_service_path(@project, ::Integrations::Prometheus)
- breadcrumb_title s_('Metrics|New metric')
- page_title s_('Metrics|New metric')
= render 'form', project: @project, metric: @metric
diff --git a/app/views/shared/issuable/_bulk_update_sidebar.html.haml b/app/views/shared/issuable/_bulk_update_sidebar.html.haml
index bbbb728d048..3a526a9f306 100644
--- a/app/views/shared/issuable/_bulk_update_sidebar.html.haml
+++ b/app/views/shared/issuable/_bulk_update_sidebar.html.haml
@@ -6,21 +6,13 @@
= form_tag [:bulk_update, @project, type], method: :post, class: "bulk-update" do
.block.issuable-sidebar-header
.filter-item.inline.update-issues-btn.float-left
- = button_tag _('Update all'), class: "gl-button btn update-selected-issues btn-confirm", disabled: true
+ = button_tag _('Update all'), class: "gl-button btn js-update-selected-issues btn-confirm", disabled: true
= button_tag _('Cancel'), class: "gl-button btn btn-default js-bulk-update-menu-hide float-right"
- if params[:state] != 'merged'
.block
.title
= _('Status')
- .filter-item
- = dropdown_tag(_("Select status"), options: { toggle_class: "js-issue-status", title: _("Change status"), dropdown_class: "dropdown-menu-status dropdown-menu-selectable", data: { field_name: "update[state_event]", default_label: _("Status") } } ) do
- %ul
- %li
- %a{ href: "#", data: { id: "reopen" } }
- = _('Open')
- %li
- %a{ href: "#", data: { id: "close" } }
- = _('Closed')
+ .js-issue-status
.block
.title
= _('Assignee')
diff --git a/app/workers/projects/post_creation_worker.rb b/app/workers/projects/post_creation_worker.rb
index 1970f79729f..9065bb90e82 100644
--- a/app/workers/projects/post_creation_worker.rb
+++ b/app/workers/projects/post_creation_worker.rb
@@ -21,7 +21,7 @@ module Projects
private
def create_prometheus_service(project)
- service = project.find_or_initialize_service(::PrometheusService.to_param)
+ service = project.find_or_initialize_service(::Integrations::Prometheus.to_param)
# If the service has already been inserted in the database, that
# means it came from a template, and there's nothing more to do.
diff --git a/app/workers/prometheus/create_default_alerts_worker.rb b/app/workers/prometheus/create_default_alerts_worker.rb
index 0dba752ced1..9d163cd828e 100644
--- a/app/workers/prometheus/create_default_alerts_worker.rb
+++ b/app/workers/prometheus/create_default_alerts_worker.rb
@@ -15,7 +15,7 @@ module Prometheus
return unless project
- result = Prometheus::CreateDefaultAlertsService.new(project: project).execute
+ result = ::Prometheus::CreateDefaultAlertsService.new(project: project).execute
log_info(result.message) if result.error?
end
diff --git a/config/feature_flags/development/security_ci_lint_authorization.yml b/config/feature_flags/development/security_ci_lint_authorization.yml
deleted file mode 100644
index 73b3bd45727..00000000000
--- a/config/feature_flags/development/security_ci_lint_authorization.yml
+++ /dev/null
@@ -1,8 +0,0 @@
----
-name: security_ci_lint_authorization
-introduced_by_url: https://gitlab.com/gitlab-org/security/gitlab/-/merge_requests/1279
-rollout_issue_url: https://gitlab.com/gitlab-org/gitlab/-/issues/326708
-milestone: '14.0'
-type: development
-group: group::pipeline authoring
-default_enabled: false
diff --git a/config/initializers/7_prometheus_metrics.rb b/config/initializers/7_prometheus_metrics.rb
index 8dee21016f9..ebd251abe22 100644
--- a/config/initializers/7_prometheus_metrics.rb
+++ b/config/initializers/7_prometheus_metrics.rb
@@ -15,14 +15,14 @@ def prometheus_default_multiproc_dir
end
end
-Prometheus::Client.configure do |config|
+::Prometheus::Client.configure do |config|
config.logger = Gitlab::AppLogger
config.initial_mmap_file_size = 4 * 1024
config.multiprocess_files_dir = ENV['prometheus_multiproc_dir'] || prometheus_default_multiproc_dir
- config.pid_provider = Prometheus::PidProvider.method(:worker_id)
+ config.pid_provider = ::Prometheus::PidProvider.method(:worker_id)
end
Gitlab::Application.configure do |config|
@@ -43,7 +43,7 @@ if !Rails.env.test? && Gitlab::Metrics.prometheus_metrics_enabled?
# Thus, we order these events to run `reinitialize_on_pid_change` with `force: true` first.
Gitlab::Cluster::LifecycleEvents.on_master_start do
# Ensure that stale Prometheus metrics don't accumulate over time
- Prometheus::CleanupMultiprocDirService.new.execute
+ ::Prometheus::CleanupMultiprocDirService.new.execute
::Prometheus::Client.reinitialize_on_pid_change(force: true)
@@ -64,7 +64,7 @@ if !Rails.env.test? && Gitlab::Metrics.prometheus_metrics_enabled?
end
Gitlab::Cluster::LifecycleEvents.on_worker_start do
- defined?(::Prometheus::Client.reinitialize_on_pid_change) && Prometheus::Client.reinitialize_on_pid_change
+ defined?(::Prometheus::Client.reinitialize_on_pid_change) && ::Prometheus::Client.reinitialize_on_pid_change
Gitlab::Metrics::Samplers::RubySampler.initialize_instance.start
Gitlab::Metrics::Samplers::DatabaseSampler.initialize_instance.start
diff --git a/db/migrate/20210614124111_add_devops_adoption_sast_dast_indexes.rb b/db/post_migrate/20210614124111_add_devops_adoption_sast_dast_indexes.rb
similarity index 100%
rename from db/migrate/20210614124111_add_devops_adoption_sast_dast_indexes.rb
rename to db/post_migrate/20210614124111_add_devops_adoption_sast_dast_indexes.rb
diff --git a/doc/administration/auditor_users.md b/doc/administration/auditor_users.md
index 96bfbd88ddf..5f31ed709f2 100644
--- a/doc/administration/auditor_users.md
+++ b/doc/administration/auditor_users.md
@@ -53,17 +53,16 @@ helpful:
you can create an Auditor user and then share the credentials with those users
to which you want to grant access.
-## Adding an Auditor user
+## Add an Auditor user
-To create a new Auditor user:
+To create an Auditor user:
-1. Create a new user or edit an existing one by navigating to
- **Admin Area > Users**. The option of the access level is located in
- the 'Access' section.
-
- 
-
-1. Select **Save changes** or **Create user** for the changes to take effect.
+1. On the top bar, select **Menu >** **{admin}** **Admin**.
+1. On the left sidebar, select **Overview > Users**.
+1. Create a new user or edit an existing one, and in the **Access** section
+   select **Auditor**.
+1. Select **Create user** or **Save changes**, depending on whether you
+   created a new user or edited an existing one.
To revoke Auditor permissions from a user, make them a regular user by
following the previous steps.
diff --git a/doc/administration/geo/disaster_recovery/background_verification.md b/doc/administration/geo/disaster_recovery/background_verification.md
index c09daeec824..f03cd64c14e 100644
--- a/doc/administration/geo/disaster_recovery/background_verification.md
+++ b/doc/administration/geo/disaster_recovery/background_verification.md
@@ -58,19 +58,25 @@ Feature.enable('geo_repository_verification')
## Repository verification
-Go to the **Admin Area > Geo** dashboard on the **primary** node and expand
-the **Verification information** section for that node to view automatic checksumming
-status for each data type. Successes are shown in green, pending work
-in gray, and failures in red.
+On the **primary** node:
-
+1. On the top bar, select **Menu >** **{admin}** **Admin**.
+1. On the left sidebar, select **Geo > Nodes**.
+1. Expand the **Verification information** tab for that node to view automatic checksumming
+ status for repositories and wikis. Successes are shown in green, pending work
+ in gray, and failures in red.
-Go to the **Admin Area > Geo** dashboard on the **secondary** node and expand
-the **Verification information** section for that node to view automatic verification
-status for each data type. As with checksumming, successes are shown in
-green, pending work in gray, and failures in red.
+ 
-
+On the **secondary** node:
+
+1. On the top bar, select **Menu >** **{admin}** **Admin**.
+1. On the left sidebar, select **Geo > Nodes**.
+1. Expand the **Verification information** tab for that node to view automatic checksumming
+ status for repositories and wikis. Successes are shown in green, pending work
+ in gray, and failures in red.
+
+ 
## Using checksums to compare Geo nodes
@@ -92,11 +98,14 @@ data. The default and recommended re-verification interval is 7 days, though
an interval as short as 1 day can be set. Shorter intervals reduce risk but
increase load and vice versa.
-Go to the **Admin Area > Geo** dashboard on the **primary** node, and
-click the **Edit** button for the **primary** node to customize the minimum
-re-verification interval:
+On the **primary** node:
-
+1. On the top bar, select **Menu >** **{admin}** **Admin**.
+1. On the left sidebar, select **Geo > Nodes**.
+1. Select **Edit** for the **primary** node to customize the minimum
+ re-verification interval:
+
+ 
The automatic background re-verification is enabled by default, but you can
disable if you need. Run the following commands in a Rails console on the
@@ -141,17 +150,19 @@ sudo gitlab-rake geo:verification:wiki:reset
If the **primary** and **secondary** nodes have a checksum verification mismatch, the cause may not be apparent. To find the cause of a checksum mismatch:
-1. Go to the **Admin Area > Overview > Projects** dashboard on the **primary** node, find the
- project that you want to check the checksum differences and click on the
- **Edit** button:
- 
+1. On the **primary** node:
+ 1. On the top bar, select **Menu >** **{admin}** **Admin**.
+ 1. On the left sidebar, select **Overview > Projects**.
+   1. Find the project that you want to check for checksum differences and
+      select its name.
+   1. On the project administration page, get the **Gitaly storage name**
+      and **Gitaly relative path**.
-1. On the project administration page get the **Gitaly storage name**, and **Gitaly relative path**:
- 
+ 
1. Go to the project's repository directory on both **primary** and **secondary** nodes
(the path is usually `/var/opt/gitlab/git-data/repositories`). Note that if `git_data_dirs`
- is customized, check the directory layout on your server to be sure.
+ is customized, check the directory layout on your server to be sure:
```shell
cd /var/opt/gitlab/git-data/repositories
diff --git a/doc/administration/geo/disaster_recovery/img/checksum-differences-admin-projects.png b/doc/administration/geo/disaster_recovery/img/checksum-differences-admin-projects.png
deleted file mode 100644
index 85759d903a4..00000000000
Binary files a/doc/administration/geo/disaster_recovery/img/checksum-differences-admin-projects.png and /dev/null differ
diff --git a/doc/administration/geo/disaster_recovery/planned_failover.md b/doc/administration/geo/disaster_recovery/planned_failover.md
index 633d787473e..5c15523ac78 100644
--- a/doc/administration/geo/disaster_recovery/planned_failover.md
+++ b/doc/administration/geo/disaster_recovery/planned_failover.md
@@ -109,13 +109,16 @@ The maintenance window won't end until Geo replication and verification is
completely finished. To keep the window as short as possible, you should
ensure these processes are close to 100% as possible during active use.
-Go to the **Admin Area > Geo** dashboard on the **secondary** node to
-review status. Replicated objects (shown in green) should be close to 100%,
-and there should be no failures (shown in red). If a large proportion of
-objects aren't yet replicated (shown in gray), consider giving the node more
-time to complete
+On the **secondary** node:
-
+1. On the top bar, select **Menu >** **{admin}** **Admin**.
+1. On the left sidebar, select **Geo > Nodes**.
+ Replicated objects (shown in green) should be close to 100%,
+ and there should be no failures (shown in red). If a large proportion of
+ objects aren't yet replicated (shown in gray), consider giving the node more
+   time to complete.
+
+ 
If any objects are failing to replicate, this should be investigated before
scheduling the maintenance window. Following a planned failover, anything that
@@ -134,23 +137,26 @@ This [content was moved to another location](background_verification.md).
### Notify users of scheduled maintenance
-On the **primary** node, navigate to **Admin Area > Messages**, add a broadcast
-message. You can check under **Admin Area > Geo** to estimate how long it
-takes to finish syncing. An example message would be:
+On the **primary** node:
-> A scheduled maintenance takes place at XX:XX UTC. We expect it to take
-> less than 1 hour.
+1. On the top bar, select **Menu >** **{admin}** **Admin**.
+1. On the left sidebar, select **Messages**.
+1. Add a message notifying users of the maintenance window.
+ You can check under **Geo > Nodes** to estimate how long it
+ takes to finish syncing.
+1. Select **Add broadcast message**.
## Prevent updates to the **primary** node
To ensure that all data is replicated to a secondary site, updates (write requests) need to
-be disabled on the primary site:
+be disabled on the **primary** site:
-1. Enable [maintenance mode](../../maintenance_mode/index.md).
-
-1. Disable non-Geo periodic background jobs on the **primary** node by navigating
- to **Admin Area > Monitoring > Background Jobs > Cron**, pressing `Disable All`,
- and then pressing `Enable` for the `geo_sidekiq_cron_config_worker` cron job.
+1. Enable [maintenance mode](../../maintenance_mode/index.md) on the **primary** node.
+1. On the top bar, select **Menu >** **{admin}** **Admin**.
+1. On the left sidebar, select **Monitoring > Background Jobs**.
+1. On the Sidekiq dashboard, select **Cron**.
+1. Select `Disable All` to disable non-Geo periodic background jobs.
+1. Select `Enable` for the `geo_sidekiq_cron_config_worker` cron job.
This job re-enables several other cron jobs that are essential for planned
failover to complete successfully.
@@ -158,23 +164,28 @@ be disabled on the primary site:
1. If you are manually replicating any data not managed by Geo, trigger the
final replication process now.
-1. On the **primary** node, navigate to **Admin Area > Monitoring > Background Jobs > Queues**
- and wait for all queues except those with `geo` in the name to drop to 0.
- These queues contain work that has been submitted by your users; failing over
- before it is completed, causes the work to be lost.
-1. On the **primary** node, navigate to **Admin Area > Geo** and wait for the
- following conditions to be true of the **secondary** node you are failing over to:
+1. On the **primary** node:
+ 1. On the top bar, select **Menu >** **{admin}** **Admin**.
+ 1. On the left sidebar, select **Monitoring > Background Jobs**.
+ 1. On the Sidekiq dashboard, select **Queues**, and wait for all queues except
+ those with `geo` in the name to drop to 0.
+ These queues contain work that has been submitted by your users; failing over
+      before it is completed causes the work to be lost.
+ 1. On the left sidebar, select **Geo > Nodes** and wait for the
+ following conditions to be true of the **secondary** node you are failing over to:
- - All replication meters to each 100% replicated, 0% failures.
- - All verification meters reach 100% verified, 0% failures.
- - Database replication lag is 0ms.
- - The Geo log cursor is up to date (0 events behind).
+ - All replication meters reach 100% replicated, 0% failures.
+ - All verification meters reach 100% verified, 0% failures.
+ - Database replication lag is 0ms.
+ - The Geo log cursor is up to date (0 events behind).
-1. On the **secondary** node, navigate to **Admin Area > Monitoring > Background Jobs > Queues**
- and wait for all the `geo` queues to drop to 0 queued and 0 running jobs.
-1. On the **secondary** node, use [these instructions](../../raketasks/check.md)
- to verify the integrity of CI artifacts, LFS objects, and uploads in file
- storage.
+1. On the **secondary** node:
+ 1. On the top bar, select **Menu >** **{admin}** **Admin**.
+ 1. On the left sidebar, select **Monitoring > Background Jobs**.
+ 1. On the Sidekiq dashboard, select **Queues**, and wait for all the `geo`
+ queues to drop to 0 queued and 0 running jobs.
+ 1. [Run an integrity check](../../raketasks/check.md) to verify the integrity
+ of CI artifacts, LFS objects, and uploads in file storage.
At this point, your **secondary** node contains an up-to-date copy of everything the
**primary** node has, meaning nothing was lost when you fail over.
diff --git a/doc/administration/geo/disaster_recovery/runbooks/planned_failover_multi_node.md b/doc/administration/geo/disaster_recovery/runbooks/planned_failover_multi_node.md
index e19aa671b89..4cfe781c7a4 100644
--- a/doc/administration/geo/disaster_recovery/runbooks/planned_failover_multi_node.md
+++ b/doc/administration/geo/disaster_recovery/runbooks/planned_failover_multi_node.md
@@ -63,13 +63,16 @@ Before following any of those steps, make sure you have `root` access to the
**secondary** to promote it, since there isn't provided an automated way to
promote a Geo replica and perform a failover.
-On the **secondary** node, navigate to the **Admin Area > Geo** dashboard to
-review its status. Replicated objects (shown in green) should be close to 100%,
-and there should be no failures (shown in red). If a large proportion of
-objects aren't yet replicated (shown in gray), consider giving the node more
-time to complete.
+On the **secondary** node:
-
+1. On the top bar, select **Menu >** **{admin}** **Admin**.
+1. On the left sidebar, select **Geo > Nodes** to see its status.
+ Replicated objects (shown in green) should be close to 100%,
+ and there should be no failures (shown in red). If a large proportion of
+ objects aren't yet replicated (shown in gray), consider giving the node more
+ time to complete.
+
+ 
If any objects are failing to replicate, this should be investigated before
scheduling the maintenance window. After a planned failover, anything that
@@ -126,11 +129,14 @@ follow these steps to avoid unnecessary data loss:
existing Git repository with an SSH remote URL. The server should refuse
connection.
- 1. On the **primary** node, disable non-Geo periodic background jobs by navigating
- to **Admin Area > Monitoring > Background Jobs > Cron**, clicking `Disable All`,
- and then clicking `Enable` for the `geo_sidekiq_cron_config_worker` cron job.
- This job will re-enable several other cron jobs that are essential for planned
- failover to complete successfully.
+ 1. On the **primary** node:
+ 1. On the top bar, select **Menu >** **{admin}** **Admin**.
+ 1. On the left sidebar, select **Monitoring > Background Jobs**.
+      1. On the Sidekiq dashboard, select **Cron**.
+ 1. Select `Disable All` to disable any non-Geo periodic background jobs.
+ 1. Select `Enable` for the `geo_sidekiq_cron_config_worker` cron job.
+ This job will re-enable several other cron jobs that are essential for planned
+ failover to complete successfully.
1. Finish replicating and verifying all data:
@@ -141,22 +147,28 @@ follow these steps to avoid unnecessary data loss:
1. If you are manually replicating any
[data not managed by Geo](../../replication/datatypes.md#limitations-on-replicationverification),
trigger the final replication process now.
- 1. On the **primary** node, navigate to **Admin Area > Monitoring > Background Jobs > Queues**
- and wait for all queues except those with `geo` in the name to drop to 0.
- These queues contain work that has been submitted by your users; failing over
- before it is completed will cause the work to be lost.
- 1. On the **primary** node, navigate to **Admin Area > Geo** and wait for the
- following conditions to be true of the **secondary** node you are failing over to:
- - All replication meters to each 100% replicated, 0% failures.
- - All verification meters reach 100% verified, 0% failures.
- - Database replication lag is 0ms.
- - The Geo log cursor is up to date (0 events behind).
+ 1. On the **primary** node:
+ 1. On the top bar, select **Menu >** **{admin}** **Admin**.
+ 1. On the left sidebar, select **Monitoring > Background Jobs**.
+ 1. On the Sidekiq dashboard, select **Queues**, and wait for all queues except
+ those with `geo` in the name to drop to 0.
+ These queues contain work that has been submitted by your users; failing over
+         before it is completed causes the work to be lost.
+ 1. On the left sidebar, select **Geo > Nodes** and wait for the
+ following conditions to be true of the **secondary** node you are failing over to:
- 1. On the **secondary** node, navigate to **Admin Area > Monitoring > Background Jobs > Queues**
- and wait for all the `geo` queues to drop to 0 queued and 0 running jobs.
- 1. On the **secondary** node, use [these instructions](../../../raketasks/check.md)
- to verify the integrity of CI artifacts, LFS objects, and uploads in file
- storage.
+ - All replication meters reach 100% replicated, 0% failures.
+ - All verification meters reach 100% verified, 0% failures.
+ - Database replication lag is 0ms.
+ - The Geo log cursor is up to date (0 events behind).
+
+ 1. On the **secondary** node:
+ 1. On the top bar, select **Menu >** **{admin}** **Admin**.
+ 1. On the left sidebar, select **Monitoring > Background Jobs**.
+ 1. On the Sidekiq dashboard, select **Queues**, and wait for all the `geo`
+ queues to drop to 0 queued and 0 running jobs.
+ 1. [Run an integrity check](../../../raketasks/check.md) to verify the integrity
+ of CI artifacts, LFS objects, and uploads in file storage.
At this point, your **secondary** node will contain an up-to-date copy of everything the
**primary** node has, meaning nothing will be lost when you fail over.
diff --git a/doc/administration/geo/disaster_recovery/runbooks/planned_failover_single_node.md b/doc/administration/geo/disaster_recovery/runbooks/planned_failover_single_node.md
index 9b5c3f00040..6caeddad51a 100644
--- a/doc/administration/geo/disaster_recovery/runbooks/planned_failover_single_node.md
+++ b/doc/administration/geo/disaster_recovery/runbooks/planned_failover_single_node.md
@@ -114,11 +114,14 @@ follow these steps to avoid unnecessary data loss:
existing Git repository with an SSH remote URL. The server should refuse
connection.
- 1. On the **primary** node, disable non-Geo periodic background jobs by navigating
- to **Admin Area > Monitoring > Background Jobs > Cron**, clicking `Disable All`,
- and then clicking `Enable` for the `geo_sidekiq_cron_config_worker` cron job.
- This job will re-enable several other cron jobs that are essential for planned
- failover to complete successfully.
+ 1. On the **primary** node:
+ 1. On the top bar, select **Menu >** **{admin}** **Admin**.
+ 1. On the left sidebar, select **Monitoring > Background Jobs**.
+      1. On the Sidekiq dashboard, select **Cron**.
+ 1. Select `Disable All` to disable any non-Geo periodic background jobs.
+ 1. Select `Enable` for the `geo_sidekiq_cron_config_worker` cron job.
+ This job will re-enable several other cron jobs that are essential for planned
+ failover to complete successfully.
1. Finish replicating and verifying all data:
@@ -129,22 +132,28 @@ follow these steps to avoid unnecessary data loss:
1. If you are manually replicating any
[data not managed by Geo](../../replication/datatypes.md#limitations-on-replicationverification),
trigger the final replication process now.
- 1. On the **primary** node, navigate to **Admin Area > Monitoring > Background Jobs > Queues**
- and wait for all queues except those with `geo` in the name to drop to 0.
- These queues contain work that has been submitted by your users; failing over
- before it is completed will cause the work to be lost.
- 1. On the **primary** node, navigate to **Admin Area > Geo** and wait for the
- following conditions to be true of the **secondary** node you are failing over to:
- - All replication meters to each 100% replicated, 0% failures.
- - All verification meters reach 100% verified, 0% failures.
- - Database replication lag is 0ms.
- - The Geo log cursor is up to date (0 events behind).
+ 1. On the **primary** node:
+ 1. On the top bar, select **Menu >** **{admin}** **Admin**.
+ 1. On the left sidebar, select **Monitoring > Background Jobs**.
+ 1. On the Sidekiq dashboard, select **Queues**, and wait for all queues except
+ those with `geo` in the name to drop to 0.
+ These queues contain work that has been submitted by your users; failing over
+         before it is completed causes the work to be lost.
+ 1. On the left sidebar, select **Geo > Nodes** and wait for the
+ following conditions to be true of the **secondary** node you are failing over to:
- 1. On the **secondary** node, navigate to **Admin Area > Monitoring > Background Jobs > Queues**
- and wait for all the `geo` queues to drop to 0 queued and 0 running jobs.
- 1. On the **secondary** node, use [these instructions](../../../raketasks/check.md)
- to verify the integrity of CI artifacts, LFS objects, and uploads in file
- storage.
+ - All replication meters reach 100% replicated, 0% failures.
+ - All verification meters reach 100% verified, 0% failures.
+ - Database replication lag is 0ms.
+ - The Geo log cursor is up to date (0 events behind).
+
+ 1. On the **secondary** node:
+ 1. On the top bar, select **Menu >** **{admin}** **Admin**.
+ 1. On the left sidebar, select **Monitoring > Background Jobs**.
+ 1. On the Sidekiq dashboard, select **Queues**, and wait for all the `geo`
+ queues to drop to 0 queued and 0 running jobs.
+ 1. [Run an integrity check](../../../raketasks/check.md) to verify the integrity
+ of CI artifacts, LFS objects, and uploads in file storage.
At this point, your **secondary** node will contain an up-to-date copy of everything the
**primary** node has, meaning nothing will be lost when you fail over.
diff --git a/doc/administration/geo/replication/configuration.md b/doc/administration/geo/replication/configuration.md
index 8a1ea0ad3f2..926c4c565aa 100644
--- a/doc/administration/geo/replication/configuration.md
+++ b/doc/administration/geo/replication/configuration.md
@@ -196,9 +196,9 @@ keys must be manually replicated to the **secondary** node.
gitlab-ctl reconfigure
```
-1. Visit the **primary** node's **Admin Area > Geo**
- (`/admin/geo/nodes`) in your browser.
-1. Click the **New node** button.
+1. On the top bar, select **Menu >** **{admin}** **Admin**.
+1. On the left sidebar, select **Geo > Nodes**.
+1. Select **New node**.

1. Fill in **Name** with the `gitlab_rails['geo_node_name']` in
`/etc/gitlab/gitlab.rb`. These values must always match *exactly*, character
@@ -209,7 +209,7 @@ keys must be manually replicated to the **secondary** node.
1. Optionally, choose which groups or storage shards should be replicated by the
**secondary** node. Leave blank to replicate all. Read more in
[selective synchronization](#selective-synchronization).
-1. Click the **Add node** button to add the **secondary** node.
+1. Select **Add node** to add the **secondary** node.
1. SSH into your GitLab **secondary** server and restart the services:
```shell
@@ -252,18 +252,22 @@ on the **secondary** node.
Geo synchronizes repositories over HTTP/HTTPS, and therefore requires this clone
method to be enabled. This is enabled by default, but if converting an existing node to Geo it should be checked:
-1. Go to **Admin Area > Settings** (`/admin/application_settings/general`) on the **primary** node.
-1. Expand "Visibility and access controls".
+On the **primary** node:
+
+1. On the top bar, select **Menu >** **{admin}** **Admin**.
+1. On the left sidebar, select **Settings > General**.
+1. Expand **Visibility and access controls**.
1. Ensure "Enabled Git access protocols" is set to either "Both SSH and HTTP(S)" or "Only HTTP(S)".
### Step 6. Verify proper functioning of the **secondary** node
-Your **secondary** node is now configured!
+You can sign in to the **secondary** node with the same credentials you used with
+the **primary** node. After you sign in:
-You can sign in to the _secondary_ node with the same credentials you used with
-the _primary_ node. Visit the _secondary_ node's **Admin Area > Geo**
-(`/admin/geo/nodes`) in your browser to determine if it's correctly identified
-as a _secondary_ Geo node, and if Geo is enabled.
+1. On the top bar, select **Menu >** **{admin}** **Admin**.
+1. On the left sidebar, select **Geo > Nodes**.
+1. Verify that it's correctly identified as a **secondary** Geo node, and that
+ Geo is enabled.
The initial replication, or 'backfill', is probably still in progress. You
can monitor the synchronization process on each Geo node from the **primary**
diff --git a/doc/administration/geo/replication/disable_geo.md b/doc/administration/geo/replication/disable_geo.md
index c71cf80d0c1..ba01c55a157 100644
--- a/doc/administration/geo/replication/disable_geo.md
+++ b/doc/administration/geo/replication/disable_geo.md
@@ -33,9 +33,12 @@ to do that.
## Remove the primary site from the UI
-1. Go to **Admin Area > Geo** (`/admin/geo/nodes`).
-1. Click the **Remove** button for the **primary** node.
-1. Confirm by clicking **Remove** when the prompt appears.
+To remove the **primary** site:
+
+1. On the top bar, select **Menu >** **{admin}** **Admin**.
+1. On the left sidebar, select **Geo > Nodes**.
+1. Select **Remove** for the **primary** node.
+1. Confirm by selecting **Remove** when the prompt appears.
## Remove secondary replication slots
diff --git a/doc/administration/geo/replication/docker_registry.md b/doc/administration/geo/replication/docker_registry.md
index ad890a07883..83007767215 100644
--- a/doc/administration/geo/replication/docker_registry.md
+++ b/doc/administration/geo/replication/docker_registry.md
@@ -127,7 +127,10 @@ For each application and Sidekiq node on the **secondary** site:
### Verify replication
-To verify Container Registry replication is working, go to **Admin Area > Geo**
-(`/admin/geo/nodes`) on the **secondary** site.
-The initial replication, or "backfill", is probably still in progress.
+To verify Container Registry replication is working, on the **secondary** site:
+
+1. On the top bar, select **Menu >** **{admin}** **Admin**.
+1. On the left sidebar, select **Geo > Nodes**.
+ The initial replication, or "backfill", is probably still in progress.
+
You can monitor the synchronization process on each Geo site from the **primary** site's **Geo Nodes** dashboard in your browser.
diff --git a/doc/administration/geo/replication/object_storage.md b/doc/administration/geo/replication/object_storage.md
index 7dd831092a3..90a41ed3e1c 100644
--- a/doc/administration/geo/replication/object_storage.md
+++ b/doc/administration/geo/replication/object_storage.md
@@ -21,7 +21,7 @@ To have:
[Read more about using object storage with GitLab](../../object_storage.md).
-## Enabling GitLab managed object storage replication
+## Enabling GitLab-managed object storage replication
> [Introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/10586) in GitLab 12.4.
@@ -31,10 +31,11 @@ This is a [**beta** feature](https://about.gitlab.com/handbook/product/#beta) an
**Secondary** sites can replicate files stored on the **primary** site regardless of
whether they are stored on the local file system or in object storage.
-To enable GitLab replication, you must:
+To enable GitLab replication:
-1. Go to **Admin Area > Geo**.
-1. Press **Edit** on the **secondary** site.
+1. On the top bar, select **Menu >** **{admin}** **Admin**.
+1. On the left sidebar, select **Geo > Nodes**.
+1. Select **Edit** on the **secondary** site.
1. In the **Synchronization Settings** section, find the **Allow this secondary node to replicate content on Object Storage**
checkbox to enable it.
diff --git a/doc/administration/geo/replication/remove_geo_site.md b/doc/administration/geo/replication/remove_geo_site.md
index a42a4c4eb47..274eb28dbc9 100644
--- a/doc/administration/geo/replication/remove_geo_site.md
+++ b/doc/administration/geo/replication/remove_geo_site.md
@@ -9,7 +9,8 @@ type: howto
**Secondary** sites can be removed from the Geo cluster using the Geo administration page of the **primary** site. To remove a **secondary** site:
-1. Go to **Admin Area > Geo** (`/admin/geo/nodes`).
+1. On the top bar, select **Menu >** **{admin}** **Admin**.
+1. On the left sidebar, select **Geo > Nodes**.
1. Select the **Remove** button for the **secondary** site you want to remove.
1. Confirm by selecting **Remove** when the prompt appears.
diff --git a/doc/administration/geo/replication/troubleshooting.md b/doc/administration/geo/replication/troubleshooting.md
index 7c1f7cf7a8d..c00f523957c 100644
--- a/doc/administration/geo/replication/troubleshooting.md
+++ b/doc/administration/geo/replication/troubleshooting.md
@@ -25,8 +25,12 @@ Before attempting more advanced troubleshooting:
### Check the health of the **secondary** node
-Visit the **primary** node's **Admin Area > Geo** (`/admin/geo/nodes`) in
-your browser. We perform the following health checks on each **secondary** node
+On the **primary** node:
+
+1. On the top bar, select **Menu >** **{admin}** **Admin**.
+1. On the left sidebar, select **Geo > Nodes**.
+
+We perform the following health checks on each **secondary** node
to help identify if something is wrong:
- Is the node running?
@@ -129,7 +133,8 @@ Geo finds the current machine's Geo node name in `/etc/gitlab/gitlab.rb` by:
- Using the `gitlab_rails['geo_node_name']` setting.
- If that is not defined, using the `external_url` setting.
-This name is used to look up the node with the same **Name** in **Admin Area > Geo**.
+This name is used to look up the node with the same **Name** in the **Geo Nodes**
+dashboard.
To check if the current machine has a node name that matches a node in the
database, run the check task:
@@ -739,8 +744,11 @@ If you are able to log in to the **primary** node, but you receive this error
when attempting to log into a **secondary**, you should check that the Geo
node's URL matches its external URL.
-1. On the primary, visit **Admin Area > Geo**.
-1. Find the affected **secondary** and click **Edit**.
+On the **primary** node:
+
+1. On the top bar, select **Menu >** **{admin}** **Admin**.
+1. On the left sidebar, select **Geo > Nodes**.
+1. Find the affected **secondary** site and select **Edit**.
1. Ensure the **URL** field matches the value found in `/etc/gitlab/gitlab.rb`
in `external_url "https://gitlab.example.com"` on the frontend server(s) of
the **secondary** node.
diff --git a/doc/administration/geo/replication/tuning.md b/doc/administration/geo/replication/tuning.md
index a4aad3dec68..9807f3e6444 100644
--- a/doc/administration/geo/replication/tuning.md
+++ b/doc/administration/geo/replication/tuning.md
@@ -7,20 +7,28 @@ type: howto
# Tuning Geo **(PREMIUM SELF)**
-## Changing the sync/verification capacity values
+You can limit the number of concurrent operations the nodes can run
+in the background.
-In **Admin Area > Geo** (`/admin/geo/nodes`),
-there are several variables that can be tuned to improve performance of Geo:
+## Changing the sync/verification concurrency values
-- Repository sync capacity
-- File sync capacity
-- Container repositories sync capacity
-- Verification capacity
+On the **primary** site:
-Increasing capacity values will increase the number of jobs that are scheduled.
+1. On the top bar, select **Menu >** **{admin}** **Admin**.
+1. On the left sidebar, select **Geo > Nodes**.
+1. Select **Edit** for the secondary node you want to tune.
+1. Under **Tuning settings**, there are several variables that can be tuned to
+ improve the performance of Geo:
+
+ - Repository synchronization concurrency limit
+ - File synchronization concurrency limit
+ - Container repositories synchronization concurrency limit
+ - Verification concurrency limit
+
+Increasing the concurrency values will increase the number of jobs that are scheduled.
However, this may not lead to more downloads in parallel unless the number of
-available Sidekiq threads is also increased. For example, if repository sync
-capacity is increased from 25 to 50, you may also want to increase the number
+available Sidekiq threads is also increased. For example, if repository synchronization
+concurrency is increased from 25 to 50, you may also want to increase the number
of Sidekiq threads from 25 to 50. See the
[Sidekiq concurrency documentation](../../operations/extra_sidekiq_processes.md#number-of-threads)
for more details.
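As a rough illustration of the Sidekiq side of that example, a hedged `/etc/gitlab/gitlab.rb` sketch; `sidekiq['max_concurrency']` is assumed to be the relevant Omnibus setting for your version, so check the Sidekiq configuration documentation before applying it:

```ruby
# /etc/gitlab/gitlab.rb on the secondary's Sidekiq node (sketch only).
# If repository synchronization concurrency is raised from 25 to 50 in the UI,
# give Sidekiq a matching number of threads so the extra jobs can run in parallel.
sidekiq['max_concurrency'] = 50
```

Run `sudo gitlab-ctl reconfigure` afterwards for the setting to take effect.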
diff --git a/doc/administration/housekeeping.md b/doc/administration/housekeeping.md
index 9668b7277c2..a89e8a2bad5 100644
--- a/doc/administration/housekeeping.md
+++ b/doc/administration/housekeeping.md
@@ -9,25 +9,27 @@ info: To determine the technical writer assigned to the Stage/Group associated w
GitLab supports and automates housekeeping tasks within your current repository,
such as compressing file revisions and removing unreachable objects.
-## Automatic housekeeping
+## Configure housekeeping
GitLab automatically runs `git gc` and `git repack` on repositories
-after Git pushes. You can change how often this happens or turn it off in
-**Admin Area > Settings > Repository** (`/admin/application_settings/repository`).
+after Git pushes.
-## Manual housekeeping
+You can change how often this happens or turn it off:
-The housekeeping function runs `repack` or `gc` depending on the
-**Housekeeping** settings configured in **Admin Area > Settings > Repository**.
+1. On the top bar, select **Menu >** **{admin}** **Admin**.
+1. On the left sidebar, select **Settings > Repository**.
+1. Expand **Repository maintenance**.
+1. Configure the Housekeeping options.
+1. Select **Save changes**.
-For example in the following scenario a `git repack -d` will be executed:
+For example, in the following scenario a `git repack -d` will be executed:
- Project: pushes since GC counter (`pushes_since_gc`) = `10`
- Git GC period = `200`
- Full repack period = `50`
When the `pushes_since_gc` value is 50 a `repack -A -d --pack-kept-objects` runs, similarly when
-the `pushes_since_gc` value is 200 a `git gc` runs.
+the `pushes_since_gc` value is 200, a `git gc` runs:
- `git gc` ([man page](https://mirrors.edge.kernel.org/pub/software/scm/git/docs/git-gc.html)) runs a number of housekeeping tasks,
such as compressing file revisions (to reduce disk space and increase performance)
@@ -38,12 +40,6 @@ the `pushes_since_gc` value is 200 a `git gc` runs.
Housekeeping also [removes unreferenced LFS files](../raketasks/cleanup.md#remove-unreferenced-lfs-files)
from your project on the same schedule as the `git gc` operation, freeing up storage space for your project.
-To manually start the housekeeping process:
-
-1. In your project, go to **Settings > General**.
-1. Expand the **Advanced** section.
-1. Select **Run housekeeping**.
-
## How housekeeping handles pool repositories
Housekeeping for pool repositories is handled differently from standard repositories.
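To make the counter arithmetic above concrete, a minimal sketch (not GitLab's actual implementation) that maps `pushes_since_gc` to the Git task that would run, using the example periods from this page plus an assumed incremental repack period of 10:

```ruby
# Sketch only: which housekeeping task runs for a given pushes-since-GC counter.
def housekeeping_task(pushes_since_gc, gc_period: 200, full_repack_period: 50, repack_period: 10)
  if (pushes_since_gc % gc_period).zero?
    :gc                 # git gc
  elsif (pushes_since_gc % full_repack_period).zero?
    :full_repack        # git repack -A -d --pack-kept-objects
  elsif (pushes_since_gc % repack_period).zero?
    :incremental_repack # git repack -d
  else
    :none
  end
end

housekeeping_task(10)  # => :incremental_repack (the `git repack -d` scenario above)
housekeeping_task(50)  # => :full_repack
housekeeping_task(200) # => :gc
```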
diff --git a/doc/administration/img/auditor_access_form.png b/doc/administration/img/auditor_access_form.png
deleted file mode 100644
index c179a7d3b0a..00000000000
Binary files a/doc/administration/img/auditor_access_form.png and /dev/null differ
diff --git a/doc/administration/maintenance_mode/index.md b/doc/administration/maintenance_mode/index.md
index c73a49287db..2f5d366f927 100644
--- a/doc/administration/maintenance_mode/index.md
+++ b/doc/administration/maintenance_mode/index.md
@@ -21,10 +21,11 @@ Maintenance Mode allows most external actions that do not change internal state.
There are three ways to enable Maintenance Mode as an administrator:
- **Web UI**:
- 1. Go to **Admin Area > Settings > General**, expand **Maintenance Mode**, and toggle **Enable Maintenance Mode**.
+ 1. On the top bar, select **Menu >** **{admin}** **Admin**.
+ 1. On the left sidebar, select **Settings > General**.
+ 1. Expand **Maintenance Mode**, and toggle **Enable Maintenance Mode**.
You can optionally add a message for the banner as well.
-
- 1. Click **Save** for the changes to take effect.
+ 1. Select **Save changes**.
- **API**:
@@ -44,9 +45,11 @@ There are three ways to enable Maintenance Mode as an administrator:
There are three ways to disable Maintenance Mode:
- **Web UI**:
- 1. Go to **Admin Area > Settings > General**, expand **Maintenance Mode**, and toggle **Enable Maintenance Mode**.
-
- 1. Click **Save** for the changes to take effect.
+ 1. On the top bar, select **Menu >** **{admin}** **Admin**.
+ 1. On the left sidebar, select **Settings > General**.
+ 1. Expand **Maintenance Mode**, and toggle **Enable Maintenance Mode**.
+ You can optionally add a message for the banner as well.
+ 1. Select **Save changes**.
- **API**:
@@ -166,7 +169,10 @@ Background jobs (cron jobs, Sidekiq) continue running as is, because background
[During a planned Geo failover](../geo/disaster_recovery/planned_failover.md#prevent-updates-to-the-primary-node),
it is recommended that you disable all cron jobs except for those related to Geo.
-You can monitor queues and disable jobs in **Admin Area > Monitoring > Background Jobs**.
+To monitor queues and disable jobs:
+
+1. On the top bar, select **Menu >** **{admin}** **Admin**.
+1. On the left sidebar, select **Monitoring > Background Jobs**.
### Incident management
diff --git a/doc/administration/operations/extra_sidekiq_processes.md b/doc/administration/operations/extra_sidekiq_processes.md
index ed89d11da75..b910a789d29 100644
--- a/doc/administration/operations/extra_sidekiq_processes.md
+++ b/doc/administration/operations/extra_sidekiq_processes.md
@@ -87,10 +87,10 @@ To start multiple processes:
sudo gitlab-ctl reconfigure
```
-After the extra Sidekiq processes are added, navigate to
-**Admin Area > Monitoring > Background Jobs** (`/admin/background_jobs`) in GitLab.
+To view the Sidekiq processes in GitLab:
-
+1. On the top bar, select **Menu >** **{admin}** **Admin**.
+1. On the left sidebar, select **Monitoring > Background Jobs**.
## Negate settings
diff --git a/doc/administration/operations/fast_ssh_key_lookup.md b/doc/administration/operations/fast_ssh_key_lookup.md
index 8acc40da4ab..bb0756cf948 100644
--- a/doc/administration/operations/fast_ssh_key_lookup.md
+++ b/doc/administration/operations/fast_ssh_key_lookup.md
@@ -104,11 +104,13 @@ In the case of lookup failures (which are common), the `authorized_keys`
file is still scanned. So Git SSH performance would still be slow for many
users as long as a large file exists.
-You can disable any more writes to the `authorized_keys` file by unchecking
-`Write to "authorized_keys" file` in the **Admin Area > Settings > Network > Performance optimization** of your GitLab
-installation.
+To disable writes to the `authorized_keys` file:
-
+1. On the top bar, select **Menu >** **{admin}** **Admin**.
+1. On the left sidebar, select **Settings > Network**.
+1. Expand **Performance optimization**.
+1. Clear the **Write to "authorized_keys" file** checkbox.
+1. Select **Save changes**.
Again, confirm that SSH is working by removing your user's SSH key in the UI,
adding a new one, and attempting to pull a repository.
diff --git a/doc/administration/operations/img/sidekiq-cluster.png b/doc/administration/operations/img/sidekiq-cluster.png
deleted file mode 100644
index 3899385eb8f..00000000000
Binary files a/doc/administration/operations/img/sidekiq-cluster.png and /dev/null differ
diff --git a/doc/administration/operations/img/write_to_authorized_keys_setting.png b/doc/administration/operations/img/write_to_authorized_keys_setting.png
deleted file mode 100644
index f6227a6057b..00000000000
Binary files a/doc/administration/operations/img/write_to_authorized_keys_setting.png and /dev/null differ
diff --git a/doc/administration/pages/index.md b/doc/administration/pages/index.md
index 54af9950bb1..ec95f9f9382 100644
--- a/doc/administration/pages/index.md
+++ b/doc/administration/pages/index.md
@@ -800,7 +800,7 @@ To explicitly enable API source:
1. [Reconfigure GitLab](../restart_gitlab.md#omnibus-gitlab-reconfigure) for the changes to take effect.
-Or if you want to use legacy confiration source you can:
+Or, if you want to use the legacy configuration source, you can:
1. Add the following to your `/etc/gitlab/gitlab.rb` file:
diff --git a/doc/administration/polling.md b/doc/administration/polling.md
index f6732b8edc6..d3f558eeaaa 100644
--- a/doc/administration/polling.md
+++ b/doc/administration/polling.md
@@ -9,23 +9,24 @@ info: To determine the technical writer assigned to the Stage/Group associated w
The GitLab UI polls for updates for different resources (issue notes, issue
titles, pipeline statuses, etc.) on a schedule appropriate to the resource.
-In **[Admin Area](../user/admin_area/index.md) > Settings > Preferences > Real-time features**,
-you can configure "Polling
-interval multiplier". This multiplier is applied to all resources at once,
-and decimal values are supported. For the sake of the examples below, we will
-say that issue notes poll every 2 seconds, and issue titles poll every 5
-seconds; these are _not_ the actual values.
+To configure the polling interval multiplier:
-- 1 is the default, and recommended for most installations. (Issue notes poll
- every 2 seconds, and issue titles poll every 5 seconds.)
-- 0 disables UI polling completely. (On the next poll, clients stop
- polling for updates.)
-- A value greater than 1 slows polling down. If you see issues with
- database load from lots of clients polling for updates, increasing the
- multiplier from 1 can be a good compromise, rather than disabling polling
- completely. (For example: If this is set to 2, then issue notes poll every 4
- seconds, and issue titles poll every 10 seconds.)
-- A value between 0 and 1 makes the UI poll more frequently (so updates
- show in other sessions faster), but is **not recommended**. 1 should be
- fast enough. (For example, if this is set to 0.5, then issue notes poll every
- 1 second, and issue titles poll every 2.5 seconds.)
+1. On the top bar, select **Menu >** **{admin}** **Admin**.
+1. On the left sidebar, select **Settings > Preferences**.
+1. Expand **Real-time features**.
+1. Set a value for the polling interval multiplier. This multiplier is applied
+ to all resources at once, and decimal values are supported:
+
+ - `1.0` is the default, and recommended for most installations.
+ - `0` disables UI polling completely. On the next poll, clients stop
+ polling for updates.
+ - A value greater than `1` slows polling down. If you see issues with
+ database load from lots of clients polling for updates, increasing the
+ multiplier from 1 can be a good compromise, rather than disabling polling
+ completely. For example, if you set the value to `2`, all polling intervals
+ are multiplied by 2, which means that polling happens half as frequently.
+ - A value between `0` and `1` makes the UI poll more frequently (so updates
+ show in other sessions faster), but is **not recommended**. `1` should be
+ fast enough.
+
+1. Select **Save changes**.
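A small worked example of the multiplier arithmetic, as a sketch; the 2-second base interval is illustrative, not an actual GitLab value:

```ruby
# Sketch only: effective polling interval = base interval * multiplier.
def effective_interval(base_seconds, multiplier)
  return nil if multiplier.zero? # a multiplier of 0 disables polling entirely

  base_seconds * multiplier
end

effective_interval(2, 1.0) # => 2.0 (default behavior)
effective_interval(2, 2)   # => 4   (polls half as often)
effective_interval(2, 0.5) # => 1.0 (polls twice as often, not recommended)
```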
diff --git a/doc/administration/raketasks/check.md b/doc/administration/raketasks/check.md
index 7f344a00f72..f7c91aa6b47 100644
--- a/doc/administration/raketasks/check.md
+++ b/doc/administration/raketasks/check.md
@@ -207,8 +207,7 @@ above.
### Dangling commits
`gitlab:git:fsck` can find dangling commits. To fix them, try
-[manually triggering housekeeping](../housekeeping.md#manual-housekeeping)
-for the affected project(s).
+[enabling housekeeping](../housekeeping.md).
If the issue persists, try triggering `gc` via the
[Rails Console](../operations/rails_console.md#starting-a-rails-console-session):
diff --git a/doc/administration/raketasks/project_import_export.md b/doc/administration/raketasks/project_import_export.md
index cd6ffc957b1..80321d75d66 100644
--- a/doc/administration/raketasks/project_import_export.md
+++ b/doc/administration/raketasks/project_import_export.md
@@ -50,8 +50,13 @@ Note the following:
- Importing is only possible if the version of the import and export GitLab instances are
compatible as described in the [Version history](../../user/project/settings/import_export.md#version-history).
-- The project import option must be enabled in
- application settings (`/admin/application_settings/general`) under **Import sources**, which is available
- under **Admin Area > Settings > Visibility and access controls**.
+- The project import option must be enabled:
+
+ 1. On the top bar, select **Menu >** **{admin}** **Admin**.
+ 1. On the left sidebar, select **Settings > General**.
+ 1. Expand **Visibility and access controls**.
+  1. Under **Import sources**, select the **Project export enabled** checkbox.
+ 1. Select **Save changes**.
+
- The exports are stored in a temporary directory and are deleted every
24 hours by a specific worker.
diff --git a/doc/administration/raketasks/storage.md b/doc/administration/raketasks/storage.md
index 5b6d4e16d8d..cee63a6cae5 100644
--- a/doc/administration/raketasks/storage.md
+++ b/doc/administration/raketasks/storage.md
@@ -107,12 +107,15 @@ to project IDs 50 to 100 in an Omnibus GitLab installation:
sudo gitlab-rake gitlab:storage:migrate_to_hashed ID_FROM=50 ID_TO=100
```
-You can monitor the progress in the **Admin Area > Monitoring > Background Jobs** page.
-There is a specific queue you can watch to see how long it will take to finish:
-`hashed_storage:hashed_storage_project_migrate`.
+To monitor the progress in GitLab:
-After it reaches zero, you can confirm every project has been migrated by running the commands above.
-If you find it necessary, you can run this migration script again to schedule missing projects.
+1. On the top bar, select **Menu >** **{admin}** **Admin**.
+1. On the left sidebar, select **Monitoring > Background Jobs**.
+1. Watch how long the `hashed_storage:hashed_storage_project_migrate` queue
+ will take to finish. After it reaches zero, you can confirm every project
+ has been migrated by running the commands above.
+
+If you find it necessary, you can run the previous migration script again to schedule missing projects.
Any error or warning is logged in Sidekiq's log file.
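If you prefer the Rails console over the UI, a hedged sketch of checking the same queue by name; `Sidekiq::Queue` is the standard Sidekiq API, and the queue name is the one given above:

```ruby
# Sketch only: how many hashed-storage migration jobs are still queued.
Sidekiq::Queue.new('hashed_storage:hashed_storage_project_migrate').size
```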
@@ -120,7 +123,7 @@ If [Geo](../geo/index.md) is enabled, each project that is successfully migrated
generates an event to replicate the changes on any **secondary** nodes.
You only need the `gitlab:storage:migrate_to_hashed` Rake task to migrate your repositories, but there are
-[additional commands(#list-projects-and-attachments) to help you inspect projects and attachments in both legacy and hashed storage.
+[additional commands](#list-projects-and-attachments) to help you inspect projects and attachments in both legacy and hashed storage.
## Rollback from hashed storage to legacy storage
diff --git a/doc/administration/troubleshooting/gitlab_rails_cheat_sheet.md b/doc/administration/troubleshooting/gitlab_rails_cheat_sheet.md
index 92070a86a0d..583765fab3b 100644
--- a/doc/administration/troubleshooting/gitlab_rails_cheat_sheet.md
+++ b/doc/administration/troubleshooting/gitlab_rails_cheat_sheet.md
@@ -275,7 +275,7 @@ integration active:
p = Project.find_by_sql("SELECT p.id FROM projects p LEFT JOIN services s ON p.id = s.project_id WHERE s.type = 'JiraService' AND s.active = true")
p.each do |project|
- project.jira_service.update_attribute(:password, '')
+ project.jira_integration.update_attribute(:password, '')
end
```
@@ -286,9 +286,9 @@ To change all Jira project to use the instance-level integration settings:
1. In a Rails console:
```ruby
- jira_service_instance_id = JiraService.find_by(instance: true).id
- JiraService.where(active: true, instance: false, template: false, inherit_from_id: nil).find_each do |service|
- service.update_attribute(:inherit_from_id, jira_service_instance_id)
+ jira_integration_instance_id = Integrations::Jira.find_by(instance: true).id
+ Integrations::Jira.where(active: true, instance: false, template: false, inherit_from_id: nil).find_each do |integration|
+ integration.update_attribute(:inherit_from_id, jira_integration_instance_id)
end
```
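A hedged follow-up check after running the snippet above, reusing the same attribute names to count project-level Jira integrations that now inherit the instance-level settings:

```ruby
# Sketch only: integrations updated by the loop above should now have inherit_from_id set.
Integrations::Jira.where(active: true, instance: false).where.not(inherit_from_id: nil).count
```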
diff --git a/doc/api/graphql/reference/index.md b/doc/api/graphql/reference/index.md
index 9ac9f91648b..b1ff11a4307 100644
--- a/doc/api/graphql/reference/index.md
+++ b/doc/api/graphql/reference/index.md
@@ -3273,7 +3273,7 @@ Input type: `PrometheusIntegrationResetTokenInput`
| Name | Type | Description |
| ---- | ---- | ----------- |
| `clientMutationId` | [`String`](#string) | A unique identifier for the client performing the mutation. |
-| `id` | [`PrometheusServiceID!`](#prometheusserviceid) | The ID of the integration to mutate. |
+| `id` | [`IntegrationsPrometheusID!`](#integrationsprometheusid) | The ID of the integration to mutate. |
#### Fields
@@ -3294,7 +3294,7 @@ Input type: `PrometheusIntegrationUpdateInput`
| `active` | [`Boolean`](#boolean) | Whether the integration is receiving alerts. |
| `apiUrl` | [`String`](#string) | Endpoint at which Prometheus can be queried. |
| `clientMutationId` | [`String`](#string) | A unique identifier for the client performing the mutation. |
-| `id` | [`PrometheusServiceID!`](#prometheusserviceid) | The ID of the integration to mutate. |
+| `id` | [`IntegrationsPrometheusID!`](#integrationsprometheusid) | The ID of the integration to mutate. |
#### Fields
@@ -15365,6 +15365,13 @@ An example `IncidentManagementOncallRotationID` is: `"gid://gitlab/IncidentManag
Represents non-fractional signed whole numeric values. Int can represent values between -(2^31) and 2^31 - 1.
+### `IntegrationsPrometheusID`
+
+A `IntegrationsPrometheusID` is a global ID. It is encoded as a string.
+
+An example `IntegrationsPrometheusID` is: `"gid://gitlab/Integrations::Prometheus/1"`.
+The older format `"gid://gitlab/PrometheusService/1"` was deprecated in 14.1.
+
### `IssuableID`
A `IssuableID` is a global ID. It is encoded as a string.
@@ -15510,12 +15517,6 @@ A `ProjectID` is a global ID. It is encoded as a string.
An example `ProjectID` is: `"gid://gitlab/Project/1"`.
-### `PrometheusServiceID`
-
-A `PrometheusServiceID` is a global ID. It is encoded as a string.
-
-An example `PrometheusServiceID` is: `"gid://gitlab/PrometheusService/1"`.
-
### `ReleasesLinkID`
A `ReleasesLinkID` is a global ID. It is encoded as a string.
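For reference, a hedged Ruby sketch of how such a global ID string breaks down, using the Rails `globalid` gem and the example ID from the new `IntegrationsPrometheusID` entry:

```ruby
require 'global_id'

# Sketch only: parse the example global ID without loading the GitLab application.
gid = GlobalID.parse('gid://gitlab/Integrations::Prometheus/1')
gid.app        # => "gitlab"
gid.model_name # => "Integrations::Prometheus"
gid.model_id   # => "1"
```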
diff --git a/doc/api/group_clusters.md b/doc/api/group_clusters.md
index 6853e38cc32..c942da79581 100644
--- a/doc/api/group_clusters.md
+++ b/doc/api/group_clusters.md
@@ -258,7 +258,7 @@ Example request:
```shell
curl --header "Private-Token: " "https://gitlab.example.com/api/v4/groups/26/clusters/24" \
-H "Content-Type:application/json" \
---request PUT --data '{"name":"new-cluster-name","domain":"new-domain.com","api_url":"https://new-api-url.com"}'
+--request PUT --data '{"name":"new-cluster-name","domain":"new-domain.com","platform_kubernetes_attributes":{"api_url":"https://10.10.101.1:6433"}}'
```
Example response:
diff --git a/doc/development/i18n/externalization.md b/doc/development/i18n/externalization.md
index 7ea8378b6db..796a1f44ccd 100644
--- a/doc/development/i18n/externalization.md
+++ b/doc/development/i18n/externalization.md
@@ -363,7 +363,7 @@ use `%{created_at}` in Ruby but `%{createdAt}` in JavaScript. Make sure to
// => When x == 2: 'Last 2 days'
```
-The `n_` method should only be used to fetch pluralized translations of the same
+The `n_` and `n__` methods should only be used to fetch pluralized translations of the same
string, not to control the logic of showing different strings for different
quantities. Some languages have different quantities of target plural forms.
For example, Chinese (simplified) has only one target plural form in our
@@ -376,7 +376,7 @@ For example, use this:
if selected_projects.one?
selected_projects.first.name
else
- n__("Project selected", "%d projects selected", selected_projects.count)
+ n_("Project selected", "%d projects selected", selected_projects.count)
end
```
diff --git a/doc/install/azure/index.md b/doc/install/azure/index.md
index 0d62e4d1215..1351489642e 100644
--- a/doc/install/azure/index.md
+++ b/doc/install/azure/index.md
@@ -238,9 +238,11 @@ in this section whenever you need to update GitLab.
### Check the current version
-To determine the version of GitLab you're currently running,
-go to the **{admin}** **Admin Area**, and find the version
-under the **Components** table.
+To determine the version of GitLab you're currently running:
+
+1. On the top bar, select **Menu >** **{admin}** **Admin**.
+1. On the left sidebar, select **Overview > Dashboard**.
+1. Find the version under the **Components** table.
If there's a newer available version of GitLab that contains one or more
security fixes, GitLab displays an **Update asap** notification message that
diff --git a/doc/user/admin_area/geo_nodes.md b/doc/user/admin_area/geo_nodes.md
index 32b1555c33d..19a76d0938b 100644
--- a/doc/user/admin_area/geo_nodes.md
+++ b/doc/user/admin_area/geo_nodes.md
@@ -10,7 +10,10 @@ type: howto
You can configure various settings for GitLab Geo nodes. For more information, see
[Geo documentation](../../administration/geo/index.md).
-On the primary node, go to **Admin Area > Geo**. On secondary nodes, go to **Admin Area > Geo > Nodes**.
+On either the primary or secondary node:
+
+1. On the top bar, select **Menu >** **{admin}** **Admin**.
+1. On the left sidebar, select **Geo > Nodes**.
## Common settings
@@ -61,8 +64,13 @@ The **primary** node's Internal URL is used by **secondary** nodes to contact it
[External URL](https://docs.gitlab.com/omnibus/settings/configuration.html#configuring-the-external-url-for-gitlab)
which is used by users. Internal URL does not need to be a private address.
-Internal URL defaults to External URL, but you can customize it under
-**Admin Area > Geo > Nodes**.
+Internal URL defaults to the external URL, but you can also customize it:
+
+1. On the top bar, select **Menu >** **{admin}** **Admin**.
+1. On the left sidebar, select **Geo > Nodes**.
+1. Select **Edit** on the node you want to customize.
+1. Edit the internal URL.
+1. Select **Save changes**.
WARNING:
We recommend using an HTTPS connection while configuring the Geo nodes. To avoid
diff --git a/doc/user/clusters/management_project.md b/doc/user/clusters/management_project.md
index f741ab2d95a..435ca8f76c5 100644
--- a/doc/user/clusters/management_project.md
+++ b/doc/user/clusters/management_project.md
@@ -22,7 +22,7 @@ This can be useful for:
## Permissions
Only the management project receives `cluster-admin` privileges. All
-other projects continue to receive [namespace scoped `edit` level privileges](../project/clusters/add_remove_clusters.md#rbac-cluster-resources).
+other projects continue to receive [namespace scoped `edit` level privileges](../project/clusters/cluster_access.md#rbac-cluster-resources).
Management projects are restricted to the following:
diff --git a/doc/user/group/clusters/index.md b/doc/user/group/clusters/index.md
index 4de464822f7..b0414ca6fa4 100644
--- a/doc/user/group/clusters/index.md
+++ b/doc/user/group/clusters/index.md
@@ -163,7 +163,7 @@ are deployed to the Kubernetes cluster, see the documentation for
## Security of runners
For important information about securely configuring runners, see
-[Security of runners](../../project/clusters/add_remove_clusters.md#security-of-runners)
+[Security of runners](../../project/clusters/cluster_access.md#security-of-runners)
documentation for project-level clusters.
## More information
diff --git a/doc/user/project/clusters/add_eks_clusters.md b/doc/user/project/clusters/add_eks_clusters.md
index 58bdb3d698f..4bcee054701 100644
--- a/doc/user/project/clusters/add_eks_clusters.md
+++ b/doc/user/project/clusters/add_eks_clusters.md
@@ -4,85 +4,57 @@ group: Configure
info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://about.gitlab.com/handbook/engineering/ux/technical-writing/#assignments
---
-# Adding EKS clusters **(FREE)**
+# EKS clusters (DEPRECATED) **(FREE)**
-GitLab supports adding new and existing EKS clusters.
+> - [Introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/22392) in GitLab 12.5.
+> - [Deprecated](https://gitlab.com/gitlab-org/gitlab/-/issues/327908) in GitLab 14.0.
-## EKS requirements
+WARNING:
+Use [Infrastructure as Code](../../infrastructure/index.md) to create new clusters. The method described in this document is deprecated as of GitLab 14.0.
-Before creating your first cluster on Amazon EKS with the GitLab integration, make sure the following
-requirements are met:
+Through GitLab, you can create new clusters and add existing clusters hosted on Amazon Elastic
+Kubernetes Service (EKS).
-- An [Amazon Web Services](https://aws.amazon.com/) account is set up and you are able to log in.
-- You have permissions to manage IAM resources.
-- If you want to use an [existing EKS cluster](#existing-eks-cluster):
- - An Amazon EKS cluster with worker nodes properly configured.
- - `kubectl` [installed and configured](https://docs.aws.amazon.com/eks/latest/userguide/getting-started.html#get-started-kubectl)
- for access to the EKS cluster.
+## Add an existing EKS cluster
-### Additional requirements for self-managed instances **(FREE SELF)**
+If you already have an EKS cluster and want to integrate it with GitLab,
+see how to [add an existing cluster](add_existing_cluster.md).
-If you are using a self-managed GitLab instance, GitLab must first be configured with a set of
-Amazon credentials. These credentials are used to assume an Amazon IAM role provided by the user
-creating the cluster. Create an IAM user and ensure it has permissions to assume the role(s) that
-your users need to create EKS clusters.
+## Create a new certificate-based EKS cluster
-For example, the following policy document allows assuming a role whose name starts with
-`gitlab-eks-` in account `123456789012`:
+Prerequisites:
-```json
-{
- "Version": "2012-10-17",
- "Statement": {
- "Effect": "Allow",
- "Action": "sts:AssumeRole",
- "Resource": "arn:aws:iam::123456789012:role/gitlab-eks-*"
- }
-}
-```
+- An [Amazon Web Services](https://aws.amazon.com/) account.
+- Permissions to manage IAM resources.
-### Configure Amazon authentication
+For instance-level clusters, see [additional requirements for self-managed instances](#additional-requirements-for-self-managed-instances). **(FREE SELF)**
-To configure Amazon authentication in GitLab, generate an access key for the IAM user in the Amazon AWS console, and following the steps below.
+To create new Kubernetes clusters for your project, group, or instance through the certificate-based method:
-1. Navigate to **Admin Area > Settings > General** and expand the **Amazon EKS** section.
-1. Check **Enable Amazon EKS integration**.
-1. Enter your **Account ID**.
-1. Depending on your configuration, enter your access key and ID:
+1. [Define the access control (RBAC or ABAC) for your cluster](cluster_access.md).
+1. [Create a cluster in GitLab](#create-a-new-eks-cluster-in-gitlab).
+1. [Prepare the cluster in Amazon](#prepare-the-cluster-in-amazon).
+1. [Configure your cluster's data in GitLab](#configure-your-clusters-data-in-gitlab).
- - _GitLab 13.7 and later, and using an instance profile_: You may leave
- **Access key ID** and **Secret access key** blank.
- Read [Instance profiles](#instance-profiles) for more information.
- - _All GitLab versions_: Enter your access key credentials into
- **Access key ID** and **Secret access key**.
+Further steps:
-1. Click **Save changes**.
+1. [Create a default Storage Class](#create-a-default-storage-class).
+1. [Deploy the app to EKS](#deploy-the-app-to-eks).
-#### Instance profiles
+### Create a new EKS cluster in GitLab
-> Introduced in [GitLab 13.7](https://gitlab.com/gitlab-org/gitlab/-/issues/291015).
+To create a new EKS cluster:
-You may leave `Access key ID` and `Secret access key` fields blank if
-you are using an instance profile
-[to pass an IAM role to an EC2 instance](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use_switch-role-ec2_instance-profiles.html).
-Instance profiles dynamically retrieve temporary credentials from AWS when needed.
-
-## New EKS cluster
-
-> [Introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/22392) in GitLab 12.5.
-
-To create and add a new Kubernetes cluster to your project, group, or instance:
-
-1. Navigate to your:
+1. Go to your:
- Project's **Infrastructure > Kubernetes clusters** page, for a project-level cluster.
- Group's **Kubernetes** page, for a group-level cluster.
- - **Admin Area > Kubernetes**, for an instance-level cluster.
-1. Click **Integrate with a cluster certificate**.
+ - **Menu >** **{admin}** **Admin > Kubernetes**, for an instance-level cluster.
+1. Select **Integrate with a cluster certificate**.
1. Under the **Create new cluster** tab, click **Amazon EKS** to display an
`Account ID` and `External ID` needed for later steps.
1. In the [IAM Management Console](https://console.aws.amazon.com/iam/home), create an IAM policy:
1. From the left panel, select **Policies**.
- 1. Click **Create Policy**, which opens a new window.
+ 1. Select **Create Policy**, which opens a new window.
1. Select the **JSON** tab, and paste the following snippet in place of the
existing content. These permissions give GitLab the ability to create
resources, but not delete them:
@@ -133,132 +105,163 @@ To create and add a new Kubernetes cluster to your project, group, or instance:
}
```
- If an error is encountered during the creation process, changes will
- not be rolled back and you must remove resources manually. You can do this by deleting
- the relevant [CloudFormation stack](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/cfn-console-delete-stack.html)
+ If you get an error during this process, GitLab does not roll back the changes. You must remove resources manually. You can do this by deleting
+ the relevant [CloudFormation stack](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/cfn-console-delete-stack.html).
1. Click **Review policy**.
1. Enter a suitable name for this policy, and click **Create Policy**. You can now close this window.
-1. In the [IAM Management Console](https://console.aws.amazon.com/iam/home), create an **EKS IAM role** following the [Amazon EKS cluster IAM role instructions](https://docs.aws.amazon.com/eks/latest/userguide/service_IAM_role.html). This role should exist so that Kubernetes clusters managed by Amazon EKS can make calls to other AWS services on your behalf to manage the resources that you use with the service.
- In addition to the policies that guide suggests, you must also include the `AmazonEKSClusterPolicy`
- policy for this role in order for GitLab to manage the EKS cluster correctly.
-1. In the [IAM Management Console](https://console.aws.amazon.com/iam/home), create another IAM role which will be used by GitLab to authenticate with AWS. Follow these steps to create it:
- 1. On the AWS IAM console, select **Roles** from the left panel.
- 1. Click **Create role**.
- 1. Under `Select type of trusted entity`, select **Another AWS account**.
- 1. Enter the Account ID from GitLab into the `Account ID` field.
- 1. Check **Require external ID**.
- 1. Enter the External ID from GitLab into the `External ID` field.
- 1. Click **Next: Permissions**, and select the policy you just created.
- 1. Click **Next: Tags**, and optionally enter any tags you wish to associate with this role.
- 1. Click **Next: Review**.
- 1. Enter a role name and optional description into the fields provided.
- 1. Click **Create role**, the new role name displays at the top. Click on its name and copy the `Role ARN` from the newly created role.
-1. In GitLab, enter the copied role ARN into the `Role ARN` field.
-1. In the **Cluster Region** field, enter the [region](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html) you plan to use for your new cluster. GitLab confirms you have access to this region when authenticating your role.
-1. Click **Authenticate with AWS**.
-1. Choose your cluster's settings:
- - **Kubernetes cluster name** - The name you wish to give the cluster.
- - **Environment scope** - The [associated environment](index.md#setting-the-environment-scope) to this cluster.
- - **Kubernetes version** - The [Kubernetes version](index.md#supported-cluster-versions) to use.
- - **Service role** - Select the **EKS IAM role** you created earlier to allow Amazon EKS
- and the Kubernetes control plane to manage AWS resources on your behalf.
+### Prepare the cluster in Amazon
- NOTE:
- This IAM role is _not_ the IAM role you created in the previous step. It should be
- the one you created much earlier by following the
- [Amazon EKS cluster IAM role](https://docs.aws.amazon.com/eks/latest/userguide/service_IAM_role.html)
- guide.
- - **Key pair name** - Select the [key pair](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html)
- that you can use to connect to your worker nodes if required.
- - **VPC** - Select a [VPC](https://docs.aws.amazon.com/vpc/latest/userguide/what-is-amazon-vpc.html)
- to use for your EKS Cluster resources.
- - **Subnets** - Choose the [subnets](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Subnets.html)
- in your VPC where your worker nodes run. You must select at least two.
- - **Security group** - Choose the [security group](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_SecurityGroups.html)
- to apply to the EKS-managed Elastic Network Interfaces that are created in your worker node subnets.
- - **Instance type** - The [instance type](https://aws.amazon.com/ec2/instance-types/) of your worker nodes.
- - **Node count** - The number of worker nodes.
- - **GitLab-managed cluster** - Leave this checked if you want GitLab to manage namespaces and service accounts for this cluster.
- See the [Managed clusters section](index.md#gitlab-managed-clusters) for more information.
-1. Finally, click the **Create Kubernetes cluster** button.
+1. [Create an **EKS IAM role** for your cluster](#create-an-eks-iam-role-for-your-cluster) (**role A**).
+1. [Create **another EKS IAM role** for GitLab authentication with Amazon](#create-another-eks-iam-role-for-gitlab-authentication-with-amazon) (**role B**).
+
+#### Create an EKS IAM role for your cluster
+
+In the [IAM Management Console](https://console.aws.amazon.com/iam/home),
+create an **EKS IAM role** (**role A**) following the [Amazon EKS cluster IAM role instructions](https://docs.aws.amazon.com/eks/latest/userguide/service_IAM_role.html).
+This role is necessary so that Kubernetes clusters managed by Amazon EKS can make calls to other AWS
+services on your behalf to manage the resources that you use with the service.
+
+For GitLab to manage the EKS cluster correctly, you must include `AmazonEKSClusterPolicy` in
+addition to the policies the guide suggests.
+
+#### Create another EKS IAM role for GitLab authentication with Amazon
+
+In the [IAM Management Console](https://console.aws.amazon.com/iam/home),
+create another IAM role (**role B**) for GitLab authentication with AWS:
+
+1. On the AWS IAM console, select **Roles** from the left panel.
+1. Click **Create role**.
+1. Under **Select type of trusted entity**, select **Another AWS account**.
+1. Enter the Account ID from GitLab into the **Account ID** field.
+1. Check **Require external ID**.
+1. Enter the External ID from GitLab into the **External ID** field.
+1. Click **Next: Permissions**, and select the policy you just created.
+1. Click **Next: Tags**, and optionally enter any tags you wish to associate with this role.
+1. Click **Next: Review**.
+1. Enter a role name and optional description into the fields provided.
+1. Click **Create role**. The new role name displays at the top. Click on its name and copy the
+ `Role ARN` from the newly created role.
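+
+If you prefer the AWS CLI to the console, the following sketch shows roughly
+equivalent calls for creating **role B**. The role name, policy name, and the
+values in the trust policy are placeholders; use the Account ID and External ID
+that GitLab displays:
+
+```shell
+# Trust policy that lets the account shown by GitLab assume this role,
+# but only when the expected external ID is supplied.
+cat > gitlab-trust-policy.json <<'EOF'
+{
+  "Version": "2012-10-17",
+  "Statement": {
+    "Effect": "Allow",
+    "Principal": { "AWS": "arn:aws:iam::<account_id_from_gitlab>:root" },
+    "Action": "sts:AssumeRole",
+    "Condition": { "StringEquals": { "sts:ExternalId": "<external_id_from_gitlab>" } }
+  }
+}
+EOF
+
+# Create the role and attach the policy you created earlier.
+aws iam create-role \
+  --role-name gitlab-eks-provision \
+  --assume-role-policy-document file://gitlab-trust-policy.json
+aws iam attach-role-policy \
+  --role-name gitlab-eks-provision \
+  --policy-arn "arn:aws:iam::<your_account_id>:policy/<your_policy_name>"
+```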
+
+### Configure your cluster's data in GitLab
+
+1. Back in GitLab, enter the copied role ARN into the **Role ARN** field.
+1. In the **Cluster Region** field, enter the [region](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html) you plan to use for your new cluster. GitLab confirms you have access to this region when authenticating your role.
+1. Select **Authenticate with AWS**.
+1. Adjust your [cluster's settings](#cluster-settings).
+1. Select the **Create Kubernetes cluster** button.
After about 10 minutes, your cluster is ready to go.
NOTE:
-If you have [installed and configured](https://docs.aws.amazon.com/eks/latest/userguide/getting-started.html#get-started-kubectl) `kubectl` and you would like to manage your cluster with it, you must add your AWS external ID in the AWS configuration. For more information on how to configure AWS CLI, see [using an IAM role in the AWS CLI](https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-role.html#cli-configure-role-xaccount).
+If you have [installed and configured](https://docs.aws.amazon.com/eks/latest/userguide/getting-started.html#get-started-kubectl) `kubectl` and you would like to manage your cluster with it, you must add your AWS external ID in the AWS configuration. For more information on how to configure AWS CLI, see [using an IAM role in the AWS CLI](https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-role.html#cli-configure-role-xaccount).
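+
+For example, a profile in `~/.aws/config` that assumes the provision role with the
+external ID might look like the following sketch. The profile name, role ARN, and
+source profile are placeholders for your own values:
+
+```shell
+# Append a profile that assumes the EKS provision role using the external ID.
+cat >> ~/.aws/config <<'EOF'
+[profile gitlab-eks]
+role_arn = arn:aws:iam::123456789012:role/gitlab-eks-provision
+external_id = <external_id_from_gitlab>
+source_profile = default
+EOF
+```
+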
-### Cluster creation flow
+#### Cluster settings
-The following sequence illustrates how GitLab works with AWS to create an EKS cluster:
+When you create a new cluster, you can configure the following settings:
-```mermaid
-sequenceDiagram
- autonumber
- participant G as GitLab
- participant A as AWS
- participant E as EKS cluster
- alt static credentials
- G->>G: Load AWS Access and secret key
- end
- alt IAM instance profile
- G->>A: Fetch temporary credentials
- A->>G: Temporary access credentials
- end
- G->>A: AssumeRole: EKS Provision Role
- A->>A: Check account, external IDs
- A->>A: Check permissions
- A->>G: New access credentials
- note over G: user selects EKS cluster options
- note over G,A: Use Service Role credentials
- G->>A: CreateStack (CloudFormation)
- A->>G: Received
- G->>G: Wait 5 minutes
- loop Poll for cluster creation
- G->>A: DescribeStacks
- A->>G: CREATE_IN_PROGRESS
- end
- note over G,E: EKS Cluster Created
- G->>A: DescribeStacks
- A->>G: CREATE_COMPLETE
- G->>E: kubectl create role (service account)
- E->>G: OK
+| Setting | Description |
+| ----------------------- |------------ |
+| Kubernetes cluster name | Your cluster's name. |
+| Environment scope | The [associated environment](index.md#setting-the-environment-scope). |
+| Service role | The **EKS IAM role** (**role A**). |
+| Kubernetes version | The [Kubernetes version](index.md#supported-cluster-versions) for your cluster. |
+| Key pair name | The [key pair](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html) that you can use to connect to your worker nodes. |
+| VPC | The [VPC](https://docs.aws.amazon.com/vpc/latest/userguide/what-is-amazon-vpc.html) to use for your EKS Cluster resources. |
+| Subnets | The [subnets](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Subnets.html) in your VPC where your worker nodes run. Two are required. |
+| Security group | The [security group](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_SecurityGroups.html) to apply to the EKS-managed Elastic Network Interfaces that are created in your worker node subnets. |
+| Instance type | The [instance type](https://aws.amazon.com/ec2/instance-types/) of your worker nodes. |
+| Node count | The number of worker nodes. |
+| GitLab-managed cluster  | Select this option if you want GitLab to manage namespaces and service accounts for this cluster. |
+
+## Create a default Storage Class
+
+Amazon EKS doesn't have a default Storage Class out of the box, which means
+requests for persistent volumes are not automatically fulfilled. As part
+of Auto DevOps, the deployed PostgreSQL instance requests persistent storage,
+and without a default storage class it cannot start.
+
+If you want a default Storage Class and one doesn't already exist, follow Amazon's
+[guide on storage classes](https://docs.aws.amazon.com/eks/latest/userguide/storage-classes.html)
+to create one.
+
+Alternatively, disable PostgreSQL by setting the project variable
+[`POSTGRES_ENABLED`](../../../topics/autodevops/customize.md#cicd-variables) to `false`.
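+
+For reference, a minimal default Storage Class for EBS-backed volumes might look like
+the following sketch, which uses the in-tree `kubernetes.io/aws-ebs` provisioner and
+`gp2` volumes. Adjust it to your own storage requirements:
+
+```shell
+# Create a gp2 Storage Class and mark it as the cluster default.
+kubectl apply -f - <<'EOF'
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+  name: gp2
+  annotations:
+    storageclass.kubernetes.io/is-default-class: "true"
+provisioner: kubernetes.io/aws-ebs
+parameters:
+  type: gp2
+EOF
+
+# Confirm which Storage Class is marked as default.
+kubectl get storageclass
+```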
+
+## Deploy the app to EKS
+
+With RBAC disabled and services deployed, you can now use
+[Auto DevOps](../../../topics/autodevops/index.md)
+to build, test, and deploy the app.
+
+[Enable Auto DevOps](../../../topics/autodevops/index.md#at-the-project-level)
+if it isn't enabled already. If you created a wildcard DNS entry that resolves to the
+Load Balancer, enter it in the `domain` field under the Auto DevOps settings.
+Otherwise, the deployed app isn't reachable from outside the cluster.
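+
+To find the load balancer hostname that the wildcard DNS entry should resolve to, you
+can list the cluster's services and look for one of type `LoadBalancer`, for example:
+
+```shell
+# The EXTERNAL-IP column of LoadBalancer services shows the AWS hostname
+# to point the wildcard DNS record at.
+kubectl get services --all-namespaces | grep LoadBalancer
+```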
+
+GitLab creates a new pipeline, which begins to build, test, and deploy the app.
+
+After the pipeline has finished, your app runs in EKS, and is available
+to users. Click on **CI/CD > Environments**.
+
+GitLab displays a list of the environments and their deploy status, as well as
+options to browse to the app, view monitoring metrics, and even access a shell
+on the running pod.
+
+## Additional requirements for self-managed instances **(FREE SELF)**
+
+If you are using a self-managed GitLab instance, you need to configure
+Amazon credentials. GitLab uses these credentials to assume an Amazon IAM role to create your cluster.
+
+Create an IAM user and ensure it has permissions to assume the role(s) that
+your users need to create EKS clusters.
+
+For example, the following policy document allows assuming a role whose name starts with
+`gitlab-eks-` in account `123456789012`:
+
+```json
+{
+ "Version": "2012-10-17",
+ "Statement": {
+ "Effect": "Allow",
+ "Action": "sts:AssumeRole",
+ "Resource": "arn:aws:iam::123456789012:role/gitlab-eks-*"
+ }
+}
```
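+
+One way to grant this is to attach the policy document above to the IAM user as an
+inline policy from the AWS CLI. The user name, policy name, and file name below are
+placeholders:
+
+```shell
+# Attach the assume-role policy (saved as gitlab-eks-assume-role.json)
+# to the IAM user whose credentials GitLab uses.
+aws iam put-user-policy \
+  --user-name gitlab-eks-integration \
+  --policy-name gitlab-eks-assume-role \
+  --policy-document file://gitlab-eks-assume-role.json
+```
+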
-First, GitLab must obtain an initial set of credentials to communicate with the AWS API.
-These credentials can be retrieved in one of two ways:
+### Configure Amazon authentication
-- Statically through the [Configure Amazon authentication](#configure-amazon-authentication).
-- Dynamically via an IAM instance profile ([introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/291015) in GitLab 13.7).
+To configure Amazon authentication in GitLab, generate an access key for the
+IAM user in the Amazon AWS console, and follow these steps:
-After GitLab retrieves the AWS credentials, it makes an
-[AssumeRole](https://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRole.html)
-API call to obtain credentials for the Provision Role. AWS confirms
-the request has the correct account ID, external ID, and permissions.
+1. In GitLab, on the top bar, select **Menu >** **{admin}** **Admin > Settings > General** and expand the **Amazon EKS** section.
+1. Check **Enable Amazon EKS integration**.
+1. Enter your **Account ID**.
+1. Enter your [access key and ID](#eks-access-key-and-id).
+1. Click **Save changes**.
-If the request is valid, AWS returns a new set of temporary credentials GitLab
-uses to load the **Create cluster** options page.
+#### EKS access key and ID
-On the **Create cluster** page, the user must select a **Service Role**, which is
-the IAM role that is actually used to create the cluster, and other options
-such as the Kubernetes cluster name, Kubernetes version, and region.
-After the user clicks the **Create Kubernetes cluster** button, GitLab
-submits a CloudFormation API request to create an EKS cluster with the given parameters
-from the user. GitLab waits 5 minutes before checking whether the cluster was created,
-and polls once a minute for up to 30 minutes.
+> Instance profiles were [introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/291015) in GitLab 13.7.
-After GitLab receives a `CREATE_COMPLETE` message from AWS, GitLab talks
-to the EKS cluster to create a Kubernetes service account with `cluster-admin`
-privileges, and updates its internal database to reflect the newly-created
-Kubernetes cluster. From this point forward, GitLab uses this service account to
-interact with the cluster.
+If you're using GitLab 13.7 or later, you can use instance profiles to
+dynamically retrieve temporary credentials from AWS when needed.
+In this case, leave the `Access key ID` and `Secret access key` fields blank
+and [pass an IAM role to an EC2 instance](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use_switch-role-ec2_instance-profiles.html).
-### Troubleshooting creating a new cluster
+Otherwise, enter your access key credentials into **Access key ID** and **Secret access key**.
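+
+If you use static credentials and prefer the AWS CLI to the console, you can generate
+the access key for the IAM user with a command like the following (the user name is a
+placeholder):
+
+```shell
+# Returns an AccessKeyId and SecretAccessKey to enter in the GitLab form.
+aws iam create-access-key --user-name gitlab-eks-integration
+```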
+
+## Troubleshooting
The following errors are commonly encountered when creating a new cluster.
-#### Validation failed: Role ARN must be a valid Amazon Resource Name
+### Validation failed: Role ARN must be a valid Amazon Resource Name
Check that the `Provision Role ARN` is correct. An example of a valid ARN:
@@ -266,7 +269,7 @@ Check that the `Provision Role ARN` is correct. An example of a valid ARN:
arn:aws:iam::123456789012:role/gitlab-eks-provision'
```
-#### Access denied: User `arn:aws:iam::x` is not authorized to perform: `sts:AssumeRole` on resource: `arn:aws:iam::y`
+### Access denied: User `arn:aws:iam::x` is not authorized to perform: `sts:AssumeRole` on resource: `arn:aws:iam::y`
This error occurs when the credentials defined in the
[Configure Amazon authentication](#configure-amazon-authentication) cannot assume the role defined by the
@@ -280,7 +283,7 @@ Provision Role ARN. Check that:

-#### Could not load Security Groups for this VPC
+### Could not load Security Groups for this VPC
When populating options in the configuration form, GitLab returns this error
because GitLab has successfully assumed your provided role, but the role has
@@ -307,46 +310,3 @@ This role should be the role you created by following the
[EKS cluster IAM role](https://docs.aws.amazon.com/eks/latest/userguide/service_IAM_role.html) guide.
In addition to the policies that guide suggests, you must also include the
`AmazonEKSClusterPolicy` policy for this role in order for GitLab to manage the EKS cluster correctly.
-
-## Existing EKS cluster
-
-For information on adding an existing EKS cluster, see
-[Existing Kubernetes cluster](add_remove_clusters.md#existing-kubernetes-cluster).
-
-### Create a default Storage Class
-
-Amazon EKS doesn't have a default Storage Class out of the box, which means
-requests for persistent volumes are not automatically fulfilled. As part
-of Auto DevOps, the deployed PostgreSQL instance requests persistent storage,
-and without a default storage class it cannot start.
-
-If a default Storage Class doesn't already exist and is desired, follow Amazon's
-[guide on storage classes](https://docs.aws.amazon.com/eks/latest/userguide/storage-classes.html)
-to create one.
-
-Alternatively, disable PostgreSQL by setting the project variable
-[`POSTGRES_ENABLED`](../../../topics/autodevops/customize.md#cicd-variables) to `false`.
-
-### Deploy the app to EKS
-
-With RBAC disabled and services deployed,
-[Auto DevOps](../../../topics/autodevops/index.md) can now be leveraged
-to build, test, and deploy the app.
-
-[Enable Auto DevOps](../../../topics/autodevops/index.md#at-the-project-level)
-if not already enabled. If a wildcard DNS entry was created resolving to the
-Load Balancer, enter it in the `domain` field under the Auto DevOps settings.
-Otherwise, the deployed app isn't externally available outside of the cluster.
-
-
-
-GitLab creates a new pipeline, which begins to build, test, and deploy the app.
-
-After the pipeline has finished, your app runs in EKS, and is available
-to users. Click on **CI/CD > Environments**.
-
-
-
-GitLab displays a list of the environments and their deploy status, as well as
-options to browse to the app, view monitoring metrics, and even access a shell
-on the running pod.
diff --git a/doc/user/project/clusters/add_existing_cluster.md b/doc/user/project/clusters/add_existing_cluster.md
new file mode 100644
index 00000000000..5f564eadb44
--- /dev/null
+++ b/doc/user/project/clusters/add_existing_cluster.md
@@ -0,0 +1,224 @@
+---
+stage: Configure
+group: Configure
+info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://about.gitlab.com/handbook/engineering/ux/technical-writing/#assignments
+---
+
+# Add an existing Kubernetes cluster
+
+If you have an existing Kubernetes cluster, you can add it to a project, group,
+or instance and benefit from the integration with GitLab.
+
+## Prerequisites
+
+See the prerequisites below to add existing clusters to GitLab.
+
+### All clusters
+
+To add any cluster to GitLab, you need:
+
+- Either a GitLab.com account or an account for a self-managed installation
+running GitLab 12.5 or later.
+- Maintainer permissions for group-level and project-level clusters.
+- Access to the Admin area for instance-level clusters. **(FREE SELF)**
+- A Kubernetes cluster.
+- Cluster administration access to the cluster with `kubectl`.
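+
+Before you start, you can confirm that your `kubectl` context points at the right
+cluster and that your credentials have cluster administration rights, for example:
+
+```shell
+# Show which cluster the current context talks to.
+kubectl cluster-info
+
+# Check whether the current credentials can perform any action in any namespace.
+kubectl auth can-i '*' '*' --all-namespaces
+```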
+
+You can host your cluster in [EKS](#eks-clusters), [GKE](#gke-clusters),
+on premises, or with another provider.
+For clusters hosted on premises or with another provider,
+follow either the EKS or GKE instructions and enter your
+cluster's settings manually.
+
+WARNING:
+GitLab doesn't support `arm64` clusters. See the issue
+[Helm Tiller fails to install on `arm64` cluster](https://gitlab.com/gitlab-org/gitlab/-/issues/29838)
+for details.
+
+### EKS clusters
+
+To add an existing **EKS** cluster, you need:
+
+- An Amazon EKS cluster with worker nodes properly configured.
+- `kubectl` [installed and configured](https://docs.aws.amazon.com/eks/latest/userguide/getting-started.html#get-started-kubectl)
+for access to the EKS cluster.
+- A token for an account with administrator privileges for the cluster.
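+
+For example, you can configure and verify `kubectl` access to the cluster with:
+
+```shell
+# Write or update the kubeconfig entry for the EKS cluster, then verify access.
+aws eks update-kubeconfig --region <region> --name <cluster_name>
+kubectl get nodes
+```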
+
+### GKE clusters
+
+To add an existing **GKE** cluster, you need:
+
+- The `container.clusterRoleBindings.create` permission to create a cluster
+role binding. You can follow the [Google Cloud documentation](https://cloud.google.com/iam/docs/granting-changing-revoking-access)
+to grant access.
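+
+For example, a project owner could grant a predefined role that contains this
+permission, such as Kubernetes Engine Admin, with `gcloud`. The project ID and member
+are placeholders, and a narrower custom role may be preferable:
+
+```shell
+# Grant the Kubernetes Engine Admin role, which includes
+# container.clusterRoleBindings.create, to a user on the project.
+gcloud projects add-iam-policy-binding <project-id> \
+  --member='user:you@example.com' \
+  --role='roles/container.admin'
+```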
+
+## How to add an existing cluster
+
+To add a Kubernetes cluster to your project, group, or instance:
+
+1. Navigate to your:
+ 1. Project's **{cloud-gear}** **Infrastructure > Kubernetes clusters** page, for a project-level cluster.
+ 1. Group's **{cloud-gear}** **Kubernetes** page, for a group-level cluster.
+ 1. **Menu >** **{admin}** **Admin >** **{cloud-gear}** **Kubernetes** page, for an instance-level cluster.
+1. Click **Add Kubernetes cluster**.
+1. Click the **Add existing cluster** tab and fill in the details:
+ 1. **Kubernetes cluster name** (required) - The name you wish to give the cluster.
+ 1. **Environment scope** (required) - The
+ [associated environment](index.md#setting-the-environment-scope) to this cluster.
+ 1. **API URL** (required) -
+ It's the URL that GitLab uses to access the Kubernetes API. Kubernetes
+ exposes several APIs; you want the "base" URL that is common to all of them.
+ For example, `https://kubernetes.example.com` rather than `https://kubernetes.example.com/api/v1`.
+
+ Get the API URL by running this command:
+
+ ```shell
+ kubectl cluster-info | grep -E 'Kubernetes master|Kubernetes control plane' | awk '/http/ {print $NF}'
+ ```
+
+ 1. **CA certificate** (required) - A valid Kubernetes certificate is needed to authenticate to the cluster. We use the certificate created by default.
+ 1. List the secrets with `kubectl get secrets`, and one should be named similar to
+ `default-token-xxxxx`. Copy that token name for use below.
+ 1. Get the certificate by running this command:
+
+ ```shell
+ kubectl get secret -o jsonpath="{['data']['ca\.crt']}" | base64 --decode
+ ```
+
+ If the command returns the entire certificate chain, you must copy the Root CA
+ certificate and any intermediate certificates at the bottom of the chain.
+ A chain file has the following structure:
+
+ ```plaintext
+ -----BEGIN MY CERTIFICATE-----
+ -----END MY CERTIFICATE-----
+ -----BEGIN INTERMEDIATE CERTIFICATE-----
+ -----END INTERMEDIATE CERTIFICATE-----
+ -----BEGIN INTERMEDIATE CERTIFICATE-----
+ -----END INTERMEDIATE CERTIFICATE-----
+ -----BEGIN ROOT CERTIFICATE-----
+ -----END ROOT CERTIFICATE-----
+ ```
+
+ 1. **Token** -
+ GitLab authenticates against Kubernetes using service tokens, which are
+ scoped to a particular `namespace`.
+ **The token used should belong to a service account with
+ [`cluster-admin`](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#user-facing-roles)
+ privileges.** To create this service account:
+ 1. Create a file called `gitlab-admin-service-account.yaml` with contents:
+
+ ```yaml
+ apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ name: gitlab
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRoleBinding
+ metadata:
+ name: gitlab-admin
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: cluster-admin
+ subjects:
+ - kind: ServiceAccount
+ name: gitlab
+ namespace: kube-system
+ ```
+
+ 1. Apply the service account and cluster role binding to your cluster:
+
+ ```shell
+ kubectl apply -f gitlab-admin-service-account.yaml
+ ```
+
+ You need the `container.clusterRoleBindings.create` permission
+ to create cluster-level roles. If you do not have this permission,
+ you can alternatively enable Basic Authentication and then run the
+ `kubectl apply` command as an administrator:
+
+ ```shell
+ kubectl apply -f gitlab-admin-service-account.yaml --username=admin --password=<password>
+ ```
+
+ NOTE:
+ Basic Authentication can be turned on and the password credentials
+ can be obtained using the Google Cloud Console.
+
+ Output:
+
+ ```shell
+ serviceaccount "gitlab" created
+ clusterrolebinding "gitlab-admin" created
+ ```
+
+ 1. Retrieve the token for the `gitlab` service account:
+
+ ```shell
+ kubectl -n kube-system describe secret $(kubectl -n kube-system get secret | grep gitlab | awk '{print $1}')
+ ```
+
+ Copy the `<authentication_token>` value from the output:
+
+ ```plaintext
+ Name: gitlab-token-b5zv4
+ Namespace: kube-system
+ Labels: <none>
+ Annotations: kubernetes.io/service-account.name=gitlab
+ kubernetes.io/service-account.uid=bcfe66ac-39be-11e8-97e8-026dce96b6e8
+
+ Type: kubernetes.io/service-account-token
+
+ Data
+ ====
+ ca.crt: 1025 bytes
+ namespace: 11 bytes
+ token: <authentication_token>
+ ```
+
+ 1. **GitLab-managed cluster** - Leave this checked if you want GitLab to manage namespaces and service accounts for this cluster.
+ See the [Managed clusters section](index.md#gitlab-managed-clusters) for more information.
+ 1. **Project namespace** (optional) - You don't have to fill this in. By leaving
+ it blank, GitLab creates one for you. Also:
+ - Each project should have a unique namespace.
+ - The project namespace is not necessarily the namespace of the secret, if
+ you're using a secret with broader permissions, like the secret from `default`.
+ - You should **not** use `default` as the project namespace.
+ - If you or someone created a secret specifically for the project, usually
+ with limited permissions, the secret's namespace and project namespace may
+ be the same.
+
+1. Select the **Add Kubernetes cluster** button.
+
+After about 10 minutes, your cluster is ready.
+
+## Disable Role-Based Access Control (RBAC) (optional)
+
+When connecting a cluster via GitLab integration, you may specify whether the
+cluster is RBAC-enabled or not. This affects how GitLab interacts with the
+cluster for certain operations. If you did *not* check the **RBAC-enabled cluster**
+checkbox at creation time, GitLab assumes RBAC is disabled for your cluster
+when interacting with it. If so, you must disable RBAC on your cluster for the
+integration to work properly.
+
+WARNING:
+Disabling RBAC means that any application running in the cluster,
+or user who can authenticate to the cluster, has full API access. This is a
+[security concern](index.md#security-implications), and may not be desirable.
+
+To effectively disable RBAC, you can apply global permissions that grant full access:
+
+```shell
+kubectl create clusterrolebinding permissive-binding \
+ --clusterrole=cluster-admin \
+ --user=admin \
+ --user=kubelet \
+ --group=system:serviceaccounts
+```
diff --git a/doc/user/project/clusters/add_gke_clusters.md b/doc/user/project/clusters/add_gke_clusters.md
index 9f0e5603785..1d820302f8b 100644
--- a/doc/user/project/clusters/add_gke_clusters.md
+++ b/doc/user/project/clusters/add_gke_clusters.md
@@ -4,7 +4,15 @@ group: Configure
info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://about.gitlab.com/handbook/engineering/ux/technical-writing/#assignments
---
-# Adding GKE clusters **(FREE)**
+# GKE clusters (DEPRECATED) **(FREE)**
+
+> - [Deprecated](https://gitlab.com/groups/gitlab-org/-/epics/6049) in GitLab 14.0.
+
+WARNING:
+Use [Infrastructure as Code](../../infrastructure/index.md) to create new clusters. The method described in this document is deprecated as of GitLab 14.0.
+
+Through GitLab, you can create new clusters and add existing clusters hosted on Google
+Kubernetes Engine (GKE).
GitLab supports adding new and existing GKE clusters.
@@ -19,7 +27,12 @@ requirements are met:
take up to 10 minutes after you create a project. For more information see the
["Before you begin" section of the Kubernetes Engine docs](https://cloud.google.com/kubernetes-engine/docs/quickstart#before-you-begin).
-## New GKE cluster
+## Add an existing GKE cluster
+
+If you already have a GKE cluster and want to integrate it with GitLab,
+see how to [add an existing cluster](add_existing_cluster.md).
+
+## Create a new GKE cluster
Starting from [GitLab 12.4](https://gitlab.com/gitlab-org/gitlab/-/issues/25925), all the GKE clusters
provisioned by GitLab are [VPC-native](https://cloud.google.com/kubernetes-engine/docs/how-to/alias-ips).
@@ -30,13 +43,13 @@ Note the following:
at the instance level. If that's not the case, ask your GitLab administrator to enable it. On
GitLab.com, this is enabled.
- Starting from [GitLab 12.1](https://gitlab.com/gitlab-org/gitlab-foss/-/issues/55902), all GKE clusters
- created by GitLab are RBAC-enabled. Take a look at the [RBAC section](add_remove_clusters.md#rbac-cluster-resources) for
+ created by GitLab are RBAC-enabled. Take a look at the [RBAC section](cluster_access.md#rbac-cluster-resources) for
more information.
- Starting from [GitLab 12.5](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/18341), the
cluster's pod address IP range is set to `/16` instead of the regular `/14`. `/16` is a CIDR
notation.
- GitLab requires basic authentication enabled and a client certificate issued for the cluster to
- set up an [initial service account](add_remove_clusters.md#access-controls). In [GitLab versions
+ set up an [initial service account](cluster_access.md). In [GitLab versions
11.10 and later](https://gitlab.com/gitlab-org/gitlab-foss/-/issues/58208), the cluster creation process
explicitly requests GKE to create clusters with basic authentication enabled and a client
certificate.
@@ -49,7 +62,7 @@ To create and add a new Kubernetes cluster to your project, group, or instance:
- Project's **{cloud-gear}** **Infrastructure > Kubernetes clusters** page, for a project-level
cluster.
- Group's **{cloud-gear}** **Kubernetes** page, for a group-level cluster.
- - **Admin Area >** **{cloud-gear}** **Kubernetes** page, for an instance-level cluster.
+ - **Menu >** **{admin}** **Admin >** **{cloud-gear}** **Kubernetes** page, for an instance-level cluster.
1. Click **Integrate with a cluster certificate**.
1. Under the **Create new cluster** tab, click **Google GKE**.
1. Connect your Google account if you haven't done already by clicking the
@@ -81,8 +94,3 @@ You can choose to use Cloud Run for Anthos in place of installing Knative and Is
separately after the cluster has been created. This means that Cloud Run
(Knative), Istio, and HTTP Load Balancing are enabled on the cluster
from the start, and cannot be installed or uninstalled.
-
-## Existing GKE cluster
-
-For information on adding an existing GKE cluster, see
-[Existing Kubernetes cluster](add_remove_clusters.md#existing-kubernetes-cluster).
diff --git a/doc/user/project/clusters/add_remove_clusters.md b/doc/user/project/clusters/add_remove_clusters.md
index 2ecbc4a2ff5..6cada5648cb 100644
--- a/doc/user/project/clusters/add_remove_clusters.md
+++ b/doc/user/project/clusters/add_remove_clusters.md
@@ -4,28 +4,16 @@ group: Configure
info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://about.gitlab.com/handbook/engineering/ux/technical-writing/#assignments
---
-# Add a cluster using cluster certificates **(FREE)**
+# Add a cluster using cluster certificates (DEPRECATED) **(FREE)**
-> [Deprecated](https://gitlab.com/groups/gitlab-org/-/epics/6049) in GitLab 14.0.
+> [Deprecated](https://gitlab.com/gitlab-org/gitlab/-/issues/327908) in GitLab 14.0.
WARNING:
Creating a new cluster or adding an existing cluster to GitLab through the certificate-based method
is deprecated and no longer recommended. Kubernetes cluster, similar to any other
-infrastructure, should be created, updated, and maintained using [Infrastructure as Code](../../infrastructure/index.md).
+infrastructure, should be created, updated, and maintained using [Infrastructure as Code](../../infrastructure/index.md).
GitLab is developing a built-in capability to create clusters with Terraform.
-You can follow along in this [epic](https://gitlab.com/groups/gitlab-org/-/epics/6049).
-
-GitLab offers integrated cluster creation for the following Kubernetes providers:
-
-- Google Kubernetes Engine (GKE).
-- Amazon Elastic Kubernetes Service (EKS).
-
-GitLab can also integrate with any standard Kubernetes provider, either on-premise or hosted.
-
-NOTE:
-Watch the webcast [Scalable app deployment with GitLab and Google Cloud Platform](https://about.gitlab.com/webcast/scalable-app-deploy/)
-and learn how to spin up a Kubernetes cluster managed by Google Cloud Platform (GCP)
-in a few clicks.
+You can follow along in this [epic](https://gitlab.com/groups/gitlab-org/-/epics/6049).
NOTE:
Every new Google Cloud Platform (GCP) account receives
@@ -35,351 +23,76 @@ accounts to get started with the GitLab integration with Google Kubernetes Engin
[Follow this link](https://cloud.google.com/partners/partnercredit/?pcn_code=0014M00001h35gDQAQ#contact-form)
to apply for credit.
-## Before you begin
-
-Before [adding a Kubernetes cluster](#create-new-cluster) using GitLab, you need:
-
-- GitLab itself. Either:
- - A [GitLab.com account](https://about.gitlab.com/pricing/#gitlab-com).
- - A [self-managed installation](https://about.gitlab.com/pricing/#self-managed) with GitLab version
- 12.5 or later. This ensures the GitLab UI can be used for cluster creation.
-- The following GitLab access:
- - [Maintainer role for a project](../../permissions.md#project-members-permissions) for a
- project-level cluster.
- - [Maintainer role for a group](../../permissions.md#group-members-permissions) for a
- group-level cluster.
- - [Admin Area access](../../admin_area/index.md) for a self-managed instance-level
- cluster. **(FREE SELF)**
-
-## Access controls
-
-> - Restricted service account for deployment was [introduced](https://gitlab.com/gitlab-org/gitlab-foss/-/issues/51716) in GitLab 11.5.
-
-When creating a cluster in GitLab, you are asked if you would like to create either:
-
-- A [Role-based access control (RBAC)](https://kubernetes.io/docs/reference/access-authn-authz/rbac/)
- cluster, which is the GitLab default and recommended option.
-- An [Attribute-based access control (ABAC)](https://kubernetes.io/docs/reference/access-authn-authz/abac/) cluster.
-
-When GitLab creates the cluster,
-a `gitlab` service account with `cluster-admin` privileges is created in the `default` namespace
-to manage the newly created cluster.
-
-Helm also creates additional service accounts and other resources for each
-installed application. Consult the documentation of the Helm charts for each application
-for details.
-
-If you are [adding an existing Kubernetes cluster](add_remove_clusters.md#add-existing-cluster),
-ensure the token of the account has administrator privileges for the cluster.
-
-The resources created by GitLab differ depending on the type of cluster.
-
-### Important notes
-
-Note the following about access controls:
-
-- Environment-specific resources are only created if your cluster is
- [managed by GitLab](index.md#gitlab-managed-clusters).
-- If your cluster was created before GitLab 12.2, it uses a single namespace for all project
- environments.
-
-### RBAC cluster resources
-
-GitLab creates the following resources for RBAC clusters.
-
-| Name | Type | Details | Created when |
-|:----------------------|:---------------------|:-----------------------------------------------------------------------------------------------------------|:-----------------------|
-| `gitlab` | `ServiceAccount` | `default` namespace | Creating a new cluster |
-| `gitlab-admin` | `ClusterRoleBinding` | [`cluster-admin`](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#user-facing-roles) roleRef | Creating a new cluster |
-| `gitlab-token` | `Secret` | Token for `gitlab` ServiceAccount | Creating a new cluster |
-| `tiller` | `ServiceAccount` | `gitlab-managed-apps` namespace | Installing Helm charts |
-| `tiller-admin` | `ClusterRoleBinding` | `cluster-admin` roleRef | Installing Helm charts |
-| Environment namespace | `Namespace` | Contains all environment-specific resources | Deploying to a cluster |
-| Environment namespace | `ServiceAccount` | Uses namespace of environment | Deploying to a cluster |
-| Environment namespace | `Secret` | Token for environment ServiceAccount | Deploying to a cluster |
-| Environment namespace | `RoleBinding` | [`admin`](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#user-facing-roles) roleRef | Deploying to a cluster |
-
-The environment namespace `RoleBinding` was
-[updated](https://gitlab.com/gitlab-org/gitlab/-/issues/31113) in GitLab 13.6
-to `admin` roleRef. Previously, the `edit` roleRef was used.
-
-### ABAC cluster resources
-
-GitLab creates the following resources for ABAC clusters.
-
-| Name | Type | Details | Created when |
-|:----------------------|:---------------------|:-------------------------------------|:---------------------------|
-| `gitlab` | `ServiceAccount` | `default` namespace | Creating a new cluster |
-| `gitlab-token` | `Secret` | Token for `gitlab` ServiceAccount | Creating a new cluster |
-| `tiller` | `ServiceAccount` | `gitlab-managed-apps` namespace | Installing Helm charts |
-| `tiller-admin` | `ClusterRoleBinding` | `cluster-admin` roleRef | Installing Helm charts |
-| Environment namespace | `Namespace` | Contains all environment-specific resources | Deploying to a cluster |
-| Environment namespace | `ServiceAccount` | Uses namespace of environment | Deploying to a cluster |
-| Environment namespace | `Secret` | Token for environment ServiceAccount | Deploying to a cluster |
-
-### Security of runners
-
-Runners have the [privileged mode](https://docs.gitlab.com/runner/executors/docker.html#the-privileged-mode)
-enabled by default, which allows them to execute special commands and run
-Docker in Docker. This functionality is needed to run some of the
-[Auto DevOps](../../../topics/autodevops/index.md)
-jobs. This implies the containers are running in privileged mode and you should,
-therefore, be aware of some important details.
-
-The privileged flag gives all capabilities to the running container, which in
-turn can do almost everything that the host can do. Be aware of the
-inherent security risk associated with performing `docker run` operations on
-arbitrary images as they effectively have root access.
-
-If you don't want to use a runner in privileged mode, either:
-
-- Use shared runners on GitLab.com. They don't have this security issue.
-- Set up your own runners using the configuration described at
- [shared runners](../../gitlab_com/index.md#shared-runners) using
- [`docker+machine`](https://docs.gitlab.com/runner/executors/docker_machine.html).
+NOTE:
+Watch the webcast [Scalable app deployment with GitLab and Google Cloud Platform](https://about.gitlab.com/webcast/scalable-app-deploy/)
+and learn how to spin up a Kubernetes cluster managed by Google Cloud Platform (GCP)
+in a few clicks.
## Create new cluster
-New clusters can be created using GitLab on Google Kubernetes Engine (GKE) or
-Amazon Elastic Kubernetes Service (EKS) at the project, group, or instance level:
+> The certificate-based method for creating clusters from GitLab was [deprecated](https://gitlab.com/gitlab-org/gitlab/-/issues/327908) in GitLab 14.0.
-1. Navigate to your:
- - Project's **{cloud-gear}** **Infrastructure > Kubernetes clusters** page, for a project-level
- cluster.
- - Group's **{cloud-gear}** **Kubernetes** page, for a group-level cluster.
- - **Admin Area >** **{cloud-gear}** **Kubernetes** page, for an instance-level cluster.
-1. Click **Integrate with a cluster certificate**.
-1. Click the **Create new cluster** tab.
-1. Click either **Amazon EKS** or **Google GKE**, and follow the instructions for your desired service:
- - [Amazon EKS](add_eks_clusters.md#new-eks-cluster).
- - [Google GKE](add_gke_clusters.md#creating-the-cluster-on-gke).
+As of GitLab 14.0, use [Infrastructure as Code](../../infrastructure/index.md)
+to **safely create your new cluster from GitLab**.
-After creating a cluster, you can [install runners](https://docs.gitlab.com/runner/install/kubernetes.html),
-add a [cluster management project](../../clusters/management_project.md),
-configure [Auto DevOps](../../../topics/autodevops/index.md),
-or start [deploying right away](index.md#deploying-to-a-kubernetes-cluster).
+The certificate-based method is **deprecated** and scheduled for removal in
+GitLab 15.0, but you can still use it until then. With this method, you can
+host your cluster in EKS, GKE, on premises, or with another provider. For
+clusters hosted on premises or with another provider, follow either the EKS
+or GKE instructions and enter your cluster's settings manually:
+
+- [New cluster hosted on Google Kubernetes Engine (GKE)](add_gke_clusters.md).
+- [New cluster hosted on Amazon Elastic Kubernetes Service (EKS)](add_eks_clusters.md).
## Add existing cluster
-If you have an existing Kubernetes cluster, you can add it to a project, group,
-or instance, and [install runners](https://docs.gitlab.com/runner/install/kubernetes.html)
-on it (the cluster does not need to be added to GitLab first).
+If you already have a cluster and want to integrate it with GitLab, see how to
+[add an existing cluster](add_existing_cluster.md).
-After adding a cluster, you can add a [cluster management project](../../clusters/management_project.md),
-configure [Auto DevOps](../../../topics/autodevops/index.md),
-or start [deploying right away](index.md#deploying-to-a-kubernetes-cluster).
+## Configure your cluster
-### Existing Kubernetes cluster
+As of GitLab 14.0, use the [GitLab Kubernetes Agent](../../clusters/agent/index.md) to configure your cluster.
-To add a Kubernetes cluster to your project, group, or instance:
+## Disable a cluster
-1. Navigate to your:
- 1. Project's **{cloud-gear}** **Infrastructure > Kubernetes clusters** page, for a project-level
- cluster.
- 1. Group's **{cloud-gear}** **Kubernetes** page, for a group-level cluster.
- 1. **Admin Area >** **{cloud-gear}** **Kubernetes** page, for an instance-level cluster.
-1. Click **Add Kubernetes cluster**.
-1. Click the **Add existing cluster** tab and fill in the details:
- 1. **Kubernetes cluster name** (required) - The name you wish to give the cluster.
- 1. **Environment scope** (required) - The
- [associated environment](index.md#setting-the-environment-scope) to this cluster.
- 1. **API URL** (required) -
- It's the URL that GitLab uses to access the Kubernetes API. Kubernetes
- exposes several APIs, we want the "base" URL that is common to all of them.
- For example, `https://kubernetes.example.com` rather than `https://kubernetes.example.com/api/v1`.
+When you successfully create a new Kubernetes cluster or add an existing
+one to GitLab, the cluster connection to GitLab becomes enabled. To disable it:
- Get the API URL by running this command:
-
- ```shell
- kubectl cluster-info | grep -E 'Kubernetes master|Kubernetes control plane' | awk '/http/ {print $NF}'
- ```
-
- 1. **CA certificate** (required) - A valid Kubernetes certificate is needed to authenticate to the cluster. We use the certificate created by default.
- 1. List the secrets with `kubectl get secrets`, and one should be named similar to
- `default-token-xxxxx`. Copy that token name for use below.
- 1. Get the certificate by running this command:
-
- ```shell
- kubectl get secret -o jsonpath="{['data']['ca\.crt']}" | base64 --decode
- ```
-
- If the command returns the entire certificate chain, you must copy the Root CA
- certificate and any intermediate certificates at the bottom of the chain.
- A chain file has following structure:
-
- ```plaintext
- -----BEGIN MY CERTIFICATE-----
- -----END MY CERTIFICATE-----
- -----BEGIN INTERMEDIATE CERTIFICATE-----
- -----END INTERMEDIATE CERTIFICATE-----
- -----BEGIN INTERMEDIATE CERTIFICATE-----
- -----END INTERMEDIATE CERTIFICATE-----
- -----BEGIN ROOT CERTIFICATE-----
- -----END ROOT CERTIFICATE-----
- ```
-
- 1. **Token** -
- GitLab authenticates against Kubernetes using service tokens, which are
- scoped to a particular `namespace`.
- **The token used should belong to a service account with
- [`cluster-admin`](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#user-facing-roles)
- privileges.** To create this service account:
- 1. Create a file called `gitlab-admin-service-account.yaml` with contents:
-
- ```yaml
- apiVersion: v1
- kind: ServiceAccount
- metadata:
- name: gitlab
- namespace: kube-system
- ---
- apiVersion: rbac.authorization.k8s.io/v1
- kind: ClusterRoleBinding
- metadata:
- name: gitlab-admin
- roleRef:
- apiGroup: rbac.authorization.k8s.io
- kind: ClusterRole
- name: cluster-admin
- subjects:
- - kind: ServiceAccount
- name: gitlab
- namespace: kube-system
- ```
-
- 1. Apply the service account and cluster role binding to your cluster:
-
- ```shell
- kubectl apply -f gitlab-admin-service-account.yaml
- ```
-
- You need the `container.clusterRoleBindings.create` permission
- to create cluster-level roles. If you do not have this permission,
- you can alternatively enable Basic Authentication and then run the
- `kubectl apply` command as an administrator:
-
- ```shell
- kubectl apply -f gitlab-admin-service-account.yaml --username=admin --password=
- ```
-
- NOTE:
- Basic Authentication can be turned on and the password credentials
- can be obtained using the Google Cloud Console.
-
- Output:
-
- ```shell
- serviceaccount "gitlab" created
- clusterrolebinding "gitlab-admin" created
- ```
-
- 1. Retrieve the token for the `gitlab` service account:
-
- ```shell
- kubectl -n kube-system describe secret $(kubectl -n kube-system get secret | grep gitlab | awk '{print $1}')
- ```
-
- Copy the `` value from the output:
-
- ```plaintext
- Name: gitlab-token-b5zv4
- Namespace: kube-system
- Labels:
- Annotations: kubernetes.io/service-account.name=gitlab
- kubernetes.io/service-account.uid=bcfe66ac-39be-11e8-97e8-026dce96b6e8
-
- Type: kubernetes.io/service-account-token
-
- Data
- ====
- ca.crt: 1025 bytes
- namespace: 11 bytes
- token:
- ```
-
- NOTE:
- For GKE clusters, you need the
- `container.clusterRoleBindings.create` permission to create a cluster
- role binding. You can follow the [Google Cloud
- documentation](https://cloud.google.com/iam/docs/granting-changing-revoking-access)
- to grant access.
-
- 1. **GitLab-managed cluster** - Leave this checked if you want GitLab to manage namespaces and service accounts for this cluster.
- See the [Managed clusters section](index.md#gitlab-managed-clusters) for more information.
- 1. **Project namespace** (optional) - You don't have to fill it in; by leaving
- it blank, GitLab creates one for you. Also:
- - Each project should have a unique namespace.
- - The project namespace is not necessarily the namespace of the secret, if
- you're using a secret with broader permissions, like the secret from `default`.
- - You should **not** use `default` as the project namespace.
- - If you or someone created a secret specifically for the project, usually
- with limited permissions, the secret's namespace and project namespace may
- be the same.
-
-1. Finally, click the **Create Kubernetes cluster** button.
-
-After a couple of minutes, your cluster is ready.
-
-#### Disable Role-Based Access Control (RBAC) (optional)
-
-When connecting a cluster via GitLab integration, you may specify whether the
-cluster is RBAC-enabled or not. This affects how GitLab interacts with the
-cluster for certain operations. If you did *not* check the **RBAC-enabled cluster**
-checkbox at creation time, GitLab assumes RBAC is disabled for your cluster
-when interacting with it. If so, you must disable RBAC on your cluster for the
-integration to work properly.
-
-
-
-WARNING:
-Disabling RBAC means that any application running in the cluster,
-or user who can authenticate to the cluster, has full API access. This is a
-[security concern](index.md#security-implications), and may not be desirable.
-
-To effectively disable RBAC, global permissions can be applied granting full access:
-
-```shell
-kubectl create clusterrolebinding permissive-binding \
- --clusterrole=cluster-admin \
- --user=admin \
- --user=kubelet \
- --group=system:serviceaccounts
-```
-
-## Enabling or disabling integration
-
-The Kubernetes cluster integration enables after you have successfully either created
-a new cluster or added an existing one. To disable Kubernetes cluster integration:
-
-1. Navigate to your:
- - Project's **{cloud-gear}** **Infrastructure > Kubernetes clusters** page, for a project-level
- cluster.
+1. Go to your:
+ - Project's **{cloud-gear}** **Infrastructure > Kubernetes clusters** page, for a project-level cluster.
- Group's **{cloud-gear}** **Kubernetes** page, for a group-level cluster.
- - **Admin Area >** **{cloud-gear}** **Kubernetes** page, for an instance-level cluster.
-1. Click on the name of the cluster.
-1. Click the **GitLab Integration** toggle.
+ - **Menu >** **{admin}** **Admin >** **{cloud-gear}** **Kubernetes** page, for an instance-level cluster.
+1. Select the name of the cluster you want to disable.
+1. Toggle **GitLab Integration** off (in gray).
1. Click **Save changes**.
-## Removing integration
+## Remove a cluster
-To remove the Kubernetes cluster integration from your project, first navigate to the **Advanced Settings** tab of the cluster details page and either:
+> The option to remove cluster integrations and resources was [introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/26815) in GitLab 12.6.
-- Select **Remove integration**, to remove only the Kubernetes integration.
-- [From GitLab 12.6](https://gitlab.com/gitlab-org/gitlab/-/issues/26815), select
- **Remove integration and resources**, to also remove all related GitLab cluster resources (for
- example, namespaces, roles, and bindings) when removing the integration.
+When you remove a cluster integration, you only remove the relationship between
+the cluster and GitLab, not the cluster itself. To delete the cluster, use the
+GKE or EKS dashboard, or `kubectl`.
-When removing the cluster integration, note:
+You need at least Maintainer [permissions](../../permissions.md) for the
+project or group to remove the integration with GitLab.
-- You need Maintainer [permissions](../../permissions.md) and above to remove a Kubernetes cluster
- integration.
-- When you remove a cluster, you only remove its relationship to GitLab, not the cluster itself. To
- remove the cluster, you can do so by visiting the GKE or EKS dashboard, or using `kubectl`.
+When removing a cluster integration, you have two options:
-## Learn more
+- **Remove integration**: remove only the Kubernetes integration.
+- **Remove integration and resources**: remove the cluster integration and
+all GitLab cluster-related resources such as namespaces, roles, and bindings.
-To learn more on automatically deploying your applications,
-read about [Auto DevOps](../../../topics/autodevops/index.md).
+To remove the Kubernetes cluster integration:
+
+1. Go to your cluster details page.
+1. Select the **Advanced Settings** tab.
+1. Select either **Remove integration** or **Remove integration and resources**.
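+
+If you select **Remove integration** only, the resources GitLab created in the cluster
+(listed in [cluster access controls](cluster_access.md)) stay behind. A sketch of how
+you might clean them up manually with `kubectl`, if you no longer need them:
+
+```shell
+# Remove the cluster role binding, service account, and namespace created by GitLab.
+kubectl delete clusterrolebinding gitlab-admin
+kubectl delete serviceaccount gitlab --namespace default
+kubectl delete namespace gitlab-managed-apps
+```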
+
+## Access controls
+
+See [cluster access controls (RBAC or ABAC)](cluster_access.md).
## Troubleshooting
diff --git a/doc/user/project/clusters/cluster_access.md b/doc/user/project/clusters/cluster_access.md
new file mode 100644
index 00000000000..713a60b2dd0
--- /dev/null
+++ b/doc/user/project/clusters/cluster_access.md
@@ -0,0 +1,88 @@
+---
+stage: Configure
+group: Configure
+info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://about.gitlab.com/handbook/engineering/ux/technical-writing/#assignments
+---
+
+# Cluster access controls (RBAC or ABAC)
+
+> Restricted service account for deployment was [introduced](https://gitlab.com/gitlab-org/gitlab-foss/-/issues/51716) in GitLab 11.5.
+
+When creating a cluster in GitLab, you are asked if you would like to create either:
+
+- A [Role-based access control (RBAC)](https://kubernetes.io/docs/reference/access-authn-authz/rbac/)
+ cluster, which is the GitLab default and recommended option.
+- An [Attribute-based access control (ABAC)](https://kubernetes.io/docs/reference/access-authn-authz/abac/) cluster.
+
+When GitLab creates the cluster,
+a `gitlab` service account with `cluster-admin` privileges is created in the `default` namespace
+to manage the newly created cluster.
+
+Helm also creates additional service accounts and other resources for each
+installed application. Consult the documentation of the Helm charts for each application
+for details.
+
+If you are [adding an existing Kubernetes cluster](add_remove_clusters.md#add-existing-cluster),
+ensure the token of the account has administrator privileges for the cluster.
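+
+On Kubernetes versions that still auto-create ServiceAccount token Secrets, an
+administrator-level account could be prepared with commands along these lines.
+This is an illustrative sketch, not the only approach; the account name,
+binding name, and namespace are placeholders:
+
+```shell
+# Create a service account and grant it cluster-admin.
+kubectl create serviceaccount gitlab-admin -n kube-system
+kubectl create clusterrolebinding gitlab-admin \
+  --clusterrole=cluster-admin \
+  --serviceaccount=kube-system:gitlab-admin
+
+# Print the service account token so it can be pasted into GitLab.
+kubectl -n kube-system get secret \
+  "$(kubectl -n kube-system get serviceaccount gitlab-admin -o jsonpath='{.secrets[0].name}')" \
+  -o jsonpath='{.data.token}' | base64 --decode
+```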
+
+The resources created by GitLab differ depending on the type of cluster.
+
+## Important notes
+
+Note the following about access controls:
+
+- Environment-specific resources are only created if your cluster is
+ [managed by GitLab](index.md#gitlab-managed-clusters).
+- If your cluster was created before GitLab 12.2, it uses a single namespace for all project
+ environments.
+
+## RBAC cluster resources
+
+GitLab creates the following resources for RBAC clusters.
+
+| Name | Type | Details | Created when |
+|:----------------------|:---------------------|:-----------------------------------------------------------------------------------------------------------|:-----------------------|
+| `gitlab` | `ServiceAccount` | `default` namespace | Creating a new cluster |
+| `gitlab-admin` | `ClusterRoleBinding` | [`cluster-admin`](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#user-facing-roles) roleRef | Creating a new cluster |
+| `gitlab-token` | `Secret` | Token for `gitlab` ServiceAccount | Creating a new cluster |
+| Environment namespace | `Namespace` | Contains all environment-specific resources | Deploying to a cluster |
+| Environment namespace | `ServiceAccount` | Uses namespace of environment | Deploying to a cluster |
+| Environment namespace | `Secret` | Token for environment ServiceAccount | Deploying to a cluster |
+| Environment namespace | `RoleBinding` | [`admin`](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#user-facing-roles) roleRef | Deploying to a cluster |
+
+The environment namespace `RoleBinding` was
+[updated](https://gitlab.com/gitlab-org/gitlab/-/issues/31113) in GitLab 13.6
+to `admin` roleRef. Previously, the `edit` roleRef was used.
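+
+To verify these resources on an RBAC cluster, you could run commands along the
+following lines. This is only a sketch; replace `<environment-namespace>` with
+the namespace GitLab created for your environment:
+
+```shell
+# Cluster-wide resources created when the cluster is added.
+kubectl get serviceaccount gitlab -n default
+kubectl get clusterrolebinding gitlab-admin
+
+# Environment-specific resources created on first deployment.
+kubectl get namespace <environment-namespace>
+kubectl get serviceaccount,secret,rolebinding -n <environment-namespace>
+```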
+
+## ABAC cluster resources
+
+GitLab creates the following resources for ABAC clusters.
+
+| Name | Type | Details | Created when |
+|:----------------------|:---------------------|:-------------------------------------|:---------------------------|
+| `gitlab` | `ServiceAccount` | `default` namespace | Creating a new cluster |
+| `gitlab-token` | `Secret` | Token for `gitlab` ServiceAccount | Creating a new cluster |
+| Environment namespace | `Namespace` | Contains all environment-specific resources | Deploying to a cluster |
+| Environment namespace | `ServiceAccount` | Uses namespace of environment | Deploying to a cluster |
+| Environment namespace | `Secret` | Token for environment ServiceAccount | Deploying to a cluster |
+
+## Security of runners
+
+Runners have the [privileged mode](https://docs.gitlab.com/runner/executors/docker.html#the-privileged-mode)
+enabled by default, which allows them to execute special commands and run
+Docker in Docker. This functionality is needed to run some of the
+[Auto DevOps](../../../topics/autodevops/index.md)
+jobs. This implies the containers are running in privileged mode and you should,
+therefore, be aware of some important details.
+
+The privileged flag gives all capabilities to the running container, which in
+turn can do almost everything that the host can do. Be aware of the
+inherent security risk associated with performing `docker run` operations on
+arbitrary images as they effectively have root access.
+
+If you don't want to use a runner in privileged mode, either:
+
+- Use shared runners on GitLab.com. They don't have this security issue.
+- Set up your own runners using the configuration described at
+  [shared runners](../../gitlab_com/index.md#shared-runners) using
+  [`docker+machine`](https://docs.gitlab.com/runner/executors/docker_machine.html),
+  as in the sketch after this list.
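+
+As a sketch of the second option, a runner with the Docker executor could be
+registered with privileged mode left disabled. The URL, registration token, and
+image below are placeholders:
+
+```shell
+gitlab-runner register \
+  --non-interactive \
+  --url "https://gitlab.example.com/" \
+  --registration-token "$RUNNER_REGISTRATION_TOKEN" \
+  --executor "docker" \
+  --docker-image "alpine:3.13" \
+  --docker-privileged=false
+```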
diff --git a/doc/user/project/clusters/index.md b/doc/user/project/clusters/index.md
index 8dd8ed52dd7..0ab2de002ea 100644
--- a/doc/user/project/clusters/index.md
+++ b/doc/user/project/clusters/index.md
@@ -12,35 +12,31 @@ info: To determine the technical writer assigned to the Stage/Group associated w
> - [Introduced](https://gitlab.com/gitlab-org/gitlab-foss/-/issues/39840) in
> GitLab 11.11 for [instances](../../instance/clusters/index.md).
-Using the GitLab project Kubernetes integration, you can:
+You can use GitLab to manage your clusters and [benefit from the GitLab-Kubernetes integration](#benefit-from-the-gitlab-kubernetes-integration).
-- Use [Review Apps](../../../ci/review_apps/index.md).
-- Run [pipelines](../../../ci/pipelines/index.md).
+See the [supported cluster versions](#supported-cluster-versions) before
+you begin.
+
+## Benefit from the GitLab-Kubernetes integration
+
+Using the GitLab-Kubernetes integration, you can benefit from GitLab
+features such as:
+
+- Preview your applications with [Review Apps](../../../ci/review_apps/index.md).
+- Create GitLab CI/CD [Pipelines](../../../ci/pipelines/index.md) to build, test, and deploy to your cluster.
- [Deploy](#deploying-to-a-kubernetes-cluster) your applications.
-- Detect and [monitor Kubernetes](#monitoring-your-kubernetes-cluster).
-- Use it with [Auto DevOps](#auto-devops).
+- Detect and [monitor](#monitoring-your-kubernetes-cluster) your clusters.
+- Use [Auto DevOps](#auto-devops) to automate the CI/CD process.
- Use [Web terminals](#web-terminals).
- Use [Deploy Boards](#deploy-boards).
- Use [Canary Deployments](#canary-deployments). **(PREMIUM)**
- Use [deployment variables](#deployment-variables).
-- Use [role-based or attribute-based access controls](add_remove_clusters.md#access-controls).
+- Use [role-based or attribute-based access controls](cluster_access.md).
- View [Logs](#viewing-pod-logs).
- Run serverless workloads on [Kubernetes with Knative](serverless/index.md).
+- Connect GitLab to in-cluster applications using [cluster integrations](../../clusters/integrations.md).
-Besides integration at the project level, Kubernetes clusters can also be
-integrated at the [group level](../../group/clusters/index.md) or
-[GitLab instance level](../../instance/clusters/index.md).
-
-To view your project level Kubernetes clusters, navigate to **Infrastructure > Kubernetes clusters**
-from your project. On this page, you can [add a new cluster](#adding-and-removing-clusters)
-and view information about your existing clusters, such as:
-
-- Nodes count.
-- Rough estimates of memory and CPU usage.
-
-## Setting up
-
-### Supported cluster versions
+## Supported cluster versions
GitLab is committed to support at least two production-ready Kubernetes minor
versions at any given time. We regularly review the versions we support, and
@@ -61,19 +57,31 @@ Kubernetes version to any supported version at any time:
Some GitLab features may support versions outside the range provided here.
-NOTE:
-[GKE Cluster creation](add_remove_clusters.md#create-new-cluster) by GitLab is currently not supported for Kubernetes 1.19+. For these versions you can create the cluster through GCP, then [Add existing cluster](add_remove_clusters.md#add-existing-cluster). See [the related issue](https://gitlab.com/gitlab-org/gitlab/-/issues/331922) for more information.
+## Add and remove clusters
-### Adding and removing clusters
+You can create new or add existing clusters to GitLab:
-See [Adding and removing Kubernetes clusters](add_remove_clusters.md) for details on how
-to:
+- On the project level, to have a cluster dedicated to a project.
+- On the [group level](../../group/clusters/index.md), to use the same cluster across multiple projects within your group.
+- On the [instance level](../../instance/clusters/index.md), to use the same cluster across multiple groups and projects. **(FREE SELF)**
-- Create a cluster in Google Cloud Platform (GCP) or Amazon Elastic Kubernetes Service
- (EKS) using the GitLab UI.
-- Add an integration to an existing cluster from any Kubernetes platform.
+To create new clusters, use one of the following methods:
-### Multiple Kubernetes clusters
+- [Infrastructure as Code](../../infrastructure/index.md) (**recommended**).
+- [Cluster certificates](add_remove_clusters.md) (**deprecated**).
+
+You can also [add existing clusters](add_existing_cluster.md) to GitLab.
+
+## View your clusters
+
+To view your project-level Kubernetes clusters, go to **Infrastructure > Kubernetes clusters**
+from your project. On this page, you can add a new cluster
+and view information about your existing clusters, such as:
+
+- Nodes count.
+- Rough estimates of memory and CPU usage.
+
+## Multiple Kubernetes clusters
> - Introduced in [GitLab Premium](https://about.gitlab.com/pricing/) 10.3
> - [Moved](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/35094) to GitLab Free in 13.2.
@@ -85,7 +93,7 @@ Add another cluster, like you did the first time, and make sure to
[set an environment scope](#setting-the-environment-scope) that
differentiates the new cluster from the rest.
-#### Setting the environment scope
+### Setting the environment scope
When adding more than one Kubernetes cluster to your project, you need to differentiate
them with an environment scope. The environment scope associates clusters with [environments](../../../ci/environments/index.md) similar to how the
@@ -141,8 +149,8 @@ The results:
## Configuring your Kubernetes cluster
-After [adding a Kubernetes cluster](add_remove_clusters.md) to GitLab, read this section that covers
-important considerations for configuring Kubernetes clusters with GitLab.
+Use the [GitLab Kubernetes Agent](../../clusters/agent/index.md) to safely
+configure your clusters. Otherwise, there are [security implications](#security-implications).
### Security implications
@@ -172,6 +180,8 @@ for your deployment jobs to use. Otherwise, a namespace is created for you.
#### Important notes
+Note the following about GitLab and clusters:
+
Be aware that manually managing resources that have been created by GitLab, like
namespaces and service accounts, can cause unexpected errors. If this occurs, try
[clearing the cluster cache](#clearing-the-cluster-cache).
@@ -253,6 +263,11 @@ This list provides a generic solution, and some GitLab-specific approaches:
If you see a trailing `%` on some Kubernetes versions, do not include it.
+## Cluster integrations
+
+See the available [cluster integrations](../../clusters/integrations.md)
+to integrate third-party applications with your clusters through GitLab.
+
## Cluster management project
Attach a [Cluster management project](../../clusters/management_project.md)
@@ -261,14 +276,8 @@ installation, such as an Ingress controller.
## Auto DevOps
-Auto DevOps automatically detects, builds, tests, deploys, and monitors your
-applications.
-
-To make full use of Auto DevOps (Auto Deploy, Auto Review Apps, and
-Auto Monitoring) the Kubernetes project integration must be enabled. However,
-Kubernetes clusters can be used without Auto DevOps.
-
-[Read more about Auto DevOps](../../../topics/autodevops/index.md).
+You can use [Auto DevOps](../../../topics/autodevops/index.md) to automatically
+detect, build, test, deploy, and monitor your applications.
## Deploying to a Kubernetes cluster
@@ -309,7 +318,7 @@ GitLab CI/CD build environment to deployment jobs. Deployment jobs have
| Deployment Variable | Description |
|----------------------------|-------------|
| `KUBE_URL` | Equal to the API URL. |
-| `KUBE_TOKEN` | The Kubernetes token of the [environment service account](add_remove_clusters.md#access-controls). Prior to GitLab 11.5, `KUBE_TOKEN` was the Kubernetes token of the main service account of the cluster integration. |
+| `KUBE_TOKEN` | The Kubernetes token of the [environment service account](cluster_access.md). Prior to GitLab 11.5, `KUBE_TOKEN` was the Kubernetes token of the main service account of the cluster integration. |
| `KUBE_NAMESPACE` | The namespace associated with the project's deployment service account. In the format `--`. For GitLab-managed clusters, a matching namespace is automatically created by GitLab in the cluster. If your cluster was created before GitLab 12.2, the default `KUBE_NAMESPACE` is set to `-`. |
| `KUBE_CA_PEM_FILE` | Path to a file containing PEM data. Only present if a custom CA bundle was specified. |
| `KUBE_CA_PEM` | (**deprecated**) Raw PEM data. Only if a custom CA bundle was specified. |
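
As a rough sketch of how a deployment job script could consume these variables,
`kubectl` might be configured along these lines, assuming a custom CA bundle was
provided so that `KUBE_CA_PEM_FILE` is set (the context name `gitlab-deploy` is
arbitrary):

```shell
kubectl config set-cluster gitlab-deploy --server="$KUBE_URL" \
  --certificate-authority="$KUBE_CA_PEM_FILE"
kubectl config set-credentials gitlab-deploy --token="$KUBE_TOKEN"
kubectl config set-context gitlab-deploy \
  --cluster=gitlab-deploy --user=gitlab-deploy --namespace="$KUBE_NAMESPACE"
kubectl config use-context gitlab-deploy
kubectl get pods
```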
diff --git a/lib/api/helpers/services_helpers.rb b/lib/api/helpers/services_helpers.rb
index ca13ea0789a..ebf7b124015 100644
--- a/lib/api/helpers/services_helpers.rb
+++ b/lib/api/helpers/services_helpers.rb
@@ -799,20 +799,20 @@ module API
::Integrations::Packagist,
::Integrations::PipelinesEmail,
::Integrations::Pivotaltracker,
+ ::Integrations::Prometheus,
::Integrations::Pushover,
::Integrations::Redmine,
::Integrations::Slack,
::Integrations::SlackSlashCommands,
::Integrations::Teamcity,
- ::Integrations::Youtrack,
- ::PrometheusService
+ ::Integrations::Youtrack
]
end
def self.development_service_classes
[
::Integrations::MockCi,
- ::MockMonitoringService
+ ::Integrations::MockMonitoring
]
end
end
diff --git a/lib/api/lint.rb b/lib/api/lint.rb
index 3580a7b5e24..945cdf3edb2 100644
--- a/lib/api/lint.rb
+++ b/lib/api/lint.rb
@@ -11,11 +11,7 @@ module API
optional :include_merged_yaml, type: Boolean, desc: 'Whether or not to include merged CI config yaml in the response'
end
post '/lint' do
- if Feature.enabled?(:security_ci_lint_authorization)
- unauthorized! if (Gitlab::CurrentSettings.signup_disabled? || Gitlab::CurrentSettings.signup_limited?) && current_user.nil?
- else
- unauthorized! if Gitlab::CurrentSettings.signup_disabled? && current_user.nil?
- end
+ unauthorized! if (Gitlab::CurrentSettings.signup_disabled? || Gitlab::CurrentSettings.signup_limited?) && current_user.nil?
result = Gitlab::Ci::YamlProcessor.new(params[:content], user: current_user).execute
diff --git a/lib/gitlab/database_importers/self_monitoring/project/create_service.rb b/lib/gitlab/database_importers/self_monitoring/project/create_service.rb
index d1ada8c723e..e3617805c83 100644
--- a/lib/gitlab/database_importers/self_monitoring/project/create_service.rb
+++ b/lib/gitlab/database_importers/self_monitoring/project/create_service.rb
@@ -75,13 +75,13 @@ module Gitlab
if response
# In the add_prometheus_manual_configuration method, the Prometheus
- # server_address config is saved as an api_url in the PrometheusService
- # model. There are validates hooks in the PrometheusService model that
- # check if the project associated with the PrometheusService is the
+ # server_address config is saved as an api_url in the Integrations::Prometheus
+ # model. There are validates hooks in the Integrations::Prometheus model that
+ # check if the project associated with the Integrations::Prometheus is the
# self_monitoring project. It checks
# Gitlab::CurrentSettings.self_monitoring_project_id, which is why the
# Gitlab::CurrentSettings cache needs to be expired here, so that
- # PrometheusService sees the latest self_monitoring_project_id.
+ # Integrations::Prometheus sees the latest self_monitoring_project_id.
Gitlab::CurrentSettings.expire_current_application_settings
success(result)
else
diff --git a/lib/gitlab/global_id/deprecations.rb b/lib/gitlab/global_id/deprecations.rb
index ac4a44e0e10..2753e2b8372 100644
--- a/lib/gitlab/global_id/deprecations.rb
+++ b/lib/gitlab/global_id/deprecations.rb
@@ -9,11 +9,12 @@ module Gitlab
# Example:
#
# DEPRECATIONS = [
- # Deprecation.new(old_model_name: 'PrometheusService', new_model_name: 'Integrations::Prometheus', milestone: '14.0')
+ # Deprecation.new(old_model_name: 'PrometheusService', new_model_name: 'Integrations::Prometheus', milestone: '14.1')
# ].freeze
DEPRECATIONS = [
# This works around an accidentally released argument named as `"EEIterationID"` in 7000489db.
- Deprecation.new(old_model_name: 'EEIteration', new_model_name: 'Iteration', milestone: '13.3')
+ Deprecation.new(old_model_name: 'EEIteration', new_model_name: 'Iteration', milestone: '13.3'),
+ Deprecation.new(old_model_name: 'PrometheusService', new_model_name: 'Integrations::Prometheus', milestone: '14.1')
].freeze
# Maps of the DEPRECATIONS Hash for quick access.
diff --git a/lib/gitlab/integrations/sti_type.rb b/lib/gitlab/integrations/sti_type.rb
index 9d7254f49f7..b87c9936570 100644
--- a/lib/gitlab/integrations/sti_type.rb
+++ b/lib/gitlab/integrations/sti_type.rb
@@ -5,9 +5,9 @@ module Gitlab
class StiType < ActiveRecord::Type::String
NAMESPACED_INTEGRATIONS = Set.new(%w(
Asana Assembla Bamboo Bugzilla Buildkite Campfire Confluence CustomIssueTracker Datadog
- Discord DroneCi EmailsOnPush Ewm ExternalWiki Flowdock HangoutsChat Irker
- Jenkins Jira Mattermost MattermostSlashCommands MicrosoftTeams MockCi Packagist PipelinesEmail Pivotaltracker
- Pushover Redmine Slack SlackSlashCommands Teamcity UnifyCircuit Youtrack WebexTeams
+ Discord DroneCi EmailsOnPush Ewm ExternalWiki Flowdock HangoutsChat Irker Jenkins Jira Mattermost
+ MattermostSlashCommands MicrosoftTeams MockCi MockMonitoring Packagist PipelinesEmail Pivotaltracker
+ Prometheus Pushover Redmine Slack SlackSlashCommands Teamcity UnifyCircuit Youtrack WebexTeams
)).freeze
def cast(value)
diff --git a/lib/gitlab/jira_import.rb b/lib/gitlab/jira_import.rb
index 75d6fdc07b6..60344e4be68 100644
--- a/lib/gitlab/jira_import.rb
+++ b/lib/gitlab/jira_import.rb
@@ -19,10 +19,10 @@ module Gitlab
return unless configuration_check
- jira_service = project.jira_service
+ jira_integration = project.jira_integration
- raise Projects::ImportService::Error, _('Jira integration not configured.') unless jira_service&.active?
- raise Projects::ImportService::Error, _('Unable to connect to the Jira instance. Please check your Jira integration configuration.') unless jira_service&.valid_connection?
+ raise Projects::ImportService::Error, _('Jira integration not configured.') unless jira_integration&.active?
+ raise Projects::ImportService::Error, _('Unable to connect to the Jira instance. Please check your Jira integration configuration.') unless jira_integration&.valid_connection?
end
def self.jira_item_cache_key(project_id, jira_item_id, collection_type)
diff --git a/lib/gitlab/jira_import/base_importer.rb b/lib/gitlab/jira_import/base_importer.rb
index 688254bf91f..2b83f0492cb 100644
--- a/lib/gitlab/jira_import/base_importer.rb
+++ b/lib/gitlab/jira_import/base_importer.rb
@@ -14,7 +14,7 @@ module Gitlab
raise Projects::ImportService::Error, _('Unable to find Jira project to import data from.') unless @jira_project_key
@project = project
- @client = project.jira_service.client
+ @client = project.jira_integration.client
@formatter = Gitlab::ImportFormatter.new
end
diff --git a/lib/gitlab/usage_data.rb b/lib/gitlab/usage_data.rb
index 415a5bff261..fda87a3dc4d 100644
--- a/lib/gitlab/usage_data.rb
+++ b/lib/gitlab/usage_data.rb
@@ -426,9 +426,9 @@ module Gitlab
projects_jira_dvcs_server_active: count(ProjectFeatureUsage.with_jira_dvcs_integration_enabled(cloud: false))
}
- jira_service_data_hash = jira_service_data
- results[:projects_jira_server_active] = jira_service_data_hash[:projects_jira_server_active]
- results[:projects_jira_cloud_active] = jira_service_data_hash[:projects_jira_cloud_active]
+ jira_integration_data_hash = jira_integration_data
+ results[:projects_jira_server_active] = jira_integration_data_hash[:projects_jira_server_active]
+ results[:projects_jira_cloud_active] = jira_integration_data_hash[:projects_jira_cloud_active]
results
rescue ActiveRecord::StatementInvalid
@@ -650,9 +650,9 @@ module Gitlab
todos: distinct_count(::Todo.where(time_period), :author_id),
service_desk_enabled_projects: distinct_count_service_desk_enabled_projects(time_period),
service_desk_issues: count(::Issue.service_desk.where(time_period)),
- projects_jira_active: distinct_count(::Project.with_active_jira_services.where(time_period), :creator_id),
- projects_jira_dvcs_cloud_active: distinct_count(::Project.with_active_jira_services.with_jira_dvcs_cloud.where(time_period), :creator_id),
- projects_jira_dvcs_server_active: distinct_count(::Project.with_active_jira_services.with_jira_dvcs_server.where(time_period), :creator_id)
+ projects_jira_active: distinct_count(::Project.with_active_jira_integrations.where(time_period), :creator_id),
+ projects_jira_dvcs_cloud_active: distinct_count(::Project.with_active_jira_integrations.with_jira_dvcs_cloud.where(time_period), :creator_id),
+ projects_jira_dvcs_server_active: distinct_count(::Project.with_active_jira_integrations.with_jira_dvcs_server.where(time_period), :creator_id)
}
end
# rubocop: enable CodeReuse/ActiveRecord
diff --git a/lib/gitlab/usage_data_non_sql_metrics.rb b/lib/gitlab/usage_data_non_sql_metrics.rb
index bc72a96a468..44d5baa42f6 100644
--- a/lib/gitlab/usage_data_non_sql_metrics.rb
+++ b/lib/gitlab/usage_data_non_sql_metrics.rb
@@ -31,7 +31,7 @@ module Gitlab
def minimum_id(model, column = nil)
end
- def jira_service_data
+ def jira_integration_data
{
projects_jira_server_active: 0,
projects_jira_cloud_active: 0
diff --git a/lib/gitlab/usage_data_queries.rb b/lib/gitlab/usage_data_queries.rb
index da01b68e8fc..63e6cf03d1f 100644
--- a/lib/gitlab/usage_data_queries.rb
+++ b/lib/gitlab/usage_data_queries.rb
@@ -48,7 +48,7 @@ module Gitlab
end
end
- def jira_service_data
+ def jira_integration_data
{
projects_jira_server_active: 0,
projects_jira_cloud_active: 0
diff --git a/lib/gitlab/utils/usage_data.rb b/lib/gitlab/utils/usage_data.rb
index 4ea5b5a87de..faa524d171c 100644
--- a/lib/gitlab/utils/usage_data.rb
+++ b/lib/gitlab/utils/usage_data.rb
@@ -217,7 +217,7 @@ module Gitlab
end
# rubocop: disable UsageData/LargeTable:
- def jira_service_data
+ def jira_integration_data
data = {
projects_jira_server_active: 0,
projects_jira_cloud_active: 0
diff --git a/locale/gitlab.pot b/locale/gitlab.pot
index 1cf8baf2a3a..680db3491be 100644
--- a/locale/gitlab.pot
+++ b/locale/gitlab.pot
@@ -2806,13 +2806,16 @@ msgstr ""
msgid "Admin|Admin notes"
msgstr ""
-msgid "Admin|Learn more about quarterly reconcilliation"
+msgid "Admin|Learn more about quarterly reconciliation"
msgstr ""
msgid "Admin|Note"
msgstr ""
-msgid "Admin|Quarterly reconcilliation will occur on %{qrtlyDate}"
+msgid "Admin|Quarterly reconciliation will occur on %{qrtlyDate}"
+msgstr ""
+
+msgid "Admin|The number of max seats used for your namespace is currently exceeding the number of seats in your subscription. On %{qrtlyDate}, GitLab will process a quarterly reconciliation and automatically bill you a prorated amount for the overage. There is no action needed from you. If you have a credit card on file, it will be charged. Otherwise, you will receive an invoice."
msgstr ""
msgid "Admin|The number of maximum users for your instance is currently exceeding the number of users in license. On %{qrtlyDate}, GitLab will process a quarterly reconciliation and automatically bill you a prorated amount for the overage. There is no action needed from you. If you have a credit card on file, it will be charged. Otherwise, you will receive an invoice."
@@ -5959,9 +5962,6 @@ msgstr ""
msgid "Cannot delete %{profile_name} referenced in security policy"
msgstr ""
-msgid "Cannot enable shared runners because parent group does not allow it"
-msgstr ""
-
msgid "Cannot have multiple Jira imports running at the same time"
msgstr ""
@@ -38174,6 +38174,9 @@ msgstr ""
msgid "cannot be enabled unless all domains have TLS certificates"
msgstr ""
+msgid "cannot be enabled until a valid credit credit is on file"
+msgstr ""
+
msgid "cannot be modified"
msgstr ""
diff --git a/spec/controllers/admin/integrations_controller_spec.rb b/spec/controllers/admin/integrations_controller_spec.rb
index 79c39784173..ae01c814f20 100644
--- a/spec/controllers/admin/integrations_controller_spec.rb
+++ b/spec/controllers/admin/integrations_controller_spec.rb
@@ -37,10 +37,10 @@ RSpec.describe Admin::IntegrationsController do
describe '#update' do
include JiraServiceHelper
- let(:integration) { create(:jira_service, :instance) }
+ let(:integration) { create(:jira_integration, :instance) }
before do
- stub_jira_service_test
+ stub_jira_integration_test
allow(PropagateIntegrationWorker).to receive(:perform_async)
put :update, params: { id: integration.class.to_param, service: { url: url } }
@@ -75,8 +75,8 @@ RSpec.describe Admin::IntegrationsController do
end
describe '#reset' do
- let_it_be(:integration) { create(:jira_service, :instance) }
- let_it_be(:inheriting_integration) { create(:jira_service, inherit_from_id: integration.id) }
+ let_it_be(:integration) { create(:jira_integration, :instance) }
+ let_it_be(:inheriting_integration) { create(:jira_integration, inherit_from_id: integration.id) }
subject do
post :reset, params: { id: integration.class.to_param }
diff --git a/spec/controllers/admin/services_controller_spec.rb b/spec/controllers/admin/services_controller_spec.rb
index 995282ca4bb..06ff8f0db94 100644
--- a/spec/controllers/admin/services_controller_spec.rb
+++ b/spec/controllers/admin/services_controller_spec.rb
@@ -11,7 +11,7 @@ RSpec.describe Admin::ServicesController do
describe 'GET #edit' do
let(:service) do
- create(:jira_service, :template)
+ create(:jira_integration, :template)
end
it 'successfully displays the template' do
@@ -30,7 +30,7 @@ RSpec.describe Admin::ServicesController do
context 'when instance integration exists' do
before do
- create(:jira_service, :instance)
+ create(:jira_integration, :instance)
end
it 'redirects to the admin application integration page' do
diff --git a/spec/controllers/groups/settings/integrations_controller_spec.rb b/spec/controllers/groups/settings/integrations_controller_spec.rb
index 4f1f6dcaae4..a6ef0223491 100644
--- a/spec/controllers/groups/settings/integrations_controller_spec.rb
+++ b/spec/controllers/groups/settings/integrations_controller_spec.rb
@@ -63,11 +63,11 @@ RSpec.describe Groups::Settings::IntegrationsController do
describe '#update' do
include JiraServiceHelper
- let(:integration) { create(:jira_service, project: nil, group_id: group.id) }
+ let(:integration) { create(:jira_integration, project: nil, group_id: group.id) }
before do
group.add_owner(user)
- stub_jira_service_test
+ stub_jira_integration_test
put :update, params: { group_id: group, id: integration.class.to_param, service: { url: url } }
end
@@ -93,8 +93,8 @@ RSpec.describe Groups::Settings::IntegrationsController do
end
describe '#reset' do
- let_it_be(:integration) { create(:jira_service, group: group, project: nil) }
- let_it_be(:inheriting_integration) { create(:jira_service, inherit_from_id: integration.id) }
+ let_it_be(:integration) { create(:jira_integration, group: group, project: nil) }
+ let_it_be(:inheriting_integration) { create(:jira_integration, inherit_from_id: integration.id) }
subject do
post :reset, params: { group_id: group, id: integration.class.to_param }
diff --git a/spec/controllers/projects/import/jira_controller_spec.rb b/spec/controllers/projects/import/jira_controller_spec.rb
index 37a7fce0c23..5288c0fcf21 100644
--- a/spec/controllers/projects/import/jira_controller_spec.rb
+++ b/spec/controllers/projects/import/jira_controller_spec.rb
@@ -12,7 +12,7 @@ RSpec.describe Projects::Import::JiraController do
def ensure_correct_config
sign_in(user)
project.add_maintainer(user)
- stub_jira_service_test
+ stub_jira_integration_test
end
shared_examples 'redirect with error' do |error|
@@ -54,8 +54,8 @@ RSpec.describe Projects::Import::JiraController do
context 'when loged user is a developer' do
before do
- create(:jira_service, project: project)
- stub_jira_service_test
+ create(:jira_integration, project: project)
+ stub_jira_integration_test
sign_in(user)
project.add_developer(user)
@@ -72,7 +72,7 @@ RSpec.describe Projects::Import::JiraController do
it_behaves_like 'users without permissions'
- context 'jira service configuration' do
+ context 'jira integration configuration' do
before do
sign_in(user)
project.add_maintainer(user)
@@ -80,14 +80,14 @@ RSpec.describe Projects::Import::JiraController do
context 'when Jira service is not enabled for the project' do
it 'does not query Jira service' do
- expect(project).not_to receive(:jira_service)
+ expect(project).not_to receive(:jira_integration)
end
it_behaves_like 'template with no message'
end
context 'when Jira service is not configured correctly for the project' do
- let_it_be(:jira_service) { create(:jira_service, project: project) }
+ let_it_be(:jira_integration) { create(:jira_integration, project: project) }
before do
WebMock.stub_request(:get, 'https://jira.example.com/rest/api/2/serverInfo')
diff --git a/spec/controllers/projects/issues_controller_spec.rb b/spec/controllers/projects/issues_controller_spec.rb
index 7569a18baeb..3385505bb62 100644
--- a/spec/controllers/projects/issues_controller_spec.rb
+++ b/spec/controllers/projects/issues_controller_spec.rb
@@ -17,7 +17,7 @@ RSpec.describe Projects::IssuesController do
before do
sign_in(user)
project.add_developer(user)
- create(:jira_service, project: project)
+ create(:jira_integration, project: project)
end
context 'when GitLab issues disabled' do
diff --git a/spec/controllers/projects/prometheus/metrics_controller_spec.rb b/spec/controllers/projects/prometheus/metrics_controller_spec.rb
index c7c3be20f29..5338b77bd08 100644
--- a/spec/controllers/projects/prometheus/metrics_controller_spec.rb
+++ b/spec/controllers/projects/prometheus/metrics_controller_spec.rb
@@ -141,7 +141,7 @@ RSpec.describe Projects::Prometheus::MetricsController do
expect(flash[:notice]).to include('Metric was successfully added.')
- expect(response).to redirect_to(edit_project_service_path(project, PrometheusService))
+ expect(response).to redirect_to(edit_project_service_path(project, ::Integrations::Prometheus))
end
end
@@ -164,7 +164,7 @@ RSpec.describe Projects::Prometheus::MetricsController do
it 'destroys the metric' do
delete :destroy, params: project_params(id: metric.id)
- expect(response).to redirect_to(edit_project_service_path(project, PrometheusService))
+ expect(response).to redirect_to(edit_project_service_path(project, ::Integrations::Prometheus))
expect(PrometheusMetric.find_by(id: metric.id)).to be_nil
end
end
diff --git a/spec/controllers/projects/runners_controller_spec.rb b/spec/controllers/projects/runners_controller_spec.rb
index 39b45a7133c..70ff77d7ff0 100644
--- a/spec/controllers/projects/runners_controller_spec.rb
+++ b/spec/controllers/projects/runners_controller_spec.rb
@@ -111,7 +111,7 @@ RSpec.describe Projects::RunnersController do
expect(response).to have_gitlab_http_status(:unauthorized)
expect(project.shared_runners_enabled).to eq(false)
- expect(json_response['error']).to eq('Cannot enable shared runners because parent group does not allow it')
+ expect(json_response['error']).to eq('Shared runners enabled cannot be enabled because parent group does not allow it')
end
end
end
diff --git a/spec/controllers/projects/services_controller_spec.rb b/spec/controllers/projects/services_controller_spec.rb
index bd8a4da76ae..aad65880795 100644
--- a/spec/controllers/projects/services_controller_spec.rb
+++ b/spec/controllers/projects/services_controller_spec.rb
@@ -8,7 +8,7 @@ RSpec.describe Projects::ServicesController do
let(:project) { create(:project, :repository) }
let(:user) { create(:user) }
- let(:service) { create(:jira_service, project: project) }
+ let(:service) { create(:jira_integration, project: project) }
let(:service_params) { { username: 'username', password: 'password', url: 'http://example.com' } }
before do
@@ -56,7 +56,7 @@ RSpec.describe Projects::ServicesController do
end
it 'returns success' do
- stub_jira_service_test
+ stub_jira_integration_test
expect(Gitlab::HTTP).to receive(:get).with('/rest/api/2/serverInfo', any_args).and_call_original
@@ -67,7 +67,7 @@ RSpec.describe Projects::ServicesController do
end
it 'returns success' do
- stub_jira_service_test
+ stub_jira_integration_test
expect(Gitlab::HTTP).to receive(:get).with('/rest/api/2/serverInfo', any_args).and_call_original
@@ -130,7 +130,9 @@ RSpec.describe Projects::ServicesController do
end
context 'with the Slack integration' do
- let_it_be(:service) { build(:slack_service) }
+ let_it_be(:integration) { build(:integrations_slack) }
+
+ let(:service) { integration } # TODO: remove when https://gitlab.com/gitlab-org/gitlab/-/issues/330300 is complete
it 'returns an error response when the URL is blocked' do
put :test, params: project_params(service: { webhook: 'http://127.0.0.1' })
@@ -210,7 +212,7 @@ RSpec.describe Projects::ServicesController do
it_behaves_like 'service update'
end
- context 'wehn param `inherit_from_id` is set to empty string' do
+ context 'when param `inherit_from_id` is set to empty string' do
let(:service_params) { { inherit_from_id: '' } }
it 'sets inherit_from_id to nil' do
@@ -218,8 +220,8 @@ RSpec.describe Projects::ServicesController do
end
end
- context 'wehn param `inherit_from_id` is set to some value' do
- let(:instance_service) { create(:jira_service, :instance) }
+ context 'when param `inherit_from_id` is set to some value' do
+ let(:instance_service) { create(:jira_integration, :instance) }
let(:service_params) { { inherit_from_id: instance_service.id } }
it 'sets inherit_from_id to value' do
@@ -230,7 +232,7 @@ RSpec.describe Projects::ServicesController do
describe 'as JSON' do
before do
- stub_jira_service_test
+ stub_jira_integration_test
put :update, params: project_params(service: service_params, format: :json)
end
diff --git a/spec/factories/integration_data.rb b/spec/factories/integration_data.rb
index a6b2693b8df..a7406794437 100644
--- a/spec/factories/integration_data.rb
+++ b/spec/factories/integration_data.rb
@@ -4,7 +4,7 @@
# The factories are used when creating integrations.
FactoryBot.define do
factory :jira_tracker_data, class: 'Integrations::JiraTrackerData' do
- integration factory: :jira_service
+ integration factory: :jira_integration
end
factory :issue_tracker_data, class: 'Integrations::IssueTrackerData' do
diff --git a/spec/factories/integrations.rb b/spec/factories/integrations.rb
index a64d047c89b..47c9f885845 100644
--- a/spec/factories/integrations.rb
+++ b/spec/factories/integrations.rb
@@ -27,7 +27,7 @@ FactoryBot.define do
end
end
- factory :prometheus_service do
+ factory :prometheus_service, class: 'Integrations::Prometheus' do
project
active { true }
properties do
@@ -45,7 +45,7 @@ FactoryBot.define do
token { 'test' }
end
- factory :jira_service, class: 'Integrations::Jira' do
+ factory :jira_integration, class: 'Integrations::Jira' do
project
active { true }
type { 'JiraService' }
@@ -91,7 +91,7 @@ FactoryBot.define do
issue_tracker
end
- factory :redmine_service, class: 'Integrations::Redmine' do
+ factory :redmine_integration, class: 'Integrations::Redmine' do
project
active { true }
issue_tracker
@@ -160,14 +160,15 @@ FactoryBot.define do
password { 'my-secret-password' }
end
- factory :slack_service, class: 'Integrations::Slack' do
+ # avoids conflict with slack_integration factory
+ factory :integrations_slack, class: 'Integrations::Slack' do
project
active { true }
webhook { 'https://slack.service.url' }
type { 'SlackService' }
end
- factory :slack_slash_commands_service, class: 'Integrations::SlackSlashCommands' do
+ factory :slack_slash_commands_integration, class: 'Integrations::SlackSlashCommands' do
project
active { true }
type { 'SlackSlashCommandsService' }
diff --git a/spec/factories/projects.rb b/spec/factories/projects.rb
index 6641d8749f9..b2f6ae5573d 100644
--- a/spec/factories/projects.rb
+++ b/spec/factories/projects.rb
@@ -396,7 +396,7 @@ FactoryBot.define do
factory :redmine_project, parent: :project do
has_external_issue_tracker { true }
- redmine_service
+ redmine_integration
end
factory :youtrack_project, parent: :project do
@@ -408,7 +408,7 @@ FactoryBot.define do
factory :jira_project, parent: :project do
has_external_issue_tracker { true }
- jira_service
+ jira_integration
end
factory :prometheus_project, parent: :project do
diff --git a/spec/factories/usage_data.rb b/spec/factories/usage_data.rb
index 2aa926e4dd8..de1e4aa82fa 100644
--- a/spec/factories/usage_data.rb
+++ b/spec/factories/usage_data.rb
@@ -9,10 +9,10 @@ FactoryBot.define do
projects << create(:project, :repository)
group = create(:group)
create(:board, project: projects[0])
- create(:jira_service, project: projects[0])
- create(:jira_service, :without_properties_callback, project: projects[1])
- create(:jira_service, :jira_cloud_service, project: projects[2])
- create(:jira_service, :without_properties_callback, project: projects[3], properties: { url: 'https://mysite.atlassian.net' })
+ create(:jira_integration, project: projects[0])
+ create(:jira_integration, :without_properties_callback, project: projects[1])
+ create(:jira_integration, :jira_cloud_service, project: projects[2])
+ create(:jira_integration, :without_properties_callback, project: projects[3], properties: { url: 'https://mysite.atlassian.net' })
jira_label = create(:label, project: projects[0])
create(:jira_import_state, :finished, project: projects[0], label: jira_label, failed_to_import_count: 2, imported_issues_count: 7, total_issue_count: 9)
create(:jira_import_state, :finished, project: projects[1], label: jira_label, imported_issues_count: 3, total_issue_count: 3)
diff --git a/spec/features/admin/services/admin_visits_service_templates_spec.rb b/spec/features/admin/services/admin_visits_service_templates_spec.rb
index 9d011b97f63..d367867ebb5 100644
--- a/spec/features/admin/services/admin_visits_service_templates_spec.rb
+++ b/spec/features/admin/services/admin_visits_service_templates_spec.rb
@@ -4,7 +4,7 @@ require 'spec_helper'
RSpec.describe 'Admin visits service templates' do
let(:admin) { create(:user, :admin) }
- let(:slack_service) { Integration.for_template.find { |s| s.type == 'SlackService' } }
+ let(:slack_integration) { Integration.for_template.find { |s| s.type == 'SlackService' } }
before do
sign_in(admin)
@@ -23,7 +23,7 @@ RSpec.describe 'Admin visits service templates' do
context 'with an active service template' do
before do
- create(:slack_service, :template, active: true)
+ create(:integrations_slack, :template, active: true)
visit(admin_application_settings_services_path)
end
@@ -33,20 +33,20 @@ RSpec.describe 'Admin visits service templates' do
context 'without instance-level integration' do
it 'shows a link to service template' do
- expect(page).to have_link('Slack', href: edit_admin_application_settings_service_path(slack_service.id))
- expect(page).not_to have_link('Slack', href: edit_admin_application_settings_integration_path(slack_service))
+ expect(page).to have_link('Slack', href: edit_admin_application_settings_service_path(slack_integration.id))
+ expect(page).not_to have_link('Slack', href: edit_admin_application_settings_integration_path(slack_integration))
end
end
context 'with instance-level integration' do
before do
- create(:slack_service, instance: true, project: nil)
+ create(:integrations_slack, instance: true, project: nil)
visit(admin_application_settings_services_path)
end
it 'shows a link to instance-level integration' do
- expect(page).not_to have_link('Slack', href: edit_admin_application_settings_service_path(slack_service.id))
- expect(page).to have_link('Slack', href: edit_admin_application_settings_integration_path(slack_service))
+ expect(page).not_to have_link('Slack', href: edit_admin_application_settings_service_path(slack_integration.id))
+ expect(page).to have_link('Slack', href: edit_admin_application_settings_integration_path(slack_integration))
end
end
end
diff --git a/spec/features/issuables/markdown_references/jira_spec.rb b/spec/features/issuables/markdown_references/jira_spec.rb
index a3a259e21a1..ae9c8d31c02 100644
--- a/spec/features/issuables/markdown_references/jira_spec.rb
+++ b/spec/features/issuables/markdown_references/jira_spec.rb
@@ -81,7 +81,7 @@ RSpec.describe "Jira", :js do
context "when both external and internal issues trackers are enabled for the actual project" do
before do
- create(:jira_service, project: actual_project)
+ create(:jira_integration, project: actual_project)
end
include_examples "correct references" do
@@ -94,7 +94,7 @@ RSpec.describe "Jira", :js do
let(:actual_project) { create(:project, :public, :repository, :issues_disabled) }
before do
- create(:jira_service, project: actual_project)
+ create(:jira_integration, project: actual_project)
end
include_examples "correct references" do
@@ -125,7 +125,7 @@ RSpec.describe "Jira", :js do
context "when both external and internal issues trackers are enabled for the actual project" do
before do
- create(:jira_service, project: actual_project)
+ create(:jira_integration, project: actual_project)
end
include_examples "correct references" do
@@ -138,7 +138,7 @@ RSpec.describe "Jira", :js do
let(:actual_project) { create(:project, :public, :repository, :issues_disabled) }
before do
- create(:jira_service, project: actual_project)
+ create(:jira_integration, project: actual_project)
end
include_examples "correct references" do
diff --git a/spec/features/issues/user_bulk_edits_issues_spec.rb b/spec/features/issues/user_bulk_edits_issues_spec.rb
index e34c16e27ba..44c23813e3c 100644
--- a/spec/features/issues/user_bulk_edits_issues_spec.rb
+++ b/spec/features/issues/user_bulk_edits_issues_spec.rb
@@ -13,26 +13,26 @@ RSpec.describe 'Multiple issue updating from issues#index', :js do
end
context 'status' do
- it 'sets to closed' do
+ it 'sets to closed', :js do
visit project_issues_path(project)
click_button 'Edit issues'
check 'Select all'
click_button 'Select status'
- click_link 'Closed'
+ click_button 'Closed'
click_update_issues_button
expect(page).to have_selector('.issue', count: 0)
end
- it 'sets to open' do
+ it 'sets to open', :js do
create_closed
visit project_issues_path(project, state: 'closed')
click_button 'Edit issues'
check 'Select all'
click_button 'Select status'
- click_link 'Open'
+ click_button 'Open'
click_update_issues_button
expect(page).to have_selector('.issue', count: 0)
diff --git a/spec/features/merge_requests/user_mass_updates_spec.rb b/spec/features/merge_requests/user_mass_updates_spec.rb
index 0fe69c5ca5b..46c12784ea8 100644
--- a/spec/features/merge_requests/user_mass_updates_spec.rb
+++ b/spec/features/merge_requests/user_mass_updates_spec.rb
@@ -18,7 +18,7 @@ RSpec.describe 'Merge requests > User mass updates', :js do
visit project_merge_requests_path(project)
end
- it 'closes merge request' do
+ it 'closes merge request', :js do
change_status('Closed')
expect(page).to have_selector('.merge-request', count: 0)
@@ -31,7 +31,7 @@ RSpec.describe 'Merge requests > User mass updates', :js do
visit project_merge_requests_path(project, state: 'closed')
end
- it 'reopens merge request' do
+ it 'reopens merge request', :js do
change_status('Open')
expect(page).to have_selector('.merge-request', count: 0)
@@ -109,7 +109,7 @@ RSpec.describe 'Merge requests > User mass updates', :js do
click_button 'Edit merge requests'
check 'Select all'
click_button 'Select status'
- click_link text
+ click_button text
click_update_merge_requests_button
end
diff --git a/spec/features/projects/integrations/user_activates_jira_spec.rb b/spec/features/projects/integrations/user_activates_jira_spec.rb
index 10f84aae93f..d7679d38cae 100644
--- a/spec/features/projects/integrations/user_activates_jira_spec.rb
+++ b/spec/features/projects/integrations/user_activates_jira_spec.rb
@@ -65,7 +65,7 @@ RSpec.describe 'User activates Jira', :js do
include JiraServiceHelper
before do
- stub_jira_service_test
+ stub_jira_integration_test
visit_project_integration('Jira')
fill_form(disable: true)
click_save_integration
@@ -105,14 +105,14 @@ RSpec.describe 'User activates Jira', :js do
click_save_integration
expect(page).to have_content('Jira settings saved and active.')
- expect(project.reload.jira_service.data_fields).to have_attributes(
+ expect(project.reload.jira_integration.data_fields).to have_attributes(
jira_issue_transition_automatic: false,
jira_issue_transition_id: '1, 2, 3'
)
end
it 'using automatic transitions' do
- create(:jira_service, project: project, jira_issue_transition_automatic: false, jira_issue_transition_id: '1, 2, 3')
+ create(:jira_integration, project: project, jira_issue_transition_automatic: false, jira_issue_transition_id: '1, 2, 3')
visit_project_integration('Jira')
expect(page).to have_field('Enable Jira transitions', checked: true)
@@ -123,14 +123,14 @@ RSpec.describe 'User activates Jira', :js do
click_save_integration
expect(page).to have_content('Jira settings saved and active.')
- expect(project.reload.jira_service.data_fields).to have_attributes(
+ expect(project.reload.jira_integration.data_fields).to have_attributes(
jira_issue_transition_automatic: true,
jira_issue_transition_id: ''
)
end
it 'disabling issue transitions' do
- create(:jira_service, project: project, jira_issue_transition_automatic: true, jira_issue_transition_id: '1, 2, 3')
+ create(:jira_integration, project: project, jira_issue_transition_automatic: true, jira_issue_transition_id: '1, 2, 3')
visit_project_integration('Jira')
expect(page).to have_field('Enable Jira transitions', checked: true)
@@ -140,7 +140,7 @@ RSpec.describe 'User activates Jira', :js do
click_save_integration
expect(page).to have_content('Jira settings saved and active.')
- expect(project.reload.jira_service.data_fields).to have_attributes(
+ expect(project.reload.jira_integration.data_fields).to have_attributes(
jira_issue_transition_automatic: false,
jira_issue_transition_id: ''
)
diff --git a/spec/features/projects/services/user_activates_slack_notifications_spec.rb b/spec/features/projects/services/user_activates_slack_notifications_spec.rb
index dec83ff1489..d5fe8b083ba 100644
--- a/spec/features/projects/services/user_activates_slack_notifications_spec.rb
+++ b/spec/features/projects/services/user_activates_slack_notifications_spec.rb
@@ -20,12 +20,12 @@ RSpec.describe 'User activates Slack notifications', :js do
end
context 'when service is already configured' do
- let(:service) { Integrations::Slack.new }
- let(:project) { create(:project, slack_service: service) }
+ let(:integration) { Integrations::Slack.new }
+ let(:project) { create(:project, slack_integration: integration) }
before do
- service.fields
- service.update!(
+ integration.fields
+ integration.update!(
push_channel: 1,
issue_channel: 2,
merge_request_channel: 3,
@@ -34,7 +34,7 @@ RSpec.describe 'User activates Slack notifications', :js do
pipeline_channel: 6,
wiki_page_channel: 7)
- visit(edit_project_service_path(project, service))
+ visit(edit_project_service_path(project, integration))
end
it 'filters events by channel' do
diff --git a/spec/frontend/issuable_bulk_update_sidebar/components/status_select_spec.js b/spec/frontend/issuable_bulk_update_sidebar/components/status_select_spec.js
new file mode 100644
index 00000000000..09dcb963154
--- /dev/null
+++ b/spec/frontend/issuable_bulk_update_sidebar/components/status_select_spec.js
@@ -0,0 +1,77 @@
+import { GlDropdown, GlDropdownItem } from '@gitlab/ui';
+import { shallowMount } from '@vue/test-utils';
+import StatusSelect from '~/issuable_bulk_update_sidebar/components/status_select.vue';
+import { ISSUE_STATUS_SELECT_OPTIONS } from '~/issuable_bulk_update_sidebar/constants';
+
+describe('StatusSelect', () => {
+ let wrapper;
+
+ const findDropdown = () => wrapper.findComponent(GlDropdown);
+ const findAllDropdownItems = () => wrapper.findAllComponents(GlDropdownItem);
+ const findHiddenInput = () => wrapper.find('input');
+
+ function createComponent() {
+ wrapper = shallowMount(StatusSelect);
+ }
+
+ afterEach(() => {
+ wrapper.destroy();
+ });
+
+ describe('with no value selected', () => {
+ beforeEach(() => {
+ createComponent();
+ });
+
+ it('renders default text', () => {
+ expect(findDropdown().props('text')).toBe('Select status');
+ });
+
+ it('renders dropdown items with `is-checked` prop set to `false`', () => {
+ const dropdownItems = findAllDropdownItems();
+
+ expect(dropdownItems.at(0).props('isChecked')).toBe(false);
+ expect(dropdownItems.at(1).props('isChecked')).toBe(false);
+ });
+ });
+
+ describe('when selecting a value', () => {
+ const selectItemAtIndex = 0;
+
+ beforeEach(async () => {
+ createComponent();
+ await findAllDropdownItems().at(selectItemAtIndex).vm.$emit('click');
+ });
+
+ it('updates value of the hidden input', () => {
+ expect(findHiddenInput().attributes('value')).toBe(
+ ISSUE_STATUS_SELECT_OPTIONS[selectItemAtIndex].value,
+ );
+ });
+
+ it('updates the dropdown text prop', () => {
+ expect(findDropdown().props('text')).toBe(
+ ISSUE_STATUS_SELECT_OPTIONS[selectItemAtIndex].text,
+ );
+ });
+
+ it('sets dropdown item `is-checked` prop to `true`', () => {
+ const dropdownItems = findAllDropdownItems();
+
+ expect(dropdownItems.at(0).props('isChecked')).toBe(true);
+ expect(dropdownItems.at(1).props('isChecked')).toBe(false);
+ });
+
+ describe('when selecting the value that is already selected', () => {
+ it('clears dropdown selection', async () => {
+ await findAllDropdownItems().at(selectItemAtIndex).vm.$emit('click');
+
+ const dropdownItems = findAllDropdownItems();
+
+ expect(dropdownItems.at(0).props('isChecked')).toBe(false);
+ expect(dropdownItems.at(1).props('isChecked')).toBe(false);
+ expect(findDropdown().props('text')).toBe('Select status');
+ });
+ });
+ });
+});
diff --git a/spec/frontend/issuable_spec.js b/spec/frontend/issuable_spec.js
index 9c8f1e04609..e0bd7b802c9 100644
--- a/spec/frontend/issuable_spec.js
+++ b/spec/frontend/issuable_spec.js
@@ -1,5 +1,5 @@
+import issuableInitBulkUpdateSidebar from '~/issuable_bulk_update_sidebar/issuable_init_bulk_update_sidebar';
import IssuableIndex from '~/issuable_index';
-import issuableInitBulkUpdateSidebar from '~/issuable_init_bulk_update_sidebar';
describe('Issuable', () => {
describe('initBulkUpdate', () => {
diff --git a/spec/frontend/vue_shared/components/security_reports/artifact_downloads/merge_request_artifact_download_spec.js b/spec/frontend/vue_shared/components/security_reports/artifact_downloads/merge_request_artifact_download_spec.js
index d58c87d66cb..395c74dcba6 100644
--- a/spec/frontend/vue_shared/components/security_reports/artifact_downloads/merge_request_artifact_download_spec.js
+++ b/spec/frontend/vue_shared/components/security_reports/artifact_downloads/merge_request_artifact_download_spec.js
@@ -3,7 +3,7 @@ import Vue from 'vue';
import VueApollo from 'vue-apollo';
import createMockApollo from 'helpers/mock_apollo_helper';
import {
- expectedDownloadDropdownProps,
+ expectedDownloadDropdownPropsWithTitle,
securityReportMergeRequestDownloadPathsQueryResponse,
} from 'jest/vue_shared/security_reports/mock_data';
import createFlash from '~/flash';
@@ -80,7 +80,7 @@ describe('Merge request artifact Download', () => {
});
it('renders the download dropdown', () => {
- expect(findDownloadDropdown().props()).toEqual(expectedDownloadDropdownProps);
+ expect(findDownloadDropdown().props()).toEqual(expectedDownloadDropdownPropsWithTitle);
});
});
diff --git a/spec/frontend/vue_shared/security_reports/components/security_report_download_dropdown_spec.js b/spec/frontend/vue_shared/security_reports/components/security_report_download_dropdown_spec.js
index 9138d2d3f4c..4b75da0b126 100644
--- a/spec/frontend/vue_shared/security_reports/components/security_report_download_dropdown_spec.js
+++ b/spec/frontend/vue_shared/security_reports/components/security_report_download_dropdown_spec.js
@@ -40,14 +40,13 @@ describe('SecurityReportDownloadDropdown component', () => {
expect(findDropdown().props('loading')).toBe(false);
});
- it('renders a dropdown items for each artifact', () => {
+ it('renders a dropdown item for each artifact', () => {
artifacts.forEach((artifact, i) => {
const item = findDropdownItems().at(i);
expect(item.text()).toContain(artifact.name);
- expect(item.attributes()).toMatchObject({
- href: artifact.path,
- download: expect.any(String),
- });
+
+ expect(item.element.getAttribute('href')).toBe(artifact.path);
+ expect(item.element.getAttribute('download')).toBeDefined();
});
});
});
@@ -61,4 +60,32 @@ describe('SecurityReportDownloadDropdown component', () => {
expect(findDropdown().props('loading')).toBe(true);
});
});
+
+ describe('given title props', () => {
+ beforeEach(() => {
+ createComponent({ artifacts: [], loading: true, title: 'test title' });
+ });
+
+ it('should render title', () => {
+ expect(findDropdown().attributes('title')).toBe('test title');
+ });
+
+ it('should not render text', () => {
+ expect(findDropdown().text().trim()).toBe('');
+ });
+ });
+
+ describe('given text props', () => {
+ beforeEach(() => {
+ createComponent({ artifacts: [], loading: true, text: 'test text' });
+ });
+
+ it('should not render title', () => {
+ expect(findDropdown().props().title).not.toBeDefined();
+ });
+
+ it('should render text', () => {
+ expect(findDropdown().props().text).toContain('test text');
+ });
+ });
});
diff --git a/spec/frontend/vue_shared/security_reports/mock_data.js b/spec/frontend/vue_shared/security_reports/mock_data.js
index bd9ce3b7314..06631710509 100644
--- a/spec/frontend/vue_shared/security_reports/mock_data.js
+++ b/spec/frontend/vue_shared/security_reports/mock_data.js
@@ -581,9 +581,18 @@ export const secretDetectionArtifacts = [
},
];
-export const expectedDownloadDropdownProps = {
+export const expectedDownloadDropdownPropsWithTitle = {
loading: false,
artifacts: [...secretDetectionArtifacts, ...sastArtifacts],
+ text: '',
+ title: 'Download results',
+};
+
+export const expectedDownloadDropdownPropsWithText = {
+ loading: false,
+ artifacts: [...secretDetectionArtifacts, ...sastArtifacts],
+ title: '',
+ text: 'Download results',
};
/**
diff --git a/spec/frontend/vue_shared/security_reports/security_reports_app_spec.js b/spec/frontend/vue_shared/security_reports/security_reports_app_spec.js
index 038d7754776..bef538e1ff1 100644
--- a/spec/frontend/vue_shared/security_reports/security_reports_app_spec.js
+++ b/spec/frontend/vue_shared/security_reports/security_reports_app_spec.js
@@ -8,7 +8,7 @@ import createMockApollo from 'helpers/mock_apollo_helper';
import { trimText } from 'helpers/text_helper';
import waitForPromises from 'helpers/wait_for_promises';
import {
- expectedDownloadDropdownProps,
+ expectedDownloadDropdownPropsWithText,
securityReportMergeRequestDownloadPathsQueryNoArtifactsResponse,
securityReportMergeRequestDownloadPathsQueryResponse,
sastDiffSuccessMock,
@@ -99,7 +99,7 @@ describe('Security reports app', () => {
});
it('renders the download dropdown', () => {
- expect(findDownloadDropdown().props()).toEqual(expectedDownloadDropdownProps);
+ expect(findDownloadDropdown().props()).toEqual(expectedDownloadDropdownPropsWithText);
});
it('renders the expected message', () => {
@@ -203,7 +203,7 @@ describe('Security reports app', () => {
});
it('renders the download dropdown', () => {
- expect(findDownloadDropdown().props()).toEqual(expectedDownloadDropdownProps);
+ expect(findDownloadDropdown().props()).toEqual(expectedDownloadDropdownPropsWithText);
});
});
@@ -225,7 +225,7 @@ describe('Security reports app', () => {
});
it('renders the download dropdown', () => {
- expect(findDownloadDropdown().props()).toEqual(expectedDownloadDropdownProps);
+ expect(findDownloadDropdown().props()).toEqual(expectedDownloadDropdownPropsWithText);
});
});
@@ -247,7 +247,7 @@ describe('Security reports app', () => {
});
it('renders the download dropdown', () => {
- expect(findDownloadDropdown().props()).toEqual(expectedDownloadDropdownProps);
+ expect(findDownloadDropdown().props()).toEqual(expectedDownloadDropdownPropsWithText);
});
});
diff --git a/spec/graphql/mutations/alert_management/prometheus_integration/create_spec.rb b/spec/graphql/mutations/alert_management/prometheus_integration/create_spec.rb
index 7ab0f43d674..888b56d6a23 100644
--- a/spec/graphql/mutations/alert_management/prometheus_integration/create_spec.rb
+++ b/spec/graphql/mutations/alert_management/prometheus_integration/create_spec.rb
@@ -32,7 +32,7 @@ RSpec.describe Mutations::AlertManagement::PrometheusIntegration::Create do
context 'when UpdateService responds with success' do
it 'returns the integration with no errors' do
expect(resolve).to eq(
- integration: ::PrometheusService.last!,
+ integration: ::Integrations::Prometheus.last!,
errors: []
)
end
diff --git a/spec/graphql/resolvers/projects/jira_projects_resolver_spec.rb b/spec/graphql/resolvers/projects/jira_projects_resolver_spec.rb
index c375345250d..8c36153d485 100644
--- a/spec/graphql/resolvers/projects/jira_projects_resolver_spec.rb
+++ b/spec/graphql/resolvers/projects/jira_projects_resolver_spec.rb
@@ -22,7 +22,7 @@ RSpec.describe Resolvers::Projects::JiraProjectsResolver do
end
context 'when project has no Jira service' do
- let_it_be(:jira_service) { nil }
+ let_it_be(:jira_integration) { nil }
context 'when user is a maintainer' do
before do
@@ -34,7 +34,7 @@ RSpec.describe Resolvers::Projects::JiraProjectsResolver do
end
context 'when project has Jira service' do
- let(:jira_service) { create(:jira_service, project: project) }
+ let(:jira_integration) { create(:jira_integration, project: project) }
context 'when user is a developer' do
before do
@@ -98,6 +98,6 @@ RSpec.describe Resolvers::Projects::JiraProjectsResolver do
end
def resolve_jira_projects(args = {}, context = { current_user: user })
- resolve(described_class, obj: jira_service, args: args, ctx: context)
+ resolve(described_class, obj: jira_integration, args: args, ctx: context)
end
end
diff --git a/spec/graphql/resolvers/projects/services_resolver_spec.rb b/spec/graphql/resolvers/projects/services_resolver_spec.rb
index a1b631113b2..6da99c8448e 100644
--- a/spec/graphql/resolvers/projects/services_resolver_spec.rb
+++ b/spec/graphql/resolvers/projects/services_resolver_spec.rb
@@ -40,7 +40,7 @@ RSpec.describe Resolvers::Projects::ServicesResolver do
context 'when project has services' do
let_it_be(:project) { create(:project, :private) }
- let_it_be(:jira_service) { create(:jira_service, project: project) }
+ let_it_be(:jira_integration) { create(:jira_integration, project: project) }
context 'when user cannot access services' do
context 'when anonymous user' do
diff --git a/spec/graphql/types/projects/service_type_spec.rb b/spec/graphql/types/projects/service_type_spec.rb
index 567bdfaec24..cb09f1ca6cc 100644
--- a/spec/graphql/types/projects/service_type_spec.rb
+++ b/spec/graphql/types/projects/service_type_spec.rb
@@ -7,7 +7,7 @@ RSpec.describe Types::Projects::ServiceType do
describe ".resolve_type" do
it 'resolves the corresponding type for objects' do
- expect(described_class.resolve_type(build(:jira_service), {})).to eq(Types::Projects::Services::JiraServiceType)
+ expect(described_class.resolve_type(build(:jira_integration), {})).to eq(Types::Projects::Services::JiraServiceType)
expect(described_class.resolve_type(build(:service), {})).to eq(Types::Projects::Services::BaseServiceType)
expect(described_class.resolve_type(build(:drone_ci_integration), {})).to eq(Types::Projects::Services::BaseServiceType)
expect(described_class.resolve_type(build(:custom_issue_tracker_integration), {})).to eq(Types::Projects::Services::BaseServiceType)
diff --git a/spec/helpers/operations_helper_spec.rb b/spec/helpers/operations_helper_spec.rb
index e1bd477bc75..61c6bb1ccf7 100644
--- a/spec/helpers/operations_helper_spec.rb
+++ b/spec/helpers/operations_helper_spec.rb
@@ -21,7 +21,7 @@ RSpec.describe OperationsHelper do
end
context 'initial service configuration' do
- let_it_be(:prometheus_service) { PrometheusService.new(project: project) }
+ let_it_be(:prometheus_service) { ::Integrations::Prometheus.new(project: project) }
before do
allow(project).to receive(:find_or_initialize_service).and_call_original
diff --git a/spec/helpers/services_helper_spec.rb b/spec/helpers/services_helper_spec.rb
index 6dd872225ba..af1873509f0 100644
--- a/spec/helpers/services_helper_spec.rb
+++ b/spec/helpers/services_helper_spec.rb
@@ -36,8 +36,8 @@ RSpec.describe ServicesHelper do
subject { helper.integration_form_data(integration) }
- context 'Slack service' do
- let(:integration) { build(:slack_service) }
+ context 'with Slack integration' do
+ let(:integration) { build(:integrations_slack) }
it { is_expected.to include(*fields) }
it { is_expected.not_to include(*jira_fields) }
@@ -48,14 +48,14 @@ RSpec.describe ServicesHelper do
end
context 'Jira service' do
- let(:integration) { build(:jira_service) }
+ let(:integration) { build(:jira_integration) }
it { is_expected.to include(*fields, *jira_fields) }
end
end
describe '#scoped_reset_integration_path' do
- let(:integration) { build_stubbed(:jira_service) }
+ let(:integration) { build_stubbed(:jira_integration) }
let(:group) { nil }
subject { helper.scoped_reset_integration_path(integration, group: group) }
@@ -75,7 +75,7 @@ RSpec.describe ServicesHelper do
end
context 'when a new integration is not persisted' do
- let_it_be(:integration) { build(:jira_service) }
+ let_it_be(:integration) { build(:jira_integration) }
it 'returns an empty string' do
is_expected.to eq('')
diff --git a/spec/lib/banzai/filter/references/external_issue_reference_filter_spec.rb b/spec/lib/banzai/filter/references/external_issue_reference_filter_spec.rb
index 7557b9a118d..503477b2115 100644
--- a/spec/lib/banzai/filter/references/external_issue_reference_filter_spec.rb
+++ b/spec/lib/banzai/filter/references/external_issue_reference_filter_spec.rb
@@ -118,7 +118,7 @@ RSpec.describe Banzai::Filter::References::ExternalIssueReferenceFilter do
end
context "redmine project" do
- let_it_be(:service) { create(:redmine_service, project: project) }
+ let_it_be(:integration) { create(:redmine_integration, project: project) }
before do
project.update!(issues_enabled: false)
@@ -183,7 +183,7 @@ RSpec.describe Banzai::Filter::References::ExternalIssueReferenceFilter do
end
context "jira project" do
- let_it_be(:service) { create(:jira_service, project: project) }
+ let_it_be(:service) { create(:jira_integration, project: project) }
let(:reference) { issue.to_reference }
diff --git a/spec/lib/gitlab/background_migration/backfill_jira_tracker_deployment_type2_spec.rb b/spec/lib/gitlab/background_migration/backfill_jira_tracker_deployment_type2_spec.rb
index 7fe82420364..58864aac084 100644
--- a/spec/lib/gitlab/background_migration/backfill_jira_tracker_deployment_type2_spec.rb
+++ b/spec/lib/gitlab/background_migration/backfill_jira_tracker_deployment_type2_spec.rb
@@ -3,18 +3,18 @@
require 'spec_helper'
RSpec.describe Gitlab::BackgroundMigration::BackfillJiraTrackerDeploymentType2, :migration, schema: 20201028182809 do
- let_it_be(:jira_service_temp) { described_class::JiraServiceTemp }
+ let_it_be(:jira_integration_temp) { described_class::JiraServiceTemp }
let_it_be(:jira_tracker_data_temp) { described_class::JiraTrackerDataTemp }
let_it_be(:atlassian_host) { 'https://api.atlassian.net' }
let_it_be(:mixedcase_host) { 'https://api.AtlassiaN.nEt' }
let_it_be(:server_host) { 'https://my.server.net' }
- let(:jira_service) { jira_service_temp.create!(type: 'JiraService', active: true, category: 'issue_tracker') }
+ let(:jira_integration) { jira_integration_temp.create!(type: 'JiraService', active: true, category: 'issue_tracker') }
subject { described_class.new }
def create_tracker_data(options = {})
- jira_tracker_data_temp.create!({ service_id: jira_service.id }.merge(options))
+ jira_tracker_data_temp.create!({ service_id: jira_integration.id }.merge(options))
end
describe '#perform' do
diff --git a/spec/lib/gitlab/background_migration/migrate_issue_trackers_sensitive_data_spec.rb b/spec/lib/gitlab/background_migration/migrate_issue_trackers_sensitive_data_spec.rb
index 80879c8c6d9..f2cd2acd4f3 100644
--- a/spec/lib/gitlab/background_migration/migrate_issue_trackers_sensitive_data_spec.rb
+++ b/spec/lib/gitlab/background_migration/migrate_issue_trackers_sensitive_data_spec.rb
@@ -283,11 +283,11 @@ RSpec.describe Gitlab::BackgroundMigration::MigrateIssueTrackersSensitiveData, s
end
context 'with Jira service with invalid properties, valid Jira service and valid bugzilla service' do
- let!(:jira_service_invalid) do
+ let!(:jira_integration_invalid) do
services.create!(id: 19, title: 'invalid - title', description: 'invalid - description', type: 'JiraService', properties: 'invalid data', category: 'issue_tracker')
end
- let!(:jira_service_valid) do
+ let!(:jira_integration_valid) do
services.create!(id: 20, type: 'JiraService', properties: jira_properties.to_json, category: 'issue_tracker')
end
@@ -298,21 +298,21 @@ RSpec.describe Gitlab::BackgroundMigration::MigrateIssueTrackersSensitiveData, s
it 'migrates data for the valid service' do
subject
- jira_service_invalid.reload
- expect(JiraTrackerData.find_by(service_id: jira_service_invalid.id)).to be_nil
- expect(jira_service_invalid.title).to eq('invalid - title')
- expect(jira_service_invalid.description).to eq('invalid - description')
- expect(jira_service_invalid.properties).to eq('invalid data')
+ jira_integration_invalid.reload
+ expect(JiraTrackerData.find_by(service_id: jira_integration_invalid.id)).to be_nil
+ expect(jira_integration_invalid.title).to eq('invalid - title')
+ expect(jira_integration_invalid.description).to eq('invalid - description')
+ expect(jira_integration_invalid.properties).to eq('invalid data')
- jira_service_valid.reload
- data = JiraTrackerData.find_by(service_id: jira_service_valid.id)
+ jira_integration_valid.reload
+ data = JiraTrackerData.find_by(service_id: jira_integration_valid.id)
expect(data.url).to eq(url)
expect(data.api_url).to eq(api_url)
expect(data.username).to eq(username)
expect(data.password).to eq(password)
- expect(jira_service_valid.title).to eq(title)
- expect(jira_service_valid.description).to eq(description)
+ expect(jira_integration_valid.title).to eq(title)
+ expect(jira_integration_valid.description).to eq(description)
bugzilla_integration_valid.reload
data = IssueTrackerData.find_by(service_id: bugzilla_integration_valid.id)
diff --git a/spec/lib/gitlab/database_importers/self_monitoring/project/create_service_spec.rb b/spec/lib/gitlab/database_importers/self_monitoring/project/create_service_spec.rb
index 28291508ac0..fe08cdb1213 100644
--- a/spec/lib/gitlab/database_importers/self_monitoring/project/create_service_spec.rb
+++ b/spec/lib/gitlab/database_importers/self_monitoring/project/create_service_spec.rb
@@ -140,7 +140,7 @@ RSpec.describe Gitlab::DatabaseImporters::SelfMonitoring::Project::CreateService
integrations = result[:project].reload.integrations
expect(integrations.count).to eq(1)
- # Ensures PrometheusService#self_monitoring_project? is true
+ # Ensures Integrations::Prometheus#self_monitoring_project? is true
expect(integrations.first.allow_local_api_url?).to be_truthy
end
diff --git a/spec/lib/gitlab/import_export/all_models.yml b/spec/lib/gitlab/import_export/all_models.yml
index 98da5bbcb1d..92f694bd6d4 100644
--- a/spec/lib/gitlab/import_export/all_models.yml
+++ b/spec/lib/gitlab/import_export/all_models.yml
@@ -369,7 +369,7 @@ project:
- emails_on_push_integration
- pipelines_email_integration
- mattermost_slash_commands_integration
-- slack_slash_commands_service
+- slack_slash_commands_integration
- irker_integration
- packagist_integration
- pivotaltracker_integration
@@ -377,17 +377,17 @@ project:
- flowdock_integration
- assembla_integration
- asana_integration
-- slack_service
+- slack_integration
- microsoft_teams_integration
- mattermost_integration
- hangouts_chat_integration
- unify_circuit_service
- buildkite_integration
- bamboo_integration
-- teamcity_service
+- teamcity_integration
- pushover_integration
-- jira_service
-- redmine_service
+- jira_integration
+- redmine_integration
- youtrack_service
- custom_issue_tracker_integration
- bugzilla_integration
@@ -485,7 +485,7 @@ project:
- protected_environments
- mirror_user
- push_rule
-- jenkins_service
+- jenkins_integration
- index_status
- feature_usage
- approval_rules
diff --git a/spec/lib/gitlab/jira_import/base_importer_spec.rb b/spec/lib/gitlab/jira_import/base_importer_spec.rb
index 9d8143775f9..479551095de 100644
--- a/spec/lib/gitlab/jira_import/base_importer_spec.rb
+++ b/spec/lib/gitlab/jira_import/base_importer_spec.rb
@@ -9,10 +9,10 @@ RSpec.describe Gitlab::JiraImport::BaseImporter do
describe 'with any inheriting class' do
context 'when project validation is ok' do
- let!(:jira_service) { create(:jira_service, project: project) }
+ let!(:jira_integration) { create(:jira_integration, project: project) }
before do
- stub_jira_service_test
+ stub_jira_integration_test
allow(Gitlab::JiraImport).to receive(:validate_project_settings!)
end
diff --git a/spec/lib/gitlab/jira_import/issues_importer_spec.rb b/spec/lib/gitlab/jira_import/issues_importer_spec.rb
index 4a32f0fd3a9..aead5405bd1 100644
--- a/spec/lib/gitlab/jira_import/issues_importer_spec.rb
+++ b/spec/lib/gitlab/jira_import/issues_importer_spec.rb
@@ -9,12 +9,12 @@ RSpec.describe Gitlab::JiraImport::IssuesImporter do
let_it_be(:current_user) { create(:user) }
let_it_be(:project) { create(:project) }
let_it_be(:jira_import) { create(:jira_import_state, project: project, user: current_user) }
- let_it_be(:jira_service) { create(:jira_service, project: project) }
+ let_it_be(:jira_integration) { create(:jira_integration, project: project) }
subject { described_class.new(project) }
before do
- stub_jira_service_test
+ stub_jira_integration_test
end
describe '#imported_items_cache_key' do
diff --git a/spec/lib/gitlab/jira_import/labels_importer_spec.rb b/spec/lib/gitlab/jira_import/labels_importer_spec.rb
index db98a83cb3c..71440590815 100644
--- a/spec/lib/gitlab/jira_import/labels_importer_spec.rb
+++ b/spec/lib/gitlab/jira_import/labels_importer_spec.rb
@@ -8,7 +8,7 @@ RSpec.describe Gitlab::JiraImport::LabelsImporter do
let_it_be(:user) { create(:user) }
let_it_be(:group) { create(:group) }
let_it_be(:project) { create(:project, group: group) }
- let_it_be(:jira_service) { create(:jira_service, project: project) }
+ let_it_be(:jira_integration) { create(:jira_integration, project: project) }
let(:importer) { described_class.new(project) }
@@ -20,7 +20,7 @@ RSpec.describe Gitlab::JiraImport::LabelsImporter do
describe '#execute', :clean_gitlab_redis_cache do
before do
- stub_jira_service_test
+ stub_jira_integration_test
end
context 'when label is missing from jira import' do
diff --git a/spec/lib/gitlab/jira_import_spec.rb b/spec/lib/gitlab/jira_import_spec.rb
index 94fdff984d5..a7c73e79641 100644
--- a/spec/lib/gitlab/jira_import_spec.rb
+++ b/spec/lib/gitlab/jira_import_spec.rb
@@ -31,12 +31,12 @@ RSpec.describe Gitlab::JiraImport do
end
end
- context 'when Jira service was not setup' do
+ context 'when Jira integration was not set up' do
it_behaves_like 'raise Jira import error', 'Jira integration not configured.'
end
- context 'when Jira service exists' do
- let!(:jira_service) { create(:jira_service, project: project, active: true) }
+ context 'when Jira integration exists' do
+ let!(:jira_integration) { create(:jira_integration, project: project, active: true) }
context 'when Jira connection is not valid' do
before do
@@ -50,14 +50,14 @@ RSpec.describe Gitlab::JiraImport do
end
before do
- stub_jira_service_test
+ stub_jira_integration_test
end
context 'without user param' do
it_behaves_like 'jira configuration base checks'
context 'when jira connection is valid' do
- let!(:jira_service) { create(:jira_service, project: project, active: true) }
+ let!(:jira_integration) { create(:jira_integration, project: project, active: true) }
it 'does not return any error' do
expect { subject }.not_to raise_error
@@ -77,8 +77,8 @@ RSpec.describe Gitlab::JiraImport do
it_behaves_like 'jira configuration base checks'
- context 'when jira service is configured' do
- let!(:jira_service) { create(:jira_service, project: project, active: true) }
+ context 'when jira integration is configured' do
+ let!(:jira_integration) { create(:jira_integration, project: project, active: true) }
context 'when issues feature is disabled' do
let_it_be(:project, reload: true) { create(:project, :issues_disabled) }
@@ -96,7 +96,7 @@ RSpec.describe Gitlab::JiraImport do
context 'when user does not have permissions to run the import' do
before do
- create(:jira_service, project: project, active: true)
+ create(:jira_integration, project: project, active: true)
project.add_developer(user)
end
diff --git a/spec/lib/gitlab/reference_extractor_spec.rb b/spec/lib/gitlab/reference_extractor_spec.rb
index 229d49868d4..e040ddb1be4 100644
--- a/spec/lib/gitlab/reference_extractor_spec.rb
+++ b/spec/lib/gitlab/reference_extractor_spec.rb
@@ -227,7 +227,7 @@ RSpec.describe Gitlab::ReferenceExtractor do
context 'with an inactive external issue tracker' do
let(:project) { create(:project) }
- let!(:jira_service) { create(:jira_service, project: project, active: false) }
+ let!(:jira_integration) { create(:jira_integration, project: project, active: false) }
let(:issue) { create(:issue, project: project) }
context 'when GitLab issues are enabled' do
diff --git a/spec/lib/gitlab/usage_data_spec.rb b/spec/lib/gitlab/usage_data_spec.rb
index ea82de186f5..8652f9c4a58 100644
--- a/spec/lib/gitlab/usage_data_spec.rb
+++ b/spec/lib/gitlab/usage_data_spec.rb
@@ -435,8 +435,8 @@ RSpec.describe Gitlab::UsageData, :aggregate_failures do
create(:issue, project: project, author: User.support_bot)
create(:note, project: project, noteable: issue, author: user)
create(:todo, project: project, target: issue, author: user)
- create(:jira_service, :jira_cloud_service, active: true, project: create(:project, :jira_dvcs_cloud, creator: user))
- create(:jira_service, active: true, project: create(:project, :jira_dvcs_server, creator: user))
+ create(:jira_integration, :jira_cloud_service, active: true, project: create(:project, :jira_dvcs_cloud, creator: user))
+ create(:jira_integration, active: true, project: create(:project, :jira_dvcs_server, creator: user))
end
expect(described_class.usage_activity_by_stage_plan({})).to include(
diff --git a/spec/migrations/20190924152703_migrate_issue_trackers_data_spec.rb b/spec/migrations/20190924152703_migrate_issue_trackers_data_spec.rb
index 2999332509a..ea5192375f3 100644
--- a/spec/migrations/20190924152703_migrate_issue_trackers_data_spec.rb
+++ b/spec/migrations/20190924152703_migrate_issue_trackers_data_spec.rb
@@ -14,11 +14,11 @@ RSpec.describe MigrateIssueTrackersData do
}
end
- let!(:jira_service) do
+ let!(:jira_integration) do
services.create!(type: 'JiraService', properties: properties, category: 'issue_tracker')
end
- let!(:jira_service_nil) do
+ let!(:jira_integration_nil) do
services.create!(type: 'JiraService', properties: nil, category: 'issue_tracker')
end
@@ -55,7 +55,7 @@ RSpec.describe MigrateIssueTrackersData do
freeze_time do
migrate!
- expect(migration_name).to be_scheduled_delayed_migration(3.minutes, jira_service.id, bugzilla_integration.id)
+ expect(migration_name).to be_scheduled_delayed_migration(3.minutes, jira_integration.id, bugzilla_integration.id)
expect(migration_name).to be_scheduled_delayed_migration(6.minutes, youtrack_service.id, gitlab_service.id)
expect(BackgroundMigrationWorker.jobs.size).to eq(2)
end
diff --git a/spec/migrations/20200130145430_reschedule_migrate_issue_trackers_data_spec.rb b/spec/migrations/20200130145430_reschedule_migrate_issue_trackers_data_spec.rb
index 5516e2af3f1..90bbdca4d9c 100644
--- a/spec/migrations/20200130145430_reschedule_migrate_issue_trackers_data_spec.rb
+++ b/spec/migrations/20200130145430_reschedule_migrate_issue_trackers_data_spec.rb
@@ -14,11 +14,11 @@ RSpec.describe RescheduleMigrateIssueTrackersData do
}
end
- let!(:jira_service) do
+ let!(:jira_integration) do
services.create!(id: 10, type: 'JiraService', properties: properties, category: 'issue_tracker')
end
- let!(:jira_service_nil) do
+ let!(:jira_integration_nil) do
services.create!(id: 11, type: 'JiraService', properties: nil, category: 'issue_tracker')
end
@@ -56,7 +56,7 @@ RSpec.describe RescheduleMigrateIssueTrackersData do
freeze_time do
migrate!
- expect(migration_name).to be_scheduled_delayed_migration(3.minutes, jira_service.id, bugzilla_integration.id)
+ expect(migration_name).to be_scheduled_delayed_migration(3.minutes, jira_integration.id, bugzilla_integration.id)
expect(migration_name).to be_scheduled_delayed_migration(6.minutes, youtrack_service.id, gitlab_service.id)
expect(BackgroundMigrationWorker.jobs.size).to eq(2)
end
diff --git a/spec/migrations/delete_template_services_duplicated_by_type_spec.rb b/spec/migrations/delete_template_services_duplicated_by_type_spec.rb
index b5a29436159..577fea984da 100644
--- a/spec/migrations/delete_template_services_duplicated_by_type_spec.rb
+++ b/spec/migrations/delete_template_services_duplicated_by_type_spec.rb
@@ -14,11 +14,11 @@ RSpec.describe DeleteTemplateServicesDuplicatedByType do
end
it 'deletes service templates duplicated by type except the one with the lowest ID' do
- jenkins_service_id = services.where(type: 'JenkinsService').order(:id).pluck(:id).first
- jira_service_id = services.where(type: 'JiraService').pluck(:id).first
+ jenkins_integration_id = services.where(type: 'JenkinsService').order(:id).pluck(:id).first
+ jira_integration_id = services.where(type: 'JiraService').pluck(:id).first
migrate!
- expect(services.pluck(:id)).to contain_exactly(jenkins_service_id, jira_service_id)
+ expect(services.pluck(:id)).to contain_exactly(jenkins_integration_id, jira_integration_id)
end
end
diff --git a/spec/models/ci/pipeline_schedule_spec.rb b/spec/models/ci/pipeline_schedule_spec.rb
index cf73460bf1e..8de3ebb18b9 100644
--- a/spec/models/ci/pipeline_schedule_spec.rb
+++ b/spec/models/ci/pipeline_schedule_spec.rb
@@ -123,8 +123,15 @@ RSpec.describe Ci::PipelineSchedule do
'*/5 * * * *' | '0 * * * *' | (1.day.in_minutes / 1.hour.in_minutes).to_i | true | Time.zone.local(2021, 5, 27, 11, 0) | Time.zone.local(2021, 5, 27, 12, 0)
'*/5 * * * *' | '0 * * * *' | (1.day.in_minutes / 2.hours.in_minutes).to_i | true | Time.zone.local(2021, 5, 27, 11, 0) | Time.zone.local(2021, 5, 27, 12, 5)
'*/5 * * * *' | '0 1 * * *' | (1.day.in_minutes / 1.hour.in_minutes).to_i | true | Time.zone.local(2021, 5, 27, 1, 0) | Time.zone.local(2021, 5, 28, 1, 0)
- '*/5 * * * *' | '0 1 * * *' | (1.day.in_minutes / 1.hour.in_minutes).to_i | true | Time.zone.local(2021, 5, 27, 1, 0) | Time.zone.local(2021, 5, 28, 1, 0)
+ '*/5 * * * *' | '0 1 * * *' | (1.day.in_minutes / 10).to_i | true | Time.zone.local(2021, 5, 27, 1, 0) | Time.zone.local(2021, 5, 28, 1, 0)
+ '*/5 * * * *' | '0 1 * * *' | (1.day.in_minutes / 8).to_i | true | Time.zone.local(2021, 5, 27, 1, 0) | Time.zone.local(2021, 5, 28, 1, 0)
'*/5 * * * *' | '0 1 1 * *' | (1.day.in_minutes / 1.hour.in_minutes).to_i | true | Time.zone.local(2021, 5, 1, 1, 0) | Time.zone.local(2021, 6, 1, 1, 0)
+ '*/9 * * * *' | '0 1 1 * *' | (1.day.in_minutes / 1.hour.in_minutes).to_i | true | Time.zone.local(2021, 5, 1, 1, 9) | Time.zone.local(2021, 6, 1, 1, 0)
+ '*/9 * * * *' | '0 1 1 * *' | (1.day.in_minutes / 1.hour.in_minutes).to_i | false | Time.zone.local(2021, 5, 1, 1, 9) | Time.zone.local(2021, 6, 1, 1, 9)
+ '*/5 * * * *' | '59 14 * * *' | (1.day.in_minutes / 1.hour.in_minutes).to_i | true | Time.zone.local(2021, 5, 1, 15, 0) | Time.zone.local(2021, 5, 2, 15, 0)
+ '*/5 * * * *' | '59 14 * * *' | (1.day.in_minutes / 1.hour.in_minutes).to_i | false | Time.zone.local(2021, 5, 1, 15, 0) | Time.zone.local(2021, 5, 2, 15, 0)
+ '*/5 * * * *' | '45 21 1 2 *' | (1.day.in_minutes / 5).to_i | true | Time.zone.local(2021, 2, 1, 21, 45) | Time.zone.local(2022, 2, 1, 21, 45)
+ '*/5 * * * *' | '45 21 1 2 *' | (1.day.in_minutes / 5).to_i | false | Time.zone.local(2021, 2, 1, 21, 45) | Time.zone.local(2022, 2, 1, 21, 50)
end
with_them do
diff --git a/spec/models/concerns/has_integrations_spec.rb b/spec/models/concerns/has_integrations_spec.rb
index 6e55a1c8b01..6b3f75bfcfd 100644
--- a/spec/models/concerns/has_integrations_spec.rb
+++ b/spec/models/concerns/has_integrations_spec.rb
@@ -7,14 +7,14 @@ RSpec.describe HasIntegrations do
let_it_be(:project_2) { create(:project) }
let_it_be(:project_3) { create(:project) }
let_it_be(:project_4) { create(:project) }
- let_it_be(:instance_integration) { create(:jira_service, :instance) }
+ let_it_be(:instance_integration) { create(:jira_integration, :instance) }
before do
- create(:jira_service, project: project_1, inherit_from_id: instance_integration.id)
- create(:jira_service, project: project_2, inherit_from_id: nil)
- create(:jira_service, group: create(:group), project: nil, inherit_from_id: nil)
- create(:jira_service, project: project_3, inherit_from_id: nil)
- create(:slack_service, project: project_4, inherit_from_id: nil)
+ create(:jira_integration, project: project_1, inherit_from_id: instance_integration.id)
+ create(:jira_integration, project: project_2, inherit_from_id: nil)
+ create(:jira_integration, group: create(:group), project: nil, inherit_from_id: nil)
+ create(:jira_integration, project: project_3, inherit_from_id: nil)
+ create(:integrations_slack, project: project_4, inherit_from_id: nil)
end
describe '.with_custom_integration_for' do
diff --git a/spec/models/concerns/integrations/has_data_fields_spec.rb b/spec/models/concerns/integrations/has_data_fields_spec.rb
index 54e0ac9c5a5..b28fef571c6 100644
--- a/spec/models/concerns/integrations/has_data_fields_spec.rb
+++ b/spec/models/concerns/integrations/has_data_fields_spec.rb
@@ -84,7 +84,7 @@ RSpec.describe Integrations::HasDataFields do
context 'when data are stored in data_fields' do
let(:service) do
- create(:jira_service, url: url, username: username)
+ create(:jira_integration, url: url, username: username)
end
it_behaves_like 'data fields'
@@ -111,45 +111,52 @@ RSpec.describe Integrations::HasDataFields do
end
context 'when data are stored in properties' do
- let(:service) { create(:jira_service, :without_properties_callback, properties: properties) }
+ let(:integration) { create(:jira_integration, :without_properties_callback, properties: properties) }
- it_behaves_like 'data fields'
+ it_behaves_like 'data fields' do
+ let(:service) { integration }
+ end
describe '{arg}_was?' do
it 'returns nil when the property has not been assigned a new value' do
- service.username = 'new_username'
- service.validate
- expect(service.url_was).to be_nil
+ integration.username = 'new_username'
+ integration.validate
+
+ expect(integration.url_was).to be_nil
end
it 'returns initial value when the property has been assigned a different value' do
- service.url = 'http://example.com'
- service.validate
- expect(service.url_was).to eq('http://url.com')
+ integration.url = 'http://example.com'
+ integration.validate
+
+ expect(integration.url_was).to eq('http://url.com')
end
it 'returns initial value when the property has been re-assigned the same value' do
- service.url = 'http://url.com'
- service.validate
- expect(service.url_was).to eq('http://url.com')
+ integration.url = 'http://url.com'
+ integration.validate
+
+ expect(integration.url_was).to eq('http://url.com')
end
end
end
context 'when data are stored in both properties and data_fields' do
- let(:service) do
- create(:jira_service, :without_properties_callback, active: false, properties: properties).tap do |integration|
+ let(:integration) do
+ create(:jira_integration, :without_properties_callback, active: false, properties: properties).tap do |integration|
create(:jira_tracker_data, properties.merge(integration: integration))
end
end
- it_behaves_like 'data fields'
+ it_behaves_like 'data fields' do
+ let(:service) { integration }
+ end
describe '{arg}_was?' do
it 'returns nil' do
- service.url = 'http://example.com'
- service.validate
- expect(service.url_was).to be_nil
+ integration.url = 'http://example.com'
+ integration.validate
+ expect(integration.url_was).to be_nil
end
end
end
diff --git a/spec/models/deployment_metrics_spec.rb b/spec/models/deployment_metrics_spec.rb
index fadfc1b63ac..a7a58cc9f85 100644
--- a/spec/models/deployment_metrics_spec.rb
+++ b/spec/models/deployment_metrics_spec.rb
@@ -20,7 +20,7 @@ RSpec.describe DeploymentMetrics do
end
context 'with a Prometheus Service' do
- let(:prometheus_service) { instance_double(PrometheusService, can_query?: true, configured?: true) }
+ let(:prometheus_service) { instance_double(::Integrations::Prometheus, can_query?: true, configured?: true) }
before do
allow(deployment.project).to receive(:find_or_initialize_service).with('prometheus').and_return prometheus_service
@@ -30,7 +30,7 @@ RSpec.describe DeploymentMetrics do
end
context 'with a Prometheus Service that cannot query' do
- let(:prometheus_service) { instance_double(PrometheusService, configured?: true, can_query?: false) }
+ let(:prometheus_service) { instance_double(::Integrations::Prometheus, configured?: true, can_query?: false) }
before do
allow(deployment.project).to receive(:find_or_initialize_service).with('prometheus').and_return prometheus_service
@@ -40,7 +40,7 @@ RSpec.describe DeploymentMetrics do
end
context 'with a Prometheus Service that is not configured' do
- let(:prometheus_service) { instance_double(PrometheusService, configured?: false, can_query?: false) }
+ let(:prometheus_service) { instance_double(::Integrations::Prometheus, configured?: false, can_query?: false) }
before do
allow(deployment.project).to receive(:find_or_initialize_service).with('prometheus').and_return prometheus_service
@@ -64,7 +64,7 @@ RSpec.describe DeploymentMetrics do
describe '#metrics' do
let(:deployment) { create(:deployment, :success) }
- let(:prometheus_adapter) { instance_double(PrometheusService, can_query?: true, configured?: true) }
+ let(:prometheus_adapter) { instance_double(::Integrations::Prometheus, can_query?: true, configured?: true) }
let(:deployment_metrics) { described_class.new(deployment.project, deployment) }
subject { deployment_metrics.metrics }
diff --git a/spec/models/group_spec.rb b/spec/models/group_spec.rb
index 8f4bc43c38a..0a08b15a1eb 100644
--- a/spec/models/group_spec.rb
+++ b/spec/models/group_spec.rb
@@ -565,11 +565,11 @@ RSpec.describe Group do
describe '.without_integration' do
let(:another_group) { create(:group) }
- let(:instance_integration) { build(:jira_service, :instance) }
+ let(:instance_integration) { build(:jira_integration, :instance) }
before do
- create(:jira_service, group: group, project: nil)
- create(:slack_service, group: another_group, project: nil)
+ create(:jira_integration, group: group, project: nil)
+ create(:integrations_slack, group: another_group, project: nil)
end
it 'returns groups without integration' do
diff --git a/spec/models/integration_spec.rb b/spec/models/integration_spec.rb
index d4ea3e5d08a..e7661bf3eed 100644
--- a/spec/models/integration_spec.rb
+++ b/spec/models/integration_spec.rb
@@ -68,9 +68,9 @@ RSpec.describe Integration do
describe 'Scopes' do
describe '.by_type' do
- let!(:service1) { create(:jira_service) }
- let!(:service2) { create(:jira_service) }
- let!(:service3) { create(:redmine_service) }
+ let!(:service1) { create(:jira_integration) }
+ let!(:service2) { create(:jira_integration) }
+ let!(:service3) { create(:redmine_integration) }
subject { described_class.by_type(type) }
@@ -88,8 +88,8 @@ RSpec.describe Integration do
end
describe '.for_group' do
- let!(:service1) { create(:jira_service, project_id: nil, group_id: group.id) }
- let!(:service2) { create(:jira_service) }
+ let!(:service1) { create(:jira_integration, project_id: nil, group_id: group.id) }
+ let!(:service2) { create(:jira_integration) }
it 'returns the right group service' do
expect(described_class.for_group(group)).to match_array([service1])
@@ -234,15 +234,17 @@ RSpec.describe Integration do
end
describe '.find_or_initialize_non_project_specific_integration' do
- let!(:service1) { create(:jira_service, project_id: nil, group_id: group.id) }
- let!(:service2) { create(:jira_service) }
+ let!(:integration_1) { create(:jira_integration, project_id: nil, group_id: group.id) }
+ let!(:integration_2) { create(:jira_integration) }
- it 'returns the right service' do
- expect(Integration.find_or_initialize_non_project_specific_integration('jira', group_id: group)).to eq(service1)
+ it 'returns the right integration' do
+ expect(Integration.find_or_initialize_non_project_specific_integration('jira', group_id: group))
+ .to eq(integration_1)
end
- it 'does not create a new service' do
- expect { Integration.find_or_initialize_non_project_specific_integration('redmine', group_id: group) }.not_to change { Integration.count }
+ it 'does not create a new integration' do
+ expect { Integration.find_or_initialize_non_project_specific_integration('redmine', group_id: group) }
+ .not_to change(Integration, :count)
end
end
@@ -280,7 +282,7 @@ RSpec.describe Integration do
context 'with a few existing instances' do
before do
- create(:jira_service, :instance)
+ create(:jira_integration, :instance)
end
it_behaves_like 'service instances'
@@ -326,7 +328,7 @@ RSpec.describe Integration do
context 'with a few existing templates' do
before do
- create(:jira_service, :template)
+ create(:jira_integration, :template)
end
it 'creates the rest of the service templates' do
@@ -353,7 +355,7 @@ RSpec.describe Integration do
end
context 'when integration is an instance-level integration' do
- let(:integration) { create(:jira_service, :instance) }
+ let(:integration) { create(:jira_integration, :instance) }
it 'sets inherit_from_id from integration' do
service = described_class.build_from_integration(integration, project_id: project.id)
@@ -363,7 +365,7 @@ RSpec.describe Integration do
end
context 'when integration is a group-level integration' do
- let(:integration) { create(:jira_service, group: group, project: nil) }
+ let(:integration) { create(:jira_integration, group: group, project: nil) }
it 'sets inherit_from_id from integration' do
service = described_class.build_from_integration(integration, project_id: project.id)
@@ -418,7 +420,7 @@ RSpec.describe Integration do
context 'when data are stored in properties' do
let(:properties) { data_params }
let!(:integration) do
- create(:jira_service, :without_properties_callback, template: true, properties: properties.merge(additional: 'something'))
+ create(:jira_integration, :without_properties_callback, template: true, properties: properties.merge(additional: 'something'))
end
it_behaves_like 'service creation from an integration'
@@ -426,7 +428,7 @@ RSpec.describe Integration do
context 'when data are stored in separated fields' do
let(:integration) do
- create(:jira_service, :template, data_params.merge(properties: {}))
+ create(:jira_integration, :template, data_params.merge(properties: {}))
end
it_behaves_like 'service creation from an integration'
@@ -435,7 +437,7 @@ RSpec.describe Integration do
context 'when data are stored in both properties and separated fields' do
let(:properties) { data_params }
let(:integration) do
- create(:jira_service, :without_properties_callback, active: true, template: true, properties: properties).tap do |integration|
+ create(:jira_integration, :without_properties_callback, active: true, template: true, properties: properties).tap do |integration|
create(:jira_tracker_data, data_params.merge(integration: integration))
end
end
@@ -472,26 +474,26 @@ RSpec.describe Integration do
end
describe '.default_integration' do
- context 'with an instance-level service' do
- let_it_be(:instance_service) { create(:jira_service, :instance) }
+ context 'with an instance-level integration' do
+ let_it_be(:instance_integration) { create(:jira_integration, :instance) }
- it 'returns the instance service' do
- expect(described_class.default_integration('JiraService', project)).to eq(instance_service)
+ it 'returns the instance integration' do
+ expect(described_class.default_integration('JiraService', project)).to eq(instance_integration)
end
- it 'returns nil for nonexistent service type' do
+ it 'returns nil for nonexistent integration type' do
expect(described_class.default_integration('HipchatService', project)).to eq(nil)
end
- context 'with a group service' do
- let_it_be(:group_service) { create(:jira_service, group_id: group.id, project_id: nil) }
+ context 'with a group integration' do
+ let_it_be(:group_integration) { create(:jira_integration, group_id: group.id, project_id: nil) }
- it 'returns the group service for a project' do
- expect(described_class.default_integration('JiraService', project)).to eq(group_service)
+ it 'returns the group integration for a project' do
+ expect(described_class.default_integration('JiraService', project)).to eq(group_integration)
end
- it 'returns the instance service for a group' do
- expect(described_class.default_integration('JiraService', group)).to eq(instance_service)
+ it 'returns the instance integration for a group' do
+ expect(described_class.default_integration('JiraService', group)).to eq(instance_integration)
end
context 'with a subgroup' do
@@ -499,27 +501,27 @@ RSpec.describe Integration do
let!(:project) { create(:project, group: subgroup) }
- it 'returns the closest group service for a project' do
- expect(described_class.default_integration('JiraService', project)).to eq(group_service)
+ it 'returns the closest group integration for a project' do
+ expect(described_class.default_integration('JiraService', project)).to eq(group_integration)
end
- it 'returns the closest group service for a subgroup' do
- expect(described_class.default_integration('JiraService', subgroup)).to eq(group_service)
+ it 'returns the closest group integration for a subgroup' do
+ expect(described_class.default_integration('JiraService', subgroup)).to eq(group_integration)
end
- context 'having a service with custom settings' do
- let!(:subgroup_service) { create(:jira_service, group_id: subgroup.id, project_id: nil) }
+ context 'having an integration with custom settings' do
+ let!(:subgroup_integration) { create(:jira_integration, group_id: subgroup.id, project_id: nil) }
- it 'returns the closest group service for a project' do
- expect(described_class.default_integration('JiraService', project)).to eq(subgroup_service)
+ it 'returns the closest group integration for a project' do
+ expect(described_class.default_integration('JiraService', project)).to eq(subgroup_integration)
end
end
- context 'having a service inheriting settings' do
- let!(:subgroup_service) { create(:jira_service, group_id: subgroup.id, project_id: nil, inherit_from_id: group_service.id) }
+ context 'having an integration inheriting settings' do
+ let!(:subgroup_integration) { create(:jira_integration, group_id: subgroup.id, project_id: nil, inherit_from_id: group_integration.id) }
- it 'returns the closest group service which does not inherit from its parent for a project' do
- expect(described_class.default_integration('JiraService', project)).to eq(group_service)
+ it 'returns the closest group integration which does not inherit from its parent for a project' do
+ expect(described_class.default_integration('JiraService', project)).to eq(group_integration)
end
end
end
@@ -528,7 +530,7 @@ RSpec.describe Integration do
end
describe '.create_from_active_default_integrations' do
- context 'with an active service template' do
+ context 'with an active integration template' do
let_it_be(:template_integration) { create(:prometheus_service, :template, api_url: 'https://prometheus.template.com/') }
it 'creates a service from the template' do
@@ -669,11 +671,8 @@ RSpec.describe Integration do
end
describe '.integration_name_to_model' do
- it 'returns the model for the given service name', :aggregate_failures do
+ it 'returns the model for the given service name' do
expect(described_class.integration_name_to_model('asana')).to eq(Integrations::Asana)
- # TODO We can remove this test when all models have been namespaced:
- # https://gitlab.com/gitlab-org/gitlab/-/merge_requests/60968#note_570994955
- expect(described_class.integration_name_to_model('prometheus')).to eq(PrometheusService)
end
it 'raises an error if service name is invalid' do
diff --git a/spec/models/integrations/jenkins_spec.rb b/spec/models/integrations/jenkins_spec.rb
index 2374dfe4480..792ff3e8763 100644
--- a/spec/models/integrations/jenkins_spec.rb
+++ b/spec/models/integrations/jenkins_spec.rb
@@ -30,8 +30,8 @@ RSpec.describe Integrations::Jenkins do
end
describe 'username validation' do
- before do
- @jenkins_service = described_class.create!(
+ let(:jenkins_integration) do
+ described_class.create!(
active: active,
project: project,
properties: {
@@ -43,7 +43,7 @@ RSpec.describe Integrations::Jenkins do
)
end
- subject { @jenkins_service }
+ subject { jenkins_integration }
context 'when the service is active' do
let(:active) { true }
@@ -84,7 +84,7 @@ RSpec.describe Integrations::Jenkins do
describe '#hook_url' do
let(:username) { nil }
let(:password) { nil }
- let(:jenkins_service) do
+ let(:jenkins_integration) do
described_class.new(
project: project,
properties: {
@@ -96,7 +96,7 @@ RSpec.describe Integrations::Jenkins do
)
end
- subject { jenkins_service.hook_url }
+ subject { jenkins_integration.hook_url }
context 'when the jenkins_url has no relative path' do
let(:jenkins_url) { 'http://jenkins.example.com/' }
@@ -138,10 +138,10 @@ RSpec.describe Integrations::Jenkins do
user = create(:user, username: 'username')
project = create(:project, name: 'project')
push_sample_data = Gitlab::DataBuilder::Push.build_sample(project, user)
- jenkins_service = described_class.create!(jenkins_params)
+ jenkins_integration = described_class.create!(jenkins_params)
stub_request(:post, jenkins_hook_url).with(headers: { 'Authorization' => jenkins_authorization })
- result = jenkins_service.test(push_sample_data)
+ result = jenkins_integration.test(push_sample_data)
expect(result).to eq({ success: true, result: '' })
end
@@ -152,20 +152,20 @@ RSpec.describe Integrations::Jenkins do
let(:namespace) { create(:group, :private) }
let(:project) { create(:project, :private, name: 'project', namespace: namespace) }
let(:push_sample_data) { Gitlab::DataBuilder::Push.build_sample(project, user) }
- let(:jenkins_service) { described_class.create!(jenkins_params) }
+ let(:jenkins_integration) { described_class.create!(jenkins_params) }
before do
stub_request(:post, jenkins_hook_url)
end
it 'invokes the Jenkins API' do
- jenkins_service.execute(push_sample_data)
+ jenkins_integration.execute(push_sample_data)
expect(a_request(:post, jenkins_hook_url)).to have_been_made.once
end
it 'adds default web hook headers to the request' do
- jenkins_service.execute(push_sample_data)
+ jenkins_integration.execute(push_sample_data)
expect(
a_request(:post, jenkins_hook_url)
@@ -174,7 +174,7 @@ RSpec.describe Integrations::Jenkins do
end
it 'request url contains properly serialized username and password' do
- jenkins_service.execute(push_sample_data)
+ jenkins_integration.execute(push_sample_data)
expect(
a_request(:post, 'http://jenkins.example.com/project/my_project')
@@ -187,8 +187,8 @@ RSpec.describe Integrations::Jenkins do
let(:project) { create(:project) }
context 'when a password was previously set' do
- before do
- @jenkins_service = described_class.create!(
+ let(:jenkins_integration) do
+ described_class.create!(
project: project,
properties: {
jenkins_url: 'http://jenkins.example.com/',
@@ -199,42 +199,47 @@ RSpec.describe Integrations::Jenkins do
end
it 'resets password if url changed' do
- @jenkins_service.jenkins_url = 'http://jenkins-edited.example.com/'
- @jenkins_service.save!
- expect(@jenkins_service.password).to be_nil
+ jenkins_integration.jenkins_url = 'http://jenkins-edited.example.com/'
+ jenkins_integration.save!
+
+ expect(jenkins_integration.password).to be_nil
end
it 'resets password if username is blank' do
- @jenkins_service.username = ''
- @jenkins_service.save!
- expect(@jenkins_service.password).to be_nil
+ jenkins_integration.username = ''
+ jenkins_integration.save!
+
+ expect(jenkins_integration.password).to be_nil
end
it 'does not reset password if username changed' do
- @jenkins_service.username = 'some_name'
- @jenkins_service.save!
- expect(@jenkins_service.password).to eq('password')
+ jenkins_integration.username = 'some_name'
+ jenkins_integration.save!
+
+ expect(jenkins_integration.password).to eq('password')
end
it 'does not reset password if new url is set together with password, even if it\'s the same password' do
- @jenkins_service.jenkins_url = 'http://jenkins_edited.example.com/'
- @jenkins_service.password = 'password'
- @jenkins_service.save!
- expect(@jenkins_service.password).to eq('password')
- expect(@jenkins_service.jenkins_url).to eq('http://jenkins_edited.example.com/')
+ jenkins_integration.jenkins_url = 'http://jenkins_edited.example.com/'
+ jenkins_integration.password = 'password'
+ jenkins_integration.save!
+
+ expect(jenkins_integration.password).to eq('password')
+ expect(jenkins_integration.jenkins_url).to eq('http://jenkins_edited.example.com/')
end
it 'resets password if url changed, even if setter called multiple times' do
- @jenkins_service.jenkins_url = 'http://jenkins1.example.com/'
- @jenkins_service.jenkins_url = 'http://jenkins1.example.com/'
- @jenkins_service.save!
- expect(@jenkins_service.password).to be_nil
+ jenkins_integration.jenkins_url = 'http://jenkins1.example.com/'
+ jenkins_integration.jenkins_url = 'http://jenkins1.example.com/'
+ jenkins_integration.save!
+
+ expect(jenkins_integration.password).to be_nil
end
end
context 'when no password was previously set' do
- before do
- @jenkins_service = described_class.create!(
+ let(:jenkins_integration) do
+ described_class.create!(
project: create(:project),
properties: {
jenkins_url: 'http://jenkins.example.com/',
@@ -244,11 +249,12 @@ RSpec.describe Integrations::Jenkins do
end
it 'saves password if new url is set together with password' do
- @jenkins_service.jenkins_url = 'http://jenkins_edited.example.com/'
- @jenkins_service.password = 'password'
- @jenkins_service.save!
- expect(@jenkins_service.password).to eq('password')
- expect(@jenkins_service.jenkins_url).to eq('http://jenkins_edited.example.com/')
+ jenkins_integration.jenkins_url = 'http://jenkins_edited.example.com/'
+ jenkins_integration.password = 'password'
+ jenkins_integration.save!
+
+ expect(jenkins_integration.password).to eq('password')
+ expect(jenkins_integration.jenkins_url).to eq('http://jenkins_edited.example.com/')
end
end
end
diff --git a/spec/models/integrations/jira_spec.rb b/spec/models/integrations/jira_spec.rb
index f6310866773..d63f5261915 100644
--- a/spec/models/integrations/jira_spec.rb
+++ b/spec/models/integrations/jira_spec.rb
@@ -14,7 +14,7 @@ RSpec.describe Integrations::Jira do
let(:password) { 'jira-password' }
let(:transition_id) { 'test27' }
let(:server_info_results) { { 'deploymentType' => 'Cloud' } }
- let(:jira_service) do
+ let(:jira_integration) do
described_class.new(
project: project,
url: url,
@@ -100,7 +100,7 @@ RSpec.describe Integrations::Jira do
end
describe '#fields' do
- let(:service) { create(:jira_service) }
+ let(:service) { create(:jira_integration) }
subject(:fields) { service.fields }
@@ -164,13 +164,13 @@ RSpec.describe Integrations::Jira do
end
context 'when loading serverInfo' do
- let(:jira_service) { subject }
+ let(:jira_integration) { subject }
context 'from a Cloud instance' do
let(:server_info_results) { { 'deploymentType' => 'Cloud' } }
it 'is detected' do
- expect(jira_service.jira_tracker_data.deployment_cloud?).to be_truthy
+ expect(jira_integration.jira_tracker_data.deployment_cloud?).to be_truthy
end
end
@@ -178,7 +178,7 @@ RSpec.describe Integrations::Jira do
let(:server_info_results) { { 'deploymentType' => 'Server' } }
it 'is detected' do
- expect(jira_service.jira_tracker_data.deployment_server?).to be_truthy
+ expect(jira_integration.jira_tracker_data.deployment_server?).to be_truthy
end
end
@@ -189,7 +189,7 @@ RSpec.describe Integrations::Jira do
let(:api_url) { 'http://example-api.atlassian.net' }
it 'deployment_type is set to cloud' do
- expect(jira_service.jira_tracker_data.deployment_cloud?).to be_truthy
+ expect(jira_integration.jira_tracker_data.deployment_cloud?).to be_truthy
end
end
@@ -197,7 +197,7 @@ RSpec.describe Integrations::Jira do
let(:api_url) { 'http://my-jira-api.someserver.com' }
it 'deployment_type is set to server' do
- expect(jira_service.jira_tracker_data.deployment_server?).to be_truthy
+ expect(jira_integration.jira_tracker_data.deployment_server?).to be_truthy
end
end
end
@@ -210,7 +210,7 @@ RSpec.describe Integrations::Jira do
it 'deployment_type is set to cloud' do
expect(Gitlab::AppLogger).to receive(:warn).with(message: "Jira API returned no ServerInfo, setting deployment_type from URL", server_info: server_info_results, url: api_url)
- expect(jira_service.jira_tracker_data.deployment_cloud?).to be_truthy
+ expect(jira_integration.jira_tracker_data.deployment_cloud?).to be_truthy
end
end
@@ -219,7 +219,7 @@ RSpec.describe Integrations::Jira do
it 'deployment_type is set to server' do
expect(Gitlab::AppLogger).to receive(:warn).with(message: "Jira API returned no ServerInfo, setting deployment_type from URL", server_info: server_info_results, url: api_url)
- expect(jira_service.jira_tracker_data.deployment_server?).to be_truthy
+ expect(jira_integration.jira_tracker_data.deployment_server?).to be_truthy
end
end
end
@@ -487,7 +487,7 @@ RSpec.describe Integrations::Jira do
context 'when data are stored in properties' do
let(:properties) { data_params }
let!(:service) do
- create(:jira_service, :without_properties_callback, properties: properties.merge(additional: 'something'))
+ create(:jira_integration, :without_properties_callback, properties: properties.merge(additional: 'something'))
end
it_behaves_like 'handles jira fields'
@@ -495,7 +495,7 @@ RSpec.describe Integrations::Jira do
context 'when data are stored in separated fields' do
let(:service) do
- create(:jira_service, data_params.merge(properties: {}))
+ create(:jira_integration, data_params.merge(properties: {}))
end
it_behaves_like 'handles jira fields'
@@ -504,7 +504,7 @@ RSpec.describe Integrations::Jira do
context 'when data are stored in both properties and separated fields' do
let(:properties) { data_params }
let(:service) do
- create(:jira_service, :without_properties_callback, active: false, properties: properties).tap do |integration|
+ create(:jira_integration, :without_properties_callback, active: false, properties: properties).tap do |integration|
create(:jira_tracker_data, data_params.merge(integration: integration))
end
end
@@ -522,7 +522,7 @@ RSpec.describe Integrations::Jira do
end
it 'calls the Jira API to get the issue' do
- jira_service.find_issue(issue_key)
+ jira_integration.find_issue(issue_key)
expect(WebMock).to have_requested(:get, issue_url)
end
@@ -531,7 +531,7 @@ RSpec.describe Integrations::Jira do
let(:issue_url) { "#{url}/rest/api/2/issue/#{issue_key}?expand=renderedFields,transitions" }
it 'calls the Jira API with the options to get the issue' do
- jira_service.find_issue(issue_key, rendered_fields: true, transitions: true)
+ jira_integration.find_issue(issue_key, rendered_fields: true, transitions: true)
expect(WebMock).to have_requested(:get, issue_url)
end
@@ -558,16 +558,16 @@ RSpec.describe Integrations::Jira do
end
subject(:close_issue) do
- jira_service.close_issue(resource, ExternalIssue.new(issue_key, project))
+ jira_integration.close_issue(resource, ExternalIssue.new(issue_key, project))
end
before do
- jira_service.jira_issue_transition_id = '999'
+ jira_integration.jira_issue_transition_id = '999'
      # These stubs are needed to test Integrations::Jira#close_issue.
      # We close the issue and then make another API request to check that it was closed.
      # The stubbed API response here returns one closed issue and one open issue.
- open_issue = JIRA::Resource::Issue.new(jira_service.client, attrs: issue_fields.deep_stringify_keys)
+ open_issue = JIRA::Resource::Issue.new(jira_integration.client, attrs: issue_fields.deep_stringify_keys)
closed_issue = open_issue.dup
allow(open_issue).to receive(:resolution).and_return(false)
allow(closed_issue).to receive(:resolution).and_return(true)
@@ -585,7 +585,7 @@ RSpec.describe Integrations::Jira do
let(:external_issue) { ExternalIssue.new('JIRA-123', project) }
def close_issue
- jira_service.close_issue(resource, external_issue, current_user)
+ jira_integration.close_issue(resource, external_issue, current_user)
end
it 'calls Jira API' do
@@ -636,7 +636,7 @@ RSpec.describe Integrations::Jira do
context 'when "comment_on_event_enabled" is set to false' do
it 'creates Remote Link reference but does not create comment' do
- allow(jira_service).to receive_messages(comment_on_event_enabled: false)
+ allow(jira_integration).to receive_messages(comment_on_event_enabled: false)
close_issue
expect(WebMock).not_to have_requested(:post, comment_url)
@@ -709,12 +709,12 @@ RSpec.describe Integrations::Jira do
end
it 'logs exception when transition id is not valid' do
- allow(jira_service).to receive(:log_error)
+ allow(jira_integration).to receive(:log_error)
WebMock.stub_request(:post, transitions_url).with(basic_auth: %w(jira-username jira-password)).and_raise("Bad Request")
close_issue
- expect(jira_service).to have_received(:log_error).with(
+ expect(jira_integration).to have_received(:log_error).with(
"Issue transition failed",
error: hash_including(
exception_class: 'StandardError',
@@ -734,7 +734,7 @@ RSpec.describe Integrations::Jira do
context 'when custom transition IDs are blank' do
before do
- jira_service.jira_issue_transition_id = ''
+ jira_integration.jira_issue_transition_id = ''
end
it 'does not transition the issue' do
@@ -755,7 +755,7 @@ RSpec.describe Integrations::Jira do
end
before do
- jira_service.jira_issue_transition_automatic = true
+ jira_integration.jira_issue_transition_automatic = true
close_issue
end
@@ -789,7 +789,7 @@ RSpec.describe Integrations::Jira do
context 'when using multiple transition ids' do
before do
- allow(jira_service).to receive_messages(jira_issue_transition_id: '1,2,3')
+ allow(jira_integration).to receive_messages(jira_issue_transition_id: '1,2,3')
end
it 'calls the api with transition ids separated by comma' do
@@ -805,7 +805,7 @@ RSpec.describe Integrations::Jira do
end
it 'calls the api with transition ids separated by semicolon' do
- allow(jira_service).to receive_messages(jira_issue_transition_id: '1;2;3')
+ allow(jira_integration).to receive_messages(jira_issue_transition_id: '1;2;3')
close_issue
@@ -864,7 +864,7 @@ RSpec.describe Integrations::Jira do
let(:jira_issue) { ExternalIssue.new('JIRA-123', project) }
- subject { jira_service.create_cross_reference_note(jira_issue, resource, user) }
+ subject { jira_integration.create_cross_reference_note(jira_issue, resource, user) }
shared_examples 'creates a comment on Jira' do
let(:issue_url) { "#{url}/rest/api/2/issue/JIRA-123" }
@@ -936,7 +936,7 @@ RSpec.describe Integrations::Jira do
let(:server_info_results) { { 'url' => 'http://url', 'deploymentType' => 'Cloud' } }
def server_info
- jira_service.test(nil)
+ jira_integration.test(nil)
end
context 'when the test succeeds' do
@@ -946,7 +946,7 @@ RSpec.describe Integrations::Jira do
end
it 'gets Jira project with API URL if set' do
- jira_service.update!(api_url: 'http://jira.api.com')
+ jira_integration.update!(api_url: 'http://jira.api.com')
expect(server_info).to eq(success: true, result: server_info_results)
expect(WebMock).to have_requested(:get, /jira.api.com/)
@@ -961,13 +961,13 @@ RSpec.describe Integrations::Jira do
WebMock.stub_request(:get, test_url).with(basic_auth: [username, password])
.to_raise(JIRA::HTTPError.new(double(message: error_message)))
- expect(jira_service).to receive(:log_error).with(
+ expect(jira_integration).to receive(:log_error).with(
'Error sending message',
client_url: 'http://jira.example.com',
error: error_message
)
- expect(jira_service.test(nil)).to eq(success: false, result: error_message)
+ expect(jira_integration.test(nil)).to eq(success: false, result: error_message)
end
end
end
@@ -983,17 +983,17 @@ RSpec.describe Integrations::Jira do
}
allow(Gitlab.config).to receive(:issues_tracker).and_return(settings)
- service = project.create_jira_service(active: true)
+ integration = project.create_jira_integration(active: true)
- expect(service.url).to eq('http://jira.sample/projects/project_a')
- expect(service.api_url).to eq('http://jira.sample/api')
+ expect(integration.url).to eq('http://jira.sample/projects/project_a')
+ expect(integration.api_url).to eq('http://jira.sample/api')
end
end
it 'removes trailing slashes from url' do
- service = described_class.new(url: 'http://jira.test.com/path/')
+ integration = described_class.new(url: 'http://jira.test.com/path/')
- expect(service.url).to eq('http://jira.test.com/path')
+ expect(integration.url).to eq('http://jira.test.com/path')
end
end
@@ -1063,19 +1063,65 @@ RSpec.describe Integrations::Jira do
describe '#issue_transition_enabled?' do
it 'returns true if automatic transitions are enabled' do
- jira_service.jira_issue_transition_automatic = true
+ jira_integration.jira_issue_transition_automatic = true
- expect(jira_service.issue_transition_enabled?).to be(true)
+ expect(jira_integration.issue_transition_enabled?).to be(true)
end
it 'returns true if custom transitions are set' do
- jira_service.jira_issue_transition_id = '1, 2, 3'
+ jira_integration.jira_issue_transition_id = '1, 2, 3'
- expect(jira_service.issue_transition_enabled?).to be(true)
+ expect(jira_integration.issue_transition_enabled?).to be(true)
end
it 'returns false if automatic and custom transitions are disabled' do
- expect(jira_service.issue_transition_enabled?).to be(false)
+ expect(jira_integration.issue_transition_enabled?).to be(false)
+ end
+ end
+
+ describe '#valid_connection? and #configured?' do
+ before do
+ allow(jira_integration).to receive(:test).with(nil).and_return(test_result)
+ end
+
+ context 'when the test fails' do
+ let(:test_result) { { success: false } }
+
+ it 'is falsey' do
+ expect(jira_integration).not_to be_valid_connection
+ end
+
+ it 'implies that configured? is also falsey' do
+ expect(jira_integration).not_to be_configured
+ end
+ end
+
+ context 'when the test succeeds' do
+ let(:test_result) { { success: true } }
+
+ it 'is truthy' do
+ expect(jira_integration).to be_valid_connection
+ end
+
+ context 'when the integration is active' do
+ before do
+ jira_integration.active = true
+ end
+
+ it 'implies that configured? is also truthy' do
+ expect(jira_integration).to be_configured
+ end
+ end
+
+ context 'when the integration is inactive' do
+ before do
+ jira_integration.active = false
+ end
+
+ it 'implies that configured? is falsey' do
+ expect(jira_integration).not_to be_configured
+ end
+ end
end
end
end
diff --git a/spec/models/project_services/prometheus_service_spec.rb b/spec/models/integrations/prometheus_spec.rb
similarity index 99%
rename from spec/models/project_services/prometheus_service_spec.rb
rename to spec/models/integrations/prometheus_spec.rb
index a2025388fab..e3e17738cfb 100644
--- a/spec/models/project_services/prometheus_service_spec.rb
+++ b/spec/models/integrations/prometheus_spec.rb
@@ -4,7 +4,7 @@ require 'spec_helper'
require 'googleauth'
-RSpec.describe PrometheusService, :use_clean_rails_memory_store_caching, :snowplow do
+RSpec.describe Integrations::Prometheus, :use_clean_rails_memory_store_caching, :snowplow do
include PrometheusHelpers
include ReactiveCachingHelpers
diff --git a/spec/models/integrations/slack_slash_commands_spec.rb b/spec/models/integrations/slack_slash_commands_spec.rb
index a9d3c820a3c..b9d26312d64 100644
--- a/spec/models/integrations/slack_slash_commands_spec.rb
+++ b/spec/models/integrations/slack_slash_commands_spec.rb
@@ -19,7 +19,7 @@ RSpec.describe Integrations::SlackSlashCommands do
end
let(:service) do
- project.create_slack_slash_commands_service(
+ project.create_slack_slash_commands_integration(
properties: { token: 'token' },
active: true
)
diff --git a/spec/models/integrations/slack_spec.rb b/spec/models/integrations/slack_spec.rb
index e598c528967..4661d9c8291 100644
--- a/spec/models/integrations/slack_spec.rb
+++ b/spec/models/integrations/slack_spec.rb
@@ -10,7 +10,7 @@ RSpec.describe Integrations::Slack do
stub_request(:post, "https://slack.service.url/")
end
- let_it_be(:slack_service) { create(:slack_service, branches_to_be_notified: 'all') }
+ let_it_be(:slack_integration) { create(:integrations_slack, branches_to_be_notified: 'all') }
it 'uses only known events', :aggregate_failures do
described_class::SUPPORTED_EVENTS_FOR_USAGE_LOG.each do |action|
@@ -26,7 +26,7 @@ RSpec.describe Integrations::Slack do
it 'increases the usage data counter' do
expect(Gitlab::UsageDataCounters::HLLRedisCounter).to receive(:track_event).with(event_name, values: user.id).and_call_original
- slack_service.execute(data)
+ slack_integration.execute(data)
end
end
@@ -38,7 +38,7 @@ RSpec.describe Integrations::Slack do
it 'does not increase the usage data counter' do
expect(Gitlab::UsageDataCounters::HLLRedisCounter).not_to receive(:track_event).with('i_ecosystem_slack_service_pipeline_notification', values: user.id)
- slack_service.execute(data)
+ slack_integration.execute(data)
end
end
@@ -126,7 +126,7 @@ RSpec.describe Integrations::Slack do
it 'does not increase the usage data counter' do
expect(Gitlab::UsageDataCounters::HLLRedisCounter).not_to receive(:track_event)
- slack_service.execute(data)
+ slack_integration.execute(data)
end
end
end
diff --git a/spec/models/integrations/teamcity_spec.rb b/spec/models/integrations/teamcity_spec.rb
index b88a4722ad4..5ed65bf7f46 100644
--- a/spec/models/integrations/teamcity_spec.rb
+++ b/spec/models/integrations/teamcity_spec.rb
@@ -10,7 +10,7 @@ RSpec.describe Integrations::Teamcity, :use_clean_rails_memory_store_caching do
let(:teamcity_full_url) { 'http://gitlab.com/teamcity/httpAuth/app/rest/builds/branch:unspecified:any,revision:123' }
let(:project) { create(:project) }
- subject(:service) do
+ subject(:integration) do
described_class.create!(
project: project,
properties: {
@@ -28,14 +28,14 @@ RSpec.describe Integrations::Teamcity, :use_clean_rails_memory_store_caching do
end
describe 'Validations' do
- context 'when service is active' do
+ context 'when integration is active' do
before do
subject.active = true
end
it { is_expected.to validate_presence_of(:build_type) }
it { is_expected.to validate_presence_of(:teamcity_url) }
- it_behaves_like 'issue tracker service URL attribute', :teamcity_url
+ it_behaves_like 'issue tracker integration URL attribute', :teamcity_url
describe '#username' do
it 'does not validate the presence of username if password is nil' do
@@ -66,7 +66,7 @@ RSpec.describe Integrations::Teamcity, :use_clean_rails_memory_store_caching do
end
end
- context 'when service is inactive' do
+ context 'when integration is inactive' do
before do
subject.active = false
end
@@ -79,71 +79,66 @@ RSpec.describe Integrations::Teamcity, :use_clean_rails_memory_store_caching do
end
describe 'Callbacks' do
+ let(:teamcity_integration) { integration }
+
describe 'before_update :reset_password' do
context 'when a password was previously set' do
it 'resets password if url changed' do
- teamcity_service = service
+ teamcity_integration.teamcity_url = 'http://gitlab1.com'
+ teamcity_integration.save!
- teamcity_service.teamcity_url = 'http://gitlab1.com'
- teamcity_service.save!
-
- expect(teamcity_service.password).to be_nil
+ expect(teamcity_integration.password).to be_nil
end
it 'does not reset password if username changed' do
- teamcity_service = service
+ teamcity_integration.username = 'some_name'
+ teamcity_integration.save!
- teamcity_service.username = 'some_name'
- teamcity_service.save!
-
- expect(teamcity_service.password).to eq('password')
+ expect(teamcity_integration.password).to eq('password')
end
it "does not reset password if new url is set together with password, even if it's the same password" do
- teamcity_service = service
+ teamcity_integration.teamcity_url = 'http://gitlab_edited.com'
+ teamcity_integration.password = 'password'
+ teamcity_integration.save!
- teamcity_service.teamcity_url = 'http://gitlab_edited.com'
- teamcity_service.password = 'password'
- teamcity_service.save!
-
- expect(teamcity_service.password).to eq('password')
- expect(teamcity_service.teamcity_url).to eq('http://gitlab_edited.com')
+ expect(teamcity_integration.password).to eq('password')
+ expect(teamcity_integration.teamcity_url).to eq('http://gitlab_edited.com')
end
end
it 'saves password if new url is set together with password when no password was previously set' do
- teamcity_service = service
- teamcity_service.password = nil
+ teamcity_integration.password = nil
- teamcity_service.teamcity_url = 'http://gitlab_edited.com'
- teamcity_service.password = 'password'
- teamcity_service.save!
+ teamcity_integration.teamcity_url = 'http://gitlab_edited.com'
+ teamcity_integration.password = 'password'
+ teamcity_integration.save!
- expect(teamcity_service.password).to eq('password')
- expect(teamcity_service.teamcity_url).to eq('http://gitlab_edited.com')
+ expect(teamcity_integration.password).to eq('password')
+ expect(teamcity_integration.teamcity_url).to eq('http://gitlab_edited.com')
end
end
end
describe '#build_page' do
it 'returns the contents of the reactive cache' do
- stub_reactive_cache(service, { build_page: 'foo' }, 'sha', 'ref')
+ stub_reactive_cache(integration, { build_page: 'foo' }, 'sha', 'ref')
- expect(service.build_page('sha', 'ref')).to eq('foo')
+ expect(integration.build_page('sha', 'ref')).to eq('foo')
end
end
describe '#commit_status' do
it 'returns the contents of the reactive cache' do
- stub_reactive_cache(service, { commit_status: 'foo' }, 'sha', 'ref')
+ stub_reactive_cache(integration, { commit_status: 'foo' }, 'sha', 'ref')
- expect(service.commit_status('sha', 'ref')).to eq('foo')
+ expect(integration.commit_status('sha', 'ref')).to eq('foo')
end
end
describe '#calculate_reactive_cache' do
context 'build_page' do
- subject { service.calculate_reactive_cache('123', 'unused')[:build_page] }
+ subject { integration.calculate_reactive_cache('123', 'unused')[:build_page] }
it 'returns a specific URL when status is 500' do
stub_request(status: 500)
@@ -179,7 +174,7 @@ RSpec.describe Integrations::Teamcity, :use_clean_rails_memory_store_caching do
end
context 'commit_status' do
- subject { service.calculate_reactive_cache('123', 'unused')[:commit_status] }
+ subject { integration.calculate_reactive_cache('123', 'unused')[:commit_status] }
it 'sets commit status to :error when status is 500' do
stub_request(status: 500)
@@ -243,25 +238,25 @@ RSpec.describe Integrations::Teamcity, :use_clean_rails_memory_store_caching do
it 'handles push request correctly' do
stub_post_to_build_queue(branch: 'dev-123_branch')
- expect(service.execute(data)).to include('Ok')
+ expect(integration.execute(data)).to include('Ok')
end
it 'returns nil when ref is blank' do
data[:after] = Gitlab::Git::BLANK_SHA
- expect(service.execute(data)).to be_nil
+ expect(integration.execute(data)).to be_nil
end
it 'returns nil when there is no content' do
data[:total_commits_count] = 0
- expect(service.execute(data)).to be_nil
+ expect(integration.execute(data)).to be_nil
end
it 'returns nil when a merge request is opened for the same ref' do
create(:merge_request, source_project: project, source_branch: 'dev-123_branch')
- expect(service.execute(data)).to be_nil
+ expect(integration.execute(data)).to be_nil
end
end
@@ -283,26 +278,26 @@ RSpec.describe Integrations::Teamcity, :use_clean_rails_memory_store_caching do
it 'handles merge request correctly' do
stub_post_to_build_queue(branch: 'dev-123_branch')
- expect(service.execute(data)).to include('Ok')
+ expect(integration.execute(data)).to include('Ok')
end
it 'returns nil when merge request is not opened' do
data[:object_attributes][:state] = 'closed'
- expect(service.execute(data)).to be_nil
+ expect(integration.execute(data)).to be_nil
end
it 'returns nil unless merge request is marked as unchecked' do
data[:object_attributes][:merge_status] = 'can_be_merged'
- expect(service.execute(data)).to be_nil
+ expect(integration.execute(data)).to be_nil
end
end
it 'returns nil when event is not supported' do
data = { object_kind: 'foo' }
- expect(service.execute(data)).to be_nil
+ expect(integration.execute(data)).to be_nil
end
end
diff --git a/spec/models/merge_request_spec.rb b/spec/models/merge_request_spec.rb
index 73b1cb13f19..11637de6c37 100644
--- a/spec/models/merge_request_spec.rb
+++ b/spec/models/merge_request_spec.rb
@@ -779,7 +779,7 @@ RSpec.describe MergeRequest, factory_default: :keep do
context 'when both internal and external issue trackers are enabled' do
before do
- create(:jira_service, project: subject.project)
+ create(:jira_integration, project: subject.project)
subject.project.reload
end
@@ -1310,7 +1310,7 @@ RSpec.describe MergeRequest, factory_default: :keep do
subject.project.add_developer(subject.author)
commit = double(:commit, safe_message: 'Fixes TEST-3')
- create(:jira_service, project: subject.project)
+ create(:jira_integration, project: subject.project)
subject.project.reload
allow(subject).to receive(:commits).and_return([commit])
diff --git a/spec/models/project_spec.rb b/spec/models/project_spec.rb
index 84fcb24e4c5..287f0a1b540 100644
--- a/spec/models/project_spec.rb
+++ b/spec/models/project_spec.rb
@@ -35,7 +35,7 @@ RSpec.describe Project, factory_default: :keep do
it { is_expected.to have_many(:hooks) }
it { is_expected.to have_many(:protected_branches) }
it { is_expected.to have_many(:exported_protected_branches) }
- it { is_expected.to have_one(:slack_service) }
+ it { is_expected.to have_one(:slack_integration) }
it { is_expected.to have_one(:microsoft_teams_integration) }
it { is_expected.to have_one(:mattermost_integration) }
it { is_expected.to have_one(:hangouts_chat_integration) }
@@ -55,13 +55,13 @@ RSpec.describe Project, factory_default: :keep do
it { is_expected.to have_one(:pivotaltracker_integration) }
it { is_expected.to have_one(:flowdock_integration) }
it { is_expected.to have_one(:assembla_integration) }
- it { is_expected.to have_one(:slack_slash_commands_service) }
+ it { is_expected.to have_one(:slack_slash_commands_integration) }
it { is_expected.to have_one(:mattermost_slash_commands_integration) }
it { is_expected.to have_one(:buildkite_integration) }
it { is_expected.to have_one(:bamboo_integration) }
- it { is_expected.to have_one(:teamcity_service) }
- it { is_expected.to have_one(:jira_service) }
- it { is_expected.to have_one(:redmine_service) }
+ it { is_expected.to have_one(:teamcity_integration) }
+ it { is_expected.to have_one(:jira_integration) }
+ it { is_expected.to have_one(:redmine_integration) }
it { is_expected.to have_one(:youtrack_service) }
it { is_expected.to have_one(:custom_issue_tracker_integration) }
it { is_expected.to have_one(:bugzilla_integration) }
@@ -1446,13 +1446,13 @@ RSpec.describe Project, factory_default: :keep do
end
end
- describe '.with_active_jira_services' do
- it 'returns the correct project' do
- active_jira_service = create(:jira_service)
+ describe '.with_active_jira_integrations' do
+ it 'returns projects with active Jira integrations' do
+ active_jira_integration = create(:jira_integration)
active_service = create(:service, active: true)
- expect(described_class.with_active_jira_services).to include(active_jira_service.project)
- expect(described_class.with_active_jira_services).not_to include(active_service.project)
+ expect(described_class.with_active_jira_integrations).to include(active_jira_integration.project)
+ expect(described_class.with_active_jira_integrations).not_to include(active_service.project)
end
end
@@ -5369,26 +5369,26 @@ RSpec.describe Project, factory_default: :keep do
end
describe '#execute_services' do
- let(:service) { create(:slack_service, push_events: true, merge_requests_events: false, active: true) }
+ let(:integration) { create(:integrations_slack, push_events: true, merge_requests_events: false, active: true) }
- it 'executes services with the specified scope' do
+ it 'executes integrations with the specified scope' do
data = 'any data'
expect_next_found_instance_of(Integrations::Slack) do |instance|
expect(instance).to receive(:async_execute).with(data).once
end
- service.project.execute_services(data, :push_hooks)
+ integration.project.execute_services(data, :push_hooks)
end
- it 'does not execute services that don\'t match the specified scope' do
+ it 'does not execute integrations that don\'t match the specified scope' do
expect(Integrations::Slack).not_to receive(:allocate).and_wrap_original do |method|
method.call.tap do |instance|
expect(instance).not_to receive(:async_execute)
end
end
- service.project.execute_services(anything, :merge_request_hooks)
+ integration.project.execute_services(anything, :merge_request_hooks)
end
end
@@ -5942,7 +5942,7 @@ RSpec.describe Project, factory_default: :keep do
context 'without an exisiting integration, nor instance-level or template' do
it 'builds the service if instance or template does not exists' do
- expect(subject.find_or_initialize_service('prometheus')).to be_a(PrometheusService)
+ expect(subject.find_or_initialize_service('prometheus')).to be_a(::Integrations::Prometheus)
expect(subject.find_or_initialize_service('prometheus').api_url).to be_nil
end
end
diff --git a/spec/policies/project_policy_spec.rb b/spec/policies/project_policy_spec.rb
index d0fe0cca8a1..27df461af1c 100644
--- a/spec/policies/project_policy_spec.rb
+++ b/spec/policies/project_policy_spec.rb
@@ -70,7 +70,7 @@ RSpec.describe ProjectPolicy do
context 'when external tracker configured' do
it 'does not include the issues permissions' do
- create(:jira_service, project: project)
+ create(:jira_integration, project: project)
expect_disallowed :read_issue, :read_issue_iid, :create_issue, :update_issue, :admin_issue, :create_incident
end
diff --git a/spec/requests/api/graphql/mutations/alert_management/prometheus_integration/create_spec.rb b/spec/requests/api/graphql/mutations/alert_management/prometheus_integration/create_spec.rb
index 0ef61ae0d5b..dd7806bdbe0 100644
--- a/spec/requests/api/graphql/mutations/alert_management/prometheus_integration/create_spec.rb
+++ b/spec/requests/api/graphql/mutations/alert_management/prometheus_integration/create_spec.rb
@@ -42,7 +42,7 @@ RSpec.describe 'Creating a new Prometheus Integration' do
it 'creates a new integration' do
post_graphql_mutation(mutation, current_user: current_user)
- new_integration = ::PrometheusService.last!
+ new_integration = ::Integrations::Prometheus.last!
integration_response = mutation_response['integration']
expect(response).to have_gitlab_http_status(:success)
diff --git a/spec/requests/api/graphql/mutations/jira_import/start_spec.rb b/spec/requests/api/graphql/mutations/jira_import/start_spec.rb
index e7124512ef1..75668c9e6ae 100644
--- a/spec/requests/api/graphql/mutations/jira_import/start_spec.rb
+++ b/spec/requests/api/graphql/mutations/jira_import/start_spec.rb
@@ -80,17 +80,17 @@ RSpec.describe 'Starting a Jira Import' do
end
end
- context 'when project has no Jira service' do
+ context 'when project has no Jira integration' do
it_behaves_like 'a mutation that returns errors in the response', errors: ['Jira integration not configured.']
end
- context 'when when project has Jira service' do
- let!(:service) { create(:jira_service, project: project) }
+ context 'when project has Jira integration' do
+ let!(:service) { create(:jira_integration, project: project) }
before do
project.reload
- stub_jira_service_test
+ stub_jira_integration_test
end
context 'when issues feature are disabled' do
diff --git a/spec/requests/api/graphql/project/base_service_spec.rb b/spec/requests/api/graphql/project/base_service_spec.rb
index af462c4a639..5dc0f55db88 100644
--- a/spec/requests/api/graphql/project/base_service_spec.rb
+++ b/spec/requests/api/graphql/project/base_service_spec.rb
@@ -7,9 +7,9 @@ RSpec.describe 'query Jira service' do
let_it_be(:current_user) { create(:user) }
let_it_be(:project) { create(:project) }
- let_it_be(:jira_service) { create(:jira_service, project: project) }
+ let_it_be(:jira_integration) { create(:jira_integration, project: project) }
let_it_be(:bugzilla_integration) { create(:bugzilla_integration, project: project) }
- let_it_be(:redmine_service) { create(:redmine_service, project: project) }
+ let_it_be(:redmine_integration) { create(:redmine_integration, project: project) }
let(:query) do
%(
diff --git a/spec/requests/api/graphql/project/jira_service_spec.rb b/spec/requests/api/graphql/project/jira_service_spec.rb
index 905a669bf0d..64e9e04ae44 100644
--- a/spec/requests/api/graphql/project/jira_service_spec.rb
+++ b/spec/requests/api/graphql/project/jira_service_spec.rb
@@ -7,7 +7,7 @@ RSpec.describe 'query Jira service' do
let_it_be(:current_user) { create(:user) }
let_it_be(:project) { create(:project) }
- let_it_be(:jira_service) { create(:jira_service, project: project) }
+ let_it_be(:jira_integration) { create(:jira_integration, project: project) }
let(:query) do
%(
diff --git a/spec/requests/api/projects_spec.rb b/spec/requests/api/projects_spec.rb
index e7e26c34a83..ce7f56fe81d 100644
--- a/spec/requests/api/projects_spec.rb
+++ b/spec/requests/api/projects_spec.rb
@@ -386,7 +386,7 @@ RSpec.describe API::Projects do
end
context 'when external issue tracker is enabled' do
- let!(:jira_service) { create(:jira_service, project: project) }
+ let!(:jira_integration) { create(:jira_integration, project: project) }
it 'includes open_issues_count' do
get api('/projects', user)
diff --git a/spec/requests/api/services_spec.rb b/spec/requests/api/services_spec.rb
index 740a583ab74..9e9c1e02529 100644
--- a/spec/requests/api/services_spec.rb
+++ b/spec/requests/api/services_spec.rb
@@ -121,9 +121,9 @@ RSpec.describe API::Services do
end
def deactive_service!
- return initialized_service.update!(active: false) unless initialized_service.is_a?(PrometheusService)
+ return initialized_service.update!(active: false) unless initialized_service.is_a?(::Integrations::Prometheus)
- # PrometheusService sets `#active` itself within a `before_save`:
+ # Integrations::Prometheus sets `#active` itself within a `before_save`:
initialized_service.manual_configuration = false
initialized_service.save!
end
@@ -239,7 +239,7 @@ RSpec.describe API::Services do
let(:service_name) { 'slack_slash_commands' }
before do
- project.create_slack_slash_commands_service(
+ project.create_slack_slash_commands_integration(
active: true,
properties: { token: 'token' }
)
diff --git a/spec/serializers/merge_request_diff_entity_spec.rb b/spec/serializers/merge_request_diff_entity_spec.rb
index 9bf95e68874..fae0cd7a0f9 100644
--- a/spec/serializers/merge_request_diff_entity_spec.rb
+++ b/spec/serializers/merge_request_diff_entity_spec.rb
@@ -29,7 +29,7 @@ RSpec.describe MergeRequestDiffEntity do
expect(subject).to include(
:version_index, :created_at, :commits_count,
:latest, :short_commit_sha, :version_path,
- :compare_path
+ :compare_path, :state
)
end
end
diff --git a/spec/serializers/service_event_entity_spec.rb b/spec/serializers/service_event_entity_spec.rb
index 91254c7dd27..f610c8f1488 100644
--- a/spec/serializers/service_event_entity_spec.rb
+++ b/spec/serializers/service_event_entity_spec.rb
@@ -12,7 +12,7 @@ RSpec.describe ServiceEventEntity do
end
describe '#as_json' do
- context 'service without fields' do
+ context 'integration without fields' do
let(:integration) { create(:emails_on_push_integration, push_events: true) }
let(:event) { 'push' }
@@ -24,8 +24,8 @@ RSpec.describe ServiceEventEntity do
end
end
- context 'service with fields' do
- let(:integration) { create(:slack_service, note_events: false, note_channel: 'note-channel') }
+ context 'integration with fields' do
+ let(:integration) { create(:integrations_slack, note_events: false, note_channel: 'note-channel') }
let(:event) { 'note' }
it 'exposes correct attributes' do
diff --git a/spec/serializers/service_field_entity_spec.rb b/spec/serializers/service_field_entity_spec.rb
index 20ca98416f8..6e9ebfb66d9 100644
--- a/spec/serializers/service_field_entity_spec.rb
+++ b/spec/serializers/service_field_entity_spec.rb
@@ -5,18 +5,18 @@ require 'spec_helper'
RSpec.describe ServiceFieldEntity do
let(:request) { double('request') }
- subject { described_class.new(field, request: request, service: service).as_json }
+ subject { described_class.new(field, request: request, service: integration).as_json }
before do
- allow(request).to receive(:service).and_return(service)
+ allow(request).to receive(:service).and_return(integration)
end
describe '#as_json' do
context 'Jira Service' do
- let(:service) { create(:jira_service) }
+ let(:integration) { create(:jira_integration) }
context 'field with type text' do
- let(:field) { service.global_fields.find { |field| field[:name] == 'username' } }
+ let(:field) { integration_field('username') }
it 'exposes correct attributes' do
expected_hash = {
@@ -35,7 +35,7 @@ RSpec.describe ServiceFieldEntity do
end
context 'field with type password' do
- let(:field) { service.global_fields.find { |field| field[:name] == 'password' } }
+ let(:field) { integration_field('password') }
it 'exposes correct attributes but hides password' do
expected_hash = {
@@ -56,10 +56,9 @@ RSpec.describe ServiceFieldEntity do
context 'EmailsOnPush Service' do
let(:integration) { create(:emails_on_push_integration, send_from_committer_email: '1') }
- let(:service) { integration } # TODO: remove when https://gitlab.com/gitlab-org/gitlab/-/issues/330300 is complete
context 'field with type checkbox' do
- let(:field) { integration.global_fields.find { |field| field[:name] == 'send_from_committer_email' } }
+ let(:field) { integration_field('send_from_committer_email') }
it 'exposes correct attributes and casts value to Boolean' do
expected_hash = {
@@ -78,7 +77,7 @@ RSpec.describe ServiceFieldEntity do
end
context 'field with type select' do
- let(:field) { integration.global_fields.find { |field| field[:name] == 'branches_to_be_notified' } }
+ let(:field) { integration_field('branches_to_be_notified') }
it 'exposes correct attributes' do
expected_hash = {
@@ -97,4 +96,8 @@ RSpec.describe ServiceFieldEntity do
end
end
end
+
+ def integration_field(name)
+ integration.global_fields.find { |f| f[:name] == name }
+ end
end
diff --git a/spec/services/admin/propagate_integration_service_spec.rb b/spec/services/admin/propagate_integration_service_spec.rb
index 13320528e4f..151658fe429 100644
--- a/spec/services/admin/propagate_integration_service_spec.rb
+++ b/spec/services/admin/propagate_integration_service_spec.rb
@@ -7,20 +7,20 @@ RSpec.describe Admin::PropagateIntegrationService do
include JiraServiceHelper
before do
- stub_jira_service_test
+ stub_jira_integration_test
end
let(:group) { create(:group) }
let_it_be(:project) { create(:project) }
- let_it_be(:instance_integration) { create(:jira_service, :instance) }
- let_it_be(:not_inherited_integration) { create(:jira_service, project: project) }
+ let_it_be(:instance_integration) { create(:jira_integration, :instance) }
+ let_it_be(:not_inherited_integration) { create(:jira_integration, project: project) }
let_it_be(:inherited_integration) do
- create(:jira_service, project: create(:project), inherit_from_id: instance_integration.id)
+ create(:jira_integration, project: create(:project), inherit_from_id: instance_integration.id)
end
let_it_be(:different_type_inherited_integration) do
- create(:redmine_service, project: project, inherit_from_id: instance_integration.id)
+ create(:redmine_integration, project: project, inherit_from_id: instance_integration.id)
end
context 'with inherited integration' do
@@ -55,7 +55,7 @@ RSpec.describe Admin::PropagateIntegrationService do
end
context 'for a group-level integration' do
- let(:group_integration) { create(:jira_service, group: group, project: nil) }
+ let(:group_integration) { create(:jira_integration, group: group, project: nil) }
context 'with a project without integration' do
let(:another_project) { create(:project, group: group) }
@@ -81,7 +81,7 @@ RSpec.describe Admin::PropagateIntegrationService do
context 'with a subgroup with integration' do
let(:subgroup) { create(:group, parent: group) }
- let(:subgroup_integration) { create(:jira_service, group: subgroup, project: nil, inherit_from_id: group_integration.id) }
+ let(:subgroup_integration) { create(:jira_integration, group: subgroup, project: nil, inherit_from_id: group_integration.id) }
it 'calls to PropagateIntegrationInheritDescendantWorker' do
expect(PropagateIntegrationInheritDescendantWorker).to receive(:perform_async)
diff --git a/spec/services/bulk_create_integration_service_spec.rb b/spec/services/bulk_create_integration_service_spec.rb
index 8369eb48088..ebfd988f26e 100644
--- a/spec/services/bulk_create_integration_service_spec.rb
+++ b/spec/services/bulk_create_integration_service_spec.rb
@@ -6,13 +6,13 @@ RSpec.describe BulkCreateIntegrationService do
include JiraServiceHelper
before_all do
- stub_jira_service_test
+ stub_jira_integration_test
end
let_it_be(:excluded_group) { create(:group) }
let_it_be(:excluded_project) { create(:project, group: excluded_group) }
- let(:instance_integration) { create(:jira_service, :instance) }
- let(:template_integration) { create(:jira_service, :template) }
+ let(:instance_integration) { create(:jira_integration, :instance) }
+ let(:template_integration) { create(:jira_integration, :template) }
let(:excluded_attributes) { %w[id project_id group_id inherit_from_id instance template created_at updated_at] }
shared_examples 'creates integration from batch ids' do
@@ -49,7 +49,7 @@ RSpec.describe BulkCreateIntegrationService do
context 'with a project association' do
let!(:project) { create(:project) }
- let(:created_integration) { project.jira_service }
+ let(:created_integration) { project.jira_integration }
let(:batch) { Project.where(id: project.id) }
let(:association) { 'project' }
@@ -73,8 +73,8 @@ RSpec.describe BulkCreateIntegrationService do
context 'with a project association' do
let!(:project) { create(:project, group: group) }
- let(:integration) { create(:jira_service, group: group, project: nil) }
- let(:created_integration) { project.jira_service }
+ let(:integration) { create(:jira_integration, group: group, project: nil) }
+ let(:created_integration) { project.jira_integration }
let(:batch) { Project.where(id: Project.minimum(:id)..Project.maximum(:id)).without_integration(integration).in_namespace(integration.group.self_and_descendants) }
let(:association) { 'project' }
let(:inherit_from_id) { integration.id }
@@ -85,7 +85,7 @@ RSpec.describe BulkCreateIntegrationService do
context 'with a group association' do
let!(:subgroup) { create(:group, parent: group) }
- let(:integration) { create(:jira_service, group: group, project: nil, inherit_from_id: instance_integration.id) }
+ let(:integration) { create(:jira_integration, group: group, project: nil, inherit_from_id: instance_integration.id) }
let(:created_integration) { Integration.find_by(group: subgroup) }
let(:batch) { Group.where(id: subgroup.id) }
let(:association) { 'group' }
@@ -101,7 +101,7 @@ RSpec.describe BulkCreateIntegrationService do
context 'with a project association' do
let!(:project) { create(:project) }
- let(:created_integration) { project.jira_service }
+ let(:created_integration) { project.jira_integration }
let(:batch) { Project.where(id: project.id) }
let(:association) { 'project' }
let(:inherit_from_id) { integration.id }
diff --git a/spec/services/bulk_update_integration_service_spec.rb b/spec/services/bulk_update_integration_service_spec.rb
index a866e0852bc..b6b7d1936a2 100644
--- a/spec/services/bulk_update_integration_service_spec.rb
+++ b/spec/services/bulk_update_integration_service_spec.rb
@@ -6,7 +6,7 @@ RSpec.describe BulkUpdateIntegrationService do
include JiraServiceHelper
before_all do
- stub_jira_service_test
+ stub_jira_integration_test
end
let(:excluded_attributes) { %w[id project_id group_id inherit_from_id instance template created_at updated_at] }
diff --git a/spec/services/git/branch_push_service_spec.rb b/spec/services/git/branch_push_service_spec.rb
index cc3ba21f002..d52e3a0b6fa 100644
--- a/spec/services/git/branch_push_service_spec.rb
+++ b/spec/services/git/branch_push_service_spec.rb
@@ -411,13 +411,13 @@ RSpec.describe Git::BranchPushService, services: true do
context "for jira issue tracker" do
include JiraServiceHelper
- let(:jira_tracker) { project.create_jira_service if project.jira_service.nil? }
+ let(:jira_tracker) { project.create_jira_integration if project.jira_integration.nil? }
before do
- # project.create_jira_service doesn't seem to invalidate the cache here
+ # project.create_jira_integration doesn't seem to invalidate the cache here
project.has_external_issue_tracker = true
- stub_jira_service_test
- jira_service_settings
+ stub_jira_integration_test
+ jira_integration_settings
stub_jira_urls("JIRA-1")
allow(closing_commit).to receive_messages({
diff --git a/spec/services/groups/transfer_service_spec.rb b/spec/services/groups/transfer_service_spec.rb
index 2fbd5eeef5f..889b5551746 100644
--- a/spec/services/groups/transfer_service_spec.rb
+++ b/spec/services/groups/transfer_service_spec.rb
@@ -241,7 +241,7 @@ RSpec.describe Groups::TransferService do
context 'when the group is allowed to be transferred' do
let_it_be(:new_parent_group, reload: true) { create(:group, :public) }
- let_it_be(:new_parent_group_integration) { create(:slack_service, group: new_parent_group, project: nil, webhook: 'http://new-group.slack.com') }
+ let_it_be(:new_parent_group_integration) { create(:integrations_slack, group: new_parent_group, project: nil, webhook: 'http://new-group.slack.com') }
before do
allow(PropagateIntegrationWorker).to receive(:perform_async)
@@ -277,8 +277,8 @@ RSpec.describe Groups::TransferService do
let(:new_created_integration) { Integration.find_by(group: group) }
context 'with an inherited integration' do
- let_it_be(:instance_integration) { create(:slack_service, :instance, webhook: 'http://project.slack.com') }
- let_it_be(:group_integration) { create(:slack_service, group: group, project: nil, webhook: 'http://group.slack.com', inherit_from_id: instance_integration.id) }
+ let_it_be(:instance_integration) { create(:integrations_slack, :instance, webhook: 'http://project.slack.com') }
+ let_it_be(:group_integration) { create(:integrations_slack, group: group, project: nil, webhook: 'http://group.slack.com', inherit_from_id: instance_integration.id) }
it 'replaces inherited integrations', :aggregate_failures do
expect(new_created_integration.webhook).to eq(new_parent_group_integration.webhook)
@@ -288,7 +288,7 @@ RSpec.describe Groups::TransferService do
end
context 'with a custom integration' do
- let_it_be(:group_integration) { create(:slack_service, group: group, project: nil, webhook: 'http://group.slack.com') }
+ let_it_be(:group_integration) { create(:integrations_slack, group: group, project: nil, webhook: 'http://group.slack.com') }
it 'does not updates the integrations', :aggregate_failures do
expect { transfer_service.execute(new_parent_group) }.not_to change { group_integration.webhook }
diff --git a/spec/services/integrations/test/project_service_spec.rb b/spec/services/integrations/test/project_service_spec.rb
index 8417f8c4c8a..2e09dea11f1 100644
--- a/spec/services/integrations/test/project_service_spec.rb
+++ b/spec/services/integrations/test/project_service_spec.rb
@@ -7,7 +7,8 @@ RSpec.describe Integrations::Test::ProjectService do
describe '#execute' do
let_it_be(:project) { create(:project) }
- let(:integration) { create(:slack_service, project: project) }
+
+ let(:integration) { create(:integrations_slack, project: project) }
let(:user) { project.owner }
let(:event) { nil }
let(:sample_data) { { data: 'sample' } }
@@ -32,7 +33,7 @@ RSpec.describe Integrations::Test::ProjectService do
context 'with event specified' do
context 'event not supported by integration' do
- let(:integration) { create(:jira_service, project: project) }
+ let(:integration) { create(:jira_integration, project: project) }
let(:event) { 'push' }
it 'returns error message' do
diff --git a/spec/services/issues/close_service_spec.rb b/spec/services/issues/close_service_spec.rb
index 0b315422be8..4a285600d42 100644
--- a/spec/services/issues/close_service_spec.rb
+++ b/spec/services/issues/close_service_spec.rb
@@ -81,7 +81,7 @@ RSpec.describe Issues::CloseService do
describe '#close_issue' do
context 'with external issue' do
context 'with an active external issue tracker supporting close_issue' do
- let!(:external_issue_tracker) { create(:jira_service, project: project) }
+ let!(:external_issue_tracker) { create(:jira_integration, project: project) }
it 'closes the issue on the external issue tracker' do
project.reload
@@ -92,7 +92,7 @@ RSpec.describe Issues::CloseService do
end
context 'with inactive external issue tracker supporting close_issue' do
- let!(:external_issue_tracker) { create(:jira_service, project: project, active: false) }
+ let!(:external_issue_tracker) { create(:jira_integration, project: project, active: false) }
it 'does not close the issue on the external issue tracker' do
project.reload
diff --git a/spec/services/jira/requests/projects/list_service_spec.rb b/spec/services/jira/requests/projects/list_service_spec.rb
index 0fff51b1226..1230ac250d0 100644
--- a/spec/services/jira/requests/projects/list_service_spec.rb
+++ b/spec/services/jira/requests/projects/list_service_spec.rb
@@ -5,17 +5,17 @@ require 'spec_helper'
RSpec.describe Jira::Requests::Projects::ListService do
include AfterNextHelpers
- let(:jira_service) { create(:jira_service) }
+ let(:jira_integration) { create(:jira_integration) }
let(:params) { {} }
describe '#execute' do
- let(:service) { described_class.new(jira_service, params) }
+ let(:service) { described_class.new(jira_integration, params) }
subject { service.execute }
- context 'without jira_service' do
+ context 'without jira_integration' do
before do
- jira_service.update!(active: false)
+ jira_integration.update!(active: false)
end
it 'returns an error response' do
@@ -24,8 +24,8 @@ RSpec.describe Jira::Requests::Projects::ListService do
end
end
- context 'when jira_service is nil' do
- let(:jira_service) { nil }
+ context 'when jira_integration is nil' do
+ let(:jira_integration) { nil }
it 'returns an error response' do
expect(subject.error?).to be_truthy
@@ -33,7 +33,7 @@ RSpec.describe Jira::Requests::Projects::ListService do
end
end
- context 'with jira_service' do
+ context 'with jira_integration' do
context 'when validations and params are ok' do
let(:response_headers) { { 'content-type' => 'application/json' } }
let(:response_body) { [].to_json }
@@ -59,7 +59,7 @@ RSpec.describe Jira::Requests::Projects::ListService do
end
context 'when jira runs on a subpath' do
- let(:jira_service) { create(:jira_service, url: 'http://jira.example.com/jira') }
+ let(:jira_integration) { create(:jira_integration, url: 'http://jira.example.com/jira') }
let(:expected_url_pattern) { /.*jira.example.com\/jira\/rest\/api\/2\/project/ }
it 'takes the subpath into account' do
diff --git a/spec/services/jira_import/start_import_service_spec.rb b/spec/services/jira_import/start_import_service_spec.rb
index a10928355ef..36d639cd724 100644
--- a/spec/services/jira_import/start_import_service_spec.rb
+++ b/spec/services/jira_import/start_import_service_spec.rb
@@ -28,10 +28,10 @@ RSpec.describe JiraImport::StartImportService do
end
context 'when project validation is ok' do
- let!(:jira_service) { create(:jira_service, project: project, active: true) }
+ let!(:jira_integration) { create(:jira_integration, project: project, active: true) }
before do
- stub_jira_service_test
+ stub_jira_integration_test
allow(Gitlab::JiraImport).to receive(:validate_project_settings!)
end
diff --git a/spec/services/jira_import/users_importer_spec.rb b/spec/services/jira_import/users_importer_spec.rb
index 2e8c556d62c..af408847260 100644
--- a/spec/services/jira_import/users_importer_spec.rb
+++ b/spec/services/jira_import/users_importer_spec.rb
@@ -33,7 +33,7 @@ RSpec.describe JiraImport::UsersImporter do
end
before do
- stub_jira_service_test
+ stub_jira_integration_test
project.add_maintainer(user)
end
@@ -45,7 +45,7 @@ RSpec.describe JiraImport::UsersImporter do
RSpec.shared_examples 'maps Jira users to GitLab users' do |users_mapper_service:|
context 'when Jira import is configured correctly' do
- let_it_be(:jira_service) { create(:jira_service, project: project, active: true, url: "http://jira.example.net") }
+ let_it_be(:jira_integration) { create(:jira_integration, project: project, active: true, url: "http://jira.example.net") }
context 'when users mapper service raises an error' do
let(:error) { Timeout::Error.new }
@@ -98,9 +98,9 @@ RSpec.describe JiraImport::UsersImporter do
context 'when Jira instance is of Server deployment type' do
before do
- allow(project).to receive(:jira_service).and_return(jira_service)
+ allow(project).to receive(:jira_integration).and_return(jira_integration)
- jira_service.data_fields.deployment_server!
+ jira_integration.data_fields.deployment_server!
end
it_behaves_like 'maps Jira users to GitLab users', users_mapper_service: JiraImport::ServerUsersMapperService
@@ -108,9 +108,9 @@ RSpec.describe JiraImport::UsersImporter do
context 'when Jira instance is of Cloud deployment type' do
before do
- allow(project).to receive(:jira_service).and_return(jira_service)
+ allow(project).to receive(:jira_integration).and_return(jira_integration)
- jira_service.data_fields.deployment_cloud!
+ jira_integration.data_fields.deployment_cloud!
end
it_behaves_like 'maps Jira users to GitLab users', users_mapper_service: JiraImport::CloudUsersMapperService
diff --git a/spec/services/merge_requests/build_service_spec.rb b/spec/services/merge_requests/build_service_spec.rb
index d10f82289bd..0f282384661 100644
--- a/spec/services/merge_requests/build_service_spec.rb
+++ b/spec/services/merge_requests/build_service_spec.rb
@@ -252,8 +252,8 @@ RSpec.describe MergeRequests::BuildService do
context 'when the source branch matches an issue' do
where(:factory, :source_branch, :closing_message) do
- :jira_service | 'FOO-123-fix-issue' | 'Closes FOO-123'
- :jira_service | 'fix-issue' | nil
+ :jira_integration | 'FOO-123-fix-issue' | 'Closes FOO-123'
+ :jira_integration | 'fix-issue' | nil
:custom_issue_tracker_integration | '123-fix-issue' | 'Closes #123'
:custom_issue_tracker_integration | 'fix-issue' | nil
nil | '123-fix-issue' | 'Closes #123'
@@ -351,8 +351,8 @@ RSpec.describe MergeRequests::BuildService do
context 'when the source branch matches an issue' do
where(:factory, :source_branch, :title, :closing_message) do
- :jira_service | 'FOO-123-fix-issue' | 'Resolve FOO-123 "Fix issue"' | 'Closes FOO-123'
- :jira_service | 'fix-issue' | 'Fix issue' | nil
+ :jira_integration | 'FOO-123-fix-issue' | 'Resolve FOO-123 "Fix issue"' | 'Closes FOO-123'
+ :jira_integration | 'fix-issue' | 'Fix issue' | nil
:custom_issue_tracker_integration | '123-fix-issue' | 'Resolve #123 "Fix issue"' | 'Closes #123'
:custom_issue_tracker_integration | 'fix-issue' | 'Fix issue' | nil
nil | '123-fix-issue' | 'Resolve "A bug"' | 'Closes #123'
@@ -400,8 +400,8 @@ RSpec.describe MergeRequests::BuildService do
context 'when the source branch matches an issue' do
where(:factory, :source_branch, :title, :closing_message) do
- :jira_service | 'FOO-123-fix-issue' | 'Resolve FOO-123 "Fix issue"' | 'Closes FOO-123'
- :jira_service | 'fix-issue' | 'Fix issue' | nil
+ :jira_integration | 'FOO-123-fix-issue' | 'Resolve FOO-123 "Fix issue"' | 'Closes FOO-123'
+ :jira_integration | 'fix-issue' | 'Fix issue' | nil
:custom_issue_tracker_integration | '123-fix-issue' | 'Resolve #123 "Fix issue"' | 'Closes #123'
:custom_issue_tracker_integration | 'fix-issue' | 'Fix issue' | nil
nil | '123-fix-issue' | 'Resolve "A bug"' | 'Closes #123'
diff --git a/spec/services/merge_requests/merge_service_spec.rb b/spec/services/merge_requests/merge_service_spec.rb
index 503c0282bd6..b3af4d67896 100644
--- a/spec/services/merge_requests/merge_service_spec.rb
+++ b/spec/services/merge_requests/merge_service_spec.rb
@@ -163,14 +163,14 @@ RSpec.describe MergeRequests::MergeService do
context 'with Jira integration' do
include JiraServiceHelper
- let(:jira_tracker) { project.create_jira_service }
+ let(:jira_tracker) { project.create_jira_integration }
let(:jira_issue) { ExternalIssue.new('JIRA-123', project) }
let(:commit) { double('commit', safe_message: "Fixes #{jira_issue.to_reference}") }
before do
- stub_jira_service_test
+ stub_jira_integration_test
project.update!(has_external_issue_tracker: true)
- jira_service_settings
+ jira_integration_settings
stub_jira_urls(jira_issue.id)
allow(merge_request).to receive(:commits).and_return([commit])
end
diff --git a/spec/services/projects/create_service_spec.rb b/spec/services/projects/create_service_spec.rb
index ac0b6cc8ef1..b8e48cef171 100644
--- a/spec/services/projects/create_service_spec.rb
+++ b/spec/services/projects/create_service_spec.rb
@@ -703,7 +703,7 @@ RSpec.describe Projects::CreateService, '#execute' do
create(:clusters_integrations_prometheus, cluster: cluster)
end
- it 'creates PrometheusService record', :aggregate_failures do
+ it 'creates Integrations::Prometheus record', :aggregate_failures do
project = create_project(user, opts.merge!(namespace_id: group.id))
service = project.prometheus_service
@@ -720,7 +720,7 @@ RSpec.describe Projects::CreateService, '#execute' do
create(:clusters_integrations_prometheus, cluster: cluster)
end
- it 'creates PrometheusService record', :aggregate_failures do
+ it 'creates Integrations::Prometheus record', :aggregate_failures do
project = create_project(user, opts)
service = project.prometheus_service
@@ -731,7 +731,7 @@ RSpec.describe Projects::CreateService, '#execute' do
it 'cleans invalid record and logs warning', :aggregate_failures do
invalid_service_record = build(:prometheus_service, properties: { api_url: nil, manual_configuration: true }.to_json)
- allow(PrometheusService).to receive(:new).and_return(invalid_service_record)
+ allow(::Integrations::Prometheus).to receive(:new).and_return(invalid_service_record)
expect(Gitlab::ErrorTracking).to receive(:track_exception).with(an_instance_of(ActiveRecord::RecordInvalid), include(extra: { project_id: a_kind_of(Integer) }))
project = create_project(user, opts)
@@ -741,7 +741,7 @@ RSpec.describe Projects::CreateService, '#execute' do
end
context 'shared Prometheus integration is not available' do
- it 'does not persist PrometheusService record', :aggregate_failures do
+ it 'does not persist Integrations::Prometheus record' do
project = create_project(user, opts)
expect(project.prometheus_service).to be_nil
diff --git a/spec/services/projects/transfer_service_spec.rb b/spec/services/projects/transfer_service_spec.rb
index 3171abfb36f..2856d2ee2da 100644
--- a/spec/services/projects/transfer_service_spec.rb
+++ b/spec/services/projects/transfer_service_spec.rb
@@ -7,7 +7,7 @@ RSpec.describe Projects::TransferService do
let_it_be(:user) { create(:user) }
let_it_be(:group) { create(:group) }
- let_it_be(:group_integration) { create(:slack_service, group: group, project: nil, webhook: 'http://group.slack.com') }
+ let_it_be(:group_integration) { create(:integrations_slack, group: group, project: nil, webhook: 'http://group.slack.com') }
let(:project) { create(:project, :repository, :legacy_storage, namespace: user.namespace) }
subject(:execute_transfer) { described_class.new(project, user).execute(group).tap { project.reload } }
@@ -121,24 +121,24 @@ RSpec.describe Projects::TransferService do
context 'with a project integration' do
let_it_be_with_reload(:project) { create(:project, namespace: user.namespace) }
- let_it_be(:instance_integration) { create(:slack_service, :instance, webhook: 'http://project.slack.com') }
+ let_it_be(:instance_integration) { create(:integrations_slack, :instance, webhook: 'http://project.slack.com') }
context 'with an inherited integration' do
- let_it_be(:project_integration) { create(:slack_service, project: project, webhook: 'http://project.slack.com', inherit_from_id: instance_integration.id) }
+ let_it_be(:project_integration) { create(:integrations_slack, project: project, webhook: 'http://project.slack.com', inherit_from_id: instance_integration.id) }
it 'replaces inherited integrations', :aggregate_failures do
execute_transfer
- expect(project.slack_service.webhook).to eq(group_integration.webhook)
+ expect(project.slack_integration.webhook).to eq(group_integration.webhook)
expect(Integration.count).to eq(3)
end
end
context 'with a custom integration' do
- let_it_be(:project_integration) { create(:slack_service, project: project, webhook: 'http://project.slack.com') }
+ let_it_be(:project_integration) { create(:integrations_slack, project: project, webhook: 'http://project.slack.com') }
it 'does not updates the integrations' do
- expect { execute_transfer }.not_to change { project.slack_service.webhook }
+ expect { execute_transfer }.not_to change { project.slack_integration.webhook }
end
end
end
diff --git a/spec/services/projects/update_service_spec.rb b/spec/services/projects/update_service_spec.rb
index e1b22da2e61..315329263b5 100644
--- a/spec/services/projects/update_service_spec.rb
+++ b/spec/services/projects/update_service_spec.rb
@@ -503,7 +503,7 @@ RSpec.describe Projects::UpdateService do
it 'creates new record' do
expect { update_project(project, user, prometheus_service_attributes: prometheus_service_attributes) }
- .to change { ::PrometheusService.where(project: project).count }
+ .to change { ::Integrations::Prometheus.where(project: project).count }
.from(0)
.to(1)
end
@@ -519,7 +519,7 @@ RSpec.describe Projects::UpdateService do
it 'does not create new record' do
expect { update_project(project, user, prometheus_service_attributes: prometheus_service_attributes) }
- .not_to change { ::PrometheusService.where(project: project).count }
+ .not_to change { ::Integrations::Prometheus.where(project: project).count }
end
end
end
diff --git a/spec/services/prometheus/proxy_service_spec.rb b/spec/services/prometheus/proxy_service_spec.rb
index f22ea361fde..b78683cace7 100644
--- a/spec/services/prometheus/proxy_service_spec.rb
+++ b/spec/services/prometheus/proxy_service_spec.rb
@@ -65,7 +65,7 @@ RSpec.describe Prometheus::ProxyService do
end
describe '#execute' do
- let(:prometheus_adapter) { instance_double(PrometheusService) }
+ let(:prometheus_adapter) { instance_double(::Integrations::Prometheus) }
let(:params) { ActionController::Parameters.new(query: '1').permit! }
subject { described_class.new(environment, 'GET', 'query', params) }
diff --git a/spec/services/system_note_service_spec.rb b/spec/services/system_note_service_spec.rb
index 54cef164f1c..395fada574e 100644
--- a/spec/services/system_note_service_spec.rb
+++ b/spec/services/system_note_service_spec.rb
@@ -355,15 +355,15 @@ RSpec.describe SystemNoteService do
let(:issue) { create(:issue, project: project) }
let(:merge_request) { create(:merge_request, :simple, target_project: project, source_project: project) }
let(:jira_issue) { ExternalIssue.new("JIRA-1", project)}
- let(:jira_tracker) { project.jira_service }
+ let(:jira_tracker) { project.jira_integration }
let(:commit) { project.commit }
let(:comment_url) { jira_api_comment_url(jira_issue.id) }
let(:success_message) { "SUCCESS: Successfully posted to http://jira.example.net." }
before do
- stub_jira_service_test
+ stub_jira_integration_test
stub_jira_urls(jira_issue.id)
- jira_service_settings
+ jira_integration_settings
end
def cross_reference(type, link_exists = false)
diff --git a/spec/services/system_notes/issuables_service_spec.rb b/spec/services/system_notes/issuables_service_spec.rb
index 0eb327ea7f1..d2d9eba2bc6 100644
--- a/spec/services/system_notes/issuables_service_spec.rb
+++ b/spec/services/system_notes/issuables_service_spec.rb
@@ -728,7 +728,7 @@ RSpec.describe ::SystemNotes::IssuablesService do
let(:noteable) { ExternalIssue.new('EXT-1234', project) }
it 'is false with issue tracker supporting referencing' do
- create(:jira_service, project: project)
+ create(:jira_integration, project: project)
project.reload
expect(service.cross_reference_disallowed?(noteable)).to be_falsey
diff --git a/spec/support/helpers/jira_service_helper.rb b/spec/support/helpers/jira_service_helper.rb
index ce908d53f88..3cfd0de06e8 100644
--- a/spec/support/helpers/jira_service_helper.rb
+++ b/spec/support/helpers/jira_service_helper.rb
@@ -4,7 +4,7 @@ module JiraServiceHelper
JIRA_URL = "http://jira.example.net"
JIRA_API = JIRA_URL + "/rest/api/2"
- def jira_service_settings
+ def jira_integration_settings
url = JIRA_URL
username = 'jira-user'
password = 'my-secret-password'
@@ -77,7 +77,7 @@ module JiraServiceHelper
JIRA_API + "/issue/#{issue_id}"
end
- def stub_jira_service_test
+ def stub_jira_integration_test
WebMock.stub_request(:get, /serverInfo/).to_return(body: { url: 'http://url' }.to_json)
end
diff --git a/spec/support/shared_contexts/features/integrations/integrations_shared_context.rb b/spec/support/shared_contexts/features/integrations/integrations_shared_context.rb
index e532b42fd1c..588e73394b7 100644
--- a/spec/support/shared_contexts/features/integrations/integrations_shared_context.rb
+++ b/spec/support/shared_contexts/features/integrations/integrations_shared_context.rb
@@ -46,7 +46,7 @@ Integration.available_services_names.each do |service|
before do
enable_license_for_service(service)
- stub_jira_service_test if service == 'jira'
+ stub_jira_integration_test if service == 'jira'
end
def initialize_service(service, attrs = {})
diff --git a/spec/support/shared_contexts/requests/api/graphql/jira_import/jira_projects_context.rb b/spec/support/shared_contexts/requests/api/graphql/jira_import/jira_projects_context.rb
index de40b926a1c..6d34675e8e5 100644
--- a/spec/support/shared_contexts/requests/api/graphql/jira_import/jira_projects_context.rb
+++ b/spec/support/shared_contexts/requests/api/graphql/jira_import/jira_projects_context.rb
@@ -4,8 +4,8 @@ RSpec.shared_context 'Jira projects request context' do
let(:url) { 'https://jira.example.com' }
let(:username) { 'jira-username' }
let(:password) { 'jira-password' }
- let!(:jira_service) do
- create(:jira_service,
+ let!(:jira_integration) do
+ create(:jira_integration,
project: project,
url: url,
username: username,
diff --git a/spec/support/shared_examples/models/concerns/integrations/slack_mattermost_notifier_shared_examples.rb b/spec/support/shared_examples/models/concerns/integrations/slack_mattermost_notifier_shared_examples.rb
index 66448aca2c5..1bba398f59e 100644
--- a/spec/support/shared_examples/models/concerns/integrations/slack_mattermost_notifier_shared_examples.rb
+++ b/spec/support/shared_examples/models/concerns/integrations/slack_mattermost_notifier_shared_examples.rb
@@ -8,7 +8,7 @@ RSpec.shared_examples Integrations::SlackMattermostNotifier do |service_name|
def execute_with_options(options)
receive(:new).with(webhook_url, options.merge(http_client: Integrations::SlackMattermostNotifier::HTTPClient))
- .and_return(double(:slack_service).as_null_object)
+ .and_return(double(:slack_integration).as_null_object)
end
describe "Associations" do
diff --git a/spec/support/shared_examples/models/issue_tracker_service_shared_examples.rb b/spec/support/shared_examples/models/issue_tracker_service_shared_examples.rb
index b275d594792..9d1a55b5f73 100644
--- a/spec/support/shared_examples/models/issue_tracker_service_shared_examples.rb
+++ b/spec/support/shared_examples/models/issue_tracker_service_shared_examples.rb
@@ -1,6 +1,6 @@
# frozen_string_literal: true
-RSpec.shared_examples 'issue tracker service URL attribute' do |url_attr|
+RSpec.shared_examples 'issue tracker integration URL attribute' do |url_attr|
it { is_expected.to allow_value('https://example.com').for(url_attr) }
it { is_expected.not_to allow_value('example.com').for(url_attr) }
@@ -8,6 +8,12 @@ RSpec.shared_examples 'issue tracker service URL attribute' do |url_attr|
it { is_expected.not_to allow_value('herp-and-derp').for(url_attr) }
end
+# TODO: Remove this backwards-compatible alias when
+# https://gitlab.com/gitlab-org/gitlab/-/issues/330300 has been completed.
+RSpec.shared_examples 'issue tracker service URL attribute' do |url_attr|
+ it_behaves_like 'issue tracker integration URL attribute', url_attr
+end
+
RSpec.shared_examples 'allows project key on reference pattern' do |url_attr|
it 'allows underscores in the project name' do
expect(described_class.reference_pattern.match('EXT_EXT-1234')[0]).to eq 'EXT_EXT-1234'
diff --git a/spec/support/shared_examples/services/jira_import/user_mapper_services_shared_examples.rb b/spec/support/shared_examples/services/jira_import/user_mapper_services_shared_examples.rb
index cbe5c7d89db..0151723793e 100644
--- a/spec/support/shared_examples/services/jira_import/user_mapper_services_shared_examples.rb
+++ b/spec/support/shared_examples/services/jira_import/user_mapper_services_shared_examples.rb
@@ -3,7 +3,7 @@
RSpec.shared_examples 'mapping jira users' do
let(:client) { double }
- let_it_be(:jira_service) { create(:jira_service, project: project, active: true) }
+ let_it_be(:jira_integration) { create(:jira_integration, project: project, active: true) }
before do
allow(subject).to receive(:client).and_return(client)
diff --git a/spec/views/layouts/nav/sidebar/_project.html.haml_spec.rb b/spec/views/layouts/nav/sidebar/_project.html.haml_spec.rb
index a1aa7c04b67..172fe249eb6 100644
--- a/spec/views/layouts/nav/sidebar/_project.html.haml_spec.rb
+++ b/spec/views/layouts/nav/sidebar/_project.html.haml_spec.rb
@@ -313,7 +313,7 @@ RSpec.describe 'layouts/nav/sidebar/_project' do
end
context 'with Jira issue tracker' do
- let_it_be(:jira) { create(:jira_service, project: project, issues_enabled: false) }
+ let_it_be(:jira) { create(:jira_integration, project: project, issues_enabled: false) }
it 'has a link to the Jira issue tracker' do
render
diff --git a/spec/views/projects/services/_form.haml_spec.rb b/spec/views/projects/services/_form.haml_spec.rb
index f063e73dae4..177f703ba6c 100644
--- a/spec/views/projects/services/_form.haml_spec.rb
+++ b/spec/views/projects/services/_form.haml_spec.rb
@@ -15,7 +15,7 @@ RSpec.describe 'projects/services/_form' do
current_user: user,
can?: true,
current_application_settings: Gitlab::CurrentSettings.current_application_settings,
- integration: project.redmine_service,
+ integration: project.redmine_integration,
request: double(referer: '/services')
)
end
diff --git a/spec/workers/gitlab/jira_import/stage/import_issues_worker_spec.rb b/spec/workers/gitlab/jira_import/stage/import_issues_worker_spec.rb
index f82f6ccd9d6..10702c17cb5 100644
--- a/spec/workers/gitlab/jira_import/stage/import_issues_worker_spec.rb
+++ b/spec/workers/gitlab/jira_import/stage/import_issues_worker_spec.rb
@@ -16,7 +16,7 @@ RSpec.describe Gitlab::JiraImport::Stage::ImportIssuesWorker do
let_it_be(:jira_import, reload: true) { create(:jira_import_state, :scheduled, project: project) }
before do
- stub_jira_service_test
+ stub_jira_integration_test
end
context 'when import did not start' do
@@ -25,7 +25,7 @@ RSpec.describe Gitlab::JiraImport::Stage::ImportIssuesWorker do
end
context 'when import started', :clean_gitlab_redis_cache do
- let_it_be(:jira_service) { create(:jira_service, project: project) }
+ let_it_be(:jira_integration) { create(:jira_integration, project: project) }
before do
jira_import.start!
diff --git a/spec/workers/gitlab/jira_import/stage/import_labels_worker_spec.rb b/spec/workers/gitlab/jira_import/stage/import_labels_worker_spec.rb
index 0b7a35a92e2..52c516b9ff9 100644
--- a/spec/workers/gitlab/jira_import/stage/import_labels_worker_spec.rb
+++ b/spec/workers/gitlab/jira_import/stage/import_labels_worker_spec.rb
@@ -21,10 +21,10 @@ RSpec.describe Gitlab::JiraImport::Stage::ImportLabelsWorker do
end
context 'when import started' do
- let!(:jira_service) { create(:jira_service, project: project) }
+ let!(:jira_integration) { create(:jira_integration, project: project) }
before do
- stub_jira_service_test
+ stub_jira_integration_test
jira_import.start!
diff --git a/spec/workers/projects/post_creation_worker_spec.rb b/spec/workers/projects/post_creation_worker_spec.rb
index 50c21575878..d1600daa3bd 100644
--- a/spec/workers/projects/post_creation_worker_spec.rb
+++ b/spec/workers/projects/post_creation_worker_spec.rb
@@ -36,7 +36,7 @@ RSpec.describe Projects::PostCreationWorker do
create(:clusters_integrations_prometheus, cluster: cluster)
end
- it 'creates PrometheusService record', :aggregate_failures do
+ it 'creates an Integrations::Prometheus record', :aggregate_failures do
subject
service = project.prometheus_service
@@ -53,7 +53,7 @@ RSpec.describe Projects::PostCreationWorker do
create(:clusters_integrations_prometheus, cluster: cluster)
end
- it 'creates PrometheusService record', :aggregate_failures do
+ it 'creates an Integrations::Prometheus record', :aggregate_failures do
subject
service = project.prometheus_service
@@ -64,7 +64,7 @@ RSpec.describe Projects::PostCreationWorker do
it 'cleans invalid record and logs warning', :aggregate_failures do
invalid_service_record = build(:prometheus_service, properties: { api_url: nil, manual_configuration: true }.to_json)
- allow(PrometheusService).to receive(:new).and_return(invalid_service_record)
+ allow(::Integrations::Prometheus).to receive(:new).and_return(invalid_service_record)
expect(Gitlab::ErrorTracking).to receive(:track_exception).with(an_instance_of(ActiveRecord::RecordInvalid), include(extra: { project_id: a_kind_of(Integer) })).twice
subject
@@ -74,7 +74,7 @@ RSpec.describe Projects::PostCreationWorker do
end
context 'shared Prometheus application is not available' do
- it 'does not persist PrometheusService record', :aggregate_failures do
+ it 'does not persist an Integrations::Prometheus record', :aggregate_failures do
subject
expect(project.prometheus_service).to be_nil
diff --git a/spec/workers/propagate_integration_group_worker_spec.rb b/spec/workers/propagate_integration_group_worker_spec.rb
index 1c72bed323a..9d46534df4f 100644
--- a/spec/workers/propagate_integration_group_worker_spec.rb
+++ b/spec/workers/propagate_integration_group_worker_spec.rb
@@ -8,7 +8,7 @@ RSpec.describe PropagateIntegrationGroupWorker do
let_it_be(:another_group) { create(:group) }
let_it_be(:subgroup1) { create(:group, parent: group) }
let_it_be(:subgroup2) { create(:group, parent: group) }
- let_it_be(:integration) { create(:redmine_service, :instance) }
+ let_it_be(:integration) { create(:redmine_integration, :instance) }
let(:job_args) { [integration.id, group.id, subgroup2.id] }
@@ -22,7 +22,7 @@ RSpec.describe PropagateIntegrationGroupWorker do
end
context 'with a group integration' do
- let_it_be(:integration) { create(:redmine_service, group: group, project: nil) }
+ let_it_be(:integration) { create(:redmine_integration, group: group, project: nil) }
it 'calls to BulkCreateIntegrationService' do
expect(BulkCreateIntegrationService).to receive(:new)
diff --git a/spec/workers/propagate_integration_inherit_descendant_worker_spec.rb b/spec/workers/propagate_integration_inherit_descendant_worker_spec.rb
index b5eb0f69017..8a231d4104c 100644
--- a/spec/workers/propagate_integration_inherit_descendant_worker_spec.rb
+++ b/spec/workers/propagate_integration_inherit_descendant_worker_spec.rb
@@ -5,8 +5,8 @@ require 'spec_helper'
RSpec.describe PropagateIntegrationInheritDescendantWorker do
let_it_be(:group) { create(:group) }
let_it_be(:subgroup) { create(:group, parent: group) }
- let_it_be(:group_integration) { create(:redmine_service, group: group, project: nil) }
- let_it_be(:subgroup_integration) { create(:redmine_service, group: subgroup, project: nil, inherit_from_id: group_integration.id) }
+ let_it_be(:group_integration) { create(:redmine_integration, group: group, project: nil) }
+ let_it_be(:subgroup_integration) { create(:redmine_integration, group: subgroup, project: nil, inherit_from_id: group_integration.id) }
it_behaves_like 'an idempotent worker' do
let(:job_args) { [group_integration.id, subgroup_integration.id, subgroup_integration.id] }
diff --git a/spec/workers/propagate_integration_inherit_worker_spec.rb b/spec/workers/propagate_integration_inherit_worker_spec.rb
index 2b4f241f755..dd5d246d7f9 100644
--- a/spec/workers/propagate_integration_inherit_worker_spec.rb
+++ b/spec/workers/propagate_integration_inherit_worker_spec.rb
@@ -4,10 +4,10 @@ require 'spec_helper'
RSpec.describe PropagateIntegrationInheritWorker do
describe '#perform' do
- let_it_be(:integration) { create(:redmine_service, :instance) }
- let_it_be(:integration1) { create(:redmine_service, inherit_from_id: integration.id) }
+ let_it_be(:integration) { create(:redmine_integration, :instance) }
+ let_it_be(:integration1) { create(:redmine_integration, inherit_from_id: integration.id) }
let_it_be(:integration2) { create(:bugzilla_integration, inherit_from_id: integration.id) }
- let_it_be(:integration3) { create(:redmine_service) }
+ let_it_be(:integration3) { create(:redmine_integration) }
it_behaves_like 'an idempotent worker' do
let(:job_args) { [integration.id, integration1.id, integration3.id] }
diff --git a/spec/workers/propagate_integration_project_worker_spec.rb b/spec/workers/propagate_integration_project_worker_spec.rb
index c8293744bec..312631252cc 100644
--- a/spec/workers/propagate_integration_project_worker_spec.rb
+++ b/spec/workers/propagate_integration_project_worker_spec.rb
@@ -8,7 +8,7 @@ RSpec.describe PropagateIntegrationProjectWorker do
let_it_be(:project1) { create(:project) }
let_it_be(:project2) { create(:project, group: group) }
let_it_be(:project3) { create(:project, group: group) }
- let_it_be(:integration) { create(:redmine_service, :instance) }
+ let_it_be(:integration) { create(:redmine_integration, :instance) }
let(:job_args) { [integration.id, project1.id, project3.id] }
@@ -22,7 +22,7 @@ RSpec.describe PropagateIntegrationProjectWorker do
end
context 'with a group integration' do
- let_it_be(:integration) { create(:redmine_service, group: group, project: nil) }
+ let_it_be(:integration) { create(:redmine_integration, group: group, project: nil) }
it 'calls to BulkCreateIntegrationService' do
expect(BulkCreateIntegrationService).to receive(:new)