Add latest changes from gitlab-org/gitlab@master
@@ -65,12 +65,6 @@ compile-test-assets as-if-foss:
    - .frontend:rules:compile-test-assets-as-if-foss
    - .as-if-foss

compile-test-assets as-if-jh:
  extends:
    - compile-test-assets
    - .frontend:rules:compile-test-assets-as-if-jh
  needs: ["add-jh-folder"]

update-assets-compile-production-cache:
  extends:
    - compile-production-assets
@@ -150,18 +144,6 @@ rspec-all frontend_fixture as-if-foss:
    - !reference [.frontend-fixtures-base, needs]
    - "compile-test-assets as-if-foss"

# Builds FOSS, EE, and JH fixtures in the EE project, with the `jh/` folder added (due to `as-if-jh`).
rspec-all frontend_fixture as-if-jh:
  extends:
    - .frontend-fixtures-base
    - .frontend:rules:default-frontend-jobs-as-if-jh
  needs:
    - !reference [.frontend-fixtures-base, needs]
    - "compile-test-assets as-if-jh"
    - "add-jh-folder"
  script:
    - echo "This job is currently doing nothing since there's no specific JH fixtures yet. To enable this job, remove this line."

graphql-schema-dump:
  variables:
    SETUP_DB: "false"
@@ -262,14 +244,6 @@ jest-as-if-foss:
  needs: ["rspec-all frontend_fixture as-if-foss"]
  parallel: 2

jest-as-if-jh:
  extends:
    - .jest-base
    - .frontend:rules:default-frontend-jobs-as-if-jh
  needs: ["rspec-all frontend_fixture as-if-jh", "add-jh-folder"]
  script:
    - echo "This job is currently doing nothing since there's no specific JH Jest tests yet. To enable this job, remove this line."

coverage-frontend:
  extends:
    - .default-retry
@@ -84,12 +84,6 @@
    - .use-pg12
  needs: ["setup-test-env", "retrieve-tests-metadata", "compile-test-assets as-if-foss", "detect-tests"]

.rspec-base-pg12-as-if-jh:
  extends:
    - .rspec-base
    - .use-pg12
  needs: ["setup-test-env", "retrieve-tests-metadata", "compile-test-assets as-if-jh", "detect-tests", "add-jh-folder"]

.rspec-base-pg13:
  extends:
    - .rspec-base
@@ -117,11 +111,6 @@
    - .use-pg12-opensearch1-ee
    - .rails:rules:run-search-tests

.rspec-jh-base-pg12:
  extends:
    - .rspec-base-pg12-as-if-jh
    - .use-pg12-ee

.rspec-ee-base-pg13:
  extends:
    - .rspec-base
@@ -744,31 +733,6 @@ rspec system pg12-as-if-foss single-db:
    - .single-db-rspec
    - .rails:rules:single-db

rspec migration pg12-as-if-jh:
  extends:
    - .rspec-base-pg12-as-if-jh
    - .rspec-base-migration
    - .rails:rules:as-if-jh-rspec
    - .rspec-migration-parallel

rspec unit pg12-as-if-jh:
  extends:
    - .rspec-base-pg12-as-if-jh
    - .rails:rules:as-if-jh-rspec
    - .rspec-unit-parallel

rspec integration pg12-as-if-jh:
  extends:
    - .rspec-base-pg12-as-if-jh
    - .rails:rules:as-if-jh-rspec
    - .rspec-integration-parallel

rspec system pg12-as-if-jh:
  extends:
    - .rspec-base-pg12-as-if-jh
    - .rails:rules:as-if-jh-rspec
    - .rspec-system-parallel

rspec-ee migration pg12:
  extends:
    - .rspec-ee-base-pg12
@@ -872,52 +836,6 @@ rspec-ee system pg12 single-db:
    - .single-db-rspec
    - .rails:rules:single-db

rspec-ee migration pg12-as-if-jh:
  extends:
    - .rspec-jh-base-pg12
    - .rspec-base-migration
    - .rails:rules:as-if-jh-rspec
    - .rspec-ee-migration-parallel

rspec-ee unit pg12-as-if-jh:
  extends:
    - .rspec-jh-base-pg12
    - .rails:rules:as-if-jh-rspec
    - .rspec-ee-unit-parallel

rspec-ee integration pg12-as-if-jh:
  extends:
    - .rspec-jh-base-pg12
    - .rails:rules:as-if-jh-rspec
    - .rspec-ee-integration-parallel

rspec-ee system pg12-as-if-jh:
  extends:
    - .rspec-jh-base-pg12
    - .rails:rules:as-if-jh-rspec
    - .rspec-ee-system-parallel

rspec-jh migration pg12-as-if-jh:
  extends:
    - .rspec-jh-base-pg12
    - .rspec-base-migration
    - .rails:rules:as-if-jh-rspec

rspec-jh unit pg12-as-if-jh:
  extends:
    - .rspec-jh-base-pg12
    - .rails:rules:as-if-jh-rspec

rspec-jh integration pg12-as-if-jh:
  extends:
    - .rspec-jh-base-pg12
    - .rails:rules:as-if-jh-rspec

rspec-jh system pg12-as-if-jh:
  extends:
    - .rspec-jh-base-pg12
    - .rails:rules:as-if-jh-rspec

db:rollback geo:
  extends:
    - db:rollback
@@ -524,17 +524,6 @@
    - <<: *if-jh
      when: never

.as-if-jh-default-exclusion-rules:
  rules:
    - <<: *if-security-merge-request
      when: never
    - <<: *if-merge-request-targeting-stable-branch
      when: never
    - <<: *if-stable-branch-refs
      when: never
    - <<: *if-merge-request-labels-as-if-jh
      allow_failure: true

.rails:rules:minimal-default-rules:
  rules:
    - <<: *if-merge-request-approved
@@ -752,18 +741,6 @@
    - changes: *startup-css-patterns
    - changes: *workhorse-patterns

.frontend:rules:compile-test-assets-as-if-jh:
  rules:
    - !reference [".strict-ee-only-rules", rules]
    - !reference [".as-if-jh-default-exclusion-rules", rules]
    - <<: *if-merge-request-labels-run-all-rspec
      allow_failure: true
    - changes: *code-backstage-qa-patterns
      allow_failure: true
    - changes: *startup-css-patterns
      allow_failure: true
    - changes: *workhorse-patterns

.frontend:rules:default-frontend-jobs:
  rules:
    - <<: *if-merge-request-labels-run-all-rspec
@@ -781,19 +758,6 @@
    - <<: *if-merge-request
      changes: *ci-patterns

.frontend:rules:default-frontend-jobs-as-if-jh:
  rules:
    - !reference [".strict-ee-only-rules", rules]
    - !reference [".as-if-jh-default-exclusion-rules", rules]
    - <<: *if-merge-request-labels-run-all-rspec
      allow_failure: true
    - <<: *if-merge-request
      changes: *startup-css-patterns
      allow_failure: true
    - <<: *if-merge-request
      changes: *ci-patterns
      allow_failure: true

.frontend:rules:frontend_fixture-as-if-foss:
  rules:
    - !reference [".strict-ee-only-rules", rules]
@@ -1330,14 +1294,6 @@
    - <<: *if-merge-request-labels-as-if-foss
      changes: *code-backstage-patterns

.rails:rules:as-if-jh-rspec:
  rules:
    - !reference [".strict-ee-only-rules", rules]
    - !reference [".as-if-jh-default-exclusion-rules", rules]
    - <<: *if-merge-request
      changes: *ci-patterns
      allow_failure: true

.rails:rules:ee-and-foss-db-library-code:
  rules:
    - changes: *db-library-patterns
@@ -1843,19 +1799,6 @@
      - ".gitlab/ci/test-metadata.gitlab-ci.yml"
      - "scripts/rspec_helpers.sh"

.setup:rules:add-jh-folder:
  rules:
    - !reference [".strict-ee-only-rules", rules]
    - !reference [".as-if-jh-default-exclusion-rules", rules]
    - <<: *if-merge-request-labels-run-all-rspec
      allow_failure: true
    - changes: *code-backstage-qa-patterns
      allow_failure: true
    - changes: *startup-css-patterns
      allow_failure: true
    - changes: *workhorse-patterns
      allow_failure: true

#######################
# Test metadata rules #
#######################
@@ -172,22 +172,3 @@ detect-previous-failed-tests:
    expire_in: 7d
    paths:
      - ${PREVIOUS_FAILED_TESTS_DIR}

add-jh-folder:
  extends: .setup:rules:add-jh-folder
  image: ${GITLAB_DEPENDENCY_PROXY}ruby:${RUBY_VERSION}
  stage: prepare
  before_script:
    - source ./scripts/utils.sh
    - install_gitlab_gem
  script:
    - JH_BRANCH=$(./scripts/setup/find-jh-branch.rb)
    - 'echo "JH_BRANCH: ${JH_BRANCH}"'
    - curl --location -o "jh-folder.tar.gz" "https://gitlab.com/gitlab-org/gitlab-jh-mirrors/gitlab/-/archive/${JH_BRANCH}/gitlab-${JH_BRANCH}.tar.gz?path=jh"
    - tar -xf "jh-folder.tar.gz"
    - mv "gitlab-${JH_BRANCH}-jh/jh/" ./
    - ls -l jh/
  artifacts:
    expire_in: 2d
    paths:
      - jh/
@@ -566,9 +566,6 @@ Graphql/Descriptions:
RSpec/ImplicitSubject:
  Enabled: false

RSpec/EmptyLineAfterHook:
  Enabled: false

RSpec/HooksBeforeExamples:
  Enabled: false

@@ -0,0 +1,60 @@
---
# Cop supports --auto-correct.
RSpec/EmptyLineAfterHook:
  Exclude:
    - 'ee/spec/controllers/projects/integrations/zentao/issues_controller_spec.rb'
    - 'ee/spec/controllers/projects/push_rules_controller_spec.rb'
    - 'ee/spec/features/groups/usage_quotas_spec.rb'
    - 'ee/spec/features/issues/user_bulk_edits_issues_spec.rb'
    - 'ee/spec/features/profiles/usage_quotas_spec.rb'
    - 'ee/spec/lib/ee/api/entities/user_with_admin_spec.rb'
    - 'ee/spec/lib/ee/audit/compliance_framework_changes_auditor_spec.rb'
    - 'ee/spec/lib/ee/gitlab/auth/ldap/sync/group_spec.rb'
    - 'ee/spec/lib/ee/gitlab/scim/provisioning_service_spec.rb'
    - 'ee/spec/lib/gitlab/audit/auditor_spec.rb'
    - 'ee/spec/lib/gitlab/proxy_spec.rb'
    - 'ee/spec/lib/gitlab/vulnerabilities/container_scanning_vulnerability_spec.rb'
    - 'ee/spec/models/ee/key_spec.rb'
    - 'ee/spec/models/project_spec.rb'
    - 'ee/spec/requests/api/users_spec.rb'
    - 'ee/spec/requests/search_controller_spec.rb'
    - 'ee/spec/services/ci/sync_reports_to_approval_rules_service_spec.rb'
    - 'ee/spec/services/ee/gpg_keys/destroy_service_spec.rb'
    - 'ee/spec/services/ee/two_factor/destroy_service_spec.rb'
    - 'ee/spec/services/external_status_checks/update_service_spec.rb'
    - 'ee/spec/services/group_saml/saml_group_links/destroy_service_spec.rb'
    - 'ee/spec/services/groups/memberships/export_service_spec.rb'
    - 'ee/spec/services/merge_requests/approval_service_spec.rb'
    - 'ee/spec/support/shared_examples/policies/protected_environments_shared_examples.rb'
    - 'qa/qa/specs/features/ee/browser_ui/1_manage/group/group_audit_logs_1_spec.rb'
    - 'qa/qa/specs/features/ee/browser_ui/1_manage/project/project_audit_logs_spec.rb'
    - 'qa/spec/specs/helpers/quarantine_spec.rb'
    - 'qa/spec/support/page_error_checker_spec.rb'
    - 'spec/controllers/admin/spam_logs_controller_spec.rb'
    - 'spec/controllers/projects/issues_controller_spec.rb'
    - 'spec/features/admin/admin_mode/login_spec.rb'
    - 'spec/features/calendar_spec.rb'
    - 'spec/features/projects/blobs/user_views_pipeline_editor_button_spec.rb'
    - 'spec/features/users/overview_spec.rb'
    - 'spec/lib/gitlab/auth/ldap/person_spec.rb'
    - 'spec/lib/gitlab/database/migrations/instrumentation_spec.rb'
    - 'spec/lib/gitlab/prometheus/queries/matched_metric_query_spec.rb'
    - 'spec/lib/gitlab/sidekiq_middleware_spec.rb'
    - 'spec/mailers/emails/pages_domains_spec.rb'
    - 'spec/models/application_record_spec.rb'
    - 'spec/models/integrations/chat_message/merge_message_spec.rb'
    - 'spec/models/integrations/microsoft_teams_spec.rb'
    - 'spec/models/user_spec.rb'
    - 'spec/requests/api/graphql/mutations/metrics/dashboard/annotations/delete_spec.rb'
    - 'spec/requests/api/issues/issues_spec.rb'
    - 'spec/requests/api/pages/internal_access_spec.rb'
    - 'spec/requests/api/pages/private_access_spec.rb'
    - 'spec/requests/api/pages/public_access_spec.rb'
    - 'spec/rubocop/cop/migration/update_column_in_batches_spec.rb'
    - 'spec/services/merge_requests/execute_approval_hooks_service_spec.rb'
    - 'spec/services/notes/create_service_spec.rb'
    - 'spec/services/notes/quick_actions_service_spec.rb'
    - 'spec/services/projects/fork_service_spec.rb'
    - 'spec/support/redis/redis_shared_examples.rb'
    - 'spec/support/shared_examples/requests/api/milestones_shared_examples.rb'
    - 'spec/support/shared_examples/sends_git_audit_streaming_event_shared_examples.rb'
@@ -1,11 +1,16 @@
# frozen_string_literal: true

class Admin::DevOpsReportController < Admin::ApplicationController
  include RedisTracking
  include ProductAnalyticsTracking

  helper_method :show_adoption?

  track_redis_hll_event :show, name: 'i_analytics_dev_ops_score', if: -> { should_track_devops_score? }
  track_custom_event :show,
    name: 'i_analytics_dev_ops_score',
    action: 'perform_analytics_usage_action',
    label: 'redis_hll_counters.analytics.analytics_total_unique_counts_monthly',
    destinations: %i[redis_hll snowplow],
    conditions: -> { should_track_devops_score? }

  feature_category :devops_reports

@@ -24,6 +29,14 @@ class Admin::DevOpsReportController < Admin::ApplicationController
  def should_track_devops_score?
    true
  end

  def tracking_namespace_source
    nil
  end

  def tracking_project_source
    nil
  end
end

Admin::DevOpsReportController.prepend_mod_with('Admin::DevOpsReportController')
@@ -1,9 +1,13 @@
# frozen_string_literal: true

class Admin::UsageTrendsController < Admin::ApplicationController
  include RedisTracking
  include ProductAnalyticsTracking

  track_redis_hll_event :index, name: 'i_analytics_instance_statistics'
  track_custom_event :index,
    name: 'i_analytics_instance_statistics',
    action: 'perform_analytics_usage_action',
    label: 'redis_hll_counters.analytics.analytics_total_unique_counts_monthly',
    destinations: %i[redis_hll snowplow]

  feature_category :devops_reports

@@ -11,4 +15,12 @@ class Admin::UsageTrendsController < Admin::ApplicationController

  def index
  end

  def tracking_namespace_source
    @group
  end

  def tracking_project_source
    nil
  end
end
@@ -39,6 +39,7 @@ module ProductAnalyticsTracking
    return unless destinations.include?(:snowplow) && event_enabled?(name)

    optional_arguments = {
      namespace: tracking_namespace_source,
      project: tracking_project_source
    }.compact

@@ -48,7 +49,6 @@ module ProductAnalyticsTracking
      user: current_user,
      property: name,
      label: label,
      namespace: tracking_namespace_source,
      **optional_arguments
    )
  end

@@ -60,7 +60,13 @@ module ProductAnalyticsTracking
      i_search_paid: :route_hll_to_snowplow_phase2,
      i_search_total: :route_hll_to_snowplow_phase2,
      i_search_advanced: :route_hll_to_snowplow_phase2,
      i_ecosystem_jira_service_list_issues: :route_hll_to_snowplow_phase2
      i_ecosystem_jira_service_list_issues: :route_hll_to_snowplow_phase2,
      users_viewing_analytics_group_devops_adoption: :route_hll_to_snowplow_phase2,
      i_analytics_dev_ops_adoption: :route_hll_to_snowplow_phase2,
      i_analytics_dev_ops_score: :route_hll_to_snowplow_phase2,
      p_analytics_merge_request: :route_hll_to_snowplow_phase2,
      i_analytics_instance_statistics: :route_hll_to_snowplow_phase2,
      g_analytics_contribution: :route_hll_to_snowplow_phase2
    }

    Feature.enabled?(events_to_ff[event.to_sym], tracking_namespace_source)
@@ -15,11 +15,11 @@ module UsersHelper
  end

  def user_email_help_text(user)
    return 'We also use email for avatar detection if no avatar is uploaded.' unless user.unconfirmed_email.present?
    return _('We also use email for avatar detection if no avatar is uploaded.') unless user.unconfirmed_email.present?

    confirmation_link = link_to 'Resend confirmation e-mail', user_confirmation_path(user: { email: @user.unconfirmed_email }), method: :post
    confirmation_link = link_to _('Resend confirmation e-mail'), user_confirmation_path(user: { email: user.unconfirmed_email }), method: :post

    h('Please click the link in the confirmation email before continuing. It was sent to ') +
    h(_('Please click the link in the confirmation email before continuing. It was sent to ')) +
      content_tag(:strong) { user.unconfirmed_email } + h('.') +
      content_tag(:p) { confirmation_link }
  end
@@ -122,7 +122,7 @@ module ApplicationSettingImplementation
      password_authentication_enabled_for_git: true,
      password_authentication_enabled_for_web: Settings.gitlab['signin_enabled'],
      performance_bar_allowed_group_id: nil,
      personal_access_token_prefix: nil,
      personal_access_token_prefix: 'glpat-',
      plantuml_enabled: false,
      plantuml_url: nil,
      polling_interval_multiplier: 1,
@@ -0,0 +1,25 @@
---
description: Analytics usage event
category: class of the controller triggering given event
action: perform_analytics_usage_action
label_description: key_path of corresponding redis hll total metric
property_description: the name of the performed action, corresponding to migrated redis hll event name
value_description:
extra_properties:
identifiers:
- user
- namespace
product_section: dev
product_stage: manage
product_group: optimize
product_category:
milestone: "15.3"
introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/94369
distributions:
- ce
- ee
tiers:
- free
- premium
- ultimate

@@ -0,0 +1,8 @@
---
name: batched_migrations_health_status_wal
introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/84555
rollout_issue_url: https://gitlab.com/gitlab-org/gitlab/-/issues/366855
milestone: '15.2'
type: ops
group: group::database
default_enabled: false
@@ -1,32 +0,0 @@
# frozen_string_literal: true
# rubocop:disable Database/MultipleDatabases

raise 'This patch should be dropped after upgrading Rails v6.1.6.1' if ActiveRecord::VERSION::STRING != "6.1.6.1"

module ActiveRecord
  module Coders # :nodoc:
    class YAMLColumn # :nodoc:
      private

      def yaml_load(payload)
        return legacy_yaml_load(payload) if ActiveRecord::Base.use_yaml_unsafe_load

        YAML.safe_load(payload, permitted_classes: ActiveRecord::Base.yaml_column_permitted_classes, aliases: true)
      rescue Psych::DisallowedClass => e
        Gitlab::ErrorTracking.track_and_raise_for_dev_exception(e)

        legacy_yaml_load(payload)
      end

      def legacy_yaml_load(payload)
        if YAML.respond_to?(:unsafe_load)
          YAML.unsafe_load(payload)
        else
          YAML.load(payload) # rubocop:disable Security/YAMLLoad
        end
      end
    end
  end
end

# rubocop:enable Database/MultipleDatabases
@@ -30,7 +30,7 @@ However, this may not lead to more downloads in parallel unless the number of
available Sidekiq threads is also increased. For example, if repository synchronization
concurrency is increased from 25 to 50, you may also want to increase the number
of Sidekiq threads from 25 to 50. See the
[Sidekiq concurrency documentation](../../operations/extra_sidekiq_processes.md#number-of-threads)
[Sidekiq concurrency documentation](../../sidekiq/extra_sidekiq_processes.md#number-of-threads)
for more details.

## Repository re-verification
(Six image files changed in this diff; their before and after dimensions and sizes are identical.)
@@ -1176,3 +1176,7 @@ to run it, read [the GitLabSOS documentation](https://gitlab.com/gitlab-com/supp
for creating and comparing performance statistics from GitLab logs.
For more details and instructions to run it, read the
[documentation for fast-stats](https://gitlab.com/gitlab-com/support/toolbox/fast-stats#usage).

## Find relevant log entries with a correlation ID

Most requests have a log ID that can be used to [find relevant log entries](tracing_correlation_id.md).
@@ -0,0 +1,202 @@
---
stage: Systems
group: Distribution
info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://about.gitlab.com/handbook/engineering/ux/technical-writing/#assignments
---

# Find relevant log entries with a correlation ID **(FREE SELF)**

GitLab instances log a unique request tracking ID (known as the
"correlation ID") for most requests. Each individual request to GitLab gets
its own correlation ID, which then gets logged in each GitLab component's logs for that
request. This makes it easier to trace behavior in a
distributed system. Without this ID it can be difficult or
impossible to match correlating log entries.

## Identify the correlation ID for a request

The correlation ID is logged in structured logs under the key `correlation_id`
and in all response headers GitLab sends under the header `x-request-id`.
You can find your correlation ID by searching in either place.

### Getting the correlation ID in your browser

You can use your browser's developer tools to monitor and inspect network
activity with the site that you're visiting. See the links below for network monitoring
documentation for some popular browsers.

- [Network Monitor - Firefox Developer Tools](https://developer.mozilla.org/en-US/docs/Tools/Network_Monitor)
- [Inspect Network Activity In Chrome DevTools](https://developer.chrome.com/docs/devtools/network/)
- [Safari Web Development Tools](https://developer.apple.com/safari/tools/)
- [Microsoft Edge Network panel](https://docs.microsoft.com/en-us/microsoft-edge/devtools-guide-chromium/network/)

To locate a relevant request and view its correlation ID:

1. Enable persistent logging in your network monitor. Some actions in GitLab redirect you quickly after you submit a form, so this helps capture all relevant activity.
1. To help isolate the requests you are looking for, you can filter for `document` requests.
1. Select the request of interest to view further detail.
1. Go to the **Headers** section and look for **Response Headers**. There you should find an `x-request-id` header with a
   value that was randomly generated by GitLab for the request.

See the following example:


|
||||
|
||||
### Getting the correlation ID from your logs

Another approach to finding the correct correlation ID is to search or watch
your logs and find the `correlation_id` value for the log entry that you're
watching for.

For example, let's say that you want to learn what's happening or breaking when
you reproduce an action in GitLab. You could tail the GitLab logs, filtering
to requests by your user, and then watch the requests until you see what you're
interested in.

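A minimal sketch of that workflow, assuming an Omnibus installation and a user named `bob`, is to tail the structured Rails log and keep only your own requests:

```shell
# Each matching JSON line includes a correlation_id field.
sudo gitlab-ctl tail gitlab-rails/production_json.log | grep '"username":"bob"'
```

The `jq` and `grep` examples below refine this same pipeline.
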
### Getting the correlation ID from curl

If you're using `curl` then you can use the verbose option to show request and response headers, as well as other debug information.

```shell
curl --verbose "https://gitlab.example.com/api/v4/projects"
# look for a line that looks like this
< x-request-id: 4rAMkV3gof4
```

#### Using jq

This example uses [jq](https://stedolan.github.io/jq/) to filter results and
display values we most likely care about.

```shell
sudo gitlab-ctl tail gitlab-rails/production_json.log | jq 'select(.username == "bob") | "User: \(.username), \(.method) \(.path), \(.controller)#\(.action), ID: \(.correlation_id)"'
```

```plaintext
"User: bob, GET /root/linux, ProjectsController#show, ID: U7k7fh6NpW3"
"User: bob, GET /root/linux/commits/master/signatures, Projects::CommitsController#signatures, ID: XPIHpctzEg1"
"User: bob, GET /root/linux/blob/master/README, Projects::BlobController#show, ID: LOt9hgi1TV4"
```

#### Using grep

This example uses only `grep` and `tr`, which are more likely to be installed than `jq`.

```shell
sudo gitlab-ctl tail gitlab-rails/production_json.log | grep '"username":"bob"' | tr ',' '\n' | egrep 'method|path|correlation_id'
```

```plaintext
{"method":"GET"
"path":"/root/linux"
"username":"bob"
"correlation_id":"U7k7fh6NpW3"}
{"method":"GET"
"path":"/root/linux/commits/master/signatures"
"username":"bob"
"correlation_id":"XPIHpctzEg1"}
{"method":"GET"
"path":"/root/linux/blob/master/README"
"username":"bob"
"correlation_id":"LOt9hgi1TV4"}
```

## Searching your logs for the correlation ID

Once you have the correlation ID you can start searching for relevant log
entries. You can filter the lines by the correlation ID itself.
Combining a `find` and `grep` should be sufficient to find the entries you are looking for.

```shell
# find <gitlab log directory> -type f -mtime -0 -exec grep '<correlation ID>' '{}' '+'
find /var/log/gitlab -type f -mtime 0 -exec grep 'LOt9hgi1TV4' '{}' '+'
```

```plaintext
/var/log/gitlab/gitlab-workhorse/current:{"correlation_id":"LOt9hgi1TV4","duration_ms":2478,"host":"gitlab.domain.tld","level":"info","method":"GET","msg":"access","proto":"HTTP/1.1","referrer":"https://gitlab.domain.tld/root/linux","remote_addr":"68.0.116.160:0","remote_ip":"[filtered]","status":200,"system":"http","time":"2019-09-17T22:17:19Z","uri":"/root/linux/blob/master/README?format=json\u0026viewer=rich","user_agent":"Mozilla/5.0 (Mac) Gecko Firefox/69.0","written_bytes":1743}
/var/log/gitlab/gitaly/current:{"correlation_id":"LOt9hgi1TV4","grpc.code":"OK","grpc.meta.auth_version":"v2","grpc.meta.client_name":"gitlab-web","grpc.method":"FindCommits","grpc.request.deadline":"2019-09-17T22:17:47Z","grpc.request.fullMethod":"/gitaly.CommitService/FindCommits","grpc.request.glProjectPath":"root/linux","grpc.request.glRepository":"project-1","grpc.request.repoPath":"@hashed/6b/86/6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b.git","grpc.request.repoStorage":"default","grpc.request.topLevelGroup":"@hashed","grpc.service":"gitaly.CommitService","grpc.start_time":"2019-09-17T22:17:17Z","grpc.time_ms":2319.161,"level":"info","msg":"finished streaming call with code OK","peer.address":"@","span.kind":"server","system":"grpc","time":"2019-09-17T22:17:19Z"}
/var/log/gitlab/gitlab-rails/production_json.log:{"method":"GET","path":"/root/linux/blob/master/README","format":"json","controller":"Projects::BlobController","action":"show","status":200,"duration":2448.77,"view":0.49,"db":21.63,"time":"2019-09-17T22:17:19.800Z","params":[{"key":"viewer","value":"rich"},{"key":"namespace_id","value":"root"},{"key":"project_id","value":"linux"},{"key":"id","value":"master/README"}],"remote_ip":"[filtered]","user_id":2,"username":"bob","ua":"Mozilla/5.0 (Mac) Gecko Firefox/69.0","queue_duration":3.38,"gitaly_calls":1,"gitaly_duration":0.77,"rugged_calls":4,"rugged_duration_ms":28.74,"correlation_id":"LOt9hgi1TV4"}
```

### Searching in distributed architectures

If you have done some horizontal scaling in your GitLab infrastructure, then
you must search across _all_ of your GitLab nodes. You can do this with
some sort of log aggregation software like Loki, ELK, Splunk, or others.

You can use a tool like Ansible or PSSH (parallel SSH) that can execute identical commands across your servers in
parallel, or craft your own solution.

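As a sketch of the parallel-SSH approach (assuming password-less SSH access and a `gitlab-hosts.txt` inventory file that you maintain yourself), you could run the same search on every node at once:

```shell
# -h points at the host list; -i prints each host's output inline.
pssh -h gitlab-hosts.txt -i "sudo grep -r 'LOt9hgi1TV4' /var/log/gitlab/"
```
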
### Viewing the request in the Performance Bar

You can use the [performance bar](../monitoring/performance/performance_bar.md) to view interesting data including calls made to SQL and Gitaly.

To view the data, the correlation ID of the request must match the same session as the user
viewing the performance bar. For API requests, this means that you must perform the request
using the session cookie of the signed-in user.

For example, if you want to view the database queries executed for the following API endpoint:

```shell
https://gitlab.com/api/v4/groups/2564205/projects?with_security_reports=true&page=1&per_page=1
```

First, enable the **Developer Tools** panel. See [Getting the correlation ID in your browser](#getting-the-correlation-id-in-your-browser) for details on how to do this.

After developer tools have been enabled, obtain a session cookie as follows:

1. Visit <https://gitlab.com> while logged in.
1. Optional. Select **Fetch/XHR** request filter in the **Developer Tools** panel. This step is described for Google Chrome developer tools and is not strictly necessary; it just makes it easier to find the correct request.
1. Select the `results?request_id=<some-request-id>` request on the left hand side.
1. The session cookie is displayed under the `Request Headers` section of the `Headers` panel. Right-click on the cookie value and select `Copy value`.

![Obtaining a session cookie for request](img/obtaining-a-session-cookie-for-request_v14_3.png)

You have the value of the session cookie copied to your clipboard, for example:

```shell
experimentation_subject_id=<subject-id>; _gitlab_session=<session-id>; event_filter=all; visitor_id=<visitor-id>; perf_bar_enabled=true; sidebar_collapsed=true; diff_view=inline; sast_entry_point_dismissed=true; auto_devops_settings_dismissed=true; cf_clearance=<cf-clearance>; collapsed_gutter=false; frequently_used_emojis=clap,thumbsup,rofl,tada,eyes,bow
```

Use the value of the session cookie to craft an API request by pasting it into a custom header of a `curl` request:

```shell
$ curl --include "https://gitlab.com/api/v4/groups/2564205/projects?with_security_reports=true&page=1&per_page=1" \
  --header 'cookie: experimentation_subject_id=<subject-id>; _gitlab_session=<session-id>; event_filter=all; visitor_id=<visitor-id>; perf_bar_enabled=true; sidebar_collapsed=true; diff_view=inline; sast_entry_point_dismissed=true; auto_devops_settings_dismissed=true; cf_clearance=<cf-clearance>; collapsed_gutter=false; frequently_used_emojis=clap,thumbsup,rofl,tada,eyes,bow'

date: Tue, 28 Sep 2021 03:55:33 GMT
content-type: application/json
...
x-request-id: 01FGN8P881GF2E5J91JYA338Y3
...
[
  {
    "id":27497069,
    "description":"Analyzer for images used on live K8S containers based on Starboard",
    "container_registry_image_prefix":"registry.gitlab.com/gitlab-org/security-products/analyzers/cluster-image-scanning",
    ...
  }
]
```

The response contains the data from the API endpoint, and a `correlation_id` value, returned in the `x-request-id` header, as described in the [Identify the correlation ID for a request](#identify-the-correlation-id-for-a-request) section.

You can then view the database details for this request:

1. Paste the `x-request-id` value into the `request details` field of the [performance bar](../monitoring/performance/performance_bar.md) and press <kbd>Enter/Return</kbd>. This example uses the `x-request-id` value `01FGN8P881GF2E5J91JYA338Y3`, returned by the above response:

   ![Paste request ID into progress bar](img/paste-request-id-into-progress-bar_v14_3.png)

1. A new request is inserted into the `Request Selector` dropdown on the right-hand side of the Performance Bar. Select the new request to view the metrics of the API request:

   ![Select request ID from request selector drop down menu](img/select-request-id-from-request-selector-drop-down-menu_v14_3.png)

<!-- vale gitlab.Substitutions = NO -->
1. Select the `pg` link in the Progress Bar to view the database queries executed by the API request:

   ![View pg database details](img/view-pg-details_v14_3.png)
<!-- vale gitlab.Substitutions = YES -->

The database query dialog is displayed:

![Database query dialog](img/database-query-dialog_v14_3.png)
@@ -14,7 +14,7 @@ Prometheus scraper understands.

NOTE:
This page is about web application metrics.
To export background job metrics, learn how to [configure the Sidekiq metrics server](../../sidekiq.md#configure-the-sidekiq-metrics-server).
To export background job metrics, learn how to [configure the Sidekiq metrics server](../../sidekiq/index.md#configure-the-sidekiq-metrics-server).

We provide two mechanisms by which web application metrics can be exported:

@@ -1,362 +1,11 @@
---
stage: Systems
group: Distribution
info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://about.gitlab.com/handbook/engineering/ux/technical-writing/#assignments
redirect_to: '../sidekiq/extra_sidekiq_processes.md'
remove_date: '2022-11-11'
---

# Run multiple Sidekiq processes **(FREE SELF)**
This document was moved to [another location](../sidekiq/extra_sidekiq_processes.md).

GitLab allows you to start multiple Sidekiq processes.
These processes can be used to consume a dedicated set
of queues. This can be used to ensure certain queues always have dedicated
workers, no matter the number of jobs to be processed.

NOTE:
The information in this page applies only to Omnibus GitLab.

## Available Sidekiq queues

For a list of the existing Sidekiq queues, check the following files:

- [Queues for both GitLab Community and Enterprise Editions](https://gitlab.com/gitlab-org/gitlab/-/blob/master/app/workers/all_queues.yml)
- [Queues for GitLab Enterprise Editions only](https://gitlab.com/gitlab-org/gitlab/-/blob/master/ee/app/workers/all_queues.yml)

Each entry in the above files represents a queue on which Sidekiq processes
can be started.

## Start multiple processes

> - [Introduced](https://gitlab.com/gitlab-org/omnibus-gitlab/-/merge_requests/4006) in GitLab 12.10, starting multiple processes with Sidekiq cluster.
> - [Sidekiq cluster moved](https://gitlab.com/groups/gitlab-com/gl-infra/-/epics/181) to GitLab Free in 12.10.
> - [Sidekiq cluster became default](https://gitlab.com/gitlab-org/omnibus-gitlab/-/merge_requests/4140) in GitLab 13.0.

When starting multiple processes, the number of processes should
equal (and **not** exceed) the number of CPU cores you want to
dedicate to Sidekiq. Each Sidekiq process can use only 1 CPU
core, subject to the available workload and concurrency settings.

To start multiple processes:

1. Using the `sidekiq['queue_groups']` array setting, specify how many processes to
   create using `sidekiq-cluster` and which queue they should handle.
   Each item in the array equates to one additional Sidekiq
   process, and values in each item determine the queues it works on.

   For example, the following setting creates three Sidekiq processes, one to run on
   `elastic_commit_indexer`, one to run on `mailers`, and one process running on all queues:

   ```ruby
   sidekiq['queue_groups'] = [
     "elastic_commit_indexer",
     "mailers",
     "*"
   ]
   ```

   To have an additional Sidekiq process handle multiple queues, add multiple
   queue names to its item delimited by commas. For example:

   ```ruby
   sidekiq['queue_groups'] = [
     "elastic_commit_indexer, elastic_association_indexer",
     "mailers",
     "*"
   ]
   ```

   [In GitLab 12.9](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/26594) and
   later, the special queue name `*` means all queues. This starts two
   processes, each handling all queues:

   ```ruby
   sidekiq['queue_groups'] = [
     "*",
     "*"
   ]
   ```

   `*` cannot be combined with concrete queue names - `*, mailers`
   just handles the `mailers` queue.

   When `sidekiq-cluster` is only running on a single node, make sure that at least
   one process is running on all queues using `*`. This ensures a process
   automatically picks up jobs in queues created in the future,
   including queues that have dedicated processes.

   If `sidekiq-cluster` is running on more than one node, you can also use
   [`--negate`](#negate-settings) and list all the queues that are already being
   processed.

1. Save the file and reconfigure GitLab for the changes to take effect:

   ```shell
   sudo gitlab-ctl reconfigure
   ```

To view the Sidekiq processes in GitLab:

1. On the top bar, select **Menu > Admin**.
1. On the left sidebar, select **Monitoring > Background Jobs**.

## Negate settings

Negate the settings to have the Sidekiq process work on every queue **except** the ones
you list. In this example, we exclude all import-related jobs from a Sidekiq node:

1. Edit `/etc/gitlab/gitlab.rb` and add:

   ```ruby
   sidekiq['negate'] = true
   sidekiq['queue_selector'] = true
   sidekiq['queue_groups'] = [
     "feature_category=importers"
   ]
   ```

1. Save the file and reconfigure GitLab for the changes to take effect:

   ```shell
   sudo gitlab-ctl reconfigure
   ```

## Queue selector

> - [Introduced](https://gitlab.com/gitlab-com/gl-infra/scalability/-/issues/45) in GitLab 12.8.
> - [Sidekiq cluster, including queue selector, moved](https://gitlab.com/groups/gitlab-com/gl-infra/-/epics/181) to GitLab Free in 12.10.
> - [Renamed from `experimental_queue_selector` to `queue_selector`](https://gitlab.com/gitlab-com/gl-infra/scalability/-/issues/147) in GitLab 13.6.

In addition to selecting queues by name, as above, the `queue_selector` option
allows queue groups to be selected in a more general way using a
[worker matching query](extra_sidekiq_routing.md#worker-matching-query). After `queue_selector`
is set, all `queue_groups` must follow the aforementioned syntax.

In `/etc/gitlab/gitlab.rb`:

```ruby
sidekiq['enable'] = true
sidekiq['queue_selector'] = true
sidekiq['queue_groups'] = [
  # Run all non-CPU-bound queues that are high urgency
  'resource_boundary!=cpu&urgency=high',
  # Run all continuous integration and pages queues that are not high urgency
  'feature_category=continuous_integration,pages&urgency!=high',
  # Run all queues
  '*'
]
```

## Ignore all import queues

When [importing from GitHub](../../user/project/import/github.md) or
other sources, Sidekiq might use all of its resources to perform those
operations. To set up two separate `sidekiq-cluster` processes, where
one only processes imports and the other processes all other queues:

1. Edit `/etc/gitlab/gitlab.rb` and add:

   ```ruby
   sidekiq['enable'] = true
   sidekiq['queue_selector'] = true
   sidekiq['queue_groups'] = [
     "feature_category=importers",
     "feature_category!=importers"
   ]
   ```

1. Save the file and reconfigure GitLab for the changes to take effect:

   ```shell
   sudo gitlab-ctl reconfigure
   ```

## Number of threads

By default each process defined under `sidekiq` starts with a
number of threads that equals the number of queues, plus one spare thread.
For example, a process that handles the `process_commit` and `post_receive`
queues uses three threads in total.

These threads run inside a single Ruby process, and each process
can only use a single CPU core. The usefulness of threading depends
on the work having some external dependencies to wait on, like database queries or
HTTP requests. Most Sidekiq deployments benefit from this threading, and when
running fewer queues in a process, increasing the thread count might be
even more desirable to make the most effective use of CPU resources.

### Manage thread counts explicitly

The correct maximum thread count (also called concurrency) depends on the workload.
Typical values range from `1` for highly CPU-bound tasks to `15` or higher for mixed
low-priority work. A reasonable starting range is `15` to `25` for a non-specialized
deployment.

You can find example values used by GitLab.com by searching for `concurrency:` in
[the Helm charts](https://gitlab.com/gitlab-com/gl-infra/k8s-workloads/gitlab-com/-/blob/master/releases/gitlab/values/gprd.yaml.gotmpl).
The values vary according to the work each specific deployment of Sidekiq does.
Any other specialized deployments with processes dedicated to specific queues should
have the concurrency tuned according to:

- The CPU usage of each type of process.
- The throughput achieved.

Each thread requires a Redis connection, so adding threads may increase Redis
latency and potentially cause client timeouts. See the
[Sidekiq documentation about Redis](https://github.com/mperham/sidekiq/wiki/Using-Redis) for more
details.

#### When running Sidekiq cluster (default)

Running Sidekiq cluster is the default in GitLab 13.0 and later.

1. Edit `/etc/gitlab/gitlab.rb` and add:

   ```ruby
   sidekiq['min_concurrency'] = 15
   sidekiq['max_concurrency'] = 25
   ```

1. Save the file and reconfigure GitLab for the changes to take effect:

   ```shell
   sudo gitlab-ctl reconfigure
   ```

`min_concurrency` and `max_concurrency` are independent; one can be set without
the other. Setting `min_concurrency` to `0` disables the limit.

For each queue group, let `N` be one more than the number of queues. The
concurrency is set to:

1. `N`, if it's between `min_concurrency` and `max_concurrency`.
1. `max_concurrency`, if `N` exceeds this value.
1. `min_concurrency`, if `N` is less than this value.

If `min_concurrency` is equal to `max_concurrency`, then this value is used
regardless of the number of queues.

When `min_concurrency` is greater than `max_concurrency`, it is treated as
being equal to `max_concurrency`.

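As a quick illustration, the rule above can be written out in Ruby (a sketch only, not the actual Omnibus implementation):

```ruby
# Sketch of the effective-concurrency rule described above.
def effective_concurrency(queue_count, min_concurrency, max_concurrency)
  min_concurrency = max_concurrency if min_concurrency > max_concurrency
  n = queue_count + 1
  n = max_concurrency if max_concurrency > 0 && n > max_concurrency
  n = min_concurrency if n < min_concurrency
  n
end

effective_concurrency(2, 15, 25) # => 15, because N = 3 is below min_concurrency
```
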
#### When running a single Sidekiq process

Running a single Sidekiq process is the default in GitLab 12.10 and earlier.

WARNING:
Running Sidekiq directly was removed in GitLab
[14.0](https://gitlab.com/gitlab-com/gl-infra/scalability/-/issues/240).

1. Edit `/etc/gitlab/gitlab.rb` and add:

   ```ruby
   sidekiq['cluster'] = false
   sidekiq['concurrency'] = 25
   ```

1. Save the file and reconfigure GitLab for the changes to take effect:

   ```shell
   sudo gitlab-ctl reconfigure
   ```

This sets the concurrency (number of threads) for the Sidekiq process.

## Modify the check interval

To modify `sidekiq-cluster`'s health check interval for the additional Sidekiq processes:

1. Edit `/etc/gitlab/gitlab.rb` and add (the value can be any integer number of seconds):

   ```ruby
   sidekiq['interval'] = 5
   ```

1. Save the file and [reconfigure GitLab](../restart_gitlab.md#omnibus-gitlab-reconfigure) for the changes to take effect.

## Troubleshoot using the CLI

WARNING:
It's recommended to use `/etc/gitlab/gitlab.rb` to configure the Sidekiq processes.
If you experience a problem, you should contact GitLab support. Use the command
line at your own risk.

For debugging purposes, you can start extra Sidekiq processes by using the command
`/opt/gitlab/embedded/service/gitlab-rails/bin/sidekiq-cluster`. This command
takes arguments using the following syntax:

```shell
/opt/gitlab/embedded/service/gitlab-rails/bin/sidekiq-cluster [QUEUE,QUEUE,...] [QUEUE, ...]
```

Each separate argument denotes a group of queues that have to be processed by a
Sidekiq process. Multiple queues can be processed by the same process by
separating them with a comma instead of a space.

Instead of a queue, a queue namespace can also be provided, to have the process
automatically listen on all queues in that namespace without needing to
explicitly list all the queue names. For more information about queue namespaces,
see the relevant section in the
[Sidekiq development documentation](../../development/sidekiq/index.md#queue-namespaces).

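As a sketch (assuming `cronjob` is one of the namespaces listed in `all_queues.yml`), the following starts one process that listens on every queue in that namespace:

```shell
/opt/gitlab/embedded/service/gitlab-rails/bin/sidekiq-cluster cronjob
```
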
For example, say you want to start 2 extra processes: one to process the
`process_commit` queue, and one to process the `post_receive` queue. This can be
done as follows:

```shell
/opt/gitlab/embedded/service/gitlab-rails/bin/sidekiq-cluster process_commit post_receive
```

If you instead want to start one process processing both queues, you'd use the
following syntax:

```shell
/opt/gitlab/embedded/service/gitlab-rails/bin/sidekiq-cluster process_commit,post_receive
```

If you want to have one Sidekiq process dealing with the `process_commit` and
`post_receive` queues, and one process to process the `gitlab_shell` queue,
you'd use the following:

```shell
/opt/gitlab/embedded/service/gitlab-rails/bin/sidekiq-cluster process_commit,post_receive gitlab_shell
```

### Monitor the `sidekiq-cluster` command

The `sidekiq-cluster` command does not terminate once it has started the desired
amount of Sidekiq processes. Instead, the process continues running and
forwards any signals to the child processes. This allows you to stop all
Sidekiq processes as you send a signal to the `sidekiq-cluster` process,
instead of having to send it to the individual processes.

If the `sidekiq-cluster` process crashes or receives a `SIGKILL`, the child
processes terminate themselves after a few seconds. This ensures you don't
end up with zombie Sidekiq processes.

This allows you to monitor the processes by hooking up
`sidekiq-cluster` to your supervisor of choice (for example, runit).

If a child process dies, the `sidekiq-cluster` command signals all remaining
processes to terminate, then terminates itself. This removes the need for
`sidekiq-cluster` to re-implement complex process monitoring/restarting code.
Instead you should make sure your supervisor restarts the `sidekiq-cluster`
process whenever necessary.

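A minimal runit `run` script might look like the following (a sketch; the service path and queue list are illustrative assumptions):

```shell
#!/bin/sh
# Hypothetical /etc/service/sidekiq-cluster/run script. runit restarts this
# process when it exits; sidekiq-cluster forwards signals to its children.
exec /opt/gitlab/embedded/service/gitlab-rails/bin/sidekiq-cluster process_commit,post_receive
```
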
### PID files

The `sidekiq-cluster` command can store its PID in a file. By default no PID
file is written, but this can be changed by passing the `--pidfile` option to
`sidekiq-cluster`. For example:

```shell
/opt/gitlab/embedded/service/gitlab-rails/bin/sidekiq-cluster --pidfile /var/run/gitlab/sidekiq_cluster.pid process_commit
```

Keep in mind that the PID file contains the PID of the `sidekiq-cluster`
command and not the PIDs of the started Sidekiq processes.

### Environment

The Rails environment can be set by passing the `--environment` flag to the
`sidekiq-cluster` command, or by setting `RAILS_ENV` to a non-empty value. The
default value can be found in `/opt/gitlab/etc/gitlab-rails/env/RAILS_ENV`.
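
For example (a sketch, reusing the binary path shown above):

```shell
RAILS_ENV=production /opt/gitlab/embedded/service/gitlab-rails/bin/sidekiq-cluster "*"
```
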
<!-- This redirect file can be deleted after <2022-11-11>. -->
<!-- Redirects that point to other docs in the same project expire in three months. -->
<!-- Redirects that point to docs in a different project or site (link is not relative and starts with `https:`) expire in one year. -->
<!-- Before deletion, see: https://docs.gitlab.com/ee/development/documentation/redirects.html -->

@@ -1,180 +1,11 @@
---
stage: Systems
group: Distribution
info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://about.gitlab.com/handbook/engineering/ux/technical-writing/#assignments
redirect_to: '../sidekiq/extra_sidekiq_routing.md'
remove_date: '2022-11-11'
---

# Queue routing rules **(FREE SELF)**
This document was moved to [another location](../sidekiq/extra_sidekiq_routing.md).

When the number of Sidekiq jobs increases to a certain scale, the system faces
some scalability issues. One of them is that the length of the queue tends to get
longer. High-urgency jobs have to wait longer until other less urgent jobs
finish. This head-of-line blocking situation may eventually affect the
responsiveness of the system, especially critical actions. In another scenario,
the performance of some jobs is degraded due to other long running or CPU-intensive jobs
(computing or rendering ones) in the same machine.

To counter the aforementioned issues, one effective solution is to split
Sidekiq jobs into different queues and assign machines handling each queue
exclusively. For example, all CPU-intensive jobs could be routed to the
`cpu-bound` queue and handled by a fleet of CPU optimized instances. The queue
topology differs between companies depending on the workloads and usage
patterns. Therefore, GitLab supports a flexible mechanism for the
administrator to route the jobs based on their characteristics.

As an alternative to [Queue selector](extra_sidekiq_processes.md#queue-selector), which
configures Sidekiq cluster to listen to a specific set of workers or queues,
GitLab also supports routing a job from a worker to the desired queue when it
is scheduled. Sidekiq clients try to match a job against a configured list of
routing rules. Rules are evaluated from first to last, and as soon as we find a
match for a given worker we stop processing for that worker (first match wins).
If the worker doesn't match any rule, it falls back to the queue name generated
from the worker name.

By default, if the routing rules are not configured (or denoted with an empty
array), all the jobs are routed to the queue generated from the worker name.

## Example configuration

In `/etc/gitlab/gitlab.rb`:

```ruby
sidekiq['routing_rules'] = [
  # Do not re-route workers that require their own queue
  ['tags=needs_own_queue', nil],
  # Route all non-CPU-bound workers that are high urgency to `high-urgency` queue
  ['resource_boundary!=cpu&urgency=high', 'high-urgency'],
  # Route all database, gitaly and global search workers that are throttled to `throttled` queue
  ['feature_category=database,gitaly,global_search&urgency=throttled', 'throttled'],
  # Route all workers having contact with the outside world to a `network-intensive` queue
  ['has_external_dependencies=true|feature_category=hooks|tags=network', 'network-intensive'],
  # Route all import workers to the queues generated by the worker name, for
  # example, JiraImportWorker to `jira_import`, SVNWorker to `svn_worker`
  ['feature_category=import', nil],
  # Wildcard matching, route the rest to `default` queue
  ['*', 'default']
]
```

The routing rules list is an ordered array of tuples of query and
corresponding queue:

- The query follows the [worker matching query](#worker-matching-query) syntax.
- The `<queue_name>` must be a valid Sidekiq queue name. If the queue name
  is `nil`, or an empty string, the worker is routed to the queue generated
  by the name of the worker instead.

The query supports wildcard matching `*`, which matches all workers. As a
result, the wildcard query must stay at the end of the list or the rules after it
are ignored.

NOTE:
Mixing queue routing rules and queue selectors requires care to
ensure that all jobs are scheduled and picked up by appropriate Sidekiq
workers.

## Worker matching query
|
||||
|
||||
GitLab provides a query syntax to match a worker based on its
|
||||
attributes. This query syntax is employed by both
|
||||
[Queue routing rules](#queue-routing-rules) and
|
||||
[Queue selector](extra_sidekiq_processes.md#queue-selector). A query includes two
|
||||
components:
|
||||
|
||||
- Attributes that can be selected.
|
||||
- Operators used to construct a query.

### Available attributes

> [Introduced](https://gitlab.com/gitlab-com/gl-infra/scalability/-/issues/261) in GitLab 13.1 (`tags`).

The queue matching query works on the worker attributes described in the
[Sidekiq style guide](../../development/sidekiq/index.md). We support querying
based on a subset of worker attributes:

- `feature_category` - the
  [GitLab feature category](https://about.gitlab.com/direction/maturity/#category-maturity) the
  queue belongs to. For example, the `merge` queue belongs to the
  `source_code_management` category.
- `has_external_dependencies` - whether or not the queue connects to external
  services. For example, all importers have this set to `true`.
- `urgency` - how important it is that this queue's jobs run
  quickly. Can be `high`, `low`, or `throttled`. For example, the
  `authorized_projects` queue is used to refresh user permissions, and
  is `high` urgency.
- `worker_name` - the worker name. Use this attribute to select a specific worker.
- `name` - the queue name generated from the worker name. Use this attribute to select a specific queue. Because this is generated from
  the worker name, it does not change based on the result of other routing
  rules.
- `resource_boundary` - if the queue is bound by `cpu`, `memory`, or
  `unknown`. For example, the `ProjectExportWorker` is memory bound as it has
  to load data in memory before saving it for export.
- `tags` - short-lived annotations for queues. These are expected to frequently
  change from release to release, and may be removed entirely.

`has_external_dependencies` is a boolean attribute: only the exact
string `true` is considered true, and everything else is considered
false.

`tags` is a set, which means that `=` checks for intersecting sets, and
`!=` checks for disjoint sets. For example, `tags=a,b` selects queues
that have tags `a`, `b`, or both. `tags!=a,b` selects queues that have
neither of those tags.

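For illustration, a routing rule using set matching on `tags` could look like
the following sketch. The tag names and queue names here are hypothetical:

```ruby
sidekiq['routing_rules'] = [
  # Matches workers tagged `backup`, `export`, or both
  ['tags=backup,export', 'slow-io'],
  # Matches workers tagged with neither `backup` nor `export`
  ['tags!=backup,export', 'default']
]
```
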
The attributes of each worker are hard-coded in the source code. For
convenience, we generate a
[list of all available attributes in GitLab Community Edition](https://gitlab.com/gitlab-org/gitlab/-/blob/master/app/workers/all_queues.yml)
and a
[list of all available attributes in GitLab Enterprise Edition](https://gitlab.com/gitlab-org/gitlab/-/blob/master/ee/app/workers/all_queues.yml).

### Available operators

`queue_selector` supports the following operators, listed from highest
to lowest precedence:

- `|` - the logical `OR` operator. For example, `query_a|query_b` (where `query_a`
  and `query_b` are queries made up of the other operators here) includes
  queues that match either query.
- `&` - the logical `AND` operator. For example, `query_a&query_b` (where
  `query_a` and `query_b` are queries made up of the other operators here)
  only includes queues that match both queries.
- `!=` - the `NOT IN` operator. For example, `feature_category!=issue_tracking`
  excludes all queues from the `issue_tracking` feature category.
- `=` - the `IN` operator. For example, `resource_boundary=cpu` includes all
  queues that are CPU bound.
- `,` - the concatenate set operator. For example,
  `feature_category=continuous_integration,pages` includes all queues from
  either the `continuous_integration` category or the `pages` category. The
  same selection is possible using the `OR` operator, but `,` is more concise
  and has lower precedence.

The operator precedence for this syntax is fixed: it's not possible to make `AND`
have higher precedence than `OR`.

[In GitLab 12.9](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/26594) and
later, as with the standard queue group syntax above, a single `*` as the
entire queue group selects all queues.

### Migration

After the Sidekiq routing rules are changed, administrators must take care
with the migration to avoid losing jobs entirely, especially in a system with
long queues of jobs. The migration can be done by following the migration steps
mentioned in [Sidekiq job migration](../../raketasks/sidekiq_job_migration.md).

### Workers that cannot be migrated

Some workers cannot share a queue with other workers - typically because
they check the size of their own queue - and so must be excluded from
this process. We recommend excluding these from any further worker
routing by adding a rule to keep them in their own queue, for example:

```ruby
sidekiq['routing_rules'] = [
  ['tags=needs_own_queue', nil],
  # ...
]
```

These queues must also be included in at least one
[Sidekiq queue group](extra_sidekiq_processes.md#start-multiple-processes).

<!-- This redirect file can be deleted after <2022-11-11>. -->
<!-- Redirects that point to other docs in the same project expire in three months. -->
<!-- Redirects that point to docs in a different project or site (link is not relative and starts with `https:`) expire in one year. -->
<!-- Before deletion, see: https://docs.gitlab.com/ee/development/documentation/redirects.html -->

@ -1,82 +1,11 @@

---
stage: Data Stores
group: Application Performance
info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://about.gitlab.com/handbook/engineering/ux/technical-writing/#assignments
redirect_to: '../sidekiq/sidekiq_memory_killer.md'
remove_date: '2022-11-11'
---

# Sidekiq MemoryKiller **(FREE SELF)**

This document was moved to [another location](../sidekiq/sidekiq_memory_killer.md).

The GitLab Rails application code suffers from memory leaks. For web requests
this problem is made manageable using
[`puma-worker-killer`](https://github.com/schneems/puma_worker_killer) which
restarts Puma worker processes if they exceed a memory limit. The Sidekiq
MemoryKiller applies the same approach to the Sidekiq processes used by GitLab
to process background jobs.

Unlike puma-worker-killer, which is enabled by default for all GitLab
installations of GitLab 13.0 and later, the Sidekiq MemoryKiller is enabled by default
_only_ for Omnibus packages. The reason for this is that the MemoryKiller
relies on runit to restart Sidekiq after a memory-induced shutdown and GitLab
installations from source do not all use runit or an equivalent.

With the default settings, the MemoryKiller causes a Sidekiq restart no
more often than once every 15 minutes, with the restart causing about one
minute of delay for incoming background jobs.

Some background jobs rely on long-running external processes. To ensure these
are cleanly terminated when Sidekiq is restarted, each Sidekiq process should be
run as a process group leader (for example, using `chpst -P`). If using Omnibus or the
`bin/background_jobs` script with `runit` installed, this is handled for you.

## Configuring the MemoryKiller

The MemoryKiller is controlled using environment variables.

- `SIDEKIQ_DAEMON_MEMORY_KILLER`: defaults to 1. When set to 0, the MemoryKiller
  works in _legacy_ mode. Otherwise, the MemoryKiller works in _daemon_ mode.

  In _legacy_ mode, the MemoryKiller checks the Sidekiq process RSS
  ([Resident Set Size](https://github.com/mperham/sidekiq/wiki/Memory#rss))
  after each job.

  In _daemon_ mode, the MemoryKiller checks the Sidekiq process RSS every 3 seconds
  (defined by `SIDEKIQ_MEMORY_KILLER_CHECK_INTERVAL`).

- `SIDEKIQ_MEMORY_KILLER_MAX_RSS` (KB): if this variable is set, and its value is greater
  than 0, the MemoryKiller is enabled. Otherwise the MemoryKiller is disabled.

  `SIDEKIQ_MEMORY_KILLER_MAX_RSS` defines the Sidekiq process allowed RSS.

  In _legacy_ mode, if the Sidekiq process exceeds the allowed RSS then an irreversible
  delayed graceful restart is triggered. The restart of Sidekiq happens
  after `SIDEKIQ_MEMORY_KILLER_GRACE_TIME` seconds.

  In _daemon_ mode, if the Sidekiq process exceeds the allowed RSS for longer than
  `SIDEKIQ_MEMORY_KILLER_GRACE_TIME` the graceful restart is triggered. If the
  Sidekiq process goes below the allowed RSS within `SIDEKIQ_MEMORY_KILLER_GRACE_TIME`,
  the restart is aborted.

  The default value for Omnibus packages is set
  [in the Omnibus GitLab repository](https://gitlab.com/gitlab-org/omnibus-gitlab/blob/master/files/gitlab-cookbooks/gitlab/attributes/default.rb).

- `SIDEKIQ_MEMORY_KILLER_HARD_LIMIT_RSS` (KB): is used by _daemon_ mode. If the Sidekiq
  process RSS (expressed in kilobytes) exceeds `SIDEKIQ_MEMORY_KILLER_HARD_LIMIT_RSS`,
  an immediate graceful restart of Sidekiq is triggered.

- `SIDEKIQ_MEMORY_KILLER_CHECK_INTERVAL`: used in _daemon_ mode to define how
  often to check the process RSS. Defaults to 3 seconds.

- `SIDEKIQ_MEMORY_KILLER_GRACE_TIME`: defaults to 900 seconds (15 minutes).
  The usage of this variable is described as part of `SIDEKIQ_MEMORY_KILLER_MAX_RSS`.

- `SIDEKIQ_MEMORY_KILLER_SHUTDOWN_WAIT`: defaults to 30 seconds. This defines the
  maximum time allowed for all Sidekiq jobs to finish. No new jobs are accepted
  during that time, and the process exits as soon as all jobs finish.

  If jobs do not finish during that time, the MemoryKiller interrupts all currently
  running jobs by sending `SIGTERM` to the Sidekiq process.

  If Sidekiq does not perform the hard shutdown/restart itself,
  the Sidekiq process is forcefully terminated after
  `Sidekiq.options[:timeout] + 2` seconds. An external supervision mechanism
  (for example, runit) must restart Sidekiq afterwards.

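As an illustration, the variables described above could be combined as in the
following sketch. The values are hypothetical, and the exact place to set the
variables depends on how your Sidekiq process is started (for example, the
service's environment for a source installation):

```shell
export SIDEKIQ_DAEMON_MEMORY_KILLER=1               # daemon mode (the default)
export SIDEKIQ_MEMORY_KILLER_MAX_RSS=2000000        # allowed RSS: ~2 GB, in KB
export SIDEKIQ_MEMORY_KILLER_HARD_LIMIT_RSS=3000000 # immediate restart above ~3 GB
export SIDEKIQ_MEMORY_KILLER_CHECK_INTERVAL=3       # check RSS every 3 seconds
export SIDEKIQ_MEMORY_KILLER_GRACE_TIME=900         # tolerate excess RSS for 15 minutes
export SIDEKIQ_MEMORY_KILLER_SHUTDOWN_WAIT=30       # wait up to 30 seconds for jobs
```
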
<!-- This redirect file can be deleted after <2022-11-11>. -->
<!-- Redirects that point to other docs in the same project expire in three months. -->
<!-- Redirects that point to docs in a different project or site (link is not relative and starts with `https:`) expire in one year. -->
<!-- Before deletion, see: https://docs.gitlab.com/ee/development/documentation/redirects.html -->

@ -10,7 +10,7 @@ GitLab officially supports LTS versions of operating systems. While OSs like

Ubuntu have a clear distinction between LTS and non-LTS versions, there are
other OSs, openSUSE for example, that don't follow the LTS concept. Hence, to
avoid confusion, the official policy is that at any point in time, all the
operating systems supported by GitLab are listed on the
[installation page](https://about.gitlab.com/install/).

The following lists the currently supported OSs and their possible EOL dates.

@ -19,7 +19,6 @@ The following lists the currently supported OSs and their possible EOL dates.

| ------------------------------------------------------------ | ------------------------------ | --------------- | :----------------------------------------------------------: | ---------- | ------------------------------------------------------------ |
| AlmaLinux 8 | GitLab CE / GitLab EE 14.5.0 | x86_64, aarch64 | [AlmaLinux Install Documentation](https://about.gitlab.com/install/#almalinux-8) | 2029 | <https://almalinux.org/> |
| CentOS 7 | GitLab CE / GitLab EE 7.10.0 | x86_64 | [CentOS Install Documentation](https://about.gitlab.com/install/#centos-7) | June 2024 | <https://wiki.centos.org/About/Product> |
| Debian 9 | GitLab CE / GitLab EE 9.3.0 | amd64 | [Debian Install Documentation](https://about.gitlab.com/install/#debian) | 2022 | <https://wiki.debian.org/LTS> |
| Debian 10 | GitLab CE / GitLab EE 12.2.0 | amd64, arm64 | [Debian Install Documentation](https://about.gitlab.com/install/#debian) | 2024 | <https://wiki.debian.org/LTS> |
| Debian 11 | GitLab CE / GitLab EE 14.6.0 | amd64, arm64 | [Debian Install Documentation](https://about.gitlab.com/install/#debian) | 2026 | <https://wiki.debian.org/LTS> |
| OpenSUSE 15.3 | GitLab CE / GitLab EE 14.5.0 | x86_64, aarch64 | [OpenSUSE Install Documentation](https://about.gitlab.com/install/#opensuse-leap-15-3) | Nov 2022 | <https://en.opensuse.org/Lifetime> |

@ -87,6 +86,7 @@ release for them can be found below:

| OpenSUSE 15.1 | [November 2020](https://en.opensuse.org/Lifetime#Discontinued_distributions) | [GitLab CE](https://packages.gitlab.com/app/gitlab/gitlab-ce/search?q=gitlab-ce-13.12&dist=opensuse%2F15.1) / [GitLab EE](https://packages.gitlab.com/app/gitlab/gitlab-ee/search?q=gitlab-ee-13.12&dist=opensuse%2F15.1) 13.12 |
| Ubuntu 16.04 | [April 2021](https://ubuntu.com/info/release-end-of-life) | [GitLab CE](https://packages.gitlab.com/app/gitlab/gitlab-ce/search?q=gitlab-ce_13.12&dist=ubuntu%2Fxenial) / [GitLab EE](https://packages.gitlab.com/app/gitlab/gitlab-ee/search?q=gitlab-ee_13.12&dist=ubuntu%2Fxenial) 13.12 |
| OpenSUSE 15.2 | [December 2021](https://en.opensuse.org/Lifetime#Discontinued_distributions) | [GitLab CE](https://packages.gitlab.com/app/gitlab/gitlab-ce/search?q=gitlab-ce-14.7&dist=opensuse%2F15.2) / [GitLab EE](https://packages.gitlab.com/app/gitlab/gitlab-ee/search?q=gitlab-ee-14.7&dist=opensuse%2F15.2) 14.7 |
| Debian 9 "Stretch" | [June 2022](https://lists.debian.org/debian-lts-announce/2022/07/msg00002.html) | [GitLab CE](https://packages.gitlab.com/app/gitlab/gitlab-ce/search?q=gitlab-ce_15.2&dist=debian%2Fstretch) / [GitLab EE](https://packages.gitlab.com/app/gitlab/gitlab-ee/search?q=gitlab-ee_15.2&dist=debian%2Fstretch) 15.2 |

NOTE:
An exception to this deprecation policy is when we are unable to provide

@ -614,7 +614,7 @@ Follow the steps below to configure verbose logging of GitLab Pages daemon.

Setting `propagate_correlation_id` to true allows installations behind a reverse proxy to generate
and set a correlation ID for requests sent to GitLab Pages. When a reverse proxy sets the header value `X-Request-ID`,
the value propagates in the request chain.
Users [can find the correlation ID in the logs](../logs/tracing_correlation_id.md#identify-the-correlation-id-for-a-request).

To enable the propagation of the correlation ID:

@ -1,385 +1,11 @@

---
stage: Systems
group: Distribution
info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://about.gitlab.com/handbook/engineering/ux/technical-writing/#assignments
redirect_to: 'sidekiq/index.md'
remove_date: '2022-11-11'
---

# Configure an external Sidekiq instance **(FREE SELF)**

This document was moved to [another location](sidekiq/index.md).

You can configure an external Sidekiq instance by using the Sidekiq that's bundled in the GitLab package. Sidekiq requires connection to the Redis,
PostgreSQL, and Gitaly instances.

## Configure TCP access for PostgreSQL, Gitaly, and Redis

By default, GitLab uses UNIX sockets and is not set up to communicate via TCP. To change this:

1. Edit the `/etc/gitlab/gitlab.rb` file on your GitLab instance, and add the following:

   ```ruby
   ## PostgreSQL

   # Replace POSTGRESQL_PASSWORD_HASH with a generated md5 value
   postgresql['sql_user_password'] = 'POSTGRESQL_PASSWORD_HASH'
   postgresql['listen_address'] = '0.0.0.0'
   postgresql['port'] = 5432

   # Add the Sidekiq nodes to PostgreSQL's trusted addresses.
   # In the following example, 10.10.1.30/32 is the private IP
   # of the Sidekiq server.
   postgresql['md5_auth_cidr_addresses'] = %w(127.0.0.1/32 10.10.1.30/32)
   postgresql['trust_auth_cidr_addresses'] = %w(127.0.0.1/32 10.10.1.30/32)

   ## Gitaly

   # Make Gitaly accept connections on all network interfaces
   gitaly['listen_addr'] = "0.0.0.0:8075"
   ## Set up the Gitaly token as a form of authentication since you are accessing Gitaly over the network
   ## https://docs.gitlab.com/ee/administration/gitaly/configure_gitaly.html#about-the-gitaly-token
   gitaly['auth_token'] = 'abc123secret'
   praefect['auth_token'] = 'abc123secret'
   gitlab_rails['gitaly_token'] = 'abc123secret'

   ## Redis configuration

   redis['bind'] = '0.0.0.0'
   redis['port'] = 6379
   # Password to authenticate Redis
   redis['password'] = 'redis-password-goes-here'
   gitlab_rails['redis_password'] = 'redis-password-goes-here'

   gitlab_rails['auto_migrate'] = false
   ```

1. Run `reconfigure`:

   ```shell
   sudo gitlab-ctl reconfigure
   ```

1. Restart the `PostgreSQL` server:

   ```shell
   sudo gitlab-ctl restart postgresql
   ```

1. After the restart, set `auto_migrate` to `true` or comment it out to use the default settings:

   ```ruby
   gitlab_rails['auto_migrate'] = true
   ```

1. Run `reconfigure` again:

   ```shell
   sudo gitlab-ctl reconfigure
   ```

## Set up Sidekiq instance

1. SSH into the Sidekiq server.

1. Confirm that you can access the PostgreSQL, Gitaly, and Redis ports:

   ```shell
   telnet <GitLab host> 5432 # PostgreSQL
   telnet <GitLab host> 8075 # Gitaly
   telnet <GitLab host> 6379 # Redis
   ```

1. [Download and install](https://about.gitlab.com/install/) the Omnibus GitLab package
   using steps 1 and 2. **Do not complete any other steps.**

1. Copy the `/etc/gitlab/gitlab.rb` file from the GitLab instance and add the following settings. Make sure
   to replace them with your values:

   <!--
   Updates to example must be made at:
   - https://gitlab.com/gitlab-org/gitlab/blob/master/doc/administration/sidekiq.md
   - all reference architecture pages
   -->

   ```ruby
   ########################################
   ##### Services Disabled ###
   ########################################
   #
   # When running GitLab on just one server, you have a single `gitlab.rb`
   # to enable all services you want to run.
   # When running GitLab on N servers, you have N `gitlab.rb` files.
   # Enable only the services you want to run on each
   # specific server, while disabling all others.
   #
   gitaly['enable'] = false
   postgresql['enable'] = false
   redis['enable'] = false
   nginx['enable'] = false
   puma['enable'] = false
   gitlab_workhorse['enable'] = false
   prometheus['enable'] = false
   alertmanager['enable'] = false
   grafana['enable'] = false
   gitlab_exporter['enable'] = false
   gitlab_kas['enable'] = false

   ##
   ## To maintain uniformity of links across nodes, the
   ## `external_url` on the Sidekiq server should point to the external URL that users
   ## use to access GitLab. This can be either:
   ##
   ## - The `external_url` set on your application server.
   ## - The URL of an external load balancer, which routes traffic to the GitLab application server.
   ##
   external_url 'https://gitlab.example.com'

   # Configure the gitlab-shell API callback URL. Without this, `git push` will
   # fail. This can be your 'front door' GitLab URL or an internal load
   # balancer.
   gitlab_rails['internal_api_url'] = 'GITLAB_URL'
   gitlab_shell['secret_token'] = 'SHELL_TOKEN'

   ########################################
   #### Redis ###
   ########################################

   ## Must be the same in every sentinel node.
   redis['master_name'] = 'gitlab-redis' # Required if you have set up a Redis cluster
   ## The same password for Redis authentication you set up for the master node.
   redis['master_password'] = '<redis_master_password>'

   ### If Redis is running on the main GitLab instance and you have opened the TCP port as above, add the following
   gitlab_rails['redis_host'] = '<gitlab_host>'
   gitlab_rails['redis_port'] = 6379

   #######################################
   ### Gitaly ###
   #######################################

   ## Replace <gitaly_token> with the one you set up, see
   ## https://docs.gitlab.com/ee/administration/gitaly/configure_gitaly.html#about-the-gitaly-token
   git_data_dirs({
     "default" => {
       "gitaly_address" => "tcp://<gitlab_host>:8075",
       "gitaly_token" => "<gitaly_token>"
     }
   })

   #######################################
   ### Postgres ###
   #######################################

   # Replace <database_host> and <database_password>
   gitlab_rails['db_host'] = '<database_host>'
   gitlab_rails['db_port'] = '5432'
   gitlab_rails['db_password'] = '<database_password>'
   ## Prevent database migrations from running on upgrade automatically
   gitlab_rails['auto_migrate'] = false

   #######################################
   ### Sidekiq configuration ###
   #######################################
   sidekiq['enable'] = true
   sidekiq['listen_address'] = "0.0.0.0"

   ## Set number of Sidekiq queue processes to the same number as available CPUs
   sidekiq['queue_groups'] = ['*'] * 4

   ## Set number of Sidekiq threads per queue process to the recommended number of 10
   sidekiq['max_concurrency'] = 10
   ```

1. Copy the `/etc/gitlab/gitlab-secrets.json` file from the GitLab instance and replace the file in the Sidekiq instance.

1. Reconfigure GitLab:

   ```shell
   sudo gitlab-ctl reconfigure
   ```

1. Restart the Sidekiq instance after completing the process and finishing the database migrations.

## Configure multiple Sidekiq nodes with shared storage

If you run multiple Sidekiq nodes with a shared file storage, such as NFS, you must
specify the UIDs and GIDs to ensure they match between servers. Specifying the UIDs
and GIDs prevents permissions issues in the file system. This advice is similar to the
[advice for Geo setups](geo/replication/multiple_servers.md#step-4-configure-the-frontend-application-nodes-on-the-geo-secondary-site).

To set up multiple Sidekiq nodes:

1. Edit `/etc/gitlab/gitlab.rb`:

   ```ruby
   user['uid'] = 9000
   user['gid'] = 9000
   web_server['uid'] = 9001
   web_server['gid'] = 9001
   registry['uid'] = 9002
   registry['gid'] = 9002
   ```

1. Reconfigure GitLab:

   ```shell
   sudo gitlab-ctl reconfigure
   ```

## Configure the Container Registry when using an external Sidekiq

If you're using the Container Registry and it's running on a different
node than Sidekiq, follow the steps below.

1. Edit `/etc/gitlab/gitlab.rb`, and configure the registry URL:

   ```ruby
   registry_external_url 'https://registry.example.com'
   gitlab_rails['registry_api_url'] = "https://registry.example.com"
   ```

1. Reconfigure GitLab:

   ```shell
   sudo gitlab-ctl reconfigure
   ```

1. In the instance where Container Registry is hosted, copy the `registry.key`
   file to the Sidekiq node.

## Configure the Sidekiq metrics server

If you want to collect Sidekiq metrics, enable the Sidekiq metrics server
to make metrics available from `localhost:8082/metrics`.

To configure the metrics server:

1. Edit `/etc/gitlab/gitlab.rb`:

   ```ruby
   sidekiq['metrics_enabled'] = true
   sidekiq['listen_address'] = "localhost"
   sidekiq['listen_port'] = "8082"

   # Optionally log all the metrics server logs to log/sidekiq_exporter.log
   sidekiq['exporter_log_enabled'] = true
   ```

1. Reconfigure GitLab:

   ```shell
   sudo gitlab-ctl reconfigure
   ```

### Enable HTTPS

> [Introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/364771) in GitLab 15.2.

To serve metrics via HTTPS instead of HTTP, enable TLS in the exporter settings:

1. Edit `/etc/gitlab/gitlab.rb` to add (or find and uncomment) the following lines:

   ```ruby
   sidekiq['exporter_tls_enabled'] = true
   sidekiq['exporter_tls_cert_path'] = "/path/to/certificate.pem"
   sidekiq['exporter_tls_key_path'] = "/path/to/private-key.pem"
   ```

1. Save the file and [reconfigure GitLab](restart_gitlab.md#omnibus-gitlab-reconfigure)
   for the changes to take effect.

When TLS is enabled, the same `port` and `address` are used as described above.
The metrics server cannot serve both HTTP and HTTPS at the same time.

## Configure health checks

If you use health check probes to observe Sidekiq, enable the Sidekiq health check server.
To make health checks available from `localhost:8092`:

1. Edit `/etc/gitlab/gitlab.rb`:

   ```ruby
   sidekiq['health_checks_enabled'] = true
   sidekiq['health_checks_listen_address'] = "localhost"
   sidekiq['health_checks_listen_port'] = "8092"
   ```

1. Reconfigure GitLab:

   ```shell
   sudo gitlab-ctl reconfigure
   ```

For more information about health checks, see the [Sidekiq health check page](sidekiq_health_check.md).

## Configure LDAP and user or group synchronization

If you use LDAP for user and group management, you must add the LDAP configuration to your Sidekiq node as well as the LDAP
synchronization worker. If the LDAP configuration and LDAP synchronization worker are not applied to your Sidekiq node,
users and groups are not automatically synchronized.

For more information about configuring LDAP for GitLab, see:

- [GitLab LDAP configuration documentation](auth/ldap/index.md#configure-ldap)
- [LDAP synchronization documentation](auth/ldap/ldap_synchronization.md#adjust-ldap-user-sync-schedule)

To enable LDAP with the synchronization worker for Sidekiq:

1. Edit `/etc/gitlab/gitlab.rb`:

   ```ruby
   gitlab_rails['ldap_enabled'] = true
   gitlab_rails['prevent_ldap_sign_in'] = false
   gitlab_rails['ldap_servers'] = {
     'main' => {
       'label' => 'LDAP',
       'host' => 'ldap.mydomain.com',
       'port' => 389,
       'uid' => 'sAMAccountName',
       'encryption' => 'simple_tls',
       'verify_certificates' => true,
       'bind_dn' => '_the_full_dn_of_the_user_you_will_bind_with',
       'password' => '_the_password_of_the_bind_user',
       'tls_options' => {
         'ca_file' => '',
         'ssl_version' => '',
         'ciphers' => '',
         'cert' => '',
         'key' => ''
       },
       'timeout' => 10,
       'active_directory' => true,
       'allow_username_or_email_login' => false,
       'block_auto_created_users' => false,
       'base' => 'dc=example,dc=com',
       'user_filter' => '',
       'attributes' => {
         'username' => ['uid', 'userid', 'sAMAccountName'],
         'email' => ['mail', 'email', 'userPrincipalName'],
         'name' => 'cn',
         'first_name' => 'givenName',
         'last_name' => 'sn'
       },
       'lowercase_usernames' => false,

       # Enterprise Edition only
       # https://docs.gitlab.com/ee/administration/auth/ldap/ldap_synchronization.html
       'group_base' => '',
       'admin_group' => '',
       'external_groups' => [],
       'sync_ssh_keys' => false
     }
   }
   gitlab_rails['ldap_sync_worker_cron'] = "0 */12 * * *"
   ```

1. Reconfigure GitLab:

   ```shell
   sudo gitlab-ctl reconfigure
   ```

## Related topics

- [Extra Sidekiq processes](operations/extra_sidekiq_processes.md)
- [Extra Sidekiq routing](operations/extra_sidekiq_routing.md)
- [Using the GitLab-Sidekiq chart](https://docs.gitlab.com/charts/charts/gitlab/sidekiq/)
- [Sidekiq health checks](sidekiq_health_check.md)

<!-- This redirect file can be deleted after <2022-11-11>. -->
<!-- Redirects that point to other docs in the same project expire in three months. -->
<!-- Redirects that point to docs in a different project or site (link is not relative and starts with `https:`) expire in one year. -->
<!-- Before deletion, see: https://docs.gitlab.com/ee/development/documentation/redirects.html -->

@ -0,0 +1,362 @@

---
stage: Systems
group: Distribution
info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://about.gitlab.com/handbook/engineering/ux/technical-writing/#assignments
---

# Run multiple Sidekiq processes **(FREE SELF)**

GitLab allows you to start multiple Sidekiq processes.
Each process can consume a dedicated set of queues, which
ensures that certain queues always have dedicated workers,
no matter the number of jobs to be processed.

NOTE:
The information in this page applies only to Omnibus GitLab.

## Available Sidekiq queues

For a list of the existing Sidekiq queues, check the following files:

- [Queues for both GitLab Community and Enterprise Editions](https://gitlab.com/gitlab-org/gitlab/-/blob/master/app/workers/all_queues.yml)
- [Queues for GitLab Enterprise Editions only](https://gitlab.com/gitlab-org/gitlab/-/blob/master/ee/app/workers/all_queues.yml)

Each entry in the above files represents a queue on which Sidekiq processes
can be started.

## Start multiple processes

> - [Introduced](https://gitlab.com/gitlab-org/omnibus-gitlab/-/merge_requests/4006) in GitLab 12.10, starting multiple processes with Sidekiq cluster.
> - [Sidekiq cluster moved](https://gitlab.com/groups/gitlab-com/gl-infra/-/epics/181) to GitLab Free in 12.10.
> - [Sidekiq cluster became default](https://gitlab.com/gitlab-org/omnibus-gitlab/-/merge_requests/4140) in GitLab 13.0.

When starting multiple processes, the number of processes should
equal (and **not** exceed) the number of CPU cores you want to
dedicate to Sidekiq. Each Sidekiq process can use only 1 CPU
core, subject to the available workload and concurrency settings.

To start multiple processes:

1. Using the `sidekiq['queue_groups']` array setting, specify how many processes to
   create using `sidekiq-cluster` and which queues they should handle.
   Each item in the array equates to one additional Sidekiq
   process, and values in each item determine the queues it works on.

   For example, the following setting creates three Sidekiq processes, one to run on
   `elastic_commit_indexer`, one to run on `mailers`, and one process running on all queues:

   ```ruby
   sidekiq['queue_groups'] = [
     "elastic_commit_indexer",
     "mailers",
     "*"
   ]
   ```

   To have an additional Sidekiq process handle multiple queues, add multiple
   queue names to its item delimited by commas. For example:

   ```ruby
   sidekiq['queue_groups'] = [
     "elastic_commit_indexer, elastic_association_indexer",
     "mailers",
     "*"
   ]
   ```

   [In GitLab 12.9](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/26594) and
   later, the special queue name `*` means all queues. This starts two
   processes, each handling all queues:

   ```ruby
   sidekiq['queue_groups'] = [
     "*",
     "*"
   ]
   ```

   `*` cannot be combined with concrete queue names - `*, mailers`
   just handles the `mailers` queue.

   When `sidekiq-cluster` is only running on a single node, make sure that at least
   one process is running on all queues using `*`. This ensures a process
   automatically picks up jobs in queues created in the future,
   including queues that have dedicated processes.

   If `sidekiq-cluster` is running on more than one node, you can also use
   [`--negate`](#negate-settings) and list all the queues that are already being
   processed.

1. Save the file and reconfigure GitLab for the changes to take effect:

   ```shell
   sudo gitlab-ctl reconfigure
   ```

To view the Sidekiq processes in GitLab:

1. On the top bar, select **Menu > Admin**.
1. On the left sidebar, select **Monitoring > Background Jobs**.

## Negate settings

To have the Sidekiq process work on every queue **except** the ones
you list, negate the selection. In this example, all import-related jobs
are excluded from a Sidekiq node:

1. Edit `/etc/gitlab/gitlab.rb` and add:

   ```ruby
   sidekiq['negate'] = true
   sidekiq['queue_selector'] = true
   sidekiq['queue_groups'] = [
     "feature_category=importers"
   ]
   ```

1. Save the file and reconfigure GitLab for the changes to take effect:

   ```shell
   sudo gitlab-ctl reconfigure
   ```

## Queue selector

> - [Introduced](https://gitlab.com/gitlab-com/gl-infra/scalability/-/issues/45) in GitLab 12.8.
> - [Sidekiq cluster, including queue selector, moved](https://gitlab.com/groups/gitlab-com/gl-infra/-/epics/181) to GitLab Free in 12.10.
> - [Renamed from `experimental_queue_selector` to `queue_selector`](https://gitlab.com/gitlab-com/gl-infra/scalability/-/issues/147) in GitLab 13.6.

In addition to selecting queues by name, as above, the `queue_selector` option
allows queue groups to be selected in a more general way using a
[worker matching query](extra_sidekiq_routing.md#worker-matching-query). After `queue_selector`
is set, all `queue_groups` must follow the aforementioned syntax.

In `/etc/gitlab/gitlab.rb`:

```ruby
sidekiq['enable'] = true
sidekiq['queue_selector'] = true
sidekiq['queue_groups'] = [
  # Run all non-CPU-bound queues that are high urgency
  'resource_boundary!=cpu&urgency=high',
  # Run all continuous integration and pages queues that are not high urgency
  'feature_category=continuous_integration,pages&urgency!=high',
  # Run all queues
  '*'
]
```

## Ignore all import queues

When [importing from GitHub](../../user/project/import/github.md) or
other sources, Sidekiq might use all of its resources to perform those
operations. To set up two separate `sidekiq-cluster` processes, where
one only processes imports and the other processes all other queues:

1. Edit `/etc/gitlab/gitlab.rb` and add:

   ```ruby
   sidekiq['enable'] = true
   sidekiq['queue_selector'] = true
   sidekiq['queue_groups'] = [
     "feature_category=importers",
     "feature_category!=importers"
   ]
   ```

1. Save the file and reconfigure GitLab for the changes to take effect:

   ```shell
   sudo gitlab-ctl reconfigure
   ```

## Number of threads

By default each process defined under `sidekiq` starts with a
number of threads that equals the number of queues, plus one spare thread.
For example, a process that handles the `process_commit` and `post_receive`
queues uses three threads in total.

These threads run inside a single Ruby process, and each process
can only use a single CPU core. The usefulness of threading depends
on the work having some external dependencies to wait on, like database queries or
HTTP requests. Most Sidekiq deployments benefit from this threading, and when
running fewer queues in a process, increasing the thread count might be
even more desirable to make the most effective use of CPU resources.

### Manage thread counts explicitly

The correct maximum thread count (also called concurrency) depends on the workload.
Typical values range from `1` for highly CPU-bound tasks to `15` or higher for mixed
low-priority work. A reasonable starting range is `15` to `25` for a non-specialized
deployment.

You can find example values used by GitLab.com by searching for `concurrency:` in
[the Helm charts](https://gitlab.com/gitlab-com/gl-infra/k8s-workloads/gitlab-com/-/blob/master/releases/gitlab/values/gprd.yaml.gotmpl).
The values vary according to the work each specific deployment of Sidekiq does.
Any other specialized deployments with processes dedicated to specific queues should
have the concurrency tuned according to:

- The CPU usage of each type of process.
- The throughput achieved.

Each thread requires a Redis connection, so adding threads may increase Redis
latency and potentially cause client timeouts. See the
[Sidekiq documentation about Redis](https://github.com/mperham/sidekiq/wiki/Using-Redis) for more
details.

#### When running Sidekiq cluster (default)

Running Sidekiq cluster is the default in GitLab 13.0 and later.

1. Edit `/etc/gitlab/gitlab.rb` and add:

   ```ruby
   sidekiq['min_concurrency'] = 15
   sidekiq['max_concurrency'] = 25
   ```

1. Save the file and reconfigure GitLab for the changes to take effect:

   ```shell
   sudo gitlab-ctl reconfigure
   ```

`min_concurrency` and `max_concurrency` are independent; one can be set without
the other. Setting `min_concurrency` to `0` disables the limit.

For each queue group, let `N` be one more than the number of queues. The
concurrency is set to:

1. `N`, if it's between `min_concurrency` and `max_concurrency`.
1. `max_concurrency`, if `N` exceeds this value.
1. `min_concurrency`, if `N` is less than this value.

If `min_concurrency` is equal to `max_concurrency`, then this value is used
regardless of the number of queues.

When `min_concurrency` is greater than `max_concurrency`, it is treated as
being equal to `max_concurrency`.

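The clamping rules above can be expressed as a short sketch. This is an
illustration of the documented behavior, not GitLab's actual implementation:

```ruby
# Derive the effective concurrency for one queue group.
def effective_concurrency(queue_count, min_concurrency, max_concurrency)
  n = queue_count + 1                                    # one thread per queue, plus one spare
  min = min_concurrency
  min = max_concurrency if max_concurrency > 0 && min > max_concurrency # min above max acts as max
  n = max_concurrency if max_concurrency > 0 && n > max_concurrency
  n = min if min > 0 && n < min                          # min of 0 disables the lower limit
  n
end

effective_concurrency(2, 15, 25)  # => 15 (N = 3 is below min_concurrency)
effective_concurrency(30, 15, 25) # => 25 (N = 31 exceeds max_concurrency)
```
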
#### When running a single Sidekiq process

Running a single Sidekiq process is the default in GitLab 12.10 and earlier.

WARNING:
Running Sidekiq directly was removed in GitLab
[14.0](https://gitlab.com/gitlab-com/gl-infra/scalability/-/issues/240).

1. Edit `/etc/gitlab/gitlab.rb` and add:

   ```ruby
   sidekiq['cluster'] = false
   sidekiq['concurrency'] = 25
   ```

1. Save the file and reconfigure GitLab for the changes to take effect:

   ```shell
   sudo gitlab-ctl reconfigure
   ```

This sets the concurrency (number of threads) for the Sidekiq process.

## Modify the check interval

To modify `sidekiq-cluster`'s health check interval for the additional Sidekiq processes:

1. Edit `/etc/gitlab/gitlab.rb` and add (the value can be any integer number of seconds):

   ```ruby
   sidekiq['interval'] = 5
   ```

1. Save the file and [reconfigure GitLab](../restart_gitlab.md#omnibus-gitlab-reconfigure) for the changes to take effect.

## Troubleshoot using the CLI

WARNING:
It's recommended to use `/etc/gitlab/gitlab.rb` to configure the Sidekiq processes.
If you experience a problem, you should contact GitLab support. Use the command
line at your own risk.

For debugging purposes, you can start extra Sidekiq processes by using the command
`/opt/gitlab/embedded/service/gitlab-rails/bin/sidekiq-cluster`. This command
takes arguments using the following syntax:

```shell
/opt/gitlab/embedded/service/gitlab-rails/bin/sidekiq-cluster [QUEUE,QUEUE,...] [QUEUE, ...]
```

Each separate argument denotes a group of queues that have to be processed by a
Sidekiq process. Multiple queues can be processed by the same process by
separating them with a comma instead of a space.

Instead of a queue, a queue namespace can also be provided, to have the process
automatically listen on all queues in that namespace without needing to
explicitly list all the queue names. For more information about queue namespaces,
see the relevant section in the
[Sidekiq development documentation](../../development/sidekiq/index.md#queue-namespaces).

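A namespace is passed just like a queue name. The namespace in this sketch is
only an illustration; check the files listed in
[Available Sidekiq queues](#available-sidekiq-queues) for the names that exist
in your version:

```shell
# One process listening on every queue in the `pipeline_processing` namespace
/opt/gitlab/embedded/service/gitlab-rails/bin/sidekiq-cluster pipeline_processing
```
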
For example, say you want to start 2 extra processes: one to process the
`process_commit` queue, and one to process the `post_receive` queue. This can be
done as follows:

```shell
/opt/gitlab/embedded/service/gitlab-rails/bin/sidekiq-cluster process_commit post_receive
```

If you instead want to start one process processing both queues, you'd use the
following syntax:

```shell
/opt/gitlab/embedded/service/gitlab-rails/bin/sidekiq-cluster process_commit,post_receive
```

If you want to have one Sidekiq process dealing with the `process_commit` and
`post_receive` queues, and one process to process the `gitlab_shell` queue,
you'd use the following:

```shell
/opt/gitlab/embedded/service/gitlab-rails/bin/sidekiq-cluster process_commit,post_receive gitlab_shell
```

### Monitor the `sidekiq-cluster` command

The `sidekiq-cluster` command does not terminate once it has started the desired
number of Sidekiq processes. Instead, the process continues running and
forwards any signals to the child processes. This allows you to stop all
Sidekiq processes by sending a signal to the `sidekiq-cluster` process,
instead of having to send it to the individual processes.

If the `sidekiq-cluster` process crashes or receives a `SIGKILL`, the child
processes terminate themselves after a few seconds. This ensures you don't
end up with zombie Sidekiq processes.

This allows you to monitor the processes by hooking up
`sidekiq-cluster` to your supervisor of choice (for example, runit).

If a child process dies, the `sidekiq-cluster` command signals all remaining
processes to terminate, and then terminates itself. This removes the need for
`sidekiq-cluster` to re-implement complex process monitoring/restarting code.
Instead, you should make sure your supervisor restarts the `sidekiq-cluster`
process whenever necessary.

### PID files

The `sidekiq-cluster` command can store its PID in a file. By default no PID
file is written, but this can be changed by passing the `--pidfile` option to
`sidekiq-cluster`. For example:

```shell
/opt/gitlab/embedded/service/gitlab-rails/bin/sidekiq-cluster --pidfile /var/run/gitlab/sidekiq_cluster.pid process_commit
```

Keep in mind that the PID file contains the PID of the `sidekiq-cluster`
command and not the PIDs of the started Sidekiq processes.

### Environment

The Rails environment can be set by passing the `--environment` flag to the
`sidekiq-cluster` command, or by setting `RAILS_ENV` to a non-empty value. The
default value can be found in `/opt/gitlab/etc/gitlab-rails/env/RAILS_ENV`.

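As an illustration, either of the following hypothetical invocations would
select the environment; the exact flag syntax is an assumption based on the
description above:

```shell
# Via the environment variable
RAILS_ENV=production /opt/gitlab/embedded/service/gitlab-rails/bin/sidekiq-cluster process_commit

# Via the flag
/opt/gitlab/embedded/service/gitlab-rails/bin/sidekiq-cluster --environment production process_commit
```
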
@ -0,0 +1,180 @@

---
stage: Systems
group: Distribution
info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://about.gitlab.com/handbook/engineering/ux/technical-writing/#assignments
---

# Queue routing rules **(FREE SELF)**

When the number of Sidekiq jobs increases to a certain scale, the system faces
some scalability issues. One of them is that the length of the queue tends to get
longer. High-urgency jobs have to wait longer until other less urgent jobs
finish. This head-of-line blocking situation may eventually affect the
responsiveness of the system, especially for critical actions. In another scenario,
the performance of some jobs is degraded due to other long-running or CPU-intensive jobs
(computing or rendering ones) on the same machine.

To counter the aforementioned issues, one effective solution is to split
Sidekiq jobs into different queues and assign machines handling each queue
exclusively. For example, all CPU-intensive jobs could be routed to the
`cpu-bound` queue and handled by a fleet of CPU optimized instances. The queue
topology differs between companies depending on the workloads and usage
patterns. Therefore, GitLab supports a flexible mechanism for the
administrator to route the jobs based on their characteristics.

As an alternative to [Queue selector](extra_sidekiq_processes.md#queue-selector), which
configures Sidekiq cluster to listen to a specific set of workers or queues,
GitLab also supports routing a job from a worker to the desired queue when it
is scheduled. Sidekiq clients try to match a job against a configured list of
routing rules. Rules are evaluated from first to last, and as soon as a match
is found for a given worker, processing stops for that worker (first match wins).
If the worker doesn't match any rule, it falls back to the queue name generated
from the worker name.

By default, if the routing rules are not configured (or denoted with an empty
array), all the jobs are routed to the queue generated from the worker name.

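The evaluation order can be illustrated with a schematic sketch. This is not
GitLab's implementation; `matches?` and `generated_queue_name` are hypothetical
helpers standing in for the worker matching query evaluation described below:

```ruby
# First-match-wins routing over an ordered list of [query, queue_name] tuples.
def route(worker, routing_rules)
  routing_rules.each do |query, queue_name|
    next unless matches?(worker, query)
    # nil or '' falls back to the queue generated from the worker name
    return queue_name.to_s.empty? ? generated_queue_name(worker) : queue_name
  end
  generated_queue_name(worker) # no rule matched
end
```
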
## Example configuration

In `/etc/gitlab/gitlab.rb`:

```ruby
sidekiq['routing_rules'] = [
  # Do not re-route workers that require their own queue
  ['tags=needs_own_queue', nil],
  # Route all non-CPU-bound workers that are high urgency to `high-urgency` queue
  ['resource_boundary!=cpu&urgency=high', 'high-urgency'],
  # Route all database, gitaly and global search workers that are throttled to `throttled` queue
  ['feature_category=database,gitaly,global_search&urgency=throttled', 'throttled'],
  # Route all workers having contact with the outside world to a `network-intensive` queue
  ['has_external_dependencies=true|feature_category=hooks|tags=network', 'network-intensive'],
  # Route all import workers to the queues generated by the worker name, for
  # example, JiraImportWorker to `jira_import`, SVNWorker to `svn_worker`
  ['feature_category=import', nil],
  # Wildcard matching, route the rest to `default` queue
  ['*', 'default']
]
```

The routing rules list is an ordered array of tuples of a query and a
corresponding queue:

- The query follows the [worker matching query](#worker-matching-query) syntax.
- The `<queue_name>` must be a valid Sidekiq queue name. If the queue name
  is `nil`, or an empty string, the worker is routed to the queue generated
  by the name of the worker instead.

The query supports wildcard matching `*`, which matches all workers. As a
result, the wildcard query must stay at the end of the list or the rules after it
are ignored.

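For example, in the following hypothetical configuration the second rule is
never evaluated, because the wildcard rule before it already matches every
worker:

```ruby
sidekiq['routing_rules'] = [
  ['*', 'default'],
  ['urgency=high', 'high-urgency'] # ignored: listed after the wildcard
]
```
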
NOTE:
Mixing queue routing rules and queue selectors requires care to
ensure all jobs are scheduled and picked up by the appropriate Sidekiq
workers.

## Worker matching query

GitLab provides a query syntax to match a worker based on its
attributes. This query syntax is employed by both
[Queue routing rules](#queue-routing-rules) and
[Queue selector](extra_sidekiq_processes.md#queue-selector). A query includes two
components:

- Attributes that can be selected.
- Operators used to construct a query.

### Available attributes

> [Introduced](https://gitlab.com/gitlab-com/gl-infra/scalability/-/issues/261) in GitLab 13.1 (`tags`).

The queue matching query works on the worker attributes described in the
[Sidekiq style guide](../../development/sidekiq/index.md). We support querying
based on a subset of worker attributes:

- `feature_category` - the
  [GitLab feature category](https://about.gitlab.com/direction/maturity/#category-maturity) the
  queue belongs to. For example, the `merge` queue belongs to the
  `source_code_management` category.
- `has_external_dependencies` - whether or not the queue connects to external
  services. For example, all importers have this set to `true`.
- `urgency` - how important it is that this queue's jobs run
  quickly. Can be `high`, `low`, or `throttled`. For example, the
  `authorized_projects` queue is used to refresh user permissions, and
  is `high` urgency.
- `worker_name` - the worker name. Use this attribute to select a specific worker.
- `name` - the queue name generated from the worker name. Use this attribute to select a specific queue. Because this is generated from
  the worker name, it does not change based on the result of other routing
  rules.
- `resource_boundary` - if the queue is bound by `cpu`, `memory`, or
  `unknown`. For example, the `ProjectExportWorker` is memory bound as it has
  to load data in memory before saving it for export.
- `tags` - short-lived annotations for queues. These are expected to frequently
  change from release to release, and may be removed entirely.

`has_external_dependencies` is a boolean attribute: only the exact
string `true` is considered true, and everything else is considered
false.

`tags` is a set, which means that `=` checks for intersecting sets, and
`!=` checks for disjoint sets. For example, `tags=a,b` selects queues
that have tags `a`, `b`, or both. `tags!=a,b` selects queues that have
neither of those tags.

The attributes of each worker are hard-coded in the source code. For
convenience, we generate a
[list of all available attributes in GitLab Community Edition](https://gitlab.com/gitlab-org/gitlab/-/blob/master/app/workers/all_queues.yml)
and a
[list of all available attributes in GitLab Enterprise Edition](https://gitlab.com/gitlab-org/gitlab/-/blob/master/ee/app/workers/all_queues.yml).

### Available operators

`queue_selector` supports the following operators, listed from highest
to lowest precedence:

- `|` - the logical `OR` operator. For example, `query_a|query_b` (where `query_a`
  and `query_b` are queries made up of the other operators here) includes
  queues that match either query.
- `&` - the logical `AND` operator. For example, `query_a&query_b` (where
  `query_a` and `query_b` are queries made up of the other operators here)
  only includes queues that match both queries.
- `!=` - the `NOT IN` operator. For example, `feature_category!=issue_tracking`
  excludes all queues from the `issue_tracking` feature category.
- `=` - the `IN` operator. For example, `resource_boundary=cpu` includes all
  queues that are CPU bound.
- `,` - the concatenate set operator. For example,
  `feature_category=continuous_integration,pages` includes all queues from
  either the `continuous_integration` category or the `pages` category. The
  same selection is possible using the `OR` operator, but `,` is more concise
  and has lower precedence.

The operator precedence for this syntax is fixed: it's not possible to make `AND`
have higher precedence than `OR`.

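As an illustration, the operators compose into a single query string. Assuming
`queue_selector` is enabled, the following hypothetical queue group combines
`=`, `,`, `&`, and `!=`:

```ruby
sidekiq['queue_groups'] = [
  # Non-CPU-bound, high-urgency workers from either the
  # continuous_integration or pages feature categories
  'feature_category=continuous_integration,pages&resource_boundary!=cpu&urgency=high'
]
```
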
[In GitLab 12.9](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/26594) and
later, as with the standard queue group syntax above, a single `*` as the
entire queue group selects all queues.

### Migration

After the Sidekiq routing rules are changed, administrators must take care
with the migration to avoid losing jobs entirely, especially in a system with
long queues of jobs. The migration can be done by following the migration steps
mentioned in [Sidekiq job migration](sidekiq_job_migration.md).

### Workers that cannot be migrated

Some workers cannot share a queue with other workers - typically because
they check the size of their own queue - and so must be excluded from
this process. We recommend excluding these from any further worker
routing by adding a rule to keep them in their own queue, for example:

```ruby
sidekiq['routing_rules'] = [
  ['tags=needs_own_queue', nil],
  # ...
]
```

These queues must also be included in at least one
[Sidekiq queue group](extra_sidekiq_processes.md#start-multiple-processes).

@ -0,0 +1,403 @@

---
stage: Systems
group: Distribution
info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://about.gitlab.com/handbook/engineering/ux/technical-writing/#assignments
---

# Configure an external Sidekiq instance **(FREE SELF)**

You can configure an external Sidekiq instance by using the Sidekiq that's bundled in the GitLab package. Sidekiq requires connection to the Redis,
PostgreSQL, and Gitaly instances.

## Configure TCP access for PostgreSQL, Gitaly, and Redis

By default, GitLab uses UNIX sockets and is not set up to communicate via TCP. To change this:

1. Edit the `/etc/gitlab/gitlab.rb` file on your GitLab instance, and add the following:

   ```ruby
   ## PostgreSQL

   # Replace POSTGRESQL_PASSWORD_HASH with a generated md5 value
   postgresql['sql_user_password'] = 'POSTGRESQL_PASSWORD_HASH'
   postgresql['listen_address'] = '0.0.0.0'
   postgresql['port'] = 5432

   # Add the Sidekiq nodes to PostgreSQL's trusted addresses.
   # In the following example, 10.10.1.30/32 is the private IP
   # of the Sidekiq server.
   postgresql['md5_auth_cidr_addresses'] = %w(127.0.0.1/32 10.10.1.30/32)
   postgresql['trust_auth_cidr_addresses'] = %w(127.0.0.1/32 10.10.1.30/32)

   ## Gitaly

   # Make Gitaly accept connections on all network interfaces
   gitaly['listen_addr'] = "0.0.0.0:8075"
   ## Set up the Gitaly token as a form of authentication since you are accessing Gitaly over the network
   ## https://docs.gitlab.com/ee/administration/gitaly/configure_gitaly.html#about-the-gitaly-token
   gitaly['auth_token'] = 'abc123secret'
   praefect['auth_token'] = 'abc123secret'
   gitlab_rails['gitaly_token'] = 'abc123secret'

   ## Redis configuration

   redis['bind'] = '0.0.0.0'
   redis['port'] = 6379
   # Password to authenticate Redis
   redis['password'] = 'redis-password-goes-here'
   gitlab_rails['redis_password'] = 'redis-password-goes-here'

   gitlab_rails['auto_migrate'] = false
   ```

1. Run `reconfigure`:

   ```shell
   sudo gitlab-ctl reconfigure
   ```

1. Restart the `PostgreSQL` server:

   ```shell
   sudo gitlab-ctl restart postgresql
   ```

1. After the restart, set `auto_migrate` to `true` or comment it out to use the default settings:

   ```ruby
   gitlab_rails['auto_migrate'] = true
   ```

1. Run `reconfigure` again:

   ```shell
   sudo gitlab-ctl reconfigure
   ```

## Set up Sidekiq instance
|
||||
|
||||
1. SSH into the Sidekiq server.
|
||||
|
||||
1. Confirm that you can access the PostgreSQL, Gitaly, and Redis ports:
|
||||
|
||||
```shell
|
||||
telnet <GitLab host> 5432 # PostgreSQL
|
||||
telnet <GitLab host> 8075 # Gitaly
|
||||
telnet <GitLab host> 6379 # Redis
|
||||
```
|
||||
|
||||
1. [Download and install](https://about.gitlab.com/install/) the Omnibus GitLab package
|
||||
using steps 1 and 2. **Do not complete any other steps.**
|
||||
|
||||
1. Copy the `/etc/gitlab/gitlab.rb` file from the GitLab instance and add the following settings. Make sure
|
||||
to replace the example values with your own:
|
||||
|
||||
<!--
|
||||
Updates to example must be made at:
|
||||
- https://gitlab.com/gitlab-org/gitlab/blob/master/doc/administration/sidekiq.md
|
||||
- all reference architecture pages
|
||||
-->
|
||||
|
||||
```ruby
|
||||
########################################
|
||||
##### Services Disabled ###
|
||||
########################################
|
||||
#
|
||||
# When running GitLab on just one server, you have a single `gitlab.rb`
|
||||
# to enable all services you want to run.
|
||||
# When running GitLab on N servers, you have N `gitlab.rb` files.
|
||||
# Enable only the services you want to run on each
|
||||
# specific server, while disabling all others.
|
||||
#
|
||||
gitaly['enable'] = false
|
||||
postgresql['enable'] = false
|
||||
redis['enable'] = false
|
||||
nginx['enable'] = false
|
||||
puma['enable'] = false
|
||||
gitlab_workhorse['enable'] = false
|
||||
prometheus['enable'] = false
|
||||
alertmanager['enable'] = false
|
||||
grafana['enable'] = false
|
||||
gitlab_exporter['enable'] = false
|
||||
gitlab_kas['enable'] = false
|
||||
|
||||
##
|
||||
## To maintain uniformity of links across nodes, the
|
||||
## `external_url` on the Sidekiq server should point to the external URL that users
|
||||
## use to access GitLab. This can be either:
|
||||
##
|
||||
## - The `external_url` set on your application server.
|
||||
## - The URL of an external load balancer, which routes traffic to the GitLab application server.
|
||||
##
|
||||
external_url 'https://gitlab.example.com'
|
||||
|
||||
# Configure the gitlab-shell API callback URL. Without this, `git push` will
|
||||
# fail. This can be your 'front door' GitLab URL or an internal load
|
||||
# balancer.
|
||||
gitlab_rails['internal_api_url'] = 'GITLAB_URL'
|
||||
gitlab_shell['secret_token'] = 'SHELL_TOKEN'
|
||||
|
||||
########################################
|
||||
#### Redis ###
|
||||
########################################
|
||||
|
||||
## Must be the same in every sentinel node.
|
||||
redis['master_name'] = 'gitlab-redis' # Required if you have set up a Redis cluster
|
||||
## The same password for Redis authentication you set up for the master node.
|
||||
redis['master_password'] = '<redis_master_password>'
|
||||
|
||||
### If Redis is running on the main GitLab instance and you have opened the TCP port as described above, add the following
|
||||
gitlab_rails['redis_host'] = '<gitlab_host>'
|
||||
gitlab_rails['redis_port'] = 6379
|
||||
|
||||
#######################################
|
||||
### Gitaly ###
|
||||
#######################################
|
||||
|
||||
## Replace <gitaly_token> with the one you set up, see
|
||||
## https://docs.gitlab.com/ee/administration/gitaly/configure_gitaly.html#about-the-gitaly-token
|
||||
git_data_dirs({
|
||||
"default" => {
|
||||
"gitaly_address" => "tcp://<gitlab_host>:8075",
|
||||
"gitaly_token" => "<gitaly_token>"
|
||||
}
|
||||
})
|
||||
|
||||
#######################################
|
||||
### Postgres ###
|
||||
#######################################
|
||||
|
||||
# Replace <database_host> and <database_password>
|
||||
gitlab_rails['db_host'] = '<database_host>'
|
||||
gitlab_rails['db_port'] = '5432'
|
||||
gitlab_rails['db_password'] = '<database_password>'
|
||||
## Prevent database migrations from running on upgrade automatically
|
||||
gitlab_rails['auto_migrate'] = false
|
||||
|
||||
#######################################
|
||||
### Sidekiq configuration ###
|
||||
#######################################
|
||||
sidekiq['enable'] = true
|
||||
sidekiq['listen_address'] = "0.0.0.0"
|
||||
|
||||
## Set the number of Sidekiq queue processes to the number of available CPUs
|
||||
sidekiq['queue_groups'] = ['*'] * 4
|
||||
|
||||
## Set the number of Sidekiq threads per queue process to the recommended number of 10
|
||||
sidekiq['max_concurrency'] = 10
|
||||
```
|
||||
|
||||
1. Copy the `/etc/gitlab/gitlab-secrets.json` file from the GitLab instance and replace the file in the Sidekiq instance.
|
||||
|
||||
1. Reconfigure GitLab:
|
||||
|
||||
```shell
|
||||
sudo gitlab-ctl reconfigure
|
||||
```
|
||||
|
||||
1. Restart the Sidekiq instance after completing the process and finishing the database migrations.
|
||||
|
||||
## Configure multiple Sidekiq nodes with shared storage
|
||||
|
||||
If you run multiple Sidekiq nodes with a shared file storage, such as NFS, you must
|
||||
specify the UIDs and GIDs to ensure they match between servers. Specifying the UIDs
|
||||
and GIDs prevents permissions issues in the file system. This advice is similar to the
|
||||
[advice for Geo setups](../geo/replication/multiple_servers.md#step-4-configure-the-frontend-application-nodes-on-the-geo-secondary-site).
|
||||
|
||||
To set up multiple Sidekiq nodes:
|
||||
|
||||
1. Edit `/etc/gitlab/gitlab.rb`:
|
||||
|
||||
```ruby
|
||||
user['uid'] = 9000
|
||||
user['gid'] = 9000
|
||||
web_server['uid'] = 9001
|
||||
web_server['gid'] = 9001
|
||||
registry['uid'] = 9002
|
||||
registry['gid'] = 9002
|
||||
```
|
||||
|
||||
1. Reconfigure GitLab:
|
||||
|
||||
```shell
|
||||
sudo gitlab-ctl reconfigure
|
||||
```
|
||||
|
||||
## Configure the Container Registry when using an external Sidekiq
|
||||
|
||||
If you're using the Container Registry and it's running on a different
|
||||
node than Sidekiq, follow the steps below.
|
||||
|
||||
1. Edit `/etc/gitlab/gitlab.rb`, and configure the registry URL:
|
||||
|
||||
```ruby
|
||||
registry_external_url 'https://registry.example.com'
|
||||
gitlab_rails['registry_api_url'] = "https://registry.example.com"
|
||||
```
|
||||
|
||||
1. Reconfigure GitLab:
|
||||
|
||||
```shell
|
||||
sudo gitlab-ctl reconfigure
|
||||
```
|
||||
|
||||
1. On the instance where the Container Registry is hosted, copy the `registry.key`
|
||||
file to the Sidekiq node.
|
||||
|
||||
## Configure the Sidekiq metrics server
|
||||
|
||||
If you want to collect Sidekiq metrics, enable the Sidekiq metrics server.
|
||||
To make metrics available from `localhost:8082/metrics`, configure the metrics server:
|
||||
|
||||
1. Edit `/etc/gitlab/gitlab.rb`:
|
||||
|
||||
```ruby
|
||||
sidekiq['metrics_enabled'] = true
|
||||
sidekiq['listen_address'] = "localhost"
|
||||
sidekiq['listen_port'] = "8082"
|
||||
|
||||
# Optionally write the metrics server logs to log/sidekiq_exporter.log
|
||||
sidekiq['exporter_log_enabled'] = true
|
||||
```
|
||||
|
||||
1. Reconfigure GitLab:
|
||||
|
||||
```shell
|
||||
sudo gitlab-ctl reconfigure
|
||||
```
|
||||
|
||||
### Enable HTTPS
|
||||
|
||||
> [Introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/364771) in GitLab 15.2.
|
||||
|
||||
To serve metrics via HTTPS instead of HTTP, enable TLS in the exporter settings:
|
||||
|
||||
1. Edit `/etc/gitlab/gitlab.rb` to add (or find and uncomment) the following lines:
|
||||
|
||||
```ruby
|
||||
sidekiq['exporter_tls_enabled'] = true
|
||||
sidekiq['exporter_tls_cert_path'] = "/path/to/certificate.pem"
|
||||
sidekiq['exporter_tls_key_path'] = "/path/to/private-key.pem"
|
||||
```
|
||||
|
||||
1. Save the file and [reconfigure GitLab](../restart_gitlab.md#omnibus-gitlab-reconfigure)
|
||||
for the changes to take effect.
|
||||
|
||||
When TLS is enabled, the same `port` and `address` are used as described above.
|
||||
The metrics server cannot serve both HTTP and HTTPS at the same time.
|
||||
|
||||
## Configure health checks
|
||||
|
||||
If you use health check probes to observe Sidekiq, enable the Sidekiq health check server.
|
||||
To make health checks available from `localhost:8092`:
|
||||
|
||||
1. Edit `/etc/gitlab/gitlab.rb`:
|
||||
|
||||
```ruby
|
||||
sidekiq['health_checks_enabled'] = true
|
||||
sidekiq['health_checks_listen_address'] = "localhost"
|
||||
sidekiq['health_checks_listen_port'] = "8092"
|
||||
```
|
||||
|
||||
1. Reconfigure GitLab:
|
||||
|
||||
```shell
|
||||
sudo gitlab-ctl reconfigure
|
||||
```
|
||||
|
||||
For more information about health checks, see the [Sidekiq health check page](sidekiq_health_check.md).
|
||||
|
||||
## Configure LDAP and user or group synchronization
|
||||
|
||||
If you use LDAP for user and group management, you must add the LDAP configuration to your Sidekiq node as well as the LDAP
|
||||
synchronization worker. If the LDAP configuration and LDAP synchronization worker are not applied to your Sidekiq node,
|
||||
users and groups are not automatically synchronized.
|
||||
|
||||
For more information about configuring LDAP for GitLab, see:
|
||||
|
||||
- [GitLab LDAP configuration documentation](../auth/ldap/index.md#configure-ldap)
|
||||
- [LDAP synchronization documentation](../auth/ldap/ldap_synchronization.md#adjust-ldap-user-sync-schedule)
|
||||
|
||||
To enable LDAP with the synchronization worker for Sidekiq:
|
||||
|
||||
1. Edit `/etc/gitlab/gitlab.rb`:
|
||||
|
||||
```ruby
|
||||
gitlab_rails['ldap_enabled'] = true
|
||||
gitlab_rails['prevent_ldap_sign_in'] = false
|
||||
gitlab_rails['ldap_servers'] = {
|
||||
'main' => {
|
||||
'label' => 'LDAP',
|
||||
'host' => 'ldap.mydomain.com',
|
||||
'port' => 389,
|
||||
'uid' => 'sAMAccountName',
|
||||
'encryption' => 'simple_tls',
|
||||
'verify_certificates' => true,
|
||||
'bind_dn' => '_the_full_dn_of_the_user_you_will_bind_with',
|
||||
'password' => '_the_password_of_the_bind_user',
|
||||
'tls_options' => {
|
||||
'ca_file' => '',
|
||||
'ssl_version' => '',
|
||||
'ciphers' => '',
|
||||
'cert' => '',
|
||||
'key' => ''
|
||||
},
|
||||
'timeout' => 10,
|
||||
'active_directory' => true,
|
||||
'allow_username_or_email_login' => false,
|
||||
'block_auto_created_users' => false,
|
||||
'base' => 'dc=example,dc=com',
|
||||
'user_filter' => '',
|
||||
'attributes' => {
|
||||
'username' => ['uid', 'userid', 'sAMAccountName'],
|
||||
'email' => ['mail', 'email', 'userPrincipalName'],
|
||||
'name' => 'cn',
|
||||
'first_name' => 'givenName',
|
||||
'last_name' => 'sn'
|
||||
},
|
||||
'lowercase_usernames' => false,
|
||||
|
||||
# Enterprise Edition only
|
||||
# https://docs.gitlab.com/ee/administration/auth/ldap/ldap_synchronization.html
|
||||
'group_base' => '',
|
||||
'admin_group' => '',
|
||||
'external_groups' => [],
|
||||
'sync_ssh_keys' => false
|
||||
}
|
||||
}
|
||||
gitlab_rails['ldap_sync_worker_cron'] = "0 */12 * * *"
|
||||
```
|
||||
|
||||
1. Reconfigure GitLab:
|
||||
|
||||
```shell
|
||||
sudo gitlab-ctl reconfigure
|
||||
```
|
||||
|
||||
## Disable Rugged
|
||||
|
||||
Calls into Rugged, Ruby bindings for `libgit2`, [lock the Sidekiq process's GVL](https://silverhammermba.github.io/emberb/c/#c-in-ruby-threads),
|
||||
blocking all jobs on that worker from proceeding. If Rugged calls performed by Sidekiq are slow, this can cause significant delays in
|
||||
background task processing.
|
||||
|
||||
By default, Rugged is used when Git repository data is stored on local storage or on an NFS mount.
|
||||
[Using Rugged is recommended when using NFS](../nfs.md#improving-nfs-performance-with-gitlab), but if
|
||||
you are using local storage, disabling Rugged can improve Sidekiq performance:
|
||||
|
||||
```shell
|
||||
sudo gitlab-rake gitlab:features:disable_rugged
|
||||
```
|
||||
|
||||
## Related topics
|
||||
|
||||
- [Extra Sidekiq processes](extra_sidekiq_processes.md)
|
||||
- [Extra Sidekiq routing](extra_sidekiq_routing.md)
|
||||
- [Sidekiq health checks](sidekiq_health_check.md)
|
||||
- [Using the GitLab-Sidekiq chart](https://docs.gitlab.com/charts/charts/gitlab/sidekiq/)
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
See our [administrator guide to troubleshooting Sidekiq](sidekiq_troubleshooting.md).
|
||||
|
|
@ -0,0 +1,58 @@
|
|||
---
|
||||
stage: Systems
|
||||
group: Distribution
|
||||
info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://about.gitlab.com/handbook/engineering/ux/technical-writing/#assignments
|
||||
---
|
||||
|
||||
# Sidekiq Health Check **(FREE SELF)**
|
||||
|
||||
GitLab provides liveness and readiness probes to indicate service health and
|
||||
reachability to the Sidekiq cluster. These endpoints
|
||||
[can be provided to schedulers like Kubernetes](https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/)
|
||||
to hold traffic until the system is ready or restart the container as needed.
|
||||
|
||||
The health check server can be set up when [configuring Sidekiq](index.md).
|
||||
|
||||
## Readiness
|
||||
|
||||
The readiness probe checks whether the Sidekiq workers are ready to process jobs.
|
||||
|
||||
```plaintext
|
||||
GET /readiness
|
||||
```
|
||||
|
||||
If the server is bound to `localhost:8092`, the process cluster can be probed for readiness as follows:
|
||||
|
||||
```shell
|
||||
curl "http://localhost:8092/readiness"
|
||||
```
|
||||
|
||||
On success, the endpoint returns a `200` HTTP status code, and a response like the following:
|
||||
|
||||
```json
|
||||
{
|
||||
"status": "ok"
|
||||
}
|
||||
```
|
||||
|
||||
## Liveness
|
||||
|
||||
The liveness probe checks whether the Sidekiq cluster is running.
|
||||
|
||||
```plaintext
|
||||
GET /liveness
|
||||
```
|
||||
|
||||
If the server is bound to `localhost:8092`, the process cluster can be probed for liveness as follows:
|
||||
|
||||
```shell
|
||||
curl "http://localhost:8092/liveness"
|
||||
```
|
||||
|
||||
On success, the endpoint returns a `200` HTTP status code, and a response like the following:
|
||||
|
||||
```json
|
||||
{
|
||||
"status": "ok"
|
||||
}
|
||||
```
|
||||
|
|
@ -0,0 +1,40 @@
|
|||
---
|
||||
stage: none
|
||||
group: unassigned
|
||||
info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://about.gitlab.com/handbook/engineering/ux/technical-writing/#assignments
|
||||
---
|
||||
|
||||
# Sidekiq job migration **(FREE SELF)**
|
||||
|
||||
WARNING:
|
||||
This operation should be very uncommon. We do not recommend it for the vast majority of GitLab instances.
|
||||
|
||||
Sidekiq routing rules allow administrators to re-route certain background jobs from their regular queue to an alternative queue. By default, GitLab uses one queue per background job type. GitLab has over 400 background job types, and so correspondingly it has over 400 queues.
|
||||
|
||||
Most administrators do not need to change this setting. In some cases with particularly large background job processing workloads, Redis performance may suffer due to the number of queues that GitLab listens to.
|
||||
|
||||
If the Sidekiq routing rules are changed, administrators need to take care with the migration to avoid losing jobs entirely. The basic migration steps are:
|
||||
|
||||
1. Listen to both the old and new queues.
|
||||
1. Update the routing rules (see the sketch after this list).
|
||||
1. Wait until there are no publishers dispatching jobs to the old queues.
|
||||
1. Run the [Rake tasks for future jobs](#future-jobs).
|
||||
1. Wait for the old queues to be empty.
|
||||
1. Stop listening to the old queues.
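As a sketch of step 2, suppose you move from per-worker queues to a single `default` queue; this particular change is illustrative, not a recommendation:

```ruby
# /etc/gitlab/gitlab.rb -- illustrative routing rules change
#
# Before: every job type keeps its own queue.
# sidekiq['routing_rules'] = [['*', nil]]
#
# After: route every job to a single `default` queue.
sidekiq['routing_rules'] = [
  ['*', 'default']
]
```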
|
||||
|
||||
## Future jobs
|
||||
|
||||
Step 4 involves rewriting some Sidekiq job data for jobs that are already stored in Redis, but due to run in the future. There are two sets of future jobs: scheduled jobs and jobs to be retried. We provide a separate Rake task to migrate each set:
|
||||
|
||||
- `gitlab:sidekiq:migrate_jobs:retry` for jobs to be retried.
|
||||
- `gitlab:sidekiq:migrate_jobs:schedule` for scheduled jobs.
|
||||
|
||||
Most of the time, running both at the same time is the correct choice. There are two separate tasks to allow for more fine-grained control where needed. To run both at once:
|
||||
|
||||
```shell
|
||||
# omnibus-gitlab
|
||||
sudo gitlab-rake gitlab:sidekiq:migrate_jobs:retry gitlab:sidekiq:migrate_jobs:schedule
|
||||
|
||||
# source installations
|
||||
bundle exec rake gitlab:sidekiq:migrate_jobs:retry gitlab:sidekiq:migrate_jobs:schedule RAILS_ENV=production
|
||||
```
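To see how many future jobs are present before or after running the tasks, you can inspect the two sets from `gitlab-rails console`. This is a sketch using the standard Sidekiq API:

```ruby
require 'sidekiq/api'

Sidekiq::ScheduledSet.new.size # number of scheduled jobs
Sidekiq::RetrySet.new.size     # number of jobs waiting to be retried
```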
|
||||
|
|
@ -0,0 +1,82 @@
|
|||
---
|
||||
stage: Data Stores
|
||||
group: Memory
|
||||
info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://about.gitlab.com/handbook/engineering/ux/technical-writing/#assignments
|
||||
---
|
||||
|
||||
# Sidekiq MemoryKiller **(FREE SELF)**
|
||||
|
||||
The GitLab Rails application code suffers from memory leaks. For web requests
|
||||
this problem is made manageable using
|
||||
[`puma-worker-killer`](https://github.com/schneems/puma_worker_killer), which
|
||||
restarts Puma worker processes if they exceed a memory limit. The Sidekiq
|
||||
MemoryKiller applies the same approach to the Sidekiq processes used by GitLab
|
||||
to process background jobs.
|
||||
|
||||
Unlike puma-worker-killer, which is enabled by default for all
|
||||
installations of GitLab 13.0 and later, the Sidekiq MemoryKiller is enabled by default
|
||||
_only_ for Omnibus packages. The reason for this is that the MemoryKiller
|
||||
relies on runit to restart Sidekiq after a memory-induced shutdown and GitLab
|
||||
installations from source do not all use runit or an equivalent.
|
||||
|
||||
With the default settings, the MemoryKiller causes a Sidekiq restart no
|
||||
more often than once every 15 minutes, with the restart causing about one
|
||||
minute of delay for incoming background jobs.
|
||||
|
||||
Some background jobs rely on long-running external processes. To ensure these
|
||||
are cleanly terminated when Sidekiq is restarted, each Sidekiq process should be
|
||||
run as a process group leader (for example, using `chpst -P`). If using Omnibus or the
|
||||
`bin/background_jobs` script with `runit` installed, this is handled for you.
|
||||
|
||||
## Configuring the MemoryKiller
|
||||
|
||||
The MemoryKiller is controlled using environment variables.
|
||||
|
||||
- `SIDEKIQ_DAEMON_MEMORY_KILLER`: defaults to 1. When set to 0, the MemoryKiller
|
||||
works in _legacy_ mode. Otherwise, the MemoryKiller works in _daemon_ mode.
|
||||
|
||||
In _legacy_ mode, the MemoryKiller checks the Sidekiq process RSS
|
||||
([Resident Set Size](https://github.com/mperham/sidekiq/wiki/Memory#rss))
|
||||
after each job.
|
||||
|
||||
In _daemon_ mode, the MemoryKiller checks the Sidekiq process RSS every 3 seconds
|
||||
(defined by `SIDEKIQ_MEMORY_KILLER_CHECK_INTERVAL`).
|
||||
|
||||
- `SIDEKIQ_MEMORY_KILLER_MAX_RSS` (KB): if this variable is set, and its value is greater
|
||||
than 0, the MemoryKiller is enabled. Otherwise the MemoryKiller is disabled.
|
||||
|
||||
`SIDEKIQ_MEMORY_KILLER_MAX_RSS` defines the Sidekiq process allowed RSS.
|
||||
|
||||
In _legacy_ mode, if the Sidekiq process exceeds the allowed RSS then an irreversible
|
||||
delayed graceful restart is triggered. The restart of Sidekiq happens
|
||||
after `SIDEKIQ_MEMORY_KILLER_GRACE_TIME` seconds.
|
||||
|
||||
In _daemon_ mode, if the Sidekiq process exceeds the allowed RSS for longer than
|
||||
`SIDEKIQ_MEMORY_KILLER_GRACE_TIME`, the graceful restart is triggered. If the
|
||||
Sidekiq process goes below the allowed RSS within `SIDEKIQ_MEMORY_KILLER_GRACE_TIME`,
|
||||
the restart is aborted.
|
||||
|
||||
The default value for Omnibus packages is set
|
||||
[in the Omnibus GitLab repository](https://gitlab.com/gitlab-org/omnibus-gitlab/blob/master/files/gitlab-cookbooks/gitlab/attributes/default.rb).
|
||||
|
||||
- `SIDEKIQ_MEMORY_KILLER_HARD_LIMIT_RSS` (KB): is used by _daemon_ mode. If the Sidekiq
|
||||
process RSS (expressed in kilobytes) exceeds `SIDEKIQ_MEMORY_KILLER_HARD_LIMIT_RSS`,
|
||||
an immediate graceful restart of Sidekiq is triggered.
|
||||
|
||||
- `SIDEKIQ_MEMORY_KILLER_CHECK_INTERVAL`: used in _daemon_ mode to define how
|
||||
often to check the process RSS; defaults to 3 seconds.
|
||||
|
||||
- `SIDEKIQ_MEMORY_KILLER_GRACE_TIME`: defaults to 900 seconds (15 minutes).
|
||||
The usage of this variable is described as part of `SIDEKIQ_MEMORY_KILLER_MAX_RSS`.
|
||||
|
||||
- `SIDEKIQ_MEMORY_KILLER_SHUTDOWN_WAIT`: defaults to 30 seconds. This defines the
|
||||
maximum time allowed for all Sidekiq jobs to finish. No new jobs are accepted
|
||||
during that time, and the process exits as soon as all jobs finish.
|
||||
|
||||
If jobs do not finish during that time, the MemoryKiller interrupts all currently
|
||||
running jobs by sending `SIGTERM` to the Sidekiq process.
|
||||
|
||||
If Sidekiq does not perform the hard shutdown/restart itself,
|
||||
the Sidekiq process is forcefully terminated after
|
||||
`Sidekiq.options[:timeout] + 2` seconds. An external supervision mechanism
|
||||
(for example, runit) must restart Sidekiq afterwards.
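For reference, a hypothetical Omnibus snippet that tunes these variables might look like the following; the values are illustrative only:

```ruby
# /etc/gitlab/gitlab.rb -- illustrative values only
gitlab_rails['env'] = {
  # Trigger a delayed graceful restart above roughly 2 GB RSS.
  "SIDEKIQ_MEMORY_KILLER_MAX_RSS" => "2000000",
  # Trigger an immediate graceful restart above roughly 3 GB RSS (daemon mode).
  "SIDEKIQ_MEMORY_KILLER_HARD_LIMIT_RSS" => "3000000",
  # Wait 15 minutes before acting on a soft limit breach.
  "SIDEKIQ_MEMORY_KILLER_GRACE_TIME" => "900"
}
```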
|
||||
|
|
@ -0,0 +1,381 @@
|
|||
---
|
||||
stage: Systems
|
||||
group: Distribution
|
||||
info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://about.gitlab.com/handbook/engineering/ux/technical-writing/#assignments
|
||||
---
|
||||
|
||||
# Troubleshooting Sidekiq **(FREE SELF)**
|
||||
|
||||
Sidekiq is the background job processor GitLab uses to asynchronously run
|
||||
tasks. When things go wrong it can be difficult to troubleshoot. These
|
||||
situations also tend to be high-pressure because a production system job queue
|
||||
may be filling up. Users will notice when this happens because new branches
|
||||
may not show up and merge requests may not be updated. The following are some
|
||||
troubleshooting steps to help you diagnose the bottleneck.
|
||||
|
||||
GitLab administrators/users should consider working through these
|
||||
debug steps with GitLab Support so the backtraces can be analyzed by our team.
|
||||
It may reveal a bug or necessary improvement in GitLab.
|
||||
|
||||
In any of the backtraces, be wary of suspecting cases where every
|
||||
thread appears to be waiting in the database, Redis, or waiting to acquire
|
||||
a mutex. This **may** mean there's contention in the database, for example,
|
||||
but look for one thread that is different from the rest. This other thread
|
||||
may be using all available CPU, or holding the Ruby Global Interpreter Lock,
|
||||
preventing other threads from continuing.
|
||||
|
||||
## Log arguments to Sidekiq jobs
|
||||
|
||||
[In GitLab 13.6 and later](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/44853)
|
||||
some arguments passed to Sidekiq jobs are logged by default.
|
||||
To avoid logging sensitive information (for instance, password reset tokens),
|
||||
GitLab logs numeric arguments for all workers, with overrides for some specific
|
||||
workers where their arguments are not sensitive.
|
||||
|
||||
Example log output:
|
||||
|
||||
```json
|
||||
{"severity":"INFO","time":"2020-06-08T14:37:37.892Z","class":"AdminEmailsWorker","args":["[FILTERED]","[FILTERED]","[FILTERED]"],"retry":3,"queue":"admin_emails","backtrace":true,"jid":"9e35e2674ac7b12d123e13cc","created_at":"2020-06-08T14:37:37.373Z","meta.user":"root","meta.caller_id":"Admin::EmailsController#create","correlation_id":"37D3lArJmT1","uber-trace-id":"2d942cc98cc1b561:6dc94409cfdd4d77:9fbe19bdee865293:1","enqueued_at":"2020-06-08T14:37:37.410Z","pid":65011,"message":"AdminEmailsWorker JID-9e35e2674ac7b12d123e13cc: done: 0.48085 sec","job_status":"done","scheduling_latency_s":0.001012,"redis_calls":9,"redis_duration_s":0.004608,"redis_read_bytes":696,"redis_write_bytes":6141,"duration_s":0.48085,"cpu_s":0.308849,"completed_at":"2020-06-08T14:37:37.892Z","db_duration_s":0.010742}
|
||||
{"severity":"INFO","time":"2020-06-08T14:37:37.894Z","class":"ActiveJob::QueueAdapters::SidekiqAdapter::JobWrapper","wrapped":"ActionMailer::MailDeliveryJob","queue":"mailers","args":["[FILTERED]"],"retry":3,"backtrace":true,"jid":"e47a4f6793d475378432e3c8","created_at":"2020-06-08T14:37:37.884Z","meta.user":"root","meta.caller_id":"AdminEmailsWorker","correlation_id":"37D3lArJmT1","uber-trace-id":"2d942cc98cc1b561:29344de0f966446d:5c3b0e0e1bef987b:1","enqueued_at":"2020-06-08T14:37:37.885Z","pid":65011,"message":"ActiveJob::QueueAdapters::SidekiqAdapter::JobWrapper JID-e47a4f6793d475378432e3c8: start","job_status":"start","scheduling_latency_s":0.009473}
|
||||
{"severity":"INFO","time":"2020-06-08T14:39:50.648Z","class":"NewIssueWorker","args":["455","1"],"retry":3,"queue":"new_issue","backtrace":true,"jid":"a24af71f96fd129ec47f5d1e","created_at":"2020-06-08T14:39:50.643Z","meta.user":"root","meta.project":"h5bp/html5-boilerplate","meta.root_namespace":"h5bp","meta.caller_id":"Projects::IssuesController#create","correlation_id":"f9UCZHqhuP7","uber-trace-id":"28f65730f99f55a3:a5d2b62dec38dffc:48ddd092707fa1b7:1","enqueued_at":"2020-06-08T14:39:50.646Z","pid":65011,"message":"NewIssueWorker JID-a24af71f96fd129ec47f5d1e: start","job_status":"start","scheduling_latency_s":0.001144}
|
||||
```
|
||||
|
||||
When using [Sidekiq JSON logging](../logs/index.md#sidekiqlog),
|
||||
arguments logs are limited to a maximum size of 10 kilobytes of text;
|
||||
any arguments after this limit are discarded and replaced with a
|
||||
single argument containing the string `"..."`.
|
||||
|
||||
You can set `SIDEKIQ_LOG_ARGUMENTS` [environment variable](https://docs.gitlab.com/omnibus/settings/environment-variables.html)
|
||||
to `0` (false) to disable argument logging.
|
||||
|
||||
Example:
|
||||
|
||||
```ruby
|
||||
gitlab_rails['env'] = {"SIDEKIQ_LOG_ARGUMENTS" => "0"}
|
||||
```
|
||||
|
||||
In GitLab 13.5 and earlier, set `SIDEKIQ_LOG_ARGUMENTS` to `1` to start logging arguments passed to Sidekiq.
|
||||
|
||||
## Thread dump
|
||||
|
||||
Send the Sidekiq process ID the `TTIN` signal to output thread
|
||||
backtraces in the log file.
|
||||
|
||||
```shell
|
||||
kill -TTIN <sidekiq_pid>
|
||||
```
|
||||
|
||||
Check in `/var/log/gitlab/sidekiq/current` or `$GITLAB_HOME/log/sidekiq.log` for
|
||||
the backtrace output. The backtraces are lengthy and generally start with
|
||||
several `WARN` level messages. Here's an example of a single thread's backtrace:
|
||||
|
||||
```plaintext
|
||||
2016-04-13T06:21:20.022Z 31517 TID-orn4urby0 WARN: ActiveRecord::RecordNotFound: Couldn't find Note with 'id'=3375386
|
||||
2016-04-13T06:21:20.022Z 31517 TID-orn4urby0 WARN: /opt/gitlab/embedded/service/gem/ruby/2.1.0/gems/activerecord-4.2.5.2/lib/active_record/core.rb:155:in `find'
|
||||
/opt/gitlab/embedded/service/gitlab-rails/app/workers/new_note_worker.rb:7:in `perform'
|
||||
/opt/gitlab/embedded/service/gem/ruby/2.1.0/gems/sidekiq-4.0.1/lib/sidekiq/processor.rb:150:in `execute_job'
|
||||
/opt/gitlab/embedded/service/gem/ruby/2.1.0/gems/sidekiq-4.0.1/lib/sidekiq/processor.rb:132:in `block (2 levels) in process'
|
||||
/opt/gitlab/embedded/service/gem/ruby/2.1.0/gems/sidekiq-4.0.1/lib/sidekiq/middleware/chain.rb:127:in `block in invoke'
|
||||
/opt/gitlab/embedded/service/gitlab-rails/lib/gitlab/sidekiq_middleware/memory_killer.rb:17:in `call'
|
||||
/opt/gitlab/embedded/service/gem/ruby/2.1.0/gems/sidekiq-4.0.1/lib/sidekiq/middleware/chain.rb:129:in `block in invoke'
|
||||
/opt/gitlab/embedded/service/gitlab-rails/lib/gitlab/sidekiq_middleware/arguments_logger.rb:6:in `call'
|
||||
...
|
||||
```
|
||||
|
||||
In some cases Sidekiq may be hung and unable to respond to the `TTIN` signal.
|
||||
Move on to other troubleshooting methods if this happens.
|
||||
|
||||
## Ruby profiling with `rbspy`
|
||||
|
||||
[rbspy](https://rbspy.github.io) is an easy-to-use, low-overhead Ruby profiler that can be used to create
|
||||
flamegraph-style diagrams of CPU usage by Ruby processes.
|
||||
|
||||
No changes to GitLab are required to use it and it has no dependencies. To install it:
|
||||
|
||||
1. Download the binary from the [`rbspy` releases page](https://github.com/rbspy/rbspy/releases).
|
||||
1. Make the binary executable.
|
||||
|
||||
To profile a Sidekiq worker for one minute, run:
|
||||
|
||||
```shell
|
||||
sudo ./rbspy record --pid <sidekiq_pid> --duration 60 --file /tmp/sidekiq_profile.svg
|
||||
```
|
||||
|
||||

|
||||
|
||||
In this example of a flamegraph generated by `rbspy`, almost all of the Sidekiq process's time is spent in `rev_parse`, a native C
|
||||
function in Rugged. In the stack, we can see `rev_parse` is being called by the `ExpirePipelineCacheWorker`.
|
||||
|
||||
## Process profiling with `perf`
|
||||
|
||||
Linux has a process profiling tool called `perf` that is helpful when a certain
|
||||
process is eating up a lot of CPU. If you see high CPU usage and Sidekiq isn't
|
||||
responding to the `TTIN` signal, this is a good next step.
|
||||
|
||||
If `perf` is not installed on your system, install it with `apt-get` or `yum`:
|
||||
|
||||
```shell
|
||||
# Debian
|
||||
sudo apt-get install linux-tools
|
||||
|
||||
# Ubuntu (may require these additional Kernel packages)
|
||||
sudo apt-get install linux-tools-common linux-tools-generic linux-tools-`uname -r`
|
||||
|
||||
# Red Hat/CentOS
|
||||
sudo yum install perf
|
||||
```
|
||||
|
||||
Run `perf` against the Sidekiq PID:
|
||||
|
||||
```shell
|
||||
sudo perf record -p <sidekiq_pid>
|
||||
```
|
||||
|
||||
Let this run for 30-60 seconds and then press Ctrl-C. Then view the `perf` report:
|
||||
|
||||
```shell
|
||||
$ sudo perf report
|
||||
|
||||
# Sample output
|
||||
Samples: 348K of event 'cycles', Event count (approx.): 280908431073
|
||||
97.69% ruby nokogiri.so [.] xmlXPathNodeSetMergeAndClear
|
||||
0.18% ruby libruby.so.2.1.0 [.] objspace_malloc_increase
|
||||
0.12% ruby libc-2.12.so [.] _int_malloc
|
||||
0.10% ruby libc-2.12.so [.] _int_free
|
||||
```
|
||||
|
||||
Above you see sample output from a `perf` report. It shows that 97% of the CPU is
|
||||
being spent inside Nokogiri and `xmlXPathNodeSetMergeAndClear`. For something
|
||||
this obvious you should then go investigate what job in GitLab would use
|
||||
Nokogiri and XPath. Combine with `TTIN` or `gdb` output to show the
|
||||
corresponding Ruby code where this is happening.
|
||||
|
||||
## The GNU Project Debugger (`gdb`)
|
||||
|
||||
`gdb` can be another effective tool for debugging Sidekiq. It gives you a slightly
|
||||
more interactive way to look at each thread and see what's causing problems.
|
||||
|
||||
Attaching to a process with `gdb` suspends the normal operation
|
||||
of the process (Sidekiq does not process jobs while `gdb` is attached).
|
||||
|
||||
Start by attaching to the Sidekiq PID:
|
||||
|
||||
```shell
|
||||
gdb -p <sidekiq_pid>
|
||||
```
|
||||
|
||||
Then gather information on all the threads:
|
||||
|
||||
```plaintext
|
||||
info threads
|
||||
|
||||
# Example output
|
||||
30 Thread 0x7fe5fbd63700 (LWP 26060) 0x0000003f7cadf113 in poll () from /lib64/libc.so.6
|
||||
29 Thread 0x7fe5f2b3b700 (LWP 26533) 0x0000003f7ce0b68c in pthread_cond_wait@@GLIBC_2.3.2 () from /lib64/libpthread.so.0
|
||||
28 Thread 0x7fe5f2a3a700 (LWP 26534) 0x0000003f7ce0ba5e in pthread_cond_timedwait@@GLIBC_2.3.2 () from /lib64/libpthread.so.0
|
||||
27 Thread 0x7fe5f2939700 (LWP 26535) 0x0000003f7ce0b68c in pthread_cond_wait@@GLIBC_2.3.2 () from /lib64/libpthread.so.0
|
||||
26 Thread 0x7fe5f2838700 (LWP 26537) 0x0000003f7ce0b68c in pthread_cond_wait@@GLIBC_2.3.2 () from /lib64/libpthread.so.0
|
||||
25 Thread 0x7fe5f2737700 (LWP 26538) 0x0000003f7ce0b68c in pthread_cond_wait@@GLIBC_2.3.2 () from /lib64/libpthread.so.0
|
||||
24 Thread 0x7fe5f2535700 (LWP 26540) 0x0000003f7ce0b68c in pthread_cond_wait@@GLIBC_2.3.2 () from /lib64/libpthread.so.0
|
||||
23 Thread 0x7fe5f2434700 (LWP 26541) 0x0000003f7ce0b68c in pthread_cond_wait@@GLIBC_2.3.2 () from /lib64/libpthread.so.0
|
||||
22 Thread 0x7fe5f2232700 (LWP 26543) 0x0000003f7ce0b68c in pthread_cond_wait@@GLIBC_2.3.2 () from /lib64/libpthread.so.0
|
||||
21 Thread 0x7fe5f2131700 (LWP 26544) 0x00007fe5f7b570f0 in xmlXPathNodeSetMergeAndClear ()
|
||||
from /opt/gitlab/embedded/service/gem/ruby/2.1.0/gems/nokogiri-1.6.7.2/lib/nokogiri/nokogiri.so
|
||||
...
|
||||
```
|
||||
|
||||
If you see a suspicious thread, like the Nokogiri one above, you may want
|
||||
to get more information:
|
||||
|
||||
```plaintext
|
||||
thread 21
|
||||
bt
|
||||
|
||||
# Example output
|
||||
#0 0x00007ff0d6afe111 in xmlXPathNodeSetMergeAndClear () from /opt/gitlab/embedded/service/gem/ruby/2.1.0/gems/nokogiri-1.6.7.2/lib/nokogiri/nokogiri.so
|
||||
#1 0x00007ff0d6b0b836 in xmlXPathNodeCollectAndTest () from /opt/gitlab/embedded/service/gem/ruby/2.1.0/gems/nokogiri-1.6.7.2/lib/nokogiri/nokogiri.so
|
||||
#2 0x00007ff0d6b09037 in xmlXPathCompOpEval () from /opt/gitlab/embedded/service/gem/ruby/2.1.0/gems/nokogiri-1.6.7.2/lib/nokogiri/nokogiri.so
|
||||
#3 0x00007ff0d6b09017 in xmlXPathCompOpEval () from /opt/gitlab/embedded/service/gem/ruby/2.1.0/gems/nokogiri-1.6.7.2/lib/nokogiri/nokogiri.so
|
||||
#4 0x00007ff0d6b092e0 in xmlXPathCompOpEval () from /opt/gitlab/embedded/service/gem/ruby/2.1.0/gems/nokogiri-1.6.7.2/lib/nokogiri/nokogiri.so
|
||||
#5 0x00007ff0d6b0bc37 in xmlXPathRunEval () from /opt/gitlab/embedded/service/gem/ruby/2.1.0/gems/nokogiri-1.6.7.2/lib/nokogiri/nokogiri.so
|
||||
#6 0x00007ff0d6b0be5f in xmlXPathEvalExpression () from /opt/gitlab/embedded/service/gem/ruby/2.1.0/gems/nokogiri-1.6.7.2/lib/nokogiri/nokogiri.so
|
||||
#7 0x00007ff0d6a97dc3 in evaluate (argc=2, argv=0x1022d058, self=<value optimized out>) at xml_xpath_context.c:221
|
||||
#8 0x00007ff0daeab0ea in vm_call_cfunc_with_frame (th=0x1022a4f0, reg_cfp=0x1032b810, ci=<value optimized out>) at vm_insnhelper.c:1510
|
||||
```
|
||||
|
||||
To output a backtrace from all threads at once:
|
||||
|
||||
```plaintext
|
||||
set pagination off
|
||||
thread apply all bt
|
||||
```
|
||||
|
||||
Once you're done debugging with `gdb`, be sure to detach from the process and
|
||||
exit:
|
||||
|
||||
```plaintext
|
||||
detach
|
||||
exit
|
||||
```
|
||||
|
||||
## Sidekiq kill signals
|
||||
|
||||
`TTIN` was described above as the signal to print backtraces for logging. However,
|
||||
Sidekiq responds to other signals as well. For example, `TSTP` and `TERM` can be used
|
||||
to gracefully shut Sidekiq down; see
|
||||
[the Sidekiq Signals docs](https://github.com/mperham/sidekiq/wiki/Signals#ttin).
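For example, from a Ruby process on the same host you could send these signals with `Process.kill`; this is a sketch and `sidekiq_pid` is a placeholder:

```ruby
sidekiq_pid = 12345 # placeholder: the PID of the Sidekiq process

Process.kill('TSTP', sidekiq_pid) # quiet: stop picking up new jobs
Process.kill('TERM', sidekiq_pid) # shut down gracefully within Sidekiq's timeout
```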
|
||||
|
||||
## Check for blocking queries
|
||||
|
||||
Sometimes Sidekiq processes jobs so quickly that it can
|
||||
cause database contention. Check for blocking queries when the backtraces above
|
||||
show that many threads are stuck in the database adapter.
|
||||
|
||||
The PostgreSQL wiki has details on the query you can run to see blocking
|
||||
queries. The query is different based on PostgreSQL version. See
|
||||
[Lock Monitoring](https://wiki.postgresql.org/wiki/Lock_Monitoring) for
|
||||
the query details.
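As a quick first check, assuming PostgreSQL 9.6 or later, you can list backends currently waiting on locks from `gitlab-rails console`:

```ruby
ActiveRecord::Base.connection.select_all(<<~SQL).to_a
  SELECT pid, state, wait_event_type, left(query, 100) AS query
  FROM pg_stat_activity
  WHERE wait_event_type = 'Lock'
SQL
```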
|
||||
|
||||
## Managing Sidekiq queues
|
||||
|
||||
It is possible to use the [Sidekiq API](https://github.com/mperham/sidekiq/wiki/API)
|
||||
to perform a number of troubleshooting steps on Sidekiq.
|
||||
|
||||
These are administrative commands and should only be used if the current
|
||||
administration interface is not suitable due to the scale of the installation.
|
||||
|
||||
All these commands should be run using `gitlab-rails console`.
|
||||
|
||||
### View the queue size
|
||||
|
||||
```ruby
|
||||
Sidekiq::Queue.new("pipeline_processing:build_queue").size
|
||||
```
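To list every queue ordered by size, together with its latency, a sketch using the same API:

```ruby
Sidekiq::Queue.all.sort_by(&:size).reverse.each do |queue|
  puts "#{queue.name}: #{queue.size} jobs, #{queue.latency.round(1)}s latency"
end
```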
|
||||
|
||||
### Enumerate all enqueued jobs
|
||||
|
||||
```ruby
|
||||
queue = Sidekiq::Queue.new("chaos:chaos_sleep")
|
||||
queue.each do |job|
|
||||
# job.klass # => 'MyWorker'
|
||||
# job.args # => [1, 2, 3]
|
||||
# job.jid # => jid
|
||||
# job.queue # => chaos:chaos_sleep
|
||||
# job["retry"] # => 3
|
||||
# job.item # => {
|
||||
# "class"=>"Chaos::SleepWorker",
|
||||
# "args"=>[1000],
|
||||
# "retry"=>3,
|
||||
# "queue"=>"chaos:chaos_sleep",
|
||||
# "backtrace"=>true,
|
||||
# "queue_namespace"=>"chaos",
|
||||
# "jid"=>"39bc482b823cceaf07213523",
|
||||
# "created_at"=>1566317076.266069,
|
||||
# "correlation_id"=>"c323b832-a857-4858-b695-672de6f0e1af",
|
||||
# "enqueued_at"=>1566317076.26761},
|
||||
# }
|
||||
|
||||
# job.delete if job.jid == 'abcdef1234567890'
|
||||
end
|
||||
```
|
||||
|
||||
### Enumerate currently running jobs
|
||||
|
||||
```ruby
|
||||
workers = Sidekiq::Workers.new
|
||||
workers.each do |process_id, thread_id, work|
|
||||
# process_id is a unique identifier per Sidekiq process
|
||||
# thread_id is a unique identifier per thread
|
||||
# work is a Hash which looks like:
|
||||
# {"queue"=>"chaos:chaos_sleep",
|
||||
# "payload"=>
|
||||
# { "class"=>"Chaos::SleepWorker",
|
||||
# "args"=>[1000],
|
||||
# "retry"=>3,
|
||||
# "queue"=>"chaos:chaos_sleep",
|
||||
# "backtrace"=>true,
|
||||
# "queue_namespace"=>"chaos",
|
||||
# "jid"=>"b2a31e3eac7b1a99ff235869",
|
||||
# "created_at"=>1566316974.9215662,
|
||||
# "correlation_id"=>"e484fb26-7576-45f9-bf21-b99389e1c53c",
|
||||
# "enqueued_at"=>1566316974.9229589},
|
||||
# "run_at"=>1566316974}],
|
||||
end
|
||||
```
|
||||
|
||||
### Remove Sidekiq jobs for given parameters (destructive)
|
||||
|
||||
The general method to kill jobs conditionally is the following command, which
|
||||
removes jobs that are queued but not started. Running jobs cannot be killed.
|
||||
|
||||
```ruby
|
||||
queue = Sidekiq::Queue.new('<queue name>')
|
||||
queue.each { |job| job.delete if <condition>}
|
||||
```
|
||||
|
||||
See the section below for canceling running jobs.
|
||||
|
||||
In the method above, `<queue name>` is the name of the queue that contains the jobs you want to delete and `<condition>` decides which jobs get deleted.
|
||||
|
||||
Commonly, `<condition>` references the job arguments, which depend on the type of job in question. To find the arguments for a specific queue, you can have a look at the `perform` function of the related worker file, commonly found at `/app/workers/<queue-name>_worker.rb`.
|
||||
|
||||
For example, `repository_import` has `project_id` as the job argument, while `update_merge_requests` has `project_id, user_id, oldrev, newrev, ref`.
|
||||
|
||||
Arguments need to be referenced by their sequence ID using `job.args[<id>]` because `job.args` is a list of all arguments provided to the Sidekiq job.
|
||||
|
||||
Here are some examples:
|
||||
|
||||
```ruby
|
||||
queue = Sidekiq::Queue.new('update_merge_requests')
|
||||
# In this example, we want to remove any update_merge_requests jobs
|
||||
# for the Project with ID 125 and ref `ref/heads/my_branch`
|
||||
queue.each { |job| job.delete if job.args[0] == 125 and job.args[4] == 'ref/heads/my_branch' }
|
||||
```
|
||||
|
||||
```ruby
|
||||
# Cancelling jobs like: `RepositoryImportWorker.new.perform_async(100)`
|
||||
id_list = [100]
|
||||
|
||||
queue = Sidekiq::Queue.new('repository_import')
|
||||
queue.each do |job|
|
||||
job.delete if id_list.include?(job.args[0])
|
||||
end
|
||||
```
|
||||
|
||||
### Remove specific job ID (destructive)
|
||||
|
||||
```ruby
|
||||
queue = Sidekiq::Queue.new('repository_import')
|
||||
queue.each do |job|
|
||||
job.delete if job.jid == 'my-job-id'
|
||||
end
|
||||
```
|
||||
|
||||
## Canceling running jobs (destructive)
|
||||
|
||||
> Introduced in GitLab 12.3.
|
||||
|
||||
This is a highly risky operation; use it only as a last resort.
|
||||
Doing so might result in data corruption, as the job
|
||||
is interrupted mid-execution and it is not guaranteed
|
||||
that proper rollback of transactions is implemented.
|
||||
|
||||
```ruby
|
||||
Gitlab::SidekiqDaemon::Monitor.cancel_job('job-id')
|
||||
```
|
||||
|
||||
> This requires Sidekiq to be run with the `SIDEKIQ_MONITOR_WORKER=1`
|
||||
> environment variable.
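To find the `jid` of a running job, you can enumerate running jobs as shown earlier. A sketch, where the worker class name is illustrative:

```ruby
Sidekiq::Workers.new.each do |process_id, thread_id, work|
  payload = work['payload']
  puts payload['jid'] if payload['class'] == 'Chaos::SleepWorker'
end
```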
|
||||
|
||||
To perform the interrupt, we use `Thread.raise`, which
|
||||
has a number of drawbacks, as mentioned in [Why Ruby's Timeout is dangerous (and Thread.raise is terrifying)](https://jvns.ca/blog/2015/11/27/why-rubys-timeout-is-dangerous-and-thread-dot-raise-is-terrifying/):
|
||||
|
||||
> This is where the implications get interesting, and terrifying. This means that an exception can get raised:
|
||||
>
|
||||
> - during a network request (ok, as long as the surrounding code is prepared to catch Timeout::Error)
|
||||
> - during the cleanup for the network request
|
||||
> - during a rescue block
|
||||
> - while creating an object to save to the database afterwards
|
||||
> - in any of your code, regardless of whether it could have possibly raised an exception before
|
||||
>
|
||||
> Nobody writes code to defend against an exception being raised on literally any line. That's not even possible. So Thread.raise is basically like a sneak attack on your code that could result in almost anything. It would probably be okay if it were pure-functional code that did not modify any state. But this is Ruby, so that's unlikely :)
|
||||
|
|
@ -1,58 +1,11 @@
|
|||
---
|
||||
stage: Systems
|
||||
group: Distribution
|
||||
info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://about.gitlab.com/handbook/engineering/ux/technical-writing/#assignments
|
||||
redirect_to: 'sidekiq/sidekiq_health_check.md'
|
||||
remove_date: '2022-11-11'
|
||||
---
|
||||
|
||||
# Sidekiq Health Check **(FREE SELF)**
|
||||
This document was moved to [another location](sidekiq/sidekiq_health_check.md).
|
||||
<!-- This redirect file can be deleted after <2022-11-11>. -->
|
||||
<!-- Redirects that point to other docs in the same project expire in three months. -->
|
||||
<!-- Redirects that point to docs in a different project or site (link is not relative and starts with `https:`) expire in one year. -->
|
||||
<!-- Before deletion, see: https://docs.gitlab.com/ee/development/documentation/redirects.html -->
|
||||
|
|
|
|||
|
|
@ -13,14 +13,12 @@ installation.
|
|||
|
||||
- [SSL](ssl.md)
|
||||
- [Geo](../geo/replication/troubleshooting.md)
|
||||
- [Sidekiq](sidekiq.md)
|
||||
- [GitLab Rails console cheat sheet](gitlab_rails_cheat_sheet.md)
|
||||
- [Example group SAML and SCIM configurations](../../user/group/saml_sso/example_saml_config.md)
|
||||
- [Kubernetes cheat sheet](https://docs.gitlab.com/charts/troubleshooting/kubernetes_cheat_sheet.html)
|
||||
- [Linux cheat sheet](linux_cheat_sheet.md)
|
||||
- [Parsing GitLab logs with `jq`](../logs/log_parsing.md)
|
||||
- [Diagnostics tools](diagnostics_tools.md)
|
||||
- [Tracing requests with correlation ID](tracing_correlation_id.md)
|
||||
|
||||
Some feature documentation pages also have a troubleshooting section at the end
|
||||
that you can check for feature-specific help.
|
||||
|
|
|
|||
|
|
@ -1,395 +1,11 @@
|
|||
---
|
||||
stage: Systems
|
||||
group: Distribution
|
||||
info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://about.gitlab.com/handbook/engineering/ux/technical-writing/#assignments
|
||||
redirect_to: '../sidekiq/sidekiq_troubleshooting.md'
|
||||
remove_date: '2022-11-11'
|
||||
---
|
||||
|
||||
# Troubleshooting Sidekiq **(FREE SELF)**
|
||||
This document was moved to [another location](../sidekiq/sidekiq_troubleshooting.md).
|
||||
|
||||
Sidekiq is the background job processor GitLab uses to asynchronously run
|
||||
tasks. When things go wrong it can be difficult to troubleshoot. These
|
||||
situations also tend to be high-pressure because a production system job queue
|
||||
may be filling up. Users will notice when this happens because new branches
|
||||
may not show up and merge requests may not be updated. The following are some
|
||||
troubleshooting steps to help you diagnose the bottleneck.
|
||||
|
||||
GitLab administrators/users should consider working through these
|
||||
debug steps with GitLab Support so the backtraces can be analyzed by our team.
|
||||
It may reveal a bug or necessary improvement in GitLab.
|
||||
|
||||
In any of the backtraces, be wary of suspecting cases where every
|
||||
thread appears to be waiting in the database, Redis, or waiting to acquire
|
||||
a mutex. This **may** mean there's contention in the database, for example,
|
||||
but look for one thread that is different than the rest. This other thread
|
||||
may be using all available CPU, or have a Ruby Global Interpreter Lock,
|
||||
preventing other threads from continuing.
|
||||
|
||||
## Log arguments to Sidekiq jobs
|
||||
|
||||
[In GitLab 13.6 and later](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/44853)
|
||||
some arguments passed to Sidekiq jobs are logged by default.
|
||||
To avoid logging sensitive information (for instance, password reset tokens),
|
||||
GitLab logs numeric arguments for all workers, with overrides for some specific
|
||||
workers where their arguments are not sensitive.
|
||||
|
||||
Example log output:
|
||||
|
||||
```json
|
||||
{"severity":"INFO","time":"2020-06-08T14:37:37.892Z","class":"AdminEmailsWorker","args":["[FILTERED]","[FILTERED]","[FILTERED]"],"retry":3,"queue":"admin_emails","backtrace":true,"jid":"9e35e2674ac7b12d123e13cc","created_at":"2020-06-08T14:37:37.373Z","meta.user":"root","meta.caller_id":"Admin::EmailsController#create","correlation_id":"37D3lArJmT1","uber-trace-id":"2d942cc98cc1b561:6dc94409cfdd4d77:9fbe19bdee865293:1","enqueued_at":"2020-06-08T14:37:37.410Z","pid":65011,"message":"AdminEmailsWorker JID-9e35e2674ac7b12d123e13cc: done: 0.48085 sec","job_status":"done","scheduling_latency_s":0.001012,"redis_calls":9,"redis_duration_s":0.004608,"redis_read_bytes":696,"redis_write_bytes":6141,"duration_s":0.48085,"cpu_s":0.308849,"completed_at":"2020-06-08T14:37:37.892Z","db_duration_s":0.010742}
|
||||
{"severity":"INFO","time":"2020-06-08T14:37:37.894Z","class":"ActiveJob::QueueAdapters::SidekiqAdapter::JobWrapper","wrapped":"ActionMailer::MailDeliveryJob","queue":"mailers","args":["[FILTERED]"],"retry":3,"backtrace":true,"jid":"e47a4f6793d475378432e3c8","created_at":"2020-06-08T14:37:37.884Z","meta.user":"root","meta.caller_id":"AdminEmailsWorker","correlation_id":"37D3lArJmT1","uber-trace-id":"2d942cc98cc1b561:29344de0f966446d:5c3b0e0e1bef987b:1","enqueued_at":"2020-06-08T14:37:37.885Z","pid":65011,"message":"ActiveJob::QueueAdapters::SidekiqAdapter::JobWrapper JID-e47a4f6793d475378432e3c8: start","job_status":"start","scheduling_latency_s":0.009473}
|
||||
{"severity":"INFO","time":"2020-06-08T14:39:50.648Z","class":"NewIssueWorker","args":["455","1"],"retry":3,"queue":"new_issue","backtrace":true,"jid":"a24af71f96fd129ec47f5d1e","created_at":"2020-06-08T14:39:50.643Z","meta.user":"root","meta.project":"h5bp/html5-boilerplate","meta.root_namespace":"h5bp","meta.caller_id":"Projects::IssuesController#create","correlation_id":"f9UCZHqhuP7","uber-trace-id":"28f65730f99f55a3:a5d2b62dec38dffc:48ddd092707fa1b7:1","enqueued_at":"2020-06-08T14:39:50.646Z","pid":65011,"message":"NewIssueWorker JID-a24af71f96fd129ec47f5d1e: start","job_status":"start","scheduling_latency_s":0.001144}
|
||||
```
|
||||
|
||||
When using [Sidekiq JSON logging](../logs/index.md#sidekiqlog),
|
||||
arguments logs are limited to a maximum size of 10 kilobytes of text;
|
||||
any arguments after this limit are discarded and replaced with a
|
||||
single argument containing the string `"..."`.
|
||||
|
||||
You can set `SIDEKIQ_LOG_ARGUMENTS` [environment variable](https://docs.gitlab.com/omnibus/settings/environment-variables.html)
|
||||
to `0` (false) to disable argument logging.
|
||||
|
||||
Example:
|
||||
|
||||
```ruby
|
||||
gitlab_rails['env'] = {"SIDEKIQ_LOG_ARGUMENTS" => "0"}
|
||||
```
|
||||
|
||||
In GitLab 13.5 and earlier, set `SIDEKIQ_LOG_ARGUMENTS` to `1` to start logging arguments passed to Sidekiq.
|
||||
|
||||
## Thread dump
|
||||
|
||||
Send the Sidekiq process ID the `TTIN` signal to output thread
|
||||
backtraces in the log file.
|
||||
|
||||
```shell
|
||||
kill -TTIN <sidekiq_pid>
|
||||
```
|
||||
|
||||
Check in `/var/log/gitlab/sidekiq/current` or `$GITLAB_HOME/log/sidekiq.log` for
|
||||
the backtrace output. The backtraces are lengthy and generally start with
|
||||
several `WARN` level messages. Here's an example of a single thread's backtrace:
|
||||
|
||||
```plaintext
|
||||
2016-04-13T06:21:20.022Z 31517 TID-orn4urby0 WARN: ActiveRecord::RecordNotFound: Couldn't find Note with 'id'=3375386
|
||||
2016-04-13T06:21:20.022Z 31517 TID-orn4urby0 WARN: /opt/gitlab/embedded/service/gem/ruby/2.1.0/gems/activerecord-4.2.5.2/lib/active_record/core.rb:155:in `find'
|
||||
/opt/gitlab/embedded/service/gitlab-rails/app/workers/new_note_worker.rb:7:in `perform'
|
||||
/opt/gitlab/embedded/service/gem/ruby/2.1.0/gems/sidekiq-4.0.1/lib/sidekiq/processor.rb:150:in `execute_job'
|
||||
/opt/gitlab/embedded/service/gem/ruby/2.1.0/gems/sidekiq-4.0.1/lib/sidekiq/processor.rb:132:in `block (2 levels) in process'
|
||||
/opt/gitlab/embedded/service/gem/ruby/2.1.0/gems/sidekiq-4.0.1/lib/sidekiq/middleware/chain.rb:127:in `block in invoke'
|
||||
/opt/gitlab/embedded/service/gitlab-rails/lib/gitlab/sidekiq_middleware/memory_killer.rb:17:in `call'
|
||||
/opt/gitlab/embedded/service/gem/ruby/2.1.0/gems/sidekiq-4.0.1/lib/sidekiq/middleware/chain.rb:129:in `block in invoke'
|
||||
/opt/gitlab/embedded/service/gitlab-rails/lib/gitlab/sidekiq_middleware/arguments_logger.rb:6:in `call'
|
||||
...
|
||||
```
|
||||
|
||||
In some cases Sidekiq may be hung and unable to respond to the `TTIN` signal.
|
||||
Move on to other troubleshooting methods if this happens.
|
||||
|
||||
## Ruby profiling with `rbspy`
|
||||
|
||||
[rbspy](https://rbspy.github.io) is an easy to use and low-overhead Ruby profiler that can be used to create
|
||||
flamegraph-style diagrams of CPU usage by Ruby processes.
|
||||
|
||||
No changes to GitLab are required to use it and it has no dependencies. To install it:
|
||||
|
||||
1. Download the binary from the [`rbspy` releases page](https://github.com/rbspy/rbspy/releases).
|
||||
1. Make the binary executable.
|
||||
|
||||
To profile a Sidekiq worker for one minute, run:
|
||||
|
||||
```shell
|
||||
sudo ./rbspy record --pid <sidekiq_pid> --duration 60 --file /tmp/sidekiq_profile.svg
|
||||
```
|
||||
|
||||

|
||||
|
||||
In this example of a flamegraph generated by `rbspy`, almost all of the Sidekiq process's time is spent in `rev_parse`, a native C
|
||||
function in Rugged. In the stack, we can see `rev_parse` is being called by the `ExpirePipelineCacheWorker`.
|
||||
|
||||
## Process profiling with `perf`
|
||||
|
||||
Linux has a process profiling tool called `perf` that is helpful when a certain
|
||||
process is eating up a lot of CPU. If you see high CPU usage and Sidekiq isn't
|
||||
responding to the `TTIN` signal, this is a good next step.
|
||||
|
||||
If `perf` is not installed on your system, install it with `apt-get` or `yum`:
|
||||
|
||||
```shell
|
||||
# Debian
|
||||
sudo apt-get install linux-tools
|
||||
|
||||
# Ubuntu (may require these additional Kernel packages)
|
||||
sudo apt-get install linux-tools-common linux-tools-generic linux-tools-`uname -r`
|
||||
|
||||
# Red Hat/CentOS
|
||||
sudo yum install perf
|
||||
```
|
||||
|
||||
Run `perf` against the Sidekiq PID:
|
||||
|
||||
```shell
|
||||
sudo perf record -p <sidekiq_pid>
|
||||
```
|
||||
|
||||
Let this run for 30-60 seconds and then press Ctrl-C. Then view the `perf` report:
|
||||
|
||||
```shell
|
||||
$ sudo perf report
|
||||
|
||||
# Sample output
|
||||
Samples: 348K of event 'cycles', Event count (approx.): 280908431073
|
||||
97.69% ruby nokogiri.so [.] xmlXPathNodeSetMergeAndClear
|
||||
0.18% ruby libruby.so.2.1.0 [.] objspace_malloc_increase
|
||||
0.12% ruby libc-2.12.so [.] _int_malloc
|
||||
0.10% ruby libc-2.12.so [.] _int_free
|
||||
```
|
||||
|
||||
Above you see sample output from a `perf` report. It shows that 97% of the CPU is
|
||||
being spent inside Nokogiri and `xmlXPathNodeSetMergeAndClear`. For something
|
||||
this obvious you should then go investigate what job in GitLab would use
|
||||
Nokogiri and XPath. Combine with `TTIN` or `gdb` output to show the
|
||||
corresponding Ruby code where this is happening.
|
||||
|
||||
## The GNU Project Debugger (`gdb`)
|
||||
|
||||
`gdb` can be another effective tool for debugging Sidekiq. It gives you a little
|
||||
more interactive way to look at each thread and see what's causing problems.
|
||||
|
||||
Attaching to a process with `gdb` suspends the normal operation
|
||||
of the process (Sidekiq does not process jobs while `gdb` is attached).
|
||||
|
||||
Start by attaching to the Sidekiq PID:
|
||||
|
||||
```shell
|
||||
gdb -p <sidekiq_pid>
|
||||
```
|
||||
|
||||
Then gather information on all the threads:
|
||||
|
||||
```plaintext
|
||||
info threads
|
||||
|
||||
# Example output
|
||||
30 Thread 0x7fe5fbd63700 (LWP 26060) 0x0000003f7cadf113 in poll () from /lib64/libc.so.6
|
||||
29 Thread 0x7fe5f2b3b700 (LWP 26533) 0x0000003f7ce0b68c in pthread_cond_wait@@GLIBC_2.3.2 () from /lib64/libpthread.so.0
|
||||
28 Thread 0x7fe5f2a3a700 (LWP 26534) 0x0000003f7ce0ba5e in pthread_cond_timedwait@@GLIBC_2.3.2 () from /lib64/libpthread.so.0
|
||||
27 Thread 0x7fe5f2939700 (LWP 26535) 0x0000003f7ce0b68c in pthread_cond_wait@@GLIBC_2.3.2 () from /lib64/libpthread.so.0
|
||||
26 Thread 0x7fe5f2838700 (LWP 26537) 0x0000003f7ce0b68c in pthread_cond_wait@@GLIBC_2.3.2 () from /lib64/libpthread.so.0
|
||||
25 Thread 0x7fe5f2737700 (LWP 26538) 0x0000003f7ce0b68c in pthread_cond_wait@@GLIBC_2.3.2 () from /lib64/libpthread.so.0
|
||||
24 Thread 0x7fe5f2535700 (LWP 26540) 0x0000003f7ce0b68c in pthread_cond_wait@@GLIBC_2.3.2 () from /lib64/libpthread.so.0
|
||||
23 Thread 0x7fe5f2434700 (LWP 26541) 0x0000003f7ce0b68c in pthread_cond_wait@@GLIBC_2.3.2 () from /lib64/libpthread.so.0
|
||||
22 Thread 0x7fe5f2232700 (LWP 26543) 0x0000003f7ce0b68c in pthread_cond_wait@@GLIBC_2.3.2 () from /lib64/libpthread.so.0
|
||||
21 Thread 0x7fe5f2131700 (LWP 26544) 0x00007fe5f7b570f0 in xmlXPathNodeSetMergeAndClear ()
|
||||
from /opt/gitlab/embedded/service/gem/ruby/2.1.0/gems/nokogiri-1.6.7.2/lib/nokogiri/nokogiri.so
|
||||
...
|
||||
```
|
||||
|
||||
If you see a suspicious thread, like the Nokogiri one above, you may want
|
||||
to get more information:
|
||||
|
||||

```plaintext
thread 21
bt

# Example output
#0 0x00007ff0d6afe111 in xmlXPathNodeSetMergeAndClear () from /opt/gitlab/embedded/service/gem/ruby/2.1.0/gems/nokogiri-1.6.7.2/lib/nokogiri/nokogiri.so
#1 0x00007ff0d6b0b836 in xmlXPathNodeCollectAndTest () from /opt/gitlab/embedded/service/gem/ruby/2.1.0/gems/nokogiri-1.6.7.2/lib/nokogiri/nokogiri.so
#2 0x00007ff0d6b09037 in xmlXPathCompOpEval () from /opt/gitlab/embedded/service/gem/ruby/2.1.0/gems/nokogiri-1.6.7.2/lib/nokogiri/nokogiri.so
#3 0x00007ff0d6b09017 in xmlXPathCompOpEval () from /opt/gitlab/embedded/service/gem/ruby/2.1.0/gems/nokogiri-1.6.7.2/lib/nokogiri/nokogiri.so
#4 0x00007ff0d6b092e0 in xmlXPathCompOpEval () from /opt/gitlab/embedded/service/gem/ruby/2.1.0/gems/nokogiri-1.6.7.2/lib/nokogiri/nokogiri.so
#5 0x00007ff0d6b0bc37 in xmlXPathRunEval () from /opt/gitlab/embedded/service/gem/ruby/2.1.0/gems/nokogiri-1.6.7.2/lib/nokogiri/nokogiri.so
#6 0x00007ff0d6b0be5f in xmlXPathEvalExpression () from /opt/gitlab/embedded/service/gem/ruby/2.1.0/gems/nokogiri-1.6.7.2/lib/nokogiri/nokogiri.so
#7 0x00007ff0d6a97dc3 in evaluate (argc=2, argv=0x1022d058, self=<value optimized out>) at xml_xpath_context.c:221
#8 0x00007ff0daeab0ea in vm_call_cfunc_with_frame (th=0x1022a4f0, reg_cfp=0x1032b810, ci=<value optimized out>) at vm_insnhelper.c:1510
```

To output a backtrace from all threads at once:

```plaintext
set pagination off
thread apply all bt
```

Once you're done debugging with `gdb`, be sure to detach from the process and
exit:

```plaintext
detach
exit
```

## Sidekiq kill signals

TTIN was described above as the signal to print backtraces for logging.
However, Sidekiq responds to other signals as well. For example, TSTP and
TERM can be used to gracefully shut Sidekiq down; see
[the Sidekiq Signals docs](https://github.com/mperham/sidekiq/wiki/Signals#ttin).
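
These signals can also be sent from a Ruby console on the same host. A minimal sketch, assuming you have already looked up the Sidekiq PID (the PID below is hypothetical):

```ruby
sidekiq_pid = 26060 # hypothetical; find the real PID with `ps` or from the logs

Process.kill('TTIN', sidekiq_pid) # dump thread backtraces to the Sidekiq log
Process.kill('TSTP', sidekiq_pid) # stop picking up new jobs ("quiet" mode)
Process.kill('TERM', sidekiq_pid) # shut down gracefully within the shutdown timeout
```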

## Check for blocking queries

Sometimes the speed at which Sidekiq processes jobs is so fast that it
causes database contention. Check for blocking queries when the backtraces
above show that many threads are stuck in the database adapter.

The PostgreSQL wiki has details on the query you can run to see blocking
queries. The query is different based on PostgreSQL version. See
[Lock Monitoring](https://wiki.postgresql.org/wiki/Lock_Monitoring) for
the query details.
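
As a sketch of what this can look like in practice, on PostgreSQL 9.6 or later you can run a query adapted from that wiki page from `gitlab-rails console`; `pg_blocking_pids` does the version-specific work. The exact column selection here is an assumption, not the wiki's verbatim query:

```ruby
# List sessions together with the sessions blocking them (PostgreSQL 9.6+).
blocking_sql = <<~SQL
  SELECT activity.pid,
         activity.usename,
         activity.query,
         blocking.pid AS blocking_pid,
         blocking.query AS blocking_query
  FROM pg_stat_activity AS activity
  JOIN pg_stat_activity AS blocking
    ON blocking.pid = ANY(pg_blocking_pids(activity.pid));
SQL

ActiveRecord::Base.connection.execute(blocking_sql).each { |row| puts row }
```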

## Managing Sidekiq queues

It is possible to use the [Sidekiq API](https://github.com/mperham/sidekiq/wiki/API)
to perform a number of troubleshooting steps on Sidekiq.

These are administrative commands and should only be used if the current
administration interface is not suitable due to the scale of the installation.

All these commands should be run using `gitlab-rails console`.

### View the queue size

```ruby
Sidekiq::Queue.new("pipeline_processing:build_queue").size
```
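
If you are not sure which queue is growing, a small sketch using the same API lists every queue by size, largest first:

```ruby
# List all queues and their sizes, largest first.
Sidekiq::Queue.all.sort_by(&:size).reverse_each do |queue|
  puts "#{queue.name}: #{queue.size}"
end
```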

### Enumerate all enqueued jobs

```ruby
queue = Sidekiq::Queue.new("chaos:chaos_sleep")
queue.each do |job|
  # job.klass # => 'MyWorker'
  # job.args # => [1, 2, 3]
  # job.jid # => jid
  # job.queue # => chaos:chaos_sleep
  # job["retry"] # => 3
  # job.item # => {
  #   "class"=>"Chaos::SleepWorker",
  #   "args"=>[1000],
  #   "retry"=>3,
  #   "queue"=>"chaos:chaos_sleep",
  #   "backtrace"=>true,
  #   "queue_namespace"=>"chaos",
  #   "jid"=>"39bc482b823cceaf07213523",
  #   "created_at"=>1566317076.266069,
  #   "correlation_id"=>"c323b832-a857-4858-b695-672de6f0e1af",
  #   "enqueued_at"=>1566317076.26761
  # }

  # job.delete if job.jid == 'abcdef1234567890'
end
```
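
Building on the fields shown above, a sketch like the following tallies enqueued jobs by worker class, which can show what dominates a queue (the queue name is the same example as above):

```ruby
queue = Sidekiq::Queue.new("chaos:chaos_sleep")

# Count enqueued jobs per worker class.
tally = Hash.new(0)
queue.each { |job| tally[job.klass] += 1 }

tally.sort_by { |_klass, count| -count }.each do |klass, count|
  puts "#{klass}: #{count}"
end
```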

### Enumerate currently running jobs

```ruby
workers = Sidekiq::Workers.new
workers.each do |process_id, thread_id, work|
  # process_id is a unique identifier per Sidekiq process
  # thread_id is a unique identifier per thread
  # work is a Hash which looks like:
  # {"queue"=>"chaos:chaos_sleep",
  #  "payload"=>
  #   {"class"=>"Chaos::SleepWorker",
  #    "args"=>[1000],
  #    "retry"=>3,
  #    "queue"=>"chaos:chaos_sleep",
  #    "backtrace"=>true,
  #    "queue_namespace"=>"chaos",
  #    "jid"=>"b2a31e3eac7b1a99ff235869",
  #    "created_at"=>1566316974.9215662,
  #    "correlation_id"=>"e484fb26-7576-45f9-bf21-b99389e1c53c",
  #    "enqueued_at"=>1566316974.9229589},
  #  "run_at"=>1566316974}
end
```
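
Because `run_at` is a Unix timestamp, a small sketch can flag jobs that have been running for a long time (the 5-minute threshold is arbitrary):

```ruby
workers = Sidekiq::Workers.new

workers.each do |process_id, thread_id, work|
  running_for = Time.now.to_i - work['run_at']
  next unless running_for > 5 * 60 # arbitrary threshold: 5 minutes

  payload = work['payload']
  puts "#{payload['class']} (jid: #{payload['jid']}) has been running " \
       "for #{running_for}s on #{process_id} (thread #{thread_id})"
end
```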

### Remove Sidekiq jobs for given parameters (destructive)

The general method to kill jobs conditionally is the following command, which
removes jobs that are queued but not started. Running jobs cannot be killed.

```ruby
queue = Sidekiq::Queue.new('<queue-name>')
queue.each { |job| job.delete if <condition> }
```

Have a look at the section below for canceling running jobs.

In the method above, `<queue-name>` is the name of the queue that contains the jobs you want to delete and `<condition>` decides which jobs get deleted.

Commonly, `<condition>` references the job arguments, which depend on the type of job in question. To find the arguments for a specific queue, you can have a look at the `perform` function of the related worker file, commonly found at `/app/workers/<queue-name>_worker.rb`.

For example, `repository_import` has `project_id` as the job argument, while `update_merge_requests` has `project_id, user_id, oldrev, newrev, ref`.

Arguments need to be referenced by their sequence ID using `job.args[<id>]` because `job.args` is a list of all arguments provided to the Sidekiq job.

Here are some examples:

```ruby
queue = Sidekiq::Queue.new('update_merge_requests')
# In this example, we want to remove any update_merge_requests jobs
# for the Project with ID 125 and ref `ref/heads/my_branch`
queue.each { |job| job.delete if job.args[0] == 125 && job.args[4] == 'ref/heads/my_branch' }
```

```ruby
# Cancelling jobs like: `RepositoryImportWorker.new.perform_async(100)`
id_list = [100]

queue = Sidekiq::Queue.new('repository_import')
queue.each do |job|
  job.delete if id_list.include?(job.args[0])
end
```
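
The condition does not have to reference job arguments. As another sketch, `job.created_at` (a `Time`) lets you delete queued jobs older than a cutoff; the queue name and one-hour cutoff are only examples:

```ruby
queue = Sidekiq::Queue.new('update_merge_requests')
cutoff = Time.now - 3600 # example cutoff: one hour ago

queue.each do |job|
  job.delete if job.created_at < cutoff
end
```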

### Remove specific job ID (destructive)

```ruby
queue = Sidekiq::Queue.new('repository_import')
queue.each do |job|
  job.delete if job.jid == 'my-job-id'
end
```

## Canceling running jobs (destructive)

> Introduced in GitLab 12.3.

This is a highly risky operation; use it as a last resort.
Doing so might result in data corruption, as the job
is interrupted mid-execution and it is not guaranteed
that a proper rollback of transactions is implemented.

```ruby
Gitlab::SidekiqDaemon::Monitor.cancel_job('job-id')
```

> This requires Sidekiq to be run with the `SIDEKIQ_MONITOR_WORKER=1`
> environment variable.

To perform the interrupt, we use `Thread.raise`, which
has a number of drawbacks, as mentioned in [Why Ruby's Timeout is dangerous (and Thread.raise is terrifying)](https://jvns.ca/blog/2015/11/27/why-rubys-timeout-is-dangerous-and-thread-dot-raise-is-terrifying/):

> This is where the implications get interesting, and terrifying. This means that an exception can get raised:
>
> - during a network request (ok, as long as the surrounding code is prepared to catch Timeout::Error)
> - during the cleanup for the network request
> - during a rescue block
> - while creating an object to save to the database afterwards
> - in any of your code, regardless of whether it could have possibly raised an exception before
>
> Nobody writes code to defend against an exception being raised on literally any line. That's not even possible. So Thread.raise is basically like a sneak attack on your code that could result in almost anything. It would probably be okay if it were pure-functional code that did not modify any state. But this is Ruby, so that's unlikely :)

## Disable Rugged

Calls into Rugged, Ruby bindings for `libgit2`, [lock the Sidekiq process's GVL](https://silverhammermba.github.io/emberb/c/#c-in-ruby-threads),
blocking all jobs on that worker from proceeding. If Rugged calls performed by Sidekiq are slow, this can cause significant delays in
background task processing.

By default, Rugged is used when Git repository data is stored on local storage or on an NFS mount.
[Using Rugged is recommended when using NFS](../nfs.md#improving-nfs-performance-with-gitlab), but if
you are using local storage, disabling Rugged can improve Sidekiq performance:

```shell
sudo gitlab-rake gitlab:features:disable_rugged
```
<!-- This redirect file can be deleted after <2022-11-11>. -->
<!-- Redirects that point to other docs in the same project expire in three months. -->
<!-- Redirects that point to docs in a different project or site (for example, link is not relative and starts with `https:`) expire in one year. -->
<!-- Before deletion, see: https://docs.gitlab.com/ee/development/documentation/redirects.html -->

@ -1,202 +1,11 @@
---
stage: Systems
group: Distribution
info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://about.gitlab.com/handbook/engineering/ux/technical-writing/#assignments
redirect_to: '../logs/tracing_correlation_id.md'
remove_date: '2022-11-12'
---

# Finding relevant log entries with a correlation ID **(FREE SELF)**
This document was moved to [another location](../logs/tracing_correlation_id.md).

GitLab instances log a unique request tracking ID (known as the
"correlation ID") for most requests. Each individual request to GitLab gets
its own correlation ID, which then gets logged in each GitLab component's logs for that
request. This makes it easier to trace behavior in a
distributed system. Without this ID it can be difficult or
impossible to match correlating log entries.

## Identify the correlation ID for a request

The correlation ID is logged in structured logs under the key `correlation_id`
and in all response headers GitLab sends under the header `x-request-id`.
You can find your correlation ID by searching in either place.

### Getting the correlation ID in your browser

You can use your browser's developer tools to monitor and inspect network
activity with the site that you're visiting. See the links below for network monitoring
documentation for some popular browsers.

- [Network Monitor - Firefox Developer Tools](https://developer.mozilla.org/en-US/docs/Tools/Network_Monitor)
- [Inspect Network Activity In Chrome DevTools](https://developer.chrome.com/docs/devtools/network/)
- [Safari Web Development Tools](https://developer.apple.com/safari/tools/)
- [Microsoft Edge Network panel](https://docs.microsoft.com/en-us/microsoft-edge/devtools-guide-chromium/network/)

To locate a relevant request and view its correlation ID:

1. Enable persistent logging in your network monitor. Some actions in GitLab redirect you quickly after you submit a form, so this helps capture all relevant activity.
1. To help isolate the requests you are looking for, you can filter for `document` requests.
1. Select the request of interest to view further detail.
1. Go to the **Headers** section and look for **Response Headers**. There you should find an `x-request-id` header with a
   value that was randomly generated by GitLab for the request.

See the following example:

![Firefox's network monitor showing a request ID header](img/network_monitor_xid.png)

### Getting the correlation ID from your logs

Another approach to finding the correct correlation ID is to search or watch
your logs and find the `correlation_id` value for the log entry that you're
watching for.

For example, let's say that you want to learn what's happening or breaking when
you reproduce an action in GitLab. You could tail the GitLab logs, filtering
to requests by your user, and then watch the requests until you see what you're
interested in.

### Getting the correlation ID from curl

If you're using `curl`, you can use the verbose option to show request and response headers, as well as other debug information.

```shell
➜ ~ curl --verbose "https://gitlab.example.com/api/v4/projects"
# look for a line that looks like this
< x-request-id: 4rAMkV3gof4
```
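
The same header can be read programmatically. A minimal Ruby sketch (the host is the same example domain; authenticated endpoints additionally need a token or session cookie):

```ruby
require 'net/http'
require 'uri'

# Fetch a GitLab URL and print the correlation ID assigned to the request.
uri = URI('https://gitlab.example.com/api/v4/projects')
response = Net::HTTP.get_response(uri)

puts response['x-request-id']
```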

#### Using jq

This example uses [jq](https://stedolan.github.io/jq/) to filter results and
display values we most likely care about.

```shell
sudo gitlab-ctl tail gitlab-rails/production_json.log | jq 'select(.username == "bob") | "User: \(.username), \(.method) \(.path), \(.controller)#\(.action), ID: \(.correlation_id)"'
```

```plaintext
"User: bob, GET /root/linux, ProjectsController#show, ID: U7k7fh6NpW3"
"User: bob, GET /root/linux/commits/master/signatures, Projects::CommitsController#signatures, ID: XPIHpctzEg1"
"User: bob, GET /root/linux/blob/master/README, Projects::BlobController#show, ID: LOt9hgi1TV4"
```

#### Using grep

This example uses only `grep` and `tr`, which are more likely to be installed than `jq`.

```shell
sudo gitlab-ctl tail gitlab-rails/production_json.log | grep '"username":"bob"' | tr ',' '\n' | egrep 'method|path|correlation_id'
```

```plaintext
{"method":"GET"
"path":"/root/linux"
"username":"bob"
"correlation_id":"U7k7fh6NpW3"}
{"method":"GET"
"path":"/root/linux/commits/master/signatures"
"username":"bob"
"correlation_id":"XPIHpctzEg1"}
{"method":"GET"
"path":"/root/linux/blob/master/README"
"username":"bob"
"correlation_id":"LOt9hgi1TV4"}
```

## Searching your logs for the correlation ID

Once you have the correlation ID you can start searching for relevant log
entries. You can filter the lines by the correlation ID itself.
Combining `find` and `grep` should be sufficient to find the entries you are looking for.

```shell
# find <gitlab log directory> -type f -mtime -0 -exec grep '<correlation ID>' '{}' '+'
find /var/log/gitlab -type f -mtime 0 -exec grep 'LOt9hgi1TV4' '{}' '+'
```

```plaintext
/var/log/gitlab/gitlab-workhorse/current:{"correlation_id":"LOt9hgi1TV4","duration_ms":2478,"host":"gitlab.domain.tld","level":"info","method":"GET","msg":"access","proto":"HTTP/1.1","referrer":"https://gitlab.domain.tld/root/linux","remote_addr":"68.0.116.160:0","remote_ip":"[filtered]","status":200,"system":"http","time":"2019-09-17T22:17:19Z","uri":"/root/linux/blob/master/README?format=json\u0026viewer=rich","user_agent":"Mozilla/5.0 (Mac) Gecko Firefox/69.0","written_bytes":1743}
/var/log/gitlab/gitaly/current:{"correlation_id":"LOt9hgi1TV4","grpc.code":"OK","grpc.meta.auth_version":"v2","grpc.meta.client_name":"gitlab-web","grpc.method":"FindCommits","grpc.request.deadline":"2019-09-17T22:17:47Z","grpc.request.fullMethod":"/gitaly.CommitService/FindCommits","grpc.request.glProjectPath":"root/linux","grpc.request.glRepository":"project-1","grpc.request.repoPath":"@hashed/6b/86/6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b.git","grpc.request.repoStorage":"default","grpc.request.topLevelGroup":"@hashed","grpc.service":"gitaly.CommitService","grpc.start_time":"2019-09-17T22:17:17Z","grpc.time_ms":2319.161,"level":"info","msg":"finished streaming call with code OK","peer.address":"@","span.kind":"server","system":"grpc","time":"2019-09-17T22:17:19Z"}
/var/log/gitlab/gitlab-rails/production_json.log:{"method":"GET","path":"/root/linux/blob/master/README","format":"json","controller":"Projects::BlobController","action":"show","status":200,"duration":2448.77,"view":0.49,"db":21.63,"time":"2019-09-17T22:17:19.800Z","params":[{"key":"viewer","value":"rich"},{"key":"namespace_id","value":"root"},{"key":"project_id","value":"linux"},{"key":"id","value":"master/README"}],"remote_ip":"[filtered]","user_id":2,"username":"bob","ua":"Mozilla/5.0 (Mac) Gecko Firefox/69.0","queue_duration":3.38,"gitaly_calls":1,"gitaly_duration":0.77,"rugged_calls":4,"rugged_duration_ms":28.74,"correlation_id":"LOt9hgi1TV4"}
```
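
For JSON logs specifically, a short Ruby sketch yields parsed events instead of raw lines (the path and correlation ID are the examples used on this page):

```ruby
require 'json'

correlation_id = 'LOt9hgi1TV4'

File.foreach('/var/log/gitlab/gitlab-rails/production_json.log') do |line|
  event = JSON.parse(line) rescue next # skip lines that are not valid JSON
  puts event if event['correlation_id'] == correlation_id
end
```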

### Searching in distributed architectures

If you have done some horizontal scaling in your GitLab infrastructure, then
you must search across _all_ of your GitLab nodes. You can do this with
some sort of log aggregation software like Loki, ELK, Splunk, or others.

You can use a tool like Ansible or PSSH (parallel SSH) that can execute identical commands across your servers in
parallel, or craft your own solution.

### Viewing the request in the Performance Bar

You can use the [performance bar](../monitoring/performance/performance_bar.md) to view interesting data including calls made to SQL and Gitaly.

To view the data, the correlation ID of the request must match the same session as the user
viewing the performance bar. For API requests, this means that you must perform the request
using the session cookie of the signed-in user.

For example, if you want to view the database queries executed for the following API endpoint:

```shell
https://gitlab.com/api/v4/groups/2564205/projects?with_security_reports=true&page=1&per_page=1
```

First, enable the **Developer Tools** panel. See [Getting the correlation ID in your browser](#getting-the-correlation-id-in-your-browser) for details on how to do this.

After developer tools have been enabled, obtain a session cookie as follows:

1. Visit <https://gitlab.com> while logged in.
1. Optional. Select the **Fetch/XHR** request filter in the **Developer Tools** panel. This step is described for Google Chrome developer tools and is not strictly necessary; it just makes it easier to find the correct request.
1. Select the `results?request_id=<some-request-id>` request on the left hand side.
1. The session cookie is displayed under the `Request Headers` section of the `Headers` panel. Right-click on the cookie value and select `Copy value`.

![Obtaining a session cookie for request](img/obtaining-a-session-cookie-for-request_v14_3.png)

The value of the session cookie is now in your clipboard, for example:

```shell
experimentation_subject_id=<subject-id>; _gitlab_session=<session-id>; event_filter=all; visitor_id=<visitor-id>; perf_bar_enabled=true; sidebar_collapsed=true; diff_view=inline; sast_entry_point_dismissed=true; auto_devops_settings_dismissed=true; cf_clearance=<cf-clearance>; collapsed_gutter=false; frequently_used_emojis=clap,thumbsup,rofl,tada,eyes,bow
```

Use the value of the session cookie to craft an API request by pasting it into a custom header of a `curl` request:

```shell
$ curl --include "https://gitlab.com/api/v4/groups/2564205/projects?with_security_reports=true&page=1&per_page=1" \
--header 'cookie: experimentation_subject_id=<subject-id>; _gitlab_session=<session-id>; event_filter=all; visitor_id=<visitor-id>; perf_bar_enabled=true; sidebar_collapsed=true; diff_view=inline; sast_entry_point_dismissed=true; auto_devops_settings_dismissed=true; cf_clearance=<cf-clearance>; collapsed_gutter=false; frequently_used_emojis=clap,thumbsup,rofl,tada,eyes,bow'

date: Tue, 28 Sep 2021 03:55:33 GMT
content-type: application/json
...
x-request-id: 01FGN8P881GF2E5J91JYA338Y3
...
[
 {
  "id":27497069,
  "description":"Analyzer for images used on live K8S containers based on Starboard"
 },
 "container_registry_image_prefix":"registry.gitlab.com/gitlab-org/security-products/analyzers/cluster-image-scanning",
 "..."
]
```

The response contains the data from the API endpoint, and a `correlation_id` value, returned in the `x-request-id` header, as described in the [Identify the correlation ID for a request](#identify-the-correlation-id-for-a-request) section.

You can then view the database details for this request:

1. Paste the `x-request-id` value into the `request details` field of the [performance bar](../monitoring/performance/performance_bar.md) and press <kbd>Enter/Return</kbd>. This example uses the `x-request-id` value `01FGN8P881GF2E5J91JYA338Y3`, returned by the above response:

   ![Paste request ID into progress bar](img/paste-request-id-into-progress-bar_v14_3.png)

1. A new request is inserted into the `Request Selector` dropdown on the right-hand side of the Performance Bar. Select the new request to view the metrics of the API request:

   ![Select request ID from request selector drop down menu](img/select-request-id-from-request-selector-dropdown-menu_v14_3.png)

   <!-- vale gitlab.Substitutions = NO -->
1. Select the `pg` link in the Performance Bar to view the database queries executed by the API request:

   ![Select pg link in Performance Bar](img/select-pg-link-in-progress-bar_v14_3.png)
   <!-- vale gitlab.Substitutions = YES -->

   The database query dialog is displayed:

   ![Database details dialog](img/database-query-dialog_v14_3.png)
<!-- This redirect file can be deleted after 2022-11-12. -->
<!-- Redirects that point to other docs in the same project expire in three months. -->
<!-- Redirects that point to docs in a different project or site (for example, link is not relative and starts with `https:`) expire in one year. -->
<!-- Before deletion, see: https://docs.gitlab.com/ee/development/documentation/redirects.html -->

@ -95,7 +95,7 @@ The approach here depends on the data volume and the cleanup strategy. If we can
records by doing a database query and the record count is not high, then the data migration can
be executed in a Rails migration.

In case the data volume is higher (>1000 records), it's better to create a background migration. If unsure, please contact the database team for advice.
In case the data volume is higher (>1000 records), it's better to create a background migration. If unsure, contact the database team for advice.

Example for cleaning up records in the `emails` table in a database migration:

@ -199,7 +199,7 @@ class CleanupUsersUpdatedAtRename < Gitlab::Database::Migration[1.0]
end
```

If you're renaming a [large table](https://gitlab.com/gitlab-org/gitlab/-/blob/master/rubocop/rubocop-migrations.yml#L3), please carefully consider the state when the first migration has run but the second cleanup migration hasn't been run yet.
If you're renaming a [large table](https://gitlab.com/gitlab-org/gitlab/-/blob/master/rubocop/rubocop-migrations.yml#L3), carefully consider the state when the first migration has run but the second cleanup migration hasn't been run yet.
With [Canary](https://gitlab.com/gitlab-com/gl-infra/readiness/-/tree/master/library/canary/) it is possible that the system runs in this state for a significant amount of time.

## Changing Column Constraints

@ -8,7 +8,7 @@ info: To determine the technical writer assigned to the Stage/Group associated w
WARNING:
Background migrations are strongly discouraged in favor of the new [batched background migrations framework](batched_background_migrations.md).
Please check that documentation and determine if that framework suits your needs and fall back
Check that documentation and determine if that framework suits your needs and fall back
to these only if required.

Background migrations should be used to perform data migrations whenever a

@ -63,7 +63,7 @@ The following guides provide a quick introduction and links to follow on more ad
- Guide on [understanding EXPLAIN plans](understanding_explain_plans.md).
- [Explaining the unexplainable series in `depesz`](https://www.depesz.com/tag/unexplainable/).

We also have licensed access to The Art of PostgreSQL available, if you are interested in getting access please check out the
We also have licensed access to The Art of PostgreSQL. If you are interested in getting access, check out the
[issue (confidential)](https://gitlab.com/gitlab-org/database-team/team-tasks/-/issues/23).

Finally, you can find various guides in the [Database guides](index.md) page that cover more specific

@ -135,7 +135,7 @@ post-deployment migration or a background data migration:
- If the data volume is less than `1000` records, then the data migration can be executed within the post-migration.
- If the data volume is higher than `1000` records, it's advised to create a background migration.

When unsure about which option to use, please contact the Database team for advice.
When unsure about which option to use, contact the Database team for advice.

Back to our example, the epics table is not considerably large nor frequently accessed,
so we add a post-deployment migration for the 13.0 milestone (current),

@ -206,6 +206,6 @@ In that rare case you need 3 releases end-to-end:
1. Release `N.M+1` - Cleanup the background migration.
1. Release `N.M+2` - Validate the `NOT NULL` constraint.

For these cases, please consult the database team early in the update cycle. The `NOT NULL`
For these cases, consult the database team early in the update cycle. The `NOT NULL`
constraint may not be required or other options could exist that do not affect really large
or frequently accessed tables.

@ -84,7 +84,7 @@ renamed manually in a separate migration, which can be also part of the release
- Foreign key columns might still contain the old table name. For smaller tables, follow our
  [standard column rename process](avoiding_downtime_in_migrations.md#renaming-columns)
- Avoid renaming database tables that are used with triggers.
- Table modifications (add or remove columns) are not allowed during the rename process, please make sure that all changes to the table happen before the rename migration is started (or in the next release).
- Table modifications (add or remove columns) are not allowed during the rename process. Make sure that all changes to the table happen before the rename migration is started (or in the next release).
- As the index names might change, verify that the model does not use bulk insert
  (for example, `insert_all` and `upsert_all`) with the `unique_by: index_name` option.
  Renaming an index while using these methods may break functionality.

@ -189,7 +189,7 @@ migration or a background data migration:
- If the data volume is less than `1,000` records, then the data migration can be executed within the post-migration.
- If the data volume is higher than `1,000` records, it's advised to create a background migration.

When unsure about which option to use, please contact the Database team for advice.
When unsure about which option to use, contact the Database team for advice.

Back to our example, the issues table is considerably large and frequently accessed, so we are going
to add a background migration for the 13.0 milestone (current),

@ -85,7 +85,7 @@ Or:
hello = _("Hello world!")
```

Be careful when translating strings at the class or module level since these are only evaluated once
Be careful when translating strings at the class or module level because these are only evaluated once
at class load time. For example:

```ruby

@ -299,16 +299,16 @@ use `%{created_at}` in Ruby but `%{createdAt}` in JavaScript. Make sure to
- In Ruby/HAML:

  ```ruby
  _("Hello %{name}") % { name: 'Joe' } => 'Hello Joe'
  format(_("Hello %{name}"), name: 'Joe') => 'Hello Joe'
  ```

- In Vue:

  Use the [`GlSprintf`](https://gitlab-org.gitlab.io/gitlab-ui/?path=/docs/utilities-sprintf--sentence-with-link) component if:

  - You need to include child components in the translation string.
  - You need to include HTML in your translation string.
  - You're using `sprintf` and need to pass `false` as the third argument to
  - You are including child components in the translation string.
  - You are including HTML in your translation string.
  - You are using `sprintf` and are passing `false` as the third argument to
    prevent it from escaping placeholder values.

  For example:

@ -482,7 +482,7 @@ Instead of this:

```ruby
# incorrect usage example
n_("%{project_name}", "%d projects selected", count) % { project_name: 'GitLab' }
format(n_("%{project_name}", "%d projects selected", count), project_name: 'GitLab')
```

### Namespaces

@ -248,6 +248,9 @@ The intent is to ensure that a change doesn't introduce a failure after `gitlab-

## As-if-JH jobs

NOTE:
This is disabled for now.

The `* as-if-jh` jobs run the GitLab test suite "as if JiHu", meaning as if the jobs would run in the context
of [GitLab JH](jh_features_review.md). These jobs are only created in the following cases:

@ -262,12 +265,18 @@ The intent is to ensure that a change doesn't introduce a failure after `gitlab-

### When to consider applying `pipeline:run-as-if-jh` label

NOTE:
This is disabled for now.

If a Ruby file is renamed and there's a corresponding [`prepend_mod` line](jh_features_review.md#jh-features-based-on-ce-or-ee-features),
it's likely that GitLab JH is relying on it and requires a corresponding
change to rename the module or class it's prepending.

### Corresponding JH branch

NOTE:
This is disabled for now.

You can create a corresponding JH branch on [GitLab JH](https://jihulab.com/gitlab-cn/gitlab) by
appending `-jh` to the branch name. If a corresponding JH branch is found,
`* as-if-jh` jobs grab the `jh` folder from the respective branch,

@ -9,7 +9,7 @@ info: To determine the technical writer assigned to the Stage/Group associated w
We use [Sidekiq](https://github.com/mperham/sidekiq) as our background
job processor. These guides are for writing jobs that will work well on
GitLab.com and be consistent with our existing worker classes. For
information on administering GitLab, see [configuring Sidekiq](../../administration/sidekiq.md).
information on administering GitLab, see [configuring Sidekiq](../../administration/sidekiq/index.md).

There are pages with additional detail on the following topics:

@ -27,7 +27,7 @@ There are pages with additional detail on the following topics:
All workers should include `ApplicationWorker` instead of `Sidekiq::Worker`,
which adds some convenience methods and automatically sets the queue based on
the [routing rules](../../administration/operations/extra_sidekiq_routing.md#queue-routing-rules).
the [routing rules](../../administration/sidekiq/extra_sidekiq_routing.md#queue-routing-rules).
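
A minimal sketch of a conforming worker (the class name and body are hypothetical; real workers also declare attributes such as `feature_category` and `data_consistency`):

```ruby
# frozen_string_literal: true

# Hypothetical worker: including ApplicationWorker instead of
# Sidekiq::Worker picks up GitLab's convenience methods and queue routing.
class ProcessSomethingWorker
  include ApplicationWorker

  idempotent!

  def perform(project_id)
    # do the work for the given project ID
  end
end
```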

## Retries

@ -63,7 +63,7 @@ error rate.
Previously, each worker had its own queue, which was automatically set based on the
worker class name. For a worker named `ProcessSomethingWorker`, the queue name
would be `process_something`. You can now route workers to a specific queue using
[queue routing rules](../../administration/operations/extra_sidekiq_routing.md#queue-routing-rules).
[queue routing rules](../../administration/sidekiq/extra_sidekiq_routing.md#queue-routing-rules).
In GDK, new workers are routed to a queue named `default`.

If you're not sure what queue a worker uses,

@ -74,7 +74,7 @@ After adding a new worker, run `bin/rake
gitlab:sidekiq:all_queues_yml:generate` to regenerate
`app/workers/all_queues.yml` or `ee/app/workers/all_queues.yml` so that
it can be picked up by
[`sidekiq-cluster`](../../administration/operations/extra_sidekiq_processes.md)
[`sidekiq-cluster`](../../administration/sidekiq/extra_sidekiq_processes.md)
in installations that don't use routing rules. To learn more about potential changes,
read [Use routing rules by default and deprecate queue selectors for self-managed](https://gitlab.com/groups/gitlab-com/gl-infra/-/epics/596).

@ -175,7 +175,7 @@ available in Sidekiq. There are possible workarounds such as:

Some jobs have a weight declared. This is only used when running Sidekiq
in the default execution mode - using
[`sidekiq-cluster`](../../administration/operations/extra_sidekiq_processes.md)
[`sidekiq-cluster`](../../administration/sidekiq/extra_sidekiq_processes.md)
does not account for weights.

As we are [moving towards using `sidekiq-cluster` in Free](https://gitlab.com/gitlab-org/gitlab/-/issues/34396), newly-added

@ -127,7 +127,7 @@ blocks:
## Arguments logging

As of GitLab 13.6, Sidekiq job arguments are logged by default, unless [`SIDEKIQ_LOG_ARGUMENTS`](../../administration/troubleshooting/sidekiq.md#log-arguments-to-sidekiq-jobs)
As of GitLab 13.6, Sidekiq job arguments are logged by default, unless [`SIDEKIQ_LOG_ARGUMENTS`](../../administration/sidekiq/sidekiq_troubleshooting.md#log-arguments-to-sidekiq-jobs)
is disabled.

By default, the only arguments logged are numeric arguments, because

@ -234,7 +234,7 @@ When a user makes an HTTP request, such as creating a new project, the
initial request is routed through Workhorse to another service, which
may, in turn, make other requests. To help trace the request as it flows
across services, Workhorse generates a random value called a
[correlation ID](../../administration/troubleshooting/tracing_correlation_id.md).
[correlation ID](../../administration/logs/tracing_correlation_id.md).
Workhorse sends this correlation ID via the `X-Request-Id` HTTP header.

Some GitLab services, such as GitLab Shell, generate their own

@ -584,7 +584,7 @@ WARNING:
Indexing a large instance generates a lot of Sidekiq jobs.
Make sure to prepare for this task by having a
[scalable setup](../../administration/reference_architectures/index.md) or creating
[extra Sidekiq processes](../../administration/operations/extra_sidekiq_processes.md).
[extra Sidekiq processes](../../administration/sidekiq/extra_sidekiq_processes.md).

1. [Configure your Elasticsearch host and port](#enable-advanced-search).
1. Create empty indices:

@ -783,8 +783,8 @@ additional process dedicated to indexing a set of queues (or queue group). This
ensure that indexing queues always have a dedicated worker, while the rest of the queues have
another dedicated worker to avoid contention.

For this purpose, use the [queue selector](../../administration/operations/extra_sidekiq_processes.md#queue-selector)
option that allows a more general selection of queue groups using a [worker matching query](../../administration/operations/extra_sidekiq_routing.md#worker-matching-query).
For this purpose, use the [queue selector](../../administration/sidekiq/extra_sidekiq_processes.md#queue-selector)
option that allows a more general selection of queue groups using a [worker matching query](../../administration/sidekiq/extra_sidekiq_routing.md#worker-matching-query).

To handle these two queue groups, we generally recommend one of the following two options. You can either:

@ -818,7 +818,7 @@ WARNING:
When starting multiple processes, the number of processes cannot exceed the number of CPU
cores you want to dedicate to Sidekiq. Each Sidekiq process can use only one CPU core, subject
to the available workload and concurrency settings. For more details, see how to
[run multiple Sidekiq processes](../../administration/operations/extra_sidekiq_processes.md).
[run multiple Sidekiq processes](../../administration/sidekiq/extra_sidekiq_processes.md).

### Two nodes, one process for each

@ -189,7 +189,7 @@ sudo gitlab-rake gitlab:elastic:clear_locked_projects
If `ElasticCommitIndexerWorker` Sidekiq workers are failing with this error during indexing, it usually means that Elasticsearch is unable to keep up with the concurrency of indexing requests. To address this, change the following settings:

- To decrease the indexing throughput you can decrease `Bulk request concurrency` (see [Advanced Search settings](elasticsearch.md#advanced-search-configuration)). This is set to `10` by default, but you can change it to as low as 1 to reduce the number of concurrent indexing operations.
- If changing `Bulk request concurrency` didn't help, you can use the [queue selector](../../administration/operations/extra_sidekiq_processes.md#queue-selector) option to [limit indexing jobs only to specific Sidekiq nodes](elasticsearch.md#index-large-instances-with-dedicated-sidekiq-nodes-or-processes), which should reduce the number of indexing requests.
- If changing `Bulk request concurrency` didn't help, you can use the [queue selector](../../administration/sidekiq/extra_sidekiq_processes.md#queue-selector) option to [limit indexing jobs only to specific Sidekiq nodes](elasticsearch.md#index-large-instances-with-dedicated-sidekiq-nodes-or-processes), which should reduce the number of indexing requests.

### Indexing is very slow or fails with `rejected execution of coordinating operation` messages

@ -1,40 +1,11 @@
---
stage: none
group: unassigned
info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://about.gitlab.com/handbook/engineering/ux/technical-writing/#assignments
redirect_to: '../administration/sidekiq/sidekiq_job_migration.md'
remove_date: '2022-11-11'
---

# Sidekiq job migration **(FREE SELF)**
This document was moved to [another location](../administration/sidekiq/sidekiq_job_migration.md).

WARNING:
This operation should be very uncommon. We do not recommend it for the vast majority of GitLab instances.

Sidekiq routing rules allow administrators to re-route certain background jobs from their regular queue to an alternative queue. By default, GitLab uses one queue per background job type. GitLab has over 400 background job types, and so correspondingly it has over 400 queues.

Most administrators do not need to change this setting. In some cases with particularly large background job processing workloads, Redis performance may suffer due to the number of queues that GitLab listens to.

If the Sidekiq routing rules are changed, administrators need to take care with the migration to avoid losing jobs entirely. The basic migration steps are:

1. Listen to both the old and new queues.
1. Update the routing rules.
1. Wait until there are no publishers dispatching jobs to the old queues.
1. Run the [Rake tasks for future jobs](#future-jobs).
1. Wait for the old queues to be empty.
1. Stop listening to the old queues.

## Future jobs

Step 4 involves rewriting some Sidekiq job data for jobs that are already stored in Redis, but due to run in the future. There are two sets of jobs to run in the future: scheduled jobs and jobs to be retried. We provide a separate Rake task to migrate each set:

- `gitlab:sidekiq:migrate_jobs:retry` for jobs to be retried.
- `gitlab:sidekiq:migrate_jobs:schedule` for scheduled jobs.

Most of the time, running both at the same time is the correct choice. There are two separate tasks to allow for more fine-grained control where needed. To run both at once:

```shell
# omnibus-gitlab
sudo gitlab-rake gitlab:sidekiq:migrate_jobs:retry gitlab:sidekiq:migrate_jobs:schedule

# source installations
bundle exec rake gitlab:sidekiq:migrate_jobs:retry gitlab:sidekiq:migrate_jobs:schedule RAILS_ENV=production
```
<!-- This redirect file can be deleted after <2022-11-11>. -->
<!-- Redirects that point to other docs in the same project expire in three months. -->
<!-- Redirects that point to docs in a different project or site (link is not relative and starts with `https:`) expire in one year. -->
<!-- Before deletion, see: https://docs.gitlab.com/ee/development/documentation/redirects.html -->

@ -68,8 +68,7 @@ The v1 chart is backward compatible with the v0 chart, so no configuration chang
### Upgrade deployments to the v2 `auto-deploy-image`

The v2 auto-deploy-image contains multiple dependency and architectural changes.
If your Auto DevOps project has an active environment deployed with the v1 `auto-deploy-image`,
please proceed with the following upgrade guide. Otherwise, you can skip this process.
If your Auto DevOps project has an active environment deployed with the v1 `auto-deploy-image`, proceed with the following upgrade guide. Otherwise, you can skip this process.

#### Kubernetes 1.16+

@ -276,4 +275,4 @@ you might encounter the following error:
- `Error: rendered manifests contain a resource that already exists. Unable to continue with install: Secret "production-postgresql" in namespace "<project-name>-production" exists and cannot be imported into the current release: invalid ownership metadata; label validation error: missing key "app.kubernetes.io/managed-by": must be set to "Helm"; annotation validation error: missing key "meta.helm.sh/release-name": must be set to "production-postgresql"; annotation validation error: missing key "meta.helm.sh/release-namespace": must be set to "<project-name>-production"`

This is because the previous deployment was deployed with Helm2, which is not compatible with Helm3.
To resolve the problem, please follow the [upgrade guide](#upgrade-deployments-to-the-v2-auto-deploy-image).
To resolve the problem, follow the [upgrade guide](#upgrade-deployments-to-the-v2-auto-deploy-image).

@ -83,7 +83,7 @@ Background migrations and batched migrations are not the same, so you should che
complete before updating.

Decrease the time required to complete these migrations by increasing the number of
[Sidekiq workers](../administration/operations/extra_sidekiq_processes.md)
[Sidekiq workers](../administration/sidekiq/extra_sidekiq_processes.md)
that can process jobs in the `background_migration` queue.

### Background migrations

@ -471,7 +471,7 @@ and [Helm Chart deployments](https://docs.gitlab.com/charts/). They come with ap
[upgraded to 15.1](#1510) before upgrading to 15.2 (and later) due to a
configuration change in Rails that can result in inconsistent ETag key
generation.
- Some Sidekiq workers were renamed in this release. To avoid any disruption, [run the Rake tasks to migrate any pending jobs](../raketasks/sidekiq_job_migration.md#future-jobs) before starting the upgrade to GitLab 15.2.0.
- Some Sidekiq workers were renamed in this release. To avoid any disruption, [run the Rake tasks to migrate any pending jobs](../administration/sidekiq/sidekiq_job_migration.md#future-jobs) before starting the upgrade to GitLab 15.2.0.

### 15.1.0

@ -68,17 +68,34 @@ To configure GitLab CI/CD as a backend:
   }
   ```

1. In the root directory of your project repository, create a `.gitlab-ci.yml` file. Use
   [this file](https://gitlab.com/gitlab-org/gitlab/-/blob/master/lib/gitlab/ci/templates/Terraform.gitlab-ci.yml)
   to populate it.

1. In the root directory of your project repository, create a `.gitlab-ci.yml` file. Use the
   [`Terraform.gitlab-ci.yml`](https://gitlab.com/gitlab-org/gitlab/-/blob/master/lib/gitlab/ci/templates/Terraform.gitlab-ci.yml)
   template to populate it.
1. Push your project to GitLab. This action triggers a pipeline, which
   runs the `gitlab-terraform init`, `gitlab-terraform validate`, and
   `gitlab-terraform plan` commands.
1. Trigger the manual `terraform apply` job from the previous pipeline to provision the defined infrastructure.
1. Trigger the manual `deploy` job from the previous pipeline, which runs the `gitlab-terraform apply` command, to provision the defined infrastructure.

The output from the above `terraform` commands should be viewable in the job logs.

The `gitlab-terraform` CLI is a wrapper around the `terraform` CLI. You can [view the source code of `gitlab-terraform`](https://gitlab.com/gitlab-org/terraform-images/-/blob/master/src/bin/gitlab-terraform.sh) if you're interested.

If you prefer to call the `terraform` commands explicitly, you can override
the template, and instead, use it as a reference for what you can achieve.

### Customizing your Terraform environment variables

When you use the `Terraform.gitlab-ci.yml` template, you can use [Terraform HTTP configuration variables](https://www.terraform.io/language/settings/backends/http#configuration-variables) when you define your CI/CD jobs.

To customize your `terraform init` and override the Terraform configuration,
use environment variables instead of the `terraform init -backend-config=...` approach.
When you use `-backend-config`, the configuration is:

- Cached in the output of the `terraform plan` command.
- Usually passed forward to the `terraform apply` command.

This configuration can lead to problems like [being unable to lock Terraform state files in CI jobs](troubleshooting.md#unable-to-lock-terraform-state-files-in-ci-jobs-for-terraform-apply-using-a-plan-created-in-a-previous-job).

## Access the state from your local machine

You can access the GitLab-managed Terraform state from your local machine.

@ -97,6 +97,8 @@ As a result, to create a plan and later use the same plan in another CI job, you
`Error: Error acquiring the state lock` errors when using `-backend-config=password=$CI_JOB_TOKEN`.
This happens because the value of `$CI_JOB_TOKEN` is only valid for the duration of the current job.

Another possible error message for the same problem could be: `Error: Error loading state: HTTP remote state endpoint requires auth`.

As a workaround, use [http backend configuration variables](https://www.terraform.io/docs/language/settings/backends/http.html#configuration-variables) in your CI job,
which is what happens behind the scenes when following the
[Get started using GitLab CI](terraform_state.md#initialize-a-terraform-state-as-a-backend-by-using-gitlab-cicd) instructions.

@ -41,7 +41,7 @@ To view merge requests for all projects in a group:
If your group contains subgroups, this view also displays merge requests from the subgroup projects.

## View all merge requests assigned to you
### View all merge requests assigned to you

To view all merge requests assigned to you:

@ -52,13 +52,14 @@ To view all merge requests assigned to you:

<!-- vale gitlab.FirstPerson = YES -->

Or:
or:

- To use a [keyboard shortcut](../../shortcuts.md), press <kbd>Shift</kbd> + <kbd>m</kbd>.
- On the top bar, on the top right, select **{merge-request-open}** **Merge requests**.
  Then select one of the following:
  - [Review requests](reviews/index.md).
  - Merge requests assigned.

or:

1. On the top bar, on the top right, select **{merge-request-open}** **Merge requests**.
1. From the dropdown list, select **Assigned to you**.

## Filter the list of merge requests

@ -206,7 +206,7 @@ module Gitlab
    end

    def health_context
      HealthStatus::Context.new([table_name])
      HealthStatus::Context.new(connection, [table_name])
    end

    def hold!(until_time: 10.minutes.from_now)

@ -144,9 +144,9 @@ module Gitlab
    end

    def adjust_migration(active_migration)
      signal = HealthStatus.evaluate(active_migration)
      signals = HealthStatus.evaluate(active_migration)

      if signal.is_a?(HealthStatus::Signals::Stop)
      if signals.any?(&:stop?)
        active_migration.hold!
      else
        active_migration.optimize!

@ -4,21 +4,29 @@ module Gitlab
  module Database
    module BackgroundMigration
      module HealthStatus
        DEFAULT_INDICATORS = [
          Indicators::AutovacuumActiveOnTable,
          Indicators::WriteAheadLog
        ].freeze

        # Rather than passing along the migration, we use a more explicitly defined context
        Context = Struct.new(:tables)
        Context = Struct.new(:connection, :tables)

        def self.evaluate(migration, indicator = Indicators::AutovacuumActiveOnTable)
          signal = begin
            indicator.new(migration.health_context).evaluate
          rescue StandardError => e
            Gitlab::ErrorTracking.track_exception(e, migration_id: migration.id,
                                                  job_class_name: migration.job_class_name)
            Signals::Unknown.new(indicator, reason: "unexpected error: #{e.message} (#{e.class})")
        def self.evaluate(migration, indicators = DEFAULT_INDICATORS)
          indicators.map do |indicator|
            signal = begin
              indicator.new(migration.health_context).evaluate
            rescue StandardError => e
              Gitlab::ErrorTracking.track_exception(e, migration_id: migration.id,
                                                    job_class_name: migration.job_class_name)

              Signals::Unknown.new(indicator, reason: "unexpected error: #{e.message} (#{e.class})")
            end

            log_signal(signal, migration) if signal.log_info?

            signal
          end

          log_signal(signal, migration) if signal.log_info?

          signal
        end

        def self.log_signal(signal, migration)

@ -0,0 +1,74 @@
# frozen_string_literal: true

module Gitlab
  module Database
    module BackgroundMigration
      module HealthStatus
        module Indicators
          class WriteAheadLog
            include Gitlab::Utils::StrongMemoize

            LIMIT = 42
            PENDING_WAL_COUNT_SQL = <<~SQL
              WITH
              current_wal_file AS (
                SELECT pg_walfile_name(pg_current_wal_insert_lsn()) AS pg_walfile_name
              ),
              current_wal AS (
                SELECT
                  ('x' || substring(pg_walfile_name, 9, 8))::bit(32)::int AS log,
                  ('x' || substring(pg_walfile_name, 17, 8))::bit(32)::int AS seg,
                  pg_walfile_name
                FROM current_wal_file
              ),
              archive_wal AS (
                SELECT
                  ('x' || substring(last_archived_wal, 9, 8))::bit(32)::int AS log,
                  ('x' || substring(last_archived_wal, 17, 8))::bit(32)::int AS seg,
                  last_archived_wal
                FROM pg_stat_archiver
              )
              SELECT ((current_wal.log - archive_wal.log) * 256) + (current_wal.seg - archive_wal.seg) AS pending_wal_count
              FROM current_wal, archive_wal
            SQL

            def initialize(context)
              @connection = context.connection
            end

            def evaluate
              return Signals::NotAvailable.new(self.class, reason: 'indicator disabled') unless enabled?

              unless pending_wal_count
                return Signals::NotAvailable.new(self.class, reason: 'WAL archive queue can not be calculated')
              end

              if pending_wal_count > LIMIT
                Signals::Stop.new(self.class, reason: "WAL archive queue is too big")
              else
                Signals::Normal.new(self.class, reason: 'WAL archive queue is within limit')
              end
            end

            private

            attr_reader :connection

            def enabled?
              Feature.enabled?(:batched_migrations_health_status_wal, type: :ops)
            end

            # Returns number of WAL segments pending archival
            def pending_wal_count
              strong_memoize(:pending_wal_count) do
                Gitlab::Database::LoadBalancing::Session.current.use_primary do
                  connection.execute(PENDING_WAL_COUNT_SQL).to_a.first&.fetch('pending_wal_count')
                end
              end
            end
          end
        end
      end
    end
  end
end
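
The arithmetic in `PENDING_WAL_COUNT_SQL` is easier to follow with a small sketch of how WAL file names decode (assuming the standard naming: 8 hex characters of timeline, 8 of "log" number, and 8 of segment number, with 256 segments per log unit at the default 16 MB segment size):

```ruby
# Decode a WAL file name into its (log, seg) pair, mirroring the
# substring(..., 9, 8) and substring(..., 17, 8) expressions in the SQL.
def wal_position(walfile_name)
  [walfile_name[8, 8].to_i(16), walfile_name[16, 8].to_i(16)]
end

current_log, current_seg = wal_position('000000010000004400000049')   # hypothetical file names
archived_log, archived_seg = wal_position('000000010000004400000040')

# Same formula as the SQL's pending_wal_count:
pending = ((current_log - archived_log) * 256) + (current_seg - archived_seg)
# => 9 segments still waiting to be archived
```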

@ -22,6 +22,10 @@ module Gitlab
      def log_info?
        false
      end

      def stop?
        false
      end
      # :nocov:

      private

@ -38,6 +42,10 @@ module Gitlab
      def log_info?
        true
      end

      def stop?
        true
      end
      # :nocov:
    end

@ -29332,6 +29332,9 @@ msgstr ""
msgid "Please check your email (%{email}) to verify that you own this address and unlock the power of CI/CD. Didn't receive it? %{resend_link}. Wrong email address? %{update_link}."
|
||||
msgstr ""
|
||||
|
||||
msgid "Please click the link in the confirmation email before continuing. It was sent to "
|
||||
msgstr ""
|
||||
|
||||
msgid "Please complete your profile with email address"
|
||||
msgstr ""
|
||||
|
||||
|
|
@ -33287,6 +33290,9 @@ msgstr ""
|
|||
msgid "Resend Request"
|
||||
msgstr ""
|
||||
|
||||
msgid "Resend confirmation e-mail"
|
||||
msgstr ""
|
||||
|
||||
msgid "Resend confirmation email"
|
||||
msgstr ""
|
||||
|
||||
|
|
@ -43559,6 +43565,9 @@ msgstr ""
|
|||
msgid "Watch how"
|
||||
msgstr ""
|
||||
|
||||
msgid "We also use email for avatar detection if no avatar is uploaded."
|
||||
msgstr ""
|
||||
|
||||
msgid "We are currently unable to fetch data for the pipeline header."
|
||||
msgstr ""
|
||||
|
||||
|
|
|
|||
|
|
@ -28,6 +28,17 @@ RSpec.describe Admin::DevOpsReportController do
|
|||
|
||||
    let(:request_params) { { tab: 'devops-score' } }
  end

  it_behaves_like 'Snowplow event tracking' do
    subject { get :show, format: :html }

    let(:feature_flag_name) { :route_hll_to_snowplow_phase2 }
    let(:category) { described_class.name }
    let(:action) { 'perform_analytics_usage_action' }
    let(:label) { 'redis_hll_counters.analytics.analytics_total_unique_counts_monthly' }
    let(:property) { 'i_analytics_dev_ops_score' }
    let(:namespace) { nil }
  end
end
end

@ -13,5 +13,18 @@ RSpec.describe Admin::UsageTrendsController do
it_behaves_like 'tracking unique visits', :index do
  let(:target_id) { 'i_analytics_instance_statistics' }
end

it_behaves_like 'Snowplow event tracking' do
  subject { get :index }

  let(:feature_flag_name) { :route_hll_to_snowplow_phase2 }
  let(:category) { described_class.name }
  let(:action) { 'perform_analytics_usage_action' }
  let(:label) { 'redis_hll_counters.analytics.analytics_total_unique_counts_monthly' }
  let(:property) { 'i_analytics_instance_statistics' }
  let(:namespace) { nil }
  let(:project) { nil }
  let(:user) { admin }
end
end
end

@ -3,14 +3,20 @@
require('spec_helper')

RSpec.describe Projects::ProtectedBranchesController do
  let(:project) { create(:project, :repository) }
  let_it_be_with_reload(:project) { create(:project, :repository) }
  let_it_be(:maintainer) { create(:user) }

  let(:protected_branch) { create(:protected_branch, project: project) }
  let(:project_params) { { namespace_id: project.namespace.to_param, project_id: project } }
  let(:base_params) { project_params.merge(id: protected_branch.id) }
  let(:user) { create(:user) }
  let(:user) { maintainer }

  before_all do
    project.add_maintainer(maintainer)
  end

  before do
    project.add_maintainer(user)
    sign_in(user)
  end

  describe "GET #index" do

@ -30,23 +36,16 @@ RSpec.describe Projects::ProtectedBranchesController do
let(:create_params) { attributes_for(:protected_branch).merge(access_level_params) }
|
||||
|
||||
before do
|
||||
sign_in(user)
|
||||
end
|
||||
|
||||
it 'creates the protected branch rule' do
|
||||
expect do
|
||||
post(:create, params: project_params.merge(protected_branch: create_params))
|
||||
end.to change(ProtectedBranch, :count).by(1)
|
||||
end
|
||||
|
||||
context 'when a policy restricts rule deletion' do
|
||||
before do
|
||||
policy = instance_double(ProtectedBranchPolicy, allowed?: false)
|
||||
allow(ProtectedBranchPolicy).to receive(:new).and_return(policy)
|
||||
end
|
||||
|
||||
context 'when a policy restricts rule creation' do
|
||||
it "prevents creation of the protected branch rule" do
|
||||
disallow(:create_protected_branch, an_instance_of(ProtectedBranch))
|
||||
|
||||
post(:create, params: project_params.merge(protected_branch: create_params))
|
||||
|
||||
expect(ProtectedBranch.count).to eq 0
|
||||
|
|
@ -57,10 +56,6 @@ RSpec.describe Projects::ProtectedBranchesController do
|
|||
describe "PUT #update" do
|
||||
let(:update_params) { { name: 'new_name' } }
|
||||
|
||||
before do
|
||||
sign_in(user)
|
||||
end
|
||||
|
||||
it 'updates the protected branch rule' do
|
||||
put(:update, params: base_params.merge(protected_branch: update_params))
|
||||
|
||||
|
|
@ -68,13 +63,10 @@ RSpec.describe Projects::ProtectedBranchesController do
|
|||
expect(json_response["name"]).to eq('new_name')
|
||||
end
|
||||
|
||||
context 'when a policy restricts rule deletion' do
|
||||
before do
|
||||
policy = instance_double(ProtectedBranchPolicy, allowed?: false)
|
||||
allow(ProtectedBranchPolicy).to receive(:new).and_return(policy)
|
||||
end
|
||||
|
||||
context 'when a policy restricts rule update' do
|
||||
it "prevents update of the protected branch rule" do
|
||||
disallow(:update_protected_branch, protected_branch)
|
||||
|
||||
old_name = protected_branch.name
|
||||
|
||||
put(:update, params: base_params.merge(protected_branch: update_params))
|
||||
|
|
@ -85,10 +77,6 @@ RSpec.describe Projects::ProtectedBranchesController do
|
|||
end
|
||||
|
||||
describe "DELETE #destroy" do
|
||||
before do
|
||||
sign_in(user)
|
||||
end
|
||||
|
||||
it "deletes the protected branch rule" do
|
||||
delete(:destroy, params: base_params)
|
||||
|
||||
|
|
@ -96,16 +84,18 @@ RSpec.describe Projects::ProtectedBranchesController do
|
|||
end
|
||||
|
||||
context 'when a policy restricts rule deletion' do
|
||||
before do
|
||||
policy = instance_double(ProtectedBranchPolicy, allowed?: false)
|
||||
allow(ProtectedBranchPolicy).to receive(:new).and_return(policy)
|
||||
end
|
||||
|
||||
it "prevents deletion of the protected branch rule" do
|
||||
disallow(:destroy_protected_branch, protected_branch)
|
||||
|
||||
delete(:destroy, params: base_params)
|
||||
|
||||
expect(response).to have_gitlab_http_status(:forbidden)
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
def disallow(ability, protected_branch)
|
||||
allow(Ability).to receive(:allowed?).and_call_original
|
||||
allow(Ability).to receive(:allowed?).with(user, ability, protected_branch).and_return(false)
|
||||
end
|
||||
end
|
||||
|
|
|
|||
|
|
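The new `disallow` helper replaces the old `ProtectedBranchPolicy` instance double with targeted `Ability` stubbing. The key detail is the fall-through: `and_call_original` keeps every other permission check real, so only the single ability under test is denied. A generic restatement of the pattern, using names from the helper above:

    # All Ability.allowed? calls behave normally...
    allow(Ability).to receive(:allowed?).and_call_original
    # ...except the one check being denied for this example.
    allow(Ability).to receive(:allowed?)
      .with(user, :destroy_protected_branch, protected_branch)
      .and_return(false)
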
@ -42,12 +42,12 @@ describe('Confirm Modal', () => {

  it('should emit `confirmed` event on `primary` modal event', () => {
    findGlModal().vm.$emit('primary');
    expect(wrapper.emitted('confirmed')).toBeTruthy();
    expect(wrapper.emitted('confirmed')).toHaveLength(1);
  });

  it('should emit `closed` event on `hidden` modal event', () => {
    modal.vm.$emit('hidden');
    expect(wrapper.emitted('closed')).toBeTruthy();
    expect(wrapper.emitted('closed')).toHaveLength(1);
  });
});

@ -421,6 +421,25 @@ RSpec.describe UsersHelper do
    end
  end

  describe '#user_email_help_text' do
    subject(:user_email_help_text) { helper.user_email_help_text(user) }

    context 'when `user.unconfirmed_email` is not set' do
      it 'contains avatar detection text' do
        expect(user_email_help_text).to include _('We also use email for avatar detection if no avatar is uploaded.')
      end
    end

    context 'when `user.unconfirmed_email` is set' do
      let(:user) { create(:user, :unconfirmed, unconfirmed_email: 'foo@bar.com') }

      it 'contains resend confirmation e-mail text' do
        expect(user_email_help_text).to include _('Resend confirmation e-mail')
        expect(user_email_help_text).to include _('Please click the link in the confirmation email before continuing. It was sent to ')
      end
    end
  end

  describe '#admin_user_actions_data_attributes' do
    subject(:data) { helper.admin_user_actions_data_attributes(user) }

@ -1,66 +0,0 @@
# frozen_string_literal: true

require 'spec_helper'

RSpec.describe 'Rails YAML safe load patch' do
  let(:unsafe_load) { false }

  let(:klass) do
    Class.new(ActiveRecord::Base) do
      self.table_name = 'issues'

      serialize :description
    end
  end

  before do
    allow(ActiveRecord::Base).to receive(:use_yaml_unsafe_load).and_return(unsafe_load)
  end

  context 'with safe load' do
    let(:instance) { klass.new(description: data) }

    context 'with default permitted classes' do
      let(:data) do
        {
          "test" => Time.now,
          ab: 1
        }
      end

      it 'deserializes data' do
        expect(Gitlab::ErrorTracking).not_to receive(:track_and_raise_for_dev_exception)

        instance.save!

        expect(klass.find(instance.id).description).to eq(data)
      end
    end

    context 'with unpermitted classes' do
      let(:data) { DateTime.now }

      it 'logs an exception and loads the data' do
        expect(Gitlab::ErrorTracking).to receive(:track_and_raise_for_dev_exception).twice

        instance.save!

        expect(klass.find(instance.id).description).to eq(data)
      end
    end
  end

  context 'with unsafe load' do
    let(:unsafe_load) { true }
    let(:data) { DateTime.now }
    let(:instance) { klass.new(description: data) }

    it 'loads the data' do
      expect(Gitlab::ErrorTracking).not_to receive(:track_and_raise_for_dev_exception)

      instance.save!

      expect(klass.find(instance.id).description).to eq(data)
    end
  end
end

@ -15,8 +15,8 @@ RSpec.describe Gitlab::Database::BackgroundMigration::BatchedMigrationRunner do
  end

  before do
    allow(Gitlab::Database::BackgroundMigration::HealthStatus).to receive(:evaluate)
      .and_return(Gitlab::Database::BackgroundMigration::HealthStatus::Signals::Normal)
    normal_signal = instance_double(Gitlab::Database::BackgroundMigration::HealthStatus::Signals::Normal, stop?: false)
    allow(Gitlab::Database::BackgroundMigration::HealthStatus).to receive(:evaluate).and_return([normal_signal])
  end

  describe '#run_migration_job' do

@ -77,14 +77,14 @@ RSpec.describe Gitlab::Database::BackgroundMigration::BatchedMigrationRunner do
  end

  it 'puts migration on hold on stop signal' do
    expect(health_status).to receive(:evaluate).and_return(stop_signal)
    expect(health_status).to receive(:evaluate).and_return([stop_signal])

    expect { runner.run_migration_job(migration) }.to change { migration.on_hold? }
      .from(false).to(true)
  end

  it 'optimizes migration on normal signal' do
    expect(health_status).to receive(:evaluate).and_return(normal_signal)
    expect(health_status).to receive(:evaluate).and_return([normal_signal])

    expect(migration).to receive(:optimize!)

@ -92,7 +92,7 @@ RSpec.describe Gitlab::Database::BackgroundMigration::BatchedMigrationRunner do
  end

  it 'optimizes migration on no signal' do
    expect(health_status).to receive(:evaluate).and_return(not_available_signal)
    expect(health_status).to receive(:evaluate).and_return([not_available_signal])

    expect(migration).to receive(:optimize!)

@ -100,7 +100,7 @@ RSpec.describe Gitlab::Database::BackgroundMigration::BatchedMigrationRunner do
  end

  it 'optimizes migration on unknown signal' do
    expect(health_status).to receive(:evaluate).and_return(unknown_signal)
    expect(health_status).to receive(:evaluate).and_return([unknown_signal])

    expect(migration).to receive(:optimize!)

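The runner spec now stubs `HealthStatus.evaluate` to return an array of signals instead of a single signal. A hedged sketch of how a caller might consume such a collection, assuming only the `stop?` predicate introduced above; `hold!` is a hypothetical method name, and the real runner logic is not part of this diff:

    signals = Gitlab::Database::BackgroundMigration::HealthStatus.evaluate(migration)

    # Hold the migration if any indicator raised a stop signal.
    migration.hold! if signals.any?(&:stop?)
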
@ -20,9 +20,9 @@ RSpec.describe Gitlab::Database::BackgroundMigration::HealthStatus::Indicators::
    swapout_view_for_table(:postgres_autovacuum_activity)
  end

  let(:context) { Gitlab::Database::BackgroundMigration::HealthStatus::Context.new(tables) }
  let(:tables) { [table] }
  let(:table) { 'users' }
  let(:context) { Gitlab::Database::BackgroundMigration::HealthStatus::Context.new(connection, tables) }

  context 'without autovacuum activity' do
    it 'returns Normal signal' do

@ -0,0 +1,61 @@
# frozen_string_literal: true

require 'spec_helper'

RSpec.describe Gitlab::Database::BackgroundMigration::HealthStatus::Indicators::WriteAheadLog do
  let(:connection) { Gitlab::Database.database_base_models[:main].connection }

  around do |example|
    Gitlab::Database::SharedModel.using_connection(connection) do
      example.run
    end
  end

  describe '#evaluate' do
    let(:tables) { [table] }
    let(:table) { 'users' }
    let(:context) { Gitlab::Database::BackgroundMigration::HealthStatus::Context.new(connection, tables) }

    subject(:evaluate) { described_class.new(context).evaluate }

    it 'remembers the indicator class' do
      expect(evaluate.indicator_class).to eq(described_class)
    end

    it 'returns NoSignal signal in case the feature flag is disabled' do
      stub_feature_flags(batched_migrations_health_status_wal: false)

      expect(evaluate).to be_a(Gitlab::Database::BackgroundMigration::HealthStatus::Signals::NotAvailable)
      expect(evaluate.reason).to include('indicator disabled')
    end

    it 'returns NoSignal signal when WAL archive queue can not be calculated' do
      expect(connection).to receive(:execute).and_return([{ 'pending_wal_count' => nil }])

      expect(evaluate).to be_a(Gitlab::Database::BackgroundMigration::HealthStatus::Signals::NotAvailable)
      expect(evaluate.reason).to include('WAL archive queue can not be calculated')
    end

    it 'uses primary database' do
      expect(Gitlab::Database::LoadBalancing::Session.current).to receive(:use_primary).and_yield

      evaluate
    end

    context 'when WAL archive queue size is below the limit' do
      it 'returns Normal signal' do
        expect(connection).to receive(:execute).and_return([{ 'pending_wal_count' => 1 }])
        expect(evaluate).to be_a(Gitlab::Database::BackgroundMigration::HealthStatus::Signals::Normal)
        expect(evaluate.reason).to include('WAL archive queue is within limit')
      end
    end

    context 'when WAL archive queue size is above the limit' do
      it 'returns Stop signal' do
        expect(connection).to receive(:execute).and_return([{ 'pending_wal_count' => 420 }])
        expect(evaluate).to be_a(Gitlab::Database::BackgroundMigration::HealthStatus::Signals::Stop)
        expect(evaluate.reason).to include('WAL archive queue is too big')
      end
    end
  end
end

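The expectations above pin down the indicator's decision flow: a disabled feature flag or an incalculable queue yields `NotAvailable`, a queue under the limit yields `Normal`, and anything over the limit yields `Stop`. A condensed sketch of `#evaluate` reconstructed from the spec, so only an approximation of the real implementation; the `Signals` constructor arguments are an assumption:

    def evaluate
      unless Feature.enabled?(:batched_migrations_health_status_wal)
        return Signals::NotAvailable.new(self.class, reason: 'indicator disabled')
      end

      unless pending_wal_count
        return Signals::NotAvailable.new(self.class, reason: 'WAL archive queue can not be calculated')
      end

      if pending_wal_count > LIMIT
        Signals::Stop.new(self.class, reason: 'WAL archive queue is too big')
      else
        Signals::Normal.new(self.class, reason: 'WAL archive queue is within limit')
      end
    end
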
@ -12,30 +12,47 @@ RSpec.describe Gitlab::Database::BackgroundMigration::HealthStatus do
  end

  describe '.evaluate' do
    subject(:evaluate) { described_class.evaluate(migration, indicator_class) }
    subject(:evaluate) { described_class.evaluate(migration, [autovacuum_indicator_class]) }

    let(:migration) { build(:batched_background_migration, :active) }

    let(:health_status) { 'Gitlab::Database::BackgroundMigration::HealthStatus' }
    let(:indicator_class) { class_double("#{health_status}::Indicators::AutovacuumActiveOnTable") }
    let(:indicator) { instance_double("#{health_status}::Indicators::AutovacuumActiveOnTable") }
    let(:health_status) { Gitlab::Database::BackgroundMigration::HealthStatus }
    let(:autovacuum_indicator_class) { health_status::Indicators::AutovacuumActiveOnTable }
    let(:wal_indicator_class) { health_status::Indicators::WriteAheadLog }
    let(:autovacuum_indicator) { instance_double(autovacuum_indicator_class) }
    let(:wal_indicator) { instance_double(wal_indicator_class) }

    before do
      allow(indicator_class).to receive(:new).with(migration.health_context).and_return(indicator)
      allow(autovacuum_indicator_class).to receive(:new).with(migration.health_context).and_return(autovacuum_indicator)
    end

    it 'returns a signal' do
    context 'with default indicators' do
      subject(:evaluate) { described_class.evaluate(migration) }

      it 'returns a collection of signals' do
        normal_signal = instance_double("#{health_status}::Signals::Normal", log_info?: false)
        not_available_signal = instance_double("#{health_status}::Signals::NotAvailable", log_info?: false)

        expect(autovacuum_indicator).to receive(:evaluate).and_return(normal_signal)
        expect(wal_indicator_class).to receive(:new).with(migration.health_context).and_return(wal_indicator)
        expect(wal_indicator).to receive(:evaluate).and_return(not_available_signal)

        expect(evaluate).to contain_exactly(normal_signal, not_available_signal)
      end
    end

    it 'returns a collection of signals' do
      signal = instance_double("#{health_status}::Signals::Normal", log_info?: false)

      expect(indicator).to receive(:evaluate).and_return(signal)
      expect(autovacuum_indicator).to receive(:evaluate).and_return(signal)

      expect(evaluate).to eq(signal)
      expect(evaluate).to contain_exactly(signal)
    end

    it 'logs interesting signals' do
      signal = instance_double("#{health_status}::Signals::Stop", log_info?: true)

      expect(indicator).to receive(:evaluate).and_return(signal)
      expect(autovacuum_indicator).to receive(:evaluate).and_return(signal)
      expect(described_class).to receive(:log_signal).with(signal, migration)

      evaluate

@ -44,7 +61,7 @@ RSpec.describe Gitlab::Database::BackgroundMigration::HealthStatus do
    it 'does not log signals of no interest' do
      signal = instance_double("#{health_status}::Signals::Normal", log_info?: false)

      expect(indicator).to receive(:evaluate).and_return(signal)
      expect(autovacuum_indicator).to receive(:evaluate).and_return(signal)
      expect(described_class).not_to receive(:log_signal)

      evaluate

@ -54,7 +71,7 @@ RSpec.describe Gitlab::Database::BackgroundMigration::HealthStatus do
      let(:error) { RuntimeError.new('everything broken') }

      before do
        expect(indicator).to receive(:evaluate).and_raise(error)
        expect(autovacuum_indicator).to receive(:evaluate).and_raise(error)
      end

      it 'does not fail' do

@ -62,8 +79,10 @@ RSpec.describe Gitlab::Database::BackgroundMigration::HealthStatus do
      end

      it 'returns Unknown signal' do
        expect(evaluate).to be_an_instance_of(Gitlab::Database::BackgroundMigration::HealthStatus::Signals::Unknown)
        expect(evaluate.reason).to eq("unexpected error: everything broken (RuntimeError)")
        signal = evaluate.first

        expect(signal).to be_an_instance_of(Gitlab::Database::BackgroundMigration::HealthStatus::Signals::Unknown)
        expect(signal.reason).to eq("unexpected error: everything broken (RuntimeError)")
      end

      it 'reports the exception to error tracking' do

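Read together, these examples imply a `.evaluate` that takes a list of indicator classes (defaulting to autovacuum plus WAL), maps each to a signal, logs the interesting ones, and converts indicator errors into `Unknown` signals. A sketch under those assumptions; `DEFAULT_INDICATORS` is an assumed constant name and the error-reporting call is inferred, not taken from this diff:

    def self.evaluate(migration, indicators = DEFAULT_INDICATORS)
      indicators.map do |indicator|
        signal = begin
          indicator.new(migration.health_context).evaluate
        rescue StandardError => e
          Gitlab::ErrorTracking.track_exception(e)
          Signals::Unknown.new(indicator, reason: "unexpected error: #{e.message} (#{e.class})")
        end

        # Only signals that declare themselves interesting are logged.
        log_signal(signal, migration) if signal.log_info?
        signal
      end
    end
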
@ -1438,4 +1438,10 @@ RSpec.describe ApplicationSetting do
      end
    end
  end

  context 'personal access token prefix' do
    it 'sets the correct default value' do
      expect(setting.personal_access_token_prefix).to eql('glpat-')
    end
  end
end

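A brief illustration of the new default; reading the setting through `Gitlab::CurrentSettings` is an assumption about how callers would observe it:

    # On a fresh instance with column defaults applied:
    Gitlab::CurrentSettings.personal_access_token_prefix
    # => "glpat-"
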
@ -3,14 +3,22 @@
require 'spec_helper'

RSpec.describe API::ProtectedBranches do
  let(:user) { create(:user) }
  let!(:project) { create(:project, :repository) }
  let_it_be_with_reload(:project) { create(:project, :repository) }
  let_it_be(:maintainer) { create(:user) }
  let_it_be(:guest) { create(:user) }

  let(:protected_name) { 'feature' }
  let(:branch_name) { protected_name }

  let!(:protected_branch) do
    create(:protected_branch, project: project, name: protected_name)
  end

  before_all do
    project.add_maintainer(maintainer)
    project.add_guest(guest)
  end

  describe "GET /projects/:id/protected_branches" do
    let(:params) { {} }
    let(:route) { "/projects/#{project.id}/protected_branches" }

@ -29,9 +37,7 @@ RSpec.describe API::ProtectedBranches do
    end

    context 'when authenticated as a maintainer' do
      before do
        project.add_maintainer(user)
      end
      let(:user) { maintainer }

      context 'when search param is not present' do
        it_behaves_like 'protected branches' do

@ -49,9 +55,7 @@ RSpec.describe API::ProtectedBranches do
    end

    context 'when authenticated as a guest' do
      before do
        project.add_guest(user)
      end
      let(:user) { guest }

      it_behaves_like '403 response' do
        let(:request) { get api(route, user) }

@ -84,9 +88,7 @@ RSpec.describe API::ProtectedBranches do
    end

    context 'when authenticated as a maintainer' do
      before do
        project.add_maintainer(user)
      end
      let(:user) { maintainer }

      it_behaves_like 'protected branch'

@ -104,9 +106,7 @@ RSpec.describe API::ProtectedBranches do
    end

    context 'when authenticated as a guest' do
      before do
        project.add_guest(user)
      end
      let(:user) { guest }

      it_behaves_like '403 response' do
        let(:request) { get api(route, user) }

@ -124,9 +124,7 @@ RSpec.describe API::ProtectedBranches do
    end

    context 'when authenticated as a maintainer' do
      before do
        project.add_maintainer(user)
      end
      let(:user) { maintainer }

      it 'protects a single branch' do
        post post_endpoint, params: { name: branch_name }

@ -226,13 +224,10 @@ RSpec.describe API::ProtectedBranches do
      end
    end

    context 'when a policy restricts rule deletion' do
      before do
        policy = instance_double(ProtectedBranchPolicy, allowed?: false)
        expect(ProtectedBranchPolicy).to receive(:new).and_return(policy)
      end
    context 'when a policy restricts rule creation' do
      it "prevents creation of the protected branch rule" do
        disallow(:create_protected_branch, an_instance_of(ProtectedBranch))

      it "prevents deletion of the protected branch rule" do
        post post_endpoint, params: { name: branch_name }

        expect(response).to have_gitlab_http_status(:forbidden)

@ -241,9 +236,7 @@ RSpec.describe API::ProtectedBranches do
    end

    context 'when authenticated as a guest' do
      before do
        project.add_guest(user)
      end
      let(:user) { guest }

      it "returns a 403 error if guest" do
        post post_endpoint, params: { name: branch_name }

@ -254,12 +247,9 @@ RSpec.describe API::ProtectedBranches do
  end

  describe "DELETE /projects/:id/protected_branches/unprotect/:branch" do
    let(:user) { maintainer }
    let(:delete_endpoint) { api("/projects/#{project.id}/protected_branches/#{branch_name}", user) }

    before do
      project.add_maintainer(user)
    end

    it "unprotects a single branch" do
      delete delete_endpoint

@ -277,12 +267,9 @@ RSpec.describe API::ProtectedBranches do
    end

    context 'when a policy restricts rule deletion' do
      before do
        policy = instance_double(ProtectedBranchPolicy, allowed?: false)
        expect(ProtectedBranchPolicy).to receive(:new).and_return(policy)
      end

      it "prevents deletion of the protected branch rule" do
        disallow(:destroy_protected_branch, protected_branch)

        delete delete_endpoint

        expect(response).to have_gitlab_http_status(:forbidden)

@ -299,4 +286,9 @@ RSpec.describe API::ProtectedBranches do
      end
    end
  end

  def disallow(ability, protected_branch)
    allow(Ability).to receive(:allowed?).and_call_original
    allow(Ability).to receive(:allowed?).with(user, ability, protected_branch).and_return(false)
  end
end

@ -46,7 +46,7 @@ RSpec.describe API::Settings, 'Settings', :do_not_mock_admin_mode_setting do
      expect(json_response['spam_check_api_key']).to be_nil
      expect(json_response['wiki_page_max_content_bytes']).to be_a(Integer)
      expect(json_response['require_admin_approval_after_user_signup']).to eq(true)
      expect(json_response['personal_access_token_prefix']).to be_nil
      expect(json_response['personal_access_token_prefix']).to eq('glpat-')
      expect(json_response['admin_mode']).to be(false)
      expect(json_response['whats_new_variant']).to eq('all_tiers')
      expect(json_response['user_deactivation_emails_enabled']).to be(true)

@ -3,7 +3,8 @@
require 'spec_helper'

RSpec.describe ProtectedBranches::CreateService do
  let(:project) { create(:project) }
  let_it_be_with_reload(:project) { create(:project) }

  let(:user) { project.first_owner }
  let(:params) do
    {

@ -13,11 +14,11 @@ RSpec.describe ProtectedBranches::CreateService do
    }
  end

  subject(:service) { described_class.new(project, user, params) }

  describe '#execute' do
    let(:name) { 'master' }

    subject(:service) { described_class.new(project, user, params) }

    it 'creates a new protected branch' do
      expect { service.execute }.to change(ProtectedBranch, :count).by(1)
      expect(project.protected_branches.last.push_access_levels.map(&:access_level)).to eq([Gitlab::Access::MAINTAINER])

@ -35,8 +36,6 @@ RSpec.describe ProtectedBranches::CreateService do
    context 'when protecting a branch with a name that contains HTML tags' do
      let(:name) { 'foo<b>bar<\b>' }

      subject(:service) { described_class.new(project, user, params) }

      it 'creates a new protected branch' do
        expect { service.execute }.to change(ProtectedBranch, :count).by(1)
        expect(project.protected_branches.last.name).to eq(name)

@ -60,16 +59,18 @@ RSpec.describe ProtectedBranches::CreateService do
    end

    context 'when a policy restricts rule creation' do
      before do
        policy = instance_double(ProtectedBranchPolicy, allowed?: false)
        expect(ProtectedBranchPolicy).to receive(:new).and_return(policy)
      end

      it "prevents creation of the protected branch rule" do
        disallow(:create_protected_branch, an_instance_of(ProtectedBranch))

        expect do
          service.execute
        end.to raise_error(Gitlab::Access::AccessDeniedError)
      end
    end
  end

  def disallow(ability, protected_branch)
    allow(Ability).to receive(:allowed?).and_call_original
    allow(Ability).to receive(:allowed?).with(user, ability, protected_branch).and_return(false)
  end
end

@ -3,13 +3,14 @@
require 'spec_helper'

RSpec.describe ProtectedBranches::DestroyService do
  let(:protected_branch) { create(:protected_branch) }
  let(:project) { protected_branch.project }
  let_it_be_with_reload(:project) { create(:project) }

  let(:protected_branch) { create(:protected_branch, project: project) }
  let(:user) { project.first_owner }

  describe '#execute' do
    subject(:service) { described_class.new(project, user) }
  subject(:service) { described_class.new(project, user) }

  describe '#execute' do
    it 'destroys a protected branch' do
      service.execute(protected_branch)

@ -25,16 +26,18 @@ RSpec.describe ProtectedBranches::DestroyService do
    end

    context 'when a policy restricts rule deletion' do
      before do
        policy = instance_double(ProtectedBranchPolicy, allowed?: false)
        expect(ProtectedBranchPolicy).to receive(:new).and_return(policy)
      end

      it "prevents deletion of the protected branch rule" do
        disallow(:destroy_protected_branch, protected_branch)

        expect do
          service.execute(protected_branch)
        end.to raise_error(Gitlab::Access::AccessDeniedError)
      end
    end
  end

  def disallow(ability, protected_branch)
    allow(Ability).to receive(:allowed?).and_call_original
    allow(Ability).to receive(:allowed?).with(user, ability, protected_branch).and_return(false)
  end
end

@ -3,17 +3,18 @@
require 'spec_helper'

RSpec.describe ProtectedBranches::UpdateService do
  let(:protected_branch) { create(:protected_branch) }
  let(:project) { protected_branch.project }
  let_it_be_with_reload(:project) { create(:project) }

  let(:protected_branch) { create(:protected_branch, project: project) }
  let(:user) { project.first_owner }
  let(:params) { { name: new_name } }

  subject(:service) { described_class.new(project, user, params) }

  describe '#execute' do
    let(:new_name) { 'new protected branch name' }
    let(:result) { service.execute(protected_branch) }

    subject(:service) { described_class.new(project, user, params) }

    it 'updates a protected branch' do
      expect(result.reload.name).to eq(params[:name])
    end

@ -30,8 +31,6 @@ RSpec.describe ProtectedBranches::UpdateService do
      let(:new_name) { 'foo<b>bar<\b>' }
      let(:result) { service.execute(protected_branch) }

      subject(:service) { described_class.new(project, user, params) }

      it 'updates a protected branch' do
        expect(result.reload.name).to eq(new_name)
      end

@ -45,15 +44,17 @@ RSpec.describe ProtectedBranches::UpdateService do
      end
    end

    context 'when a policy restricts rule creation' do
      before do
        policy = instance_double(ProtectedBranchPolicy, allowed?: false)
        expect(ProtectedBranchPolicy).to receive(:new).and_return(policy)
      end
    context 'when a policy restricts rule update' do
      it "prevents update of the protected branch rule" do
        disallow(:update_protected_branch, protected_branch)

      it "prevents creation of the protected branch rule" do
        expect { service.execute(protected_branch) }.to raise_error(Gitlab::Access::AccessDeniedError)
      end
    end
  end

  def disallow(ability, protected_branch)
    allow(Ability).to receive(:allowed?).and_call_original
    allow(Ability).to receive(:allowed?).with(user, ability, protected_branch).and_return(false)
  end
end

@ -13,7 +13,7 @@
# - label
# - **extra

shared_examples 'Snowplow event tracking' do
shared_examples 'Snowplow event tracking' do |overrides: {}|
  let(:extra) { {} }

  it 'is not emitted if FF is disabled' do

@ -33,7 +33,7 @@ shared_examples 'Snowplow event tracking' do
        project: try(:project),
        label: try(:label),
        property: try(:property)
      }.compact.merge(extra)
      }.merge(overrides).compact.merge(extra)

      subject

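With the new `overrides:` parameter, a caller can force individual event attributes without defining a `let` for each one. A hypothetical usage, for a spec that wants to pin `project` to nil regardless of what is in scope:

    it_behaves_like 'Snowplow event tracking', overrides: { project: nil } do
      subject { get :index }

      let(:category) { described_class.name }
      let(:action) { 'perform_analytics_usage_action' }
    end
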
@ -366,6 +366,15 @@ RSpec.shared_examples 'it runs batched background migration jobs' do |tracking_d

        expect { migration_run }.to change { migration.reload.on_hold? }.from(false).to(true)
      end

      it 'puts migration on hold when the pending WAL count is above the limit' do
        sql = Gitlab::Database::BackgroundMigration::HealthStatus::Indicators::WriteAheadLog::PENDING_WAL_COUNT_SQL
        limit = Gitlab::Database::BackgroundMigration::HealthStatus::Indicators::WriteAheadLog::LIMIT

        expect(connection).to receive(:execute).with(sql).and_return([{ 'pending_wal_count' => limit + 1 }])

        expect { migration_run }.to change { migration.reload.on_hold? }.from(false).to(true)
      end
    end
  end
end