Add latest changes from gitlab-org/gitlab@master

Author: GitLab Bot
Date:   2024-02-01 12:09:16 +00:00
Parent: f23c6a03df
Commit: 002b575a8a
242 changed files with 24016 additions and 235 deletions

View File

@ -47,3 +47,7 @@ include:
inputs:
gem_name: "diff_match_patch"
gem_path_prefix: "vendor/gems/"
- local: .gitlab/ci/templates/gem.gitlab-ci.yml
inputs:
gem_name: "sidekiq-7.1.6"
gem_path_prefix: "vendor/gems/"

View File

@ -230,7 +230,7 @@ gem 'asciidoctor-kroki', '~> 0.8.0', require: false # rubocop:todo Gemfile/Missi
gem 'rouge', '~> 4.2.0' # rubocop:todo Gemfile/MissingFeatureCategory
gem 'truncato', '~> 0.7.12' # rubocop:todo Gemfile/MissingFeatureCategory
gem 'nokogiri', '~> 1.16' # rubocop:todo Gemfile/MissingFeatureCategory
gem 'gitlab-glfm-markdown', '~> 0.0.11', feature_category: :team_planning
gem 'gitlab-glfm-markdown', '~> 0.0.12', feature_category: :team_planning
# Calendar rendering
gem 'icalendar' # rubocop:todo Gemfile/MissingFeatureCategory
@ -256,7 +256,7 @@ gem 'state_machines-activerecord', '~> 0.8.0' # rubocop:todo Gemfile/MissingFeat
gem 'acts-as-taggable-on', '~> 10.0' # rubocop:todo Gemfile/MissingFeatureCategory
# Background jobs
gem 'sidekiq', '~> 7.1.6' # rubocop:todo Gemfile/MissingFeatureCategory
gem 'sidekiq', path: 'vendor/gems/sidekiq-7.1.6', require: 'sidekiq' # rubocop:todo Gemfile/MissingFeatureCategory
gem 'sidekiq-cron', '~> 1.12.0', feature_category: :shared
gem 'gitlab-sidekiq-fetcher', path: 'vendor/gems/sidekiq-reliable-fetch', require: 'sidekiq-reliable-fetch' # rubocop:todo Gemfile/MissingFeatureCategory

View File

@ -213,11 +213,11 @@
{"name":"gitlab-dangerfiles","version":"4.6.0","platform":"ruby","checksum":"441b37b17d1dad36268517490a30aaf57e43dffb2e9ebc1da38d3bc9fa20741e"},
{"name":"gitlab-experiment","version":"0.9.1","platform":"ruby","checksum":"f230ee742154805a755d5f2539dc44d93cdff08c5bbbb7656018d61f93d01f48"},
{"name":"gitlab-fog-azure-rm","version":"1.8.0","platform":"ruby","checksum":"e4f24b174b273b88849d12fbcfecb79ae1c09f56cbd614998714c7f0a81e6c28"},
{"name":"gitlab-glfm-markdown","version":"0.0.11","platform":"aarch64-linux","checksum":"6e1d507b40936d6eb287e042f461213e477e4842976a86c2a01ac4b06492ec11"},
{"name":"gitlab-glfm-markdown","version":"0.0.11","platform":"arm64-darwin","checksum":"1e98714692fb33463027a900818e24d2a74cef70e57993488e5438c78d0c6e41"},
{"name":"gitlab-glfm-markdown","version":"0.0.11","platform":"ruby","checksum":"4b318fc0271e3ab38920b073b1cbb168f84b5bca74001551779ed3edf622107c"},
{"name":"gitlab-glfm-markdown","version":"0.0.11","platform":"x86_64-darwin","checksum":"c740f6274f6cd0b8e54cce6250b57880e2a9b6ff7aefbe63c0e813eb49f56c76"},
{"name":"gitlab-glfm-markdown","version":"0.0.11","platform":"x86_64-linux","checksum":"fbfb0e8699f01e4cd09d60c2f4388ce92ac4975814fdda1e95168fb0213bf4fb"},
{"name":"gitlab-glfm-markdown","version":"0.0.12","platform":"aarch64-linux","checksum":"06904f7d790caa655ff72ed1c627200179a1e5daebfbbad190d49d361d1cedf2"},
{"name":"gitlab-glfm-markdown","version":"0.0.12","platform":"arm64-darwin","checksum":"ddeabbc0d34a9963eac8873865d38e34c25e6498dcb98ab9f1f95843ab3dd8a8"},
{"name":"gitlab-glfm-markdown","version":"0.0.12","platform":"ruby","checksum":"91b245c6b7abfa8638ea93c4c76c4ed7bc52711daa4c60422309a54994f81c57"},
{"name":"gitlab-glfm-markdown","version":"0.0.12","platform":"x86_64-darwin","checksum":"d6d7c3ae818b4094529c4c9e710adc320238e531d65401ee0cf509166271c523"},
{"name":"gitlab-glfm-markdown","version":"0.0.12","platform":"x86_64-linux","checksum":"3bbf7d7c0418bc6adbb75333df0a2ffe6362f710132dbc4324e5befb49c34e01"},
{"name":"gitlab-labkit","version":"0.35.0","platform":"ruby","checksum":"26ff06aa4e34ee7b01ead44cc56d01e45822845da70408ca35ff3b20e3c84679"},
{"name":"gitlab-license","version":"2.4.0","platform":"ruby","checksum":"fd238fb1e605a6b9250d4eb1744434ffd131f18d50a3be32f613c883f7635e20"},
{"name":"gitlab-mail_room","version":"0.0.24","platform":"ruby","checksum":"c7bf3df73dbcc024bc98dbf72514520ac2ff2b6d0124de496279fe56c13c3cb3"},
@ -601,7 +601,6 @@
{"name":"sexp_processor","version":"4.17.1","platform":"ruby","checksum":"91110946720307f30bf1d549e90d9a529fef40d1fc471c069c8cca7667015da0"},
{"name":"shellany","version":"0.0.1","platform":"ruby","checksum":"0e127a9132698766d7e752e82cdac8250b6adbd09e6c0a7fbbb6f61964fedee7"},
{"name":"shoulda-matchers","version":"5.1.0","platform":"ruby","checksum":"a01d20589989e9653ab4a28c67d9db2b82bcf0a2496cf01d5e1a95a4aaaf5b07"},
{"name":"sidekiq","version":"7.1.6","platform":"ruby","checksum":"7859da66d5bcef3c22bea2c3091d08c866890168e003f5bf4dea197dc37843a2"},
{"name":"sidekiq-cron","version":"1.12.0","platform":"ruby","checksum":"6663080a454088bd88773a0da3ae91e554b8a2e8b06cfc629529a83fd1a3096c"},
{"name":"sigdump","version":"0.2.4","platform":"ruby","checksum":"0bf2176e55c1a262788623fe5ea57caddd6ba2abebe5e349d9d5e7c3a3010ed7"},
{"name":"signet","version":"0.18.0","platform":"ruby","checksum":"66cda8c2edc2dde25090b792e7e6fc9598c3c2bdd64ffacd89f1ffe3cb9cea3b"},

View File

@ -178,6 +178,15 @@ PATH
nokogiri (>= 1.4.4)
omniauth (~> 2.0)
PATH
remote: vendor/gems/sidekiq-7.1.6
specs:
sidekiq (7.1.6)
concurrent-ruby (< 2)
connection_pool (>= 2.3.0)
rack (>= 2.2.4)
redis-client (>= 0.14.0)
PATH
remote: vendor/gems/sidekiq-reliable-fetch
specs:
@ -696,7 +705,7 @@ GEM
fog-core (= 2.1.0)
fog-json (~> 1.2.0)
mime-types
gitlab-glfm-markdown (0.0.11)
gitlab-glfm-markdown (0.0.12)
rb_sys (~> 0.9.86)
gitlab-labkit (0.35.0)
actionpack (>= 5.0.0, < 8.0.0)
@ -1569,11 +1578,6 @@ GEM
shellany (0.0.1)
shoulda-matchers (5.1.0)
activesupport (>= 5.2.0)
sidekiq (7.1.6)
concurrent-ruby (< 2)
connection_pool (>= 2.3.0)
rack (>= 2.2.4)
redis-client (>= 0.14.0)
sidekiq-cron (1.12.0)
fugit (~> 1.8)
globalid (>= 1.0.1)
@ -1910,7 +1914,7 @@ DEPENDENCIES
gitlab-dangerfiles (~> 4.6.0)
gitlab-experiment (~> 0.9.1)
gitlab-fog-azure-rm (~> 1.8.0)
gitlab-glfm-markdown (~> 0.0.11)
gitlab-glfm-markdown (~> 0.0.12)
gitlab-housekeeper!
gitlab-http!
gitlab-labkit (~> 0.35.0)
@ -2101,7 +2105,7 @@ DEPENDENCIES
sentry-ruby (~> 5.10.0)
sentry-sidekiq (~> 5.10.0)
shoulda-matchers (~> 5.1.0)
sidekiq (~> 7.1.6)
sidekiq!
sidekiq-cron (~> 1.12.0)
sigdump (~> 0.2.4)
simple_po_parser (~> 1.1.6)

View File

@ -1,6 +1,6 @@
<script>
import { GlCollapsibleListbox } from '@gitlab/ui';
import { debounce } from 'lodash';
import { debounce, isNull } from 'lodash';
import { __, s__ } from '~/locale';
import { createAlert } from '~/alert';
@ -33,7 +33,13 @@ export default {
},
selected: {
type: String,
required: true,
required: false,
default: null,
},
toggleText: {
type: String,
required: false,
default: null,
},
userNamespace: {
type: String,
@ -82,7 +88,11 @@ export default {
);
},
toggleText() {
listboxToggleText() {
if (isNull(this.selected)) {
return this.toggleText;
}
return truncate(this.selected, this.$options.MAX_IMPORT_TARGET_LENGTH);
},
@ -146,7 +156,7 @@ export default {
:items="items"
:disabled="disabled"
:selected="selected"
:toggle-text="toggleText"
:toggle-text="listboxToggleText"
searchable
fluid-width
toggle-class="gl-rounded-top-right-none! gl-rounded-bottom-right-none!"

View File

@ -51,7 +51,7 @@ export default {
isProjectCreationAllowed: {
type: Boolean,
required: false,
default: false,
default: true,
},
},

View File

@ -13,7 +13,7 @@ import {
GlFormCheckbox,
GlTooltipDirective,
} from '@gitlab/ui';
import { debounce, isNumber } from 'lodash';
import { debounce, isNumber, isUndefined } from 'lodash';
import { createAlert } from '~/alert';
import * as Sentry from '~/sentry/sentry_browser_wrapper';
import { s__, __, n__, sprintf } from '~/locale';
@ -31,7 +31,7 @@ import { STATUSES } from '../../constants';
import importGroupsMutation from '../graphql/mutations/import_groups.mutation.graphql';
import updateImportStatusMutation from '../graphql/mutations/update_import_status.mutation.graphql';
import bulkImportSourceGroupsQuery from '../graphql/queries/bulk_import_source_groups.query.graphql';
import { NEW_NAME_FIELD, ROOT_NAMESPACE, i18n } from '../constants';
import { NEW_NAME_FIELD, TARGET_NAMESPACE_FIELD, ROOT_NAMESPACE, i18n } from '../constants';
import { StatusPoller } from '../services/status_poller';
import {
isFinished,
@ -177,11 +177,16 @@ export default {
: isAvailableForImport(group) && status !== STATUSES.SCHEDULING;
const flags = {
isInvalid: (importTarget.validationErrors ?? []).filter((e) => !e.nonBlocking).length > 0,
isInvalid:
(importTarget?.validationErrors ?? []).filter((e) => !e.nonBlocking).length > 0,
isAvailableForImport: isGroupAvailableForImport,
isAllowedForReimport: false,
isFinished: isFinished(group),
isProjectCreationAllowed: isProjectCreationAllowed(importTarget?.targetNamespace),
isProjectCreationAllowed: importTarget?.targetNamespace
? isProjectCreationAllowed(importTarget.targetNamespace)
: // When targetNamespace is not selected, we set the flag to undefined (instead of defaulting to true / false)
// to allow import_actions_cell.vue to use its default prop value.
undefined,
};
return {
@ -428,6 +433,10 @@ export default {
},
async importGroup({ group, extraArgs, index }) {
if (!this.validateImportTargetNamespace(group.importTarget)) {
return;
}
if (group.flags.isFinished && !this.reimportRequests.includes(group.id)) {
this.validateImportTarget(group.importTarget);
this.reimportRequests.push(group.id);
@ -495,9 +504,25 @@ export default {
});
},
validateImportTargetNamespace(importTarget) {
if (isUndefined(importTarget.targetNamespace)) {
// eslint-disable-next-line no-param-reassign
importTarget.validationErrors = [
{ field: TARGET_NAMESPACE_FIELD, message: i18n.ERROR_TARGET_NAMESPACE_REQUIRED },
];
return false;
}
return true;
},
validateImportTarget: debounce(async function validate(importTarget) {
const newValidationErrors = [];
importTarget.cancellationToken?.cancel();
if (!this.validateImportTargetNamespace(importTarget)) {
return;
}
if (importTarget.newName === '') {
newValidationErrors.push({ field: NEW_NAME_FIELD, message: i18n.ERROR_REQUIRED });
} else if (!isNameValid(importTarget, this.groupPathRegex)) {
@ -539,14 +564,9 @@ export default {
}, VALIDATION_DEBOUNCE_TIME),
setDefaultImportTarget(group) {
// If we've reached this Vue application we have at least one potential import destination
const defaultTargetNamespace =
// first option: namespace id was explicitly provided
this.availableNamespaces.find((ns) => ns.id === this.defaultTargetNamespace) ??
// second option: first available namespace
this.availableNamespaces[0] ??
// last resort: if no namespaces are available - suggest creating new namespace at root
ROOT_NAMESPACE;
const lastTargetNamespace = this.availableNamespaces.find(
(ns) => ns.id === this.defaultTargetNamespace,
);
let importTarget;
if (group.lastImportTarget) {
@ -555,12 +575,12 @@ export default {
);
importTarget = {
targetNamespace: targetNamespace ?? defaultTargetNamespace,
targetNamespace: targetNamespace ?? lastTargetNamespace,
newName: group.lastImportTarget.newName,
};
} else {
importTarget = {
targetNamespace: defaultTargetNamespace,
targetNamespace: lastTargetNamespace,
newName: group.fullPath,
};
}
@ -572,6 +592,10 @@ export default {
validationErrors: [],
});
if (!importTarget.targetNamespace) {
return;
}
getGroupPathAvailability(
importTarget.newName,
getIdFromGraphQLId(importTarget.targetNamespace.id),

View File

@ -1,8 +1,10 @@
<script>
import { GlFormInput } from '@gitlab/ui';
import ImportTargetDropdown from '../../components/import_target_dropdown.vue';
import { getInvalidNameValidationMessage } from '../utils';
import { validationMessageFor } from '../utils';
import { NEW_NAME_FIELD, TARGET_NAMESPACE_FIELD } from '../constants';
export default {
components: {
@ -18,18 +20,27 @@ export default {
computed: {
selectedImportTarget() {
return this.group.importTarget.targetNamespace.fullPath || '';
return this.group.importTarget?.targetNamespace?.fullPath;
},
importTargetNewName() {
return this.group.importTarget?.newName;
},
validationMessage() {
return (
this.group.progress?.message || getInvalidNameValidationMessage(this.group.importTarget)
this.group.progress?.message ||
validationMessageFor(this.group.importTarget, TARGET_NAMESPACE_FIELD) ||
validationMessageFor(this.group.importTarget, NEW_NAME_FIELD)
);
},
validNameState() {
// bootstrap-vue requires null for "indifferent" state, if we return true
// this will highlight field in green like "passed validation"
return this.group.flags.isInvalid && this.group.flags.isAvailableForImport ? false : null;
return this.group.flags.isInvalid && this.isPathSelectionAvailable ? false : null;
},
isPathSelectionAvailable() {
return this.group.flags.isAvailableForImport;
},
@ -52,6 +63,7 @@ export default {
<div class="gl-display-flex gl-align-items-stretch">
<import-target-dropdown
:selected="selectedImportTarget"
:toggle-text="s__('BulkImport|Select parent group')"
:disabled="!isPathSelectionAvailable"
@select="onImportTargetSelect"
/>
@ -76,7 +88,7 @@ export default {
debounce="500"
data-testid="target-namespace-input"
:disabled="!isPathSelectionAvailable"
:value="group.importTarget.newName"
:value="importTargetNewName"
:aria-label="__('New name')"
:state="validNameState"
@input="$emit('update-new-name', $event)"

View File

@ -1,6 +1,7 @@
import { __, s__ } from '~/locale';
export const i18n = {
ERROR_TARGET_NAMESPACE_REQUIRED: s__('BulkImport|Please select a parent group.'),
ERROR_INVALID_FORMAT: s__(
'GroupSettings|Please choose a group URL with no special characters or spaces.',
),
@ -24,5 +25,6 @@ export const i18n = {
};
export const NEW_NAME_FIELD = 'newName';
export const TARGET_NAMESPACE_FIELD = 'targetNamespace';
export const ROOT_NAMESPACE = { fullPath: '', id: null };

View File

@ -5,8 +5,9 @@ export function isNameValid(importTarget, validationRegex) {
return validationRegex.test(importTarget[NEW_NAME_FIELD]);
}
export function getInvalidNameValidationMessage(importTarget) {
return importTarget.validationErrors?.find(({ field }) => field === NEW_NAME_FIELD)?.message;
export function validationMessageFor(importTarget, field) {
return importTarget?.validationErrors?.find(({ field: fieldName }) => fieldName === field)
?.message;
}
export function isFinished(group) {
@ -17,7 +18,7 @@ export function isAvailableForImport(group) {
return !group.progress || isFinished(group);
}
export function isProjectCreationAllowed(group) {
export function isProjectCreationAllowed(group = {}) {
return Boolean(group.projectCreationLevel) && group.projectCreationLevel !== 'noone';
}

View File

@ -72,12 +72,6 @@
}
}
input[type='submit'] {
margin-bottom: 0;
display: block;
width: 100%;
}
.devise-errors {
h2 {
margin-top: 0;

View File

@ -0,0 +1,23 @@
# frozen_string_literal: true
module Resolvers
module Projects
class PlanLimitsResolver < BaseResolver
include Gitlab::Graphql::Authorize::AuthorizeResource
type Types::ProjectPlanLimitsType, null: false
authorize :read_project
def resolve
authorize!(object)
schedule_allowed = Ability.allowed?(current_user, :read_ci_pipeline_schedules_plan_limit, object)
{
ci_pipeline_schedules: schedule_allowed ? object.actual_limits.ci_pipeline_schedules : nil
}
end
end
end
end

View File

@ -0,0 +1,13 @@
# frozen_string_literal: true
module Types
# rubocop: disable Graphql/AuthorizeTypes -- The resolver authorizes the request
class ProjectPlanLimitsType < BaseObject
graphql_name 'ProjectPlanLimits'
description 'Plan limits for the current project.'
field :ci_pipeline_schedules, GraphQL::Types::Int, null: true,
description: 'Maximum number of pipeline schedules allowed per project.'
end
# rubocop: enable Graphql/AuthorizeTypes
end

View File

@ -698,6 +698,12 @@ module Types
calls_gitaly: true,
alpha: { milestone: '16.9' }
field :project_plan_limits, Types::ProjectPlanLimitsType,
resolver: Resolvers::Projects::PlanLimitsResolver,
description: 'Plan limits for the current project.',
alpha: { milestone: '16.9' },
null: true
def protectable_branches
ProtectableDropdown.new(project, :branches).protectable_ref_names
end

View File

@ -6,12 +6,17 @@ module Ci
include Ci::HasVariable
include Ci::RawVariable
ROUTING_FEATURE_FLAG = :ci_partitioning_use_ci_pipeline_variables_routing_table
belongs_to :pipeline
self.primary_key = :id
self.sequence_name = :ci_pipeline_variables_id_seq
partitionable scope: :pipeline
partitionable scope: :pipeline, through: {
table: :p_ci_pipeline_variables,
flag: ROUTING_FEATURE_FLAG
}
alias_attribute :secret_value, :value

View File

@ -34,7 +34,7 @@ module Ci
return false if routing_class?
Gitlab::SafeRequestStore.fetch(routing_table_name_flag) do
::Feature.enabled?(routing_table_name_flag, :request)
::Feature.enabled?(routing_table_name_flag, :request, type: :gitlab_com_derisk)
end
end

View File

@ -28,7 +28,7 @@ module Packages
scope :pending_destruction, -> { stale.default }
scope :with_file_name, ->(file_name) { where(arel_table[:file].lower.eq(file_name.downcase)) }
scope :with_signature, ->(signature) { where(arel_table[:signature].lower.eq(signature.downcase)) }
scope :with_file_sha256, ->(checksums) { where(file_sha256: checksums) }
scope :with_file_sha256, ->(checksums) { where(file_sha256: Array.wrap(checksums).map(&:downcase)) }
def self.find_by_signature_and_file_and_checksum(signature, file_name, checksums)
with_signature(signature)

View File

@ -992,6 +992,10 @@ class ProjectPolicy < BasePolicy
rule { ~private_project & guest & external_user }.enable :read_container_image
rule { can?(:create_pipeline_schedule) }.policy do
enable :read_ci_pipeline_schedules_plan_limit
end
private
def user_is_user?

View File

@ -5,7 +5,7 @@ module Ci
class TriggerDownstreamPipelineService
# This is a temporary constant. It may be converted into an application setting
# in the future. See https://gitlab.com/gitlab-org/gitlab/-/issues/425941.
DOWNSTREAM_PIPELINE_TRIGGER_LIMIT_PER_PROJECT_USER_SHA = 50
DOWNSTREAM_PIPELINE_TRIGGER_LIMIT_PER_PROJECT_USER_SHA = 200
def initialize(bridge)
@bridge = bridge

View File

@ -47,10 +47,10 @@ module Members
def enqueue_jobs_that_needs_to_be_run_only_once_per_hierarchy(member, unassign_issuables)
return if recursive_call?
enqueue_cleanup_jobs_once_per_hierarchy(member, unassign_issuables)
enqueue_cleanup_jobs_once_per_heirarchy(member, unassign_issuables)
end
def enqueue_cleanup_jobs_once_per_hierarchy(member, unassign_issuables)
def enqueue_cleanup_jobs_once_per_heirarchy(member, unassign_issuables)
enqueue_delete_todos(member)
enqueue_unassign_issuables(member) if unassign_issuables
end

View File

@ -14,40 +14,10 @@ module Members
project_ids = entity.is_a?(Group) ? entity.all_projects.select(:id) : [entity.id]
unassign_from_issues(project_ids)
unassign_from_merge_requests(project_ids)
user.issue_assignees.on_issues(Issue.in_projects(project_ids).select(:id)).delete_all
user.merge_request_assignees.in_projects(project_ids).delete_all
user.invalidate_cache_counts
end
private
def unassign_from_issues(project_ids)
user.issue_assignees.on_issues(Issue.in_projects(project_ids)).select(:issue_id).each do |assignee|
issue = Issue.find(assignee.issue_id)
Issues::UpdateService.new(
container: issue.project,
current_user: user,
params: { assignee_ids: new_assignee_ids(issue) }
).execute(issue)
end
end
def unassign_from_merge_requests(project_ids)
user.merge_request_assignees.in_projects(project_ids).select(:merge_request_id).each do |assignee|
merge_request = MergeRequest.find(assignee.merge_request_id)
::MergeRequests::UpdateAssigneesService.new(
project: merge_request.project,
current_user: user,
params: { assignee_ids: new_assignee_ids(merge_request), skip_authorization: true }
).execute(merge_request)
end
end
def new_assignee_ids(issuable)
issuable.assignees.map(&:id) - [user.id]
end
end
end

View File

@ -2,18 +2,12 @@
module MergeRequests
class UpdateAssigneesService < UpdateService
def initialize(project:, current_user: nil, params: {})
super
@skip_authorization = @params.delete(:skip_authorization) || false
end
# a stripped down service that only does what it must to update the
# assignees, and knows that it does not have to check for other updates.
# This saves a lot of queries for irrelevant things that cannot possibly
# change in the execution of this service.
def execute(merge_request)
return merge_request unless current_user&.can?(:set_merge_request_metadata, merge_request) || skip_authorization
return merge_request unless current_user&.can?(:set_merge_request_metadata, merge_request)
old_assignees = merge_request.assignees.to_a
old_ids = old_assignees.map(&:id)
@ -39,8 +33,6 @@ module MergeRequests
private
attr_reader :skip_authorization
def assignee_ids
filter_sentinel_values(params.fetch(:assignee_ids)).first(1)
end

View File

@ -174,7 +174,8 @@ module Projects
allow_localhost: allow_local_requests?,
allow_local_network: allow_local_requests?,
dns_rebind_protection: dns_rebind_protection?,
deny_all_requests_except_allowed: Gitlab::CurrentSettings.deny_all_requests_except_allowed?)
deny_all_requests_except_allowed: Gitlab::CurrentSettings.deny_all_requests_except_allowed?,
outbound_local_requests_allowlist: Gitlab::CurrentSettings.outbound_local_requests_whitelist) # rubocop:disable Naming/InclusiveLanguage -- existing setting
.then do |(import_url, resolved_host)|
next '' if resolved_host.nil? || !import_url.scheme.in?(%w[http https])

View File

@ -5,7 +5,7 @@
- if crowd_enabled?
%li.nav-item
= link_to _("Crowd"), "#crowd", class: "nav-link #{active_when(form_based_auth_provider_has_active_class?(:crowd))}", 'data-toggle' => 'tab', role: 'tab'
= render_if_exists "devise/shared/kerberos_tab"
- ldap_servers.each_with_index do |server, i|
%li.nav-item
= link_to server['label'], "##{server['provider_name']}", class: "nav-link #{active_when(i == 0 && form_based_auth_provider_has_active_class?(:ldapmain))}", data: { toggle: 'tab', testid: 'ldap-tab' }, role: 'tab'
@ -15,6 +15,7 @@
- if show_password_form
%li.nav-item
= link_to _('Standard'), '#login-pane', class: 'nav-link', data: { toggle: 'tab', testid: 'standard-tab' }, role: 'tab'
- if render_signup_link && allow_signup?
%li.nav-item
= link_to _('Register'), '#register-pane', class: 'nav-link', data: { toggle: 'tab', testid: 'register-tab' }, role: 'tab'

View File

@ -13,7 +13,7 @@
= s_('GroupsNew|Importing groups by direct transfer is currently disabled.')
- if current_user.admin?
- admin_link = link_to('', general_admin_application_settings_path(anchor: 'js-visibility-settings'))
- admin_link = link_to('', general_admin_application_settings_path(anchor: 'js-import-export-settings'))
= safe_format(s_('GroupsNew|Please %{admin_link_start}enable it in the Admin settings%{admin_link_end}.'), tag_pair(admin_link, :admin_link_start, :admin_link_end))
- else

View File

@ -1,9 +1,9 @@
---
name: allow_members_to_see_invited_groups_in_access_dropdowns
feature_issue_url: https://gitlab.com/gitlab-org/gitlab/-/issues/345140
introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/140927
rollout_issue_url: https://gitlab.com/gitlab-com/gl-infra/production/-/issues/17364
milestone: '16.8'
group: group::source code
name: ci_partitioning_use_ci_pipeline_variables_routing_table
feature_issue_url: https://gitlab.com/gitlab-org/gitlab/-/issues/439069
introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/143334
rollout_issue_url: https://gitlab.com/gitlab-com/gl-infra/production/-/issues/17508
milestone: '16.9'
group: group::pipeline execution
type: gitlab_com_derisk
default_enabled: false

View File

@ -7,4 +7,12 @@ feature_categories:
description: Settings for the dependency proxy for packages.
introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/120894
milestone: '16.1'
gitlab_schema: gitlab_main
gitlab_schema: gitlab_main_cell
allow_cross_joins:
- gitlab_main_clusterwide
allow_cross_transactions:
- gitlab_main_clusterwide
allow_cross_foreign_keys:
- gitlab_main_clusterwide
sharding_key:
project_id: projects

View File

@ -7,4 +7,12 @@ feature_categories:
description: Namespace and group-level settings for the package registry
introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/50104
milestone: '13.8'
gitlab_schema: gitlab_main
gitlab_schema: gitlab_main_cell
allow_cross_joins:
- gitlab_main_clusterwide
allow_cross_transactions:
- gitlab_main_clusterwide
allow_cross_foreign_keys:
- gitlab_main_clusterwide
sharding_key:
namespace_id: namespaces

View File

@ -7,4 +7,12 @@ feature_categories:
description: Cleanup policy parameters for packages.
introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/85918
milestone: '15.0'
gitlab_schema: gitlab_main
gitlab_schema: gitlab_main_cell
allow_cross_joins:
- gitlab_main_clusterwide
allow_cross_transactions:
- gitlab_main_clusterwide
allow_cross_foreign_keys:
- gitlab_main_clusterwide
sharding_key:
project_id: projects

View File

@ -7,4 +7,12 @@ feature_categories:
description: Debian registry group level distributions
introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/49405
milestone: '13.8'
gitlab_schema: gitlab_main
gitlab_schema: gitlab_main_cell
allow_cross_joins:
- gitlab_main_clusterwide
allow_cross_transactions:
- gitlab_main_clusterwide
allow_cross_foreign_keys:
- gitlab_main_clusterwide
sharding_key:
group_id: namespaces

View File

@ -7,4 +7,12 @@ feature_categories:
description: Debian package registry project level distributions
introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/49405
milestone: '13.8'
gitlab_schema: gitlab_main
gitlab_schema: gitlab_main_cell
allow_cross_joins:
- gitlab_main_clusterwide
allow_cross_transactions:
- gitlab_main_clusterwide
allow_cross_foreign_keys:
- gitlab_main_clusterwide
sharding_key:
project_id: projects

View File

@ -8,4 +8,12 @@ feature_categories:
description: Information for individual packages in the package registry
introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/6607
milestone: '11.3'
gitlab_schema: gitlab_main
gitlab_schema: gitlab_main_cell
allow_cross_joins:
- gitlab_main_clusterwide
allow_cross_transactions:
- gitlab_main_clusterwide
allow_cross_foreign_keys:
- gitlab_main_clusterwide
sharding_key:
project_id: projects

View File

@ -7,4 +7,12 @@ feature_categories:
description: Represents package protection rules for package registry.
introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/124776
milestone: '16.4'
gitlab_schema: gitlab_main
gitlab_schema: gitlab_main_cell
allow_cross_joins:
- gitlab_main_clusterwide
allow_cross_transactions:
- gitlab_main_clusterwide
allow_cross_foreign_keys:
- gitlab_main_clusterwide
sharding_key:
project_id: projects

View File

@ -7,4 +7,12 @@ feature_categories:
description: Package registry file links and file metadata for RPM packages
introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/97484
milestone: '15.5'
gitlab_schema: gitlab_main
gitlab_schema: gitlab_main_cell
allow_cross_joins:
- gitlab_main_clusterwide
allow_cross_transactions:
- gitlab_main_clusterwide
allow_cross_foreign_keys:
- gitlab_main_clusterwide
sharding_key:
project_id: projects

View File

@ -0,0 +1,29 @@
# frozen_string_literal: true
class SyncIndexForPCiBuildsPart1 < Gitlab::Database::Migration[2.2]
include Gitlab::Database::PartitioningMigrationHelpers
milestone '16.9'
TABLE_NAME = :p_ci_builds
INDEXES = [
['p_ci_builds_auto_canceled_by_id_bigint_idx', [:auto_canceled_by_id_convert_to_bigint],
{ where: "auto_canceled_by_id_convert_to_bigint IS NOT NULL" }],
['p_ci_builds_commit_id_bigint_status_type_idx', [:commit_id_convert_to_bigint, :status, :type], {}],
['p_ci_builds_commit_id_bigint_type_name_ref_idx', [:commit_id_convert_to_bigint, :type, :name, :ref], {}]
]
disable_ddl_transaction!
def up
INDEXES.each do |index_name, columns, options|
add_concurrent_partitioned_index(TABLE_NAME, columns, name: index_name, **options)
end
end
def down
INDEXES.each do |index_name, _columns, _options|
remove_concurrent_partitioned_index_by_name(TABLE_NAME, index_name)
end
end
end

View File

@ -0,0 +1,56 @@
# frozen_string_literal: true
class AddFkForAutoCanceledByIdBigintBetweenPCiBuildsAndCiPipelines < Gitlab::Database::Migration[2.2]
include Gitlab::Database::PartitioningMigrationHelpers
milestone '16.9'
disable_ddl_transaction!
SOURCE_TABLE_NAME = :p_ci_builds
TARGET_TABLE_NAME = :ci_pipelines
COLUMN = :auto_canceled_by_id_convert_to_bigint
TARGET_COLUMN = :id
FK_NAME = :fk_dd3c83bdee
def up
add_concurrent_partitioned_foreign_key(
SOURCE_TABLE_NAME, TARGET_TABLE_NAME,
column: [COLUMN],
target_column: [TARGET_COLUMN],
validate: false,
reverse_lock_order: true,
on_delete: :nullify,
name: FK_NAME
)
prepare_partitioned_async_foreign_key_validation(
SOURCE_TABLE_NAME, [COLUMN],
name: FK_NAME
)
end
def down
unprepare_partitioned_async_foreign_key_validation(
SOURCE_TABLE_NAME, [COLUMN],
name: FK_NAME
)
Gitlab::Database::PostgresPartitionedTable.each_partition(SOURCE_TABLE_NAME) do |partition|
with_lock_retries do
remove_foreign_key_if_exists(
partition.identifier, TARGET_TABLE_NAME,
name: FK_NAME,
reverse_lock_order: true
)
end
end
with_lock_retries do
remove_foreign_key_if_exists(
SOURCE_TABLE_NAME, TARGET_TABLE_NAME,
name: FK_NAME,
reverse_lock_order: true
)
end
end
end

View File

@ -0,0 +1 @@
81c0af94a19346f9f01c4191f37a579e4825a0d06efb42c714e6b835a114544a

View File

@ -0,0 +1 @@
99c24a58185c81b7b4d2c0fb0b0fdfa3505035a9de5474f71b9a482c8c93f537

View File

@ -32529,6 +32529,10 @@ CREATE INDEX idx_vulnerability_reads_project_id_scanner_id_vulnerability_id ON v
CREATE UNIQUE INDEX idx_work_item_types_on_namespace_id_and_name_null_namespace ON work_item_types USING btree (btrim(lower(name)), ((namespace_id IS NULL))) WHERE (namespace_id IS NULL);
CREATE INDEX p_ci_builds_commit_id_bigint_status_type_idx ON ONLY p_ci_builds USING btree (commit_id_convert_to_bigint, status, type);
CREATE INDEX index_8c07a79c70 ON ci_builds USING btree (commit_id_convert_to_bigint, status, type);
CREATE INDEX index_abuse_events_on_abuse_report_id ON abuse_events USING btree (abuse_report_id);
CREATE INDEX index_abuse_events_on_category_and_source ON abuse_events USING btree (category, source);
@ -33739,6 +33743,10 @@ CREATE UNIQUE INDEX index_external_audit_event_destinations_on_namespace_id ON a
CREATE UNIQUE INDEX index_external_pull_requests_on_project_and_branches ON external_pull_requests USING btree (project_id, source_branch, target_branch);
CREATE INDEX p_ci_builds_commit_id_bigint_type_name_ref_idx ON ONLY p_ci_builds USING btree (commit_id_convert_to_bigint, type, name, ref);
CREATE INDEX index_feafb4d370 ON ci_builds USING btree (commit_id_convert_to_bigint, type, name, ref);
CREATE UNIQUE INDEX index_feature_flag_scopes_on_flag_id_and_environment_scope ON operations_feature_flag_scopes USING btree (feature_flag_id, environment_scope);
CREATE UNIQUE INDEX index_feature_flags_clients_on_project_id_and_token_encrypted ON operations_feature_flags_clients USING btree (project_id, token_encrypted);
@ -33747,6 +33755,10 @@ CREATE UNIQUE INDEX index_feature_gates_on_feature_key_and_key_and_value ON feat
CREATE UNIQUE INDEX index_features_on_key ON features USING btree (key);
CREATE INDEX p_ci_builds_auto_canceled_by_id_bigint_idx ON ONLY p_ci_builds USING btree (auto_canceled_by_id_convert_to_bigint) WHERE (auto_canceled_by_id_convert_to_bigint IS NOT NULL);
CREATE INDEX index_ffe1233676 ON ci_builds USING btree (auto_canceled_by_id_convert_to_bigint) WHERE (auto_canceled_by_id_convert_to_bigint IS NOT NULL);
CREATE INDEX index_for_security_scans_scan_type ON security_scans USING btree (scan_type, project_id, pipeline_id) WHERE (status = 1);
CREATE INDEX index_for_status_per_branch_per_project ON merge_trains USING btree (target_project_id, target_branch, status);
@ -38031,6 +38043,8 @@ ALTER INDEX p_ci_builds_pkey ATTACH PARTITION ci_builds_pkey;
ALTER INDEX p_ci_pipeline_variables_pkey ATTACH PARTITION ci_pipeline_variables_pkey;
ALTER INDEX p_ci_builds_commit_id_bigint_status_type_idx ATTACH PARTITION index_8c07a79c70;
ALTER INDEX p_ci_builds_metadata_build_id_idx ATTACH PARTITION index_ci_builds_metadata_on_build_id_and_has_exposed_artifacts;
ALTER INDEX p_ci_builds_metadata_build_id_id_idx ATTACH PARTITION index_ci_builds_metadata_on_build_id_and_id_and_interruptible;
@ -38075,6 +38089,10 @@ ALTER INDEX p_ci_builds_project_id_status_idx ATTACH PARTITION index_ci_builds_p
ALTER INDEX p_ci_builds_runner_id_idx ATTACH PARTITION index_ci_builds_runner_id_running;
ALTER INDEX p_ci_builds_commit_id_bigint_type_name_ref_idx ATTACH PARTITION index_feafb4d370;
ALTER INDEX p_ci_builds_auto_canceled_by_id_bigint_idx ATTACH PARTITION index_ffe1233676;
ALTER INDEX p_ci_builds_user_id_name_idx ATTACH PARTITION index_partial_ci_builds_on_user_id_name_parser_features;
ALTER INDEX p_ci_pipeline_variables_pipeline_id_key_partition_id_idx ATTACH PARTITION index_pipeline_variables_on_pipeline_id_key_partition_id_unique;
@ -39187,6 +39205,9 @@ ALTER TABLE ONLY workspaces
ALTER TABLE ONLY epics
ADD CONSTRAINT fk_dccd3f98fc FOREIGN KEY (assignee_id) REFERENCES users(id) ON DELETE SET NULL;
ALTER TABLE ONLY ci_builds
ADD CONSTRAINT fk_dd3c83bdee FOREIGN KEY (auto_canceled_by_id_convert_to_bigint) REFERENCES ci_pipelines(id) ON DELETE SET NULL NOT VALID;
ALTER TABLE ONLY protected_branches
ADD CONSTRAINT fk_de9216e774 FOREIGN KEY (namespace_id) REFERENCES namespaces(id) ON DELETE CASCADE;

View File

@ -15,7 +15,8 @@ Read these sections carefully before updating your Geo sites. Not following
version-specific upgrade steps may result in unexpected downtime. If you have
any specific questions, [contact Support](https://about.gitlab.com/support/#contact-support).
A database major version upgrade requires [re-initializing the PostgreSQL replication](https://docs.gitlab.com/omnibus/settings/database.html#upgrading-a-geo-instance)
to Geo secondaries. This may result in a larger than expected downtime.
to Geo secondaries. This applies to both Linux-packaged and externally-managed databases.
This may result in a larger than expected downtime.
Upgrading Geo sites involves performing:

View File

@ -804,6 +804,25 @@ Returns [`RunnerSetup`](#runnersetup).
| <a id="queryrunnersetupplatform"></a>`platform` | [`String!`](#string) | Platform to generate the instructions for. |
| <a id="queryrunnersetupprojectid"></a>`projectId` **{warning-solid}** | [`ProjectID`](#projectid) | **Deprecated** in 13.11. No longer used. |
### `Query.runnerUsage`
Runner usage by runner.
NOTE:
**Introduced** in 16.9.
**Status**: Experiment.
Returns [`[CiRunnerUsage!]`](#cirunnerusage).
#### Arguments
| Name | Type | Description |
| ---- | ---- | ----------- |
| <a id="queryrunnerusagefromdate"></a>`fromDate` | [`Date`](#date) | Start of the requested date frame. Defaults to the start of the previous calendar month. |
| <a id="queryrunnerusagerunnertype"></a>`runnerType` | [`CiRunnerType`](#cirunnertype) | Filter runners by the type. |
| <a id="queryrunnerusagerunnerslimit"></a>`runnersLimit` | [`Int`](#int) | Maximum number of runners to return. Other runners will be aggregated to a `runner: null` entry. Defaults to 5 if unspecified. Maximum of 500. |
| <a id="queryrunnerusagetodate"></a>`toDate` | [`Date`](#date) | End of the requested date frame. Defaults to the end of the previous calendar month. |
### `Query.runnerUsageByProject`
Runner usage by project.
@ -812,14 +831,14 @@ NOTE:
**Introduced** in 16.9.
**Status**: Experiment.
Returns [`[RunnerUsageByProject!]`](#runnerusagebyproject).
Returns [`[CiRunnerUsageByProject!]`](#cirunnerusagebyproject).
#### Arguments
| Name | Type | Description |
| ---- | ---- | ----------- |
| <a id="queryrunnerusagebyprojectfromdate"></a>`fromDate` | [`Date`](#date) | Start of the requested date frame. Defaults to the start of the previous calendar month. |
| <a id="queryrunnerusagebyprojectprojectslimit"></a>`projectsLimit` | [`Int`](#int) | Maximum number of projects to return.Other projects will be aggregated to a `project: null` entry.Can not be larger than 500. Defaults to 5. |
| <a id="queryrunnerusagebyprojectprojectslimit"></a>`projectsLimit` | [`Int`](#int) | Maximum number of projects to return.Other projects will be aggregated to a `project: null` entry. Defaults to 5 if unspecified. Maximum of 500. |
| <a id="queryrunnerusagebyprojectrunnertype"></a>`runnerType` | [`CiRunnerType`](#cirunnertype) | Filter jobs by the type of runner that executed them. |
| <a id="queryrunnerusagebyprojecttodate"></a>`toDate` | [`Date`](#date) | End of the requested date frame. Defaults to the end of the previous calendar month. |
@ -16187,6 +16206,30 @@ Returns [`CiRunnerStatus!`](#cirunnerstatus).
| <a id="cirunnermanagerupgradestatus"></a>`upgradeStatus` **{warning-solid}** | [`CiRunnerUpgradeStatus`](#cirunnerupgradestatus) | **Introduced** in 16.1. **Status**: Experiment. Availability of upgrades for the runner manager. |
| <a id="cirunnermanagerversion"></a>`version` | [`String`](#string) | Version of the runner. |
### `CiRunnerUsage`
Runner usage.
#### Fields
| Name | Type | Description |
| ---- | ---- | ----------- |
| <a id="cirunnerusagecibuildcount"></a>`ciBuildCount` | [`Int!`](#int) | Amount of builds executed during the selected period. |
| <a id="cirunnerusageciminutesused"></a>`ciMinutesUsed` | [`Int!`](#int) | Amount of minutes used during the selected period. |
| <a id="cirunnerusagerunner"></a>`runner` | [`CiRunner`](#cirunner) | Runner that the usage refers to. Null means "Other runners". |
### `CiRunnerUsageByProject`
Runner usage in minutes by project.
#### Fields
| Name | Type | Description |
| ---- | ---- | ----------- |
| <a id="cirunnerusagebyprojectcibuildcount"></a>`ciBuildCount` | [`Int!`](#int) | Amount of builds executed during the selected period. |
| <a id="cirunnerusagebyprojectciminutesused"></a>`ciMinutesUsed` | [`Int!`](#int) | Amount of minutes used during the selected period. |
| <a id="cirunnerusagebyprojectproject"></a>`project` | [`Project`](#project) | Project that the usage refers to. Null means "Other projects". |
### `CiSecureFileRegistry`
Represents the Geo replication and verification state of a ci_secure_file.
@ -24470,6 +24513,7 @@ Represents vulnerability finding of a security report on the pipeline.
| <a id="projectprintingmergerequestlinkenabled"></a>`printingMergeRequestLinkEnabled` | [`Boolean`](#boolean) | Indicates if a link to create or view a merge request should display after a push to Git repositories of the project from the command line. |
| <a id="projectproductanalyticsinstrumentationkey"></a>`productAnalyticsInstrumentationKey` **{warning-solid}** | [`String`](#string) | **Introduced** in 16.0. **Status**: Experiment. Product Analytics instrumentation key assigned to the project. |
| <a id="projectproductanalyticsstate"></a>`productAnalyticsState` **{warning-solid}** | [`ProductAnalyticsState`](#productanalyticsstate) | **Introduced** in 15.10. **Status**: Experiment. Current state of the product analytics stack for this project.Can only be called for one project in a single request. |
| <a id="projectprojectplanlimits"></a>`projectPlanLimits` **{warning-solid}** | [`ProjectPlanLimits`](#projectplanlimits) | **Introduced** in 16.9. **Status**: Experiment. Plan limits for the current project. |
| <a id="projectprotectablebranches"></a>`protectableBranches` **{warning-solid}** | [`[String!]`](#string) | **Introduced** in 16.9. **Status**: Experiment. List of unprotected branches, ignoring any wildcard branch rules. |
| <a id="projectpublicjobs"></a>`publicJobs` | [`Boolean`](#boolean) | Indicates if there is public access to pipelines and job details of the project, including output logs and artifacts. |
| <a id="projectpushrules"></a>`pushRules` | [`PushRules`](#pushrules) | Project's push rules settings. |
@ -26128,6 +26172,16 @@ Returns [`UserMergeRequestInteraction`](#usermergerequestinteraction).
| <a id="projectpermissionsupdatewiki"></a>`updateWiki` | [`Boolean!`](#boolean) | If `true`, the user can perform `update_wiki` on this resource. |
| <a id="projectpermissionsuploadfile"></a>`uploadFile` | [`Boolean!`](#boolean) | If `true`, the user can perform `upload_file` on this resource. |
### `ProjectPlanLimits`
Plan limits for the current project.
#### Fields
| Name | Type | Description |
| ---- | ---- | ----------- |
| <a id="projectplanlimitscipipelineschedules"></a>`ciPipelineSchedules` | [`Int`](#int) | Maximum number of pipeline schedules allowed per project. |
### `ProjectRepositoryRegistry`
Represents the Geo replication and verification state of a project repository.
@ -26931,18 +26985,6 @@ Counts of requirements by their state.
| <a id="runnersetupinstallinstructions"></a>`installInstructions` | [`String!`](#string) | Instructions for installing the runner on the specified architecture. |
| <a id="runnersetupregisterinstructions"></a>`registerInstructions` | [`String`](#string) | Instructions for registering the runner. The actual registration tokens are not included in the commands. Instead, a placeholder `$REGISTRATION_TOKEN` is shown. |
### `RunnerUsageByProject`
Runner usage in minutes by project.
#### Fields
| Name | Type | Description |
| ---- | ---- | ----------- |
| <a id="runnerusagebyprojectcibuildcount"></a>`ciBuildCount` | [`Int!`](#int) | Amount of builds executed during the selected period. |
| <a id="runnerusagebyprojectciminutesused"></a>`ciMinutesUsed` | [`Int!`](#int) | Amount of minutes used during the selected period. |
| <a id="runnerusagebyprojectproject"></a>`project` | [`Project`](#project) | Project that the usage refers to. Null means "Other projects". |
### `SastCiConfiguration`
Represents a CI configuration of SAST.

View File

@ -145,8 +145,10 @@ The modules that can be configured for logging are as follows:
| `BROWS` | Used for querying the state or page of the browser. |
| `CACHE` | Used for reporting on cache hit and miss for cached HTTP resources. |
| `CHROM` | Used to log Chrome DevTools messages. |
| `CONFG` | Used to log the analyzer configuration. |
| `CONTA` | Used for the container that collects parts of HTTP requests and responses from DevTools messages. |
| `CRAWL` | Used for the core crawler algorithm. |
| `CRWLG` | Used for the crawl graph generator. |
| `DATAB` | Used for persisting data to the internal database. |
| `LEASE` | Used to create browsers to add them to the browser pool. |
| `MAIN` | Used for the flow of the main event loop of the crawler. |

View File

@ -161,7 +161,7 @@ To add a group to a project:
The invited group is displayed on the **Groups** tab.
Private groups are masked from unauthorized users.
With the feature flag `allow_members_to_see_invited_groups_in_access_dropdowns` enabled, private groups are displayed in project settings for protected branches, protected tags, and protected environments.
Private groups are displayed in project settings for protected branches, protected tags, and protected environments.
The members of the invited group are not displayed on the **Members** tab.
The **Members** tab shows:

View File

@ -37,6 +37,20 @@ Pages domains are `*.gitlab.io`.
| Project pages owned by a group | `acmecorp/webshop` | `http(s)://acmecorp.example.io/webshop`|
| Project pages owned by a subgroup | `acmecorp/documentation/product-manual` | `http(s)://acmecorp.example.io/documentation/product-manual`|
When **Use unique domain** setting is enabled, all URLs
are flattened with the following structure:
| Type of GitLab Pages | Example path of a project in GitLab | Website URL |
| -------------------- | ------------ | ----------- |
| User pages | `username/username.example.io` | `http(s)://username-example-io-username.example.io` |
| Group pages | `acmecorp/acmecorp.example.io` | `http(s)://acmecorp-example-io-acmecorp-uniqueid.example.io` |
| Project pages owned by a user | `username/my-website` | `https://my-website-username-uniqueid.gitlab.io/` |
| Project pages owned by a group | `acmecorp/webshop` | `http(s)://webshop-acmecorp-uniqueid.example.io/`|
| Project pages owned by a subgroup | `acmecorp/documentation/product-manual` | `http(s)://product-manual-documentation-acmecorp-uniqueid.example.io/`|
The `unique_id` portion of the URL is an alphanumeric string. For example, if the `unique_id` is `30bae2547a50der6ed7d9a08d417a33525a5c4dc6fdd68`, the last example would be
`http(s)://product-manual-documentation-acmecorp-30bae2547a50der6ed7d9a08d417a33525a5c4dc6fdd68.example.io/`.
WARNING:
There are some known [limitations](introduction.md#subdomains-of-subdomains)
regarding namespaces served under the general domain name and HTTPS.
@ -44,21 +58,24 @@ Make sure to read that section.
To understand Pages domains clearly, read the examples below.
NOTE:
The following examples imply you disabled the **Use unique domain** setting. If you did not, refer to the previous table, replacing `example.io` by `gitlab.io`.
### Project website examples
- You created a project called `blog` under your username `john`,
therefore your project URL is `https://gitlab.com/john/blog/`.
Once you enable GitLab Pages for this project, and build your site,
After you enabled GitLab Pages for this project, and build your site,
you can access it at `https://john.gitlab.io/blog/`.
- You created a group for all your websites called `websites`,
and a project within this group is called `blog`. Your project
URL is `https://gitlab.com/websites/blog/`. Once you enable
and a project in this group is called `blog`. Your project
URL is `https://gitlab.com/websites/blog/`. After you enabled
GitLab Pages for this project, the site is available at
`https://websites.gitlab.io/blog/`.
- You created a group for your engineering department called `engineering`,
a subgroup for all your documentation websites called `docs`,
and a project within this subgroup is called `workflows`. Your project
URL is `https://gitlab.com/engineering/docs/workflows/`. Once you enable
and a project in this subgroup is called `workflows`. Your project
URL is `https://gitlab.com/engineering/docs/workflows/`. After you enabled
GitLab Pages for this project, the site is available at
`https://engineering.gitlab.io/docs/workflows`.
@ -66,11 +83,11 @@ To understand Pages domains clearly, read the examples below.
- Under your username, `john`, you created a project called
`john.gitlab.io`. Your project URL is `https://gitlab.com/john/john.gitlab.io`.
Once you enable GitLab Pages for your project, your website
After you enabled GitLab Pages for your project, your website
is published under `https://john.gitlab.io`.
- Under your group `websites`, you created a project called
`websites.gitlab.io`. Your project's URL is `https://gitlab.com/websites/websites.gitlab.io`.
Once you enable GitLab Pages for your project,
After you enabled GitLab Pages for your project,
your website is published under `https://websites.gitlab.io`.
**General example:**

View File

@ -3,10 +3,11 @@
module Gitlab
module Checks
class ChangedBlobs
def initialize(project, revisions, bytes_limit:)
def initialize(project, revisions, bytes_limit:, with_paths: false)
@project = project
@revisions = revisions
@bytes_limit = bytes_limit
@with_paths = with_paths
end
def execute(timeout:)
@ -19,6 +20,7 @@ module Gitlab
project.repository.list_blobs(
['--not', '--all', '--not'] + revisions,
bytes_limit: bytes_limit,
with_paths: with_paths,
dynamic_timeout: timeout
).to_a
end
@ -26,7 +28,7 @@ module Gitlab
private
attr_reader :project, :revisions, :bytes_limit
attr_reader :project, :revisions, :bytes_limit, :with_paths
def fetch_blobs_from_quarantined_repo(timeout:)
blobs = project.repository.list_all_blobs(
@ -45,10 +47,14 @@ module Gitlab
# or even in a new file, it would be ignored because we filter the blob out because it still "exists".
#
# See https://gitlab.com/gitlab-org/gitlab/-/merge_requests/136896#note_1680680116 for more details.
filter_existing(blobs)
filter_existing!(blobs)
populate_paths!(blobs) if with_paths
blobs
end
def filter_existing(blobs)
def filter_existing!(blobs)
# We check for object existence in the main repository, but the
# object directory points to the object quarantine. This can be fixed
# by unsetting it, which will cause us to use the normal repository as
@ -61,13 +67,44 @@ module Gitlab
)
# Remove blobs that already exist.
blobs.reject { |blob| map_blob_id_to_existence[blob.id] }
blobs.reject! { |blob| map_blob_id_to_existence[blob.id] }
end
def ignore_alternate_directories?
git_env = ::Gitlab::Git::HookEnv.all(project.repository.gl_repository)
git_env['GIT_OBJECT_DIRECTORY_RELATIVE'].present?
end
def populate_paths!(blobs)
# All commits which have been newly introduced via any of the given changes
commits = project.repository.new_commits(revisions)
# This Gitaly RPC call performs `git-diff-tree` to get changed paths along with their blob ids
paths = project.repository.find_changed_paths(commits, merge_commit_diff_mode: :all_parents)
paths_by_blob_id = paths.group_by(&:new_blob_id)
# `blobs` variable doesn't contain duplicates; however, different paths can point to the same blob
# In order to make it memory-efficient, we modify the `blobs` by setting the paths, but if
# a blob points to more than 1 path, we duplicate this blob and store it into `extra_blobs`.
extra_blobs = []
blobs.map! do |blob|
changed_paths = paths_by_blob_id[blob.id]
next blob if changed_paths.blank?
blob.path = changed_paths.first.path
changed_paths[1..].each do |changed_path|
extra_blobs << blob.dup.tap { |b| b.path = changed_path.path }
end
blob
end
# Concat extra blobs to the list of blobs. It doesn't create new array and saves memory
blobs.concat(extra_blobs)
end
end
end
end

View File

@ -9259,12 +9259,18 @@ msgstr ""
msgid "BulkImport|Path of the new group."
msgstr ""
msgid "BulkImport|Please select a parent group."
msgstr ""
msgid "BulkImport|Project import history"
msgstr ""
msgid "BulkImport|Re-import creates a new group. It does not sync with the existing group."
msgstr ""
msgid "BulkImport|Select parent group"
msgstr ""
msgid "BulkImport|Showing %{start}-%{end} of %{total}"
msgstr ""

View File

@ -58,7 +58,8 @@ describe('import source cell', () => {
});
it('renders last imported line', () => {
expect(wrapper.text()).toMatchInterpolatedText('fake_group_1 Last imported to root/group1');
expect(wrapper.text()).toContain('fake_group_1');
expect(wrapper.text()).toContain('Last imported to Commit451/group1');
});
});
});

View File

@ -14,6 +14,7 @@ import { STATUSES } from '~/import_entities/constants';
import { ROOT_NAMESPACE } from '~/import_entities/import_groups/constants';
import ImportTable from '~/import_entities/import_groups/components/import_table.vue';
import ImportStatus from '~/import_entities/import_groups/components/import_status.vue';
import ImportTargetCell from '~/import_entities/import_groups/components/import_target_cell.vue';
import ImportHistoryLink from '~/import_entities/import_groups/components//import_history_link.vue';
import importGroupsMutation from '~/import_entities/import_groups/graphql/mutations/import_groups.mutation.graphql';
import PaginationBar from '~/vue_shared/components/pagination_bar/pagination_bar.vue';
@ -60,8 +61,6 @@ describe('import table', () => {
idx
];
const findPaginationDropdown = () => wrapper.findByTestId('page-size');
const findTargetNamespaceDropdown = (rowWrapper) =>
extendedWrapper(rowWrapper).findByTestId('target-namespace-dropdown');
const findTargetNamespaceInput = (rowWrapper) =>
extendedWrapper(rowWrapper).findByTestId('target-namespace-input');
const findPaginationDropdownText = () => findPaginationDropdown().find('button').text();
@ -71,6 +70,10 @@ describe('import table', () => {
const findImportProjectsWarning = () => wrapper.findByTestId('import-projects-warning');
const findAllImportStatuses = () => wrapper.findAllComponents(ImportStatus);
const findFirstImportTargetCell = () => wrapper.findAllComponents(ImportTargetCell).at(0);
const findFirstImportTargetNamespaceText = () =>
findFirstImportTargetCell().find('[aria-haspopup]').text();
const triggerSelectAllCheckbox = (checked = true) =>
wrapper.find('thead input[type=checkbox]').setChecked(checked);
@ -199,51 +202,109 @@ describe('import table', () => {
expect(importHistoryLinks.at(1).props('id')).toBe(FAKE_GROUPS[3].id);
});
it('correctly maintains root namespace as last import target', async () => {
createComponent({
bulkImportSourceGroups: () => ({
nodes: [
{
...generateFakeEntry({ id: 1, status: STATUSES.FINISHED }),
lastImportTarget: {
id: 1,
targetNamespace: ROOT_NAMESPACE.fullPath,
newName: 'does-not-matter',
describe('selecting import target namespace', () => {
describe('when lastImportTarget is not defined', () => {
beforeEach(async () => {
createComponent({
bulkImportSourceGroups: () => ({
nodes: [
{
...generateFakeEntry({ id: 1, status: STATUSES.NONE }),
lastImportTarget: null,
},
],
pageInfo: FAKE_PAGE_INFO,
versionValidation: FAKE_VERSION_VALIDATION,
}),
});
await waitForPromises();
});
it('does not pre-select target namespace', () => {
expect(findFirstImportTargetNamespaceText()).toBe('Select parent group');
});
it('does not validate by default', () => {
expect(wrapper.find('tbody tr').text()).not.toContain('Please select a parent group.');
});
it('triggers validations when import button is clicked', async () => {
await findRowImportDropdownAtIndex(0).trigger('click');
expect(wrapper.find('tbody tr').text()).toContain('Please select a parent group.');
});
it('is valid when root namespace is selected', async () => {
findFirstImportTargetCell().vm.$emit('update-target-namespace', {
fullPath: '',
});
await findRowImportDropdownAtIndex(0).trigger('click');
expect(wrapper.find('tbody tr').text()).not.toContain('Please select a parent group.');
expect(findFirstImportTargetNamespaceText()).toBe('No parent');
});
it('is valid when target namespace is selected', async () => {
findFirstImportTargetCell().vm.$emit('update-target-namespace', {
fullPath: 'gitlab-org',
});
await findRowImportDropdownAtIndex(0).trigger('click');
expect(wrapper.find('tbody tr').text()).not.toContain('Please select a parent group.');
expect(findFirstImportTargetNamespaceText()).toBe('gitlab-org');
});
});
describe('when lastImportTarget is set', () => {
it('correctly maintains root namespace as last import target', async () => {
createComponent({
bulkImportSourceGroups: () => ({
nodes: [
{
...generateFakeEntry({ id: 1, status: STATUSES.NONE }),
lastImportTarget: {
id: 1,
targetNamespace: ROOT_NAMESPACE.fullPath,
newName: 'does-not-matter',
},
},
],
pageInfo: FAKE_PAGE_INFO,
versionValidation: FAKE_VERSION_VALIDATION,
}),
});
await waitForPromises();
expect(findFirstImportTargetNamespaceText()).toBe('No parent');
});
});
it('correctly maintains target namespace as last import target', async () => {
const targetNamespace = AVAILABLE_NAMESPACES[1];
createComponent({
bulkImportSourceGroups: () => ({
nodes: [
{
...generateFakeEntry({ id: 1, status: STATUSES.FINISHED }),
lastImportTarget: {
id: 1,
targetNamespace: targetNamespace.fullPath,
newName: 'does-not-matter',
},
},
},
],
pageInfo: FAKE_PAGE_INFO,
versionValidation: FAKE_VERSION_VALIDATION,
}),
],
pageInfo: FAKE_PAGE_INFO,
versionValidation: FAKE_VERSION_VALIDATION,
}),
});
await waitForPromises();
expect(findFirstImportTargetNamespaceText()).toBe(targetNamespace.fullPath);
});
await waitForPromises();
const firstRow = wrapper.find('tbody tr');
const targetNamespaceDropdownButton = findTargetNamespaceDropdown(firstRow).find(
'[aria-haspopup]',
);
expect(targetNamespaceDropdownButton.text()).toBe('No parent');
});
it('respects default namespace if provided', async () => {
const targetNamespace = AVAILABLE_NAMESPACES[1];
createComponent({
bulkImportSourceGroups: () => ({
nodes: FAKE_GROUPS,
pageInfo: FAKE_PAGE_INFO,
versionValidation: FAKE_VERSION_VALIDATION,
}),
defaultTargetNamespace: targetNamespace.id,
});
await waitForPromises();
const firstRow = wrapper.find('tbody tr');
const targetNamespaceDropdownButton = findTargetNamespaceDropdown(firstRow).find(
'[aria-haspopup]',
);
expect(targetNamespaceDropdownButton.text()).toBe(targetNamespace.fullPath);
});
it('does not render status string when result list is empty', async () => {

View File

@ -8,7 +8,7 @@ export const generateFakeEntry = ({ id, status, hasFailures = false, message, ..
fullName: `fake_name_${id}`,
lastImportTarget: {
id,
targetNamespace: 'root',
targetNamespace: 'Commit451',
newName: `group${id}`,
},
id,

View File

@ -0,0 +1,42 @@
# frozen_string_literal: true
require 'spec_helper'
RSpec.describe Resolvers::Projects::PlanLimitsResolver, feature_category: :api do
include GraphqlHelpers
let_it_be(:user) { create(:user) }
let(:project) { build(:project, :repository) }
describe 'Pipeline schedule limits' do
before do
project.add_owner(user)
end
it 'gets the current limits for pipeline schedules' do
limits = resolve_plan_limits
expect(limits).to include({ ci_pipeline_schedules: project.actual_limits.ci_pipeline_schedules })
end
end
describe 'Pipeline schedule limits without authorization' do
it 'returns a ResourceNotAvailable error' do
expect_graphql_error_to_be_created(Gitlab::Graphql::Errors::ResourceNotAvailable) do
resolve_plan_limits
end
end
it 'returns null when a user is not allowed to see the limit but allowed to see project' do
project.add_reporter(user)
limits = resolve_plan_limits
expect(limits).to include({ ci_pipeline_schedules: nil })
end
end
def resolve_plan_limits(args: {})
resolve(described_class, obj: project, ctx: { current_user: user }, args: args)
end
end

View File

@ -0,0 +1,15 @@
# frozen_string_literal: true
require 'spec_helper'
RSpec.describe Types::ProjectPlanLimitsType, feature_category: :api do
include GraphqlHelpers
specify { expect(described_class.graphql_name).to eq('ProjectPlanLimits') }
it 'exposes the expected fields' do
expected_fields = %i[ci_pipeline_schedules]
expect(described_class).to have_graphql_fields(*expected_fields)
end
end

View File

@ -3,24 +3,51 @@
require 'spec_helper'
RSpec.describe Gitlab::Checks::ChangedBlobs, feature_category: :source_code_management do
let(:repository) { project.repository }
let_it_be(:user) { create(:user) }
subject(:service) do
described_class.new(project, revisions, bytes_limit: 100).execute(timeout: 60)
let(:repository) { project.repository }
let(:service_params) { {} }
subject(:blobs) do
described_class.new(project, revisions, bytes_limit: 100, **service_params).execute(timeout: 60)
end
describe '#execute' do
context 'without quarantine directory' do
let_it_be(:project) { create(:project, :repository) }
let_it_be(:project) do
create(:project, :repository).tap do |pr|
pr.repository.delete_branch('add-pdf-file')
end
end
let(:revisions) { ['e774ebd33ca5de8e6ef1e633fd887bb52b9d0a7a'] }
it 'returns the blobs' do
project.repository.delete_branch('add-pdf-file')
expect(repository).to receive(:list_blobs).with(
['--not', '--all', '--not'] + revisions,
bytes_limit: 100,
with_paths: false,
dynamic_timeout: 60
).and_call_original
expect(repository).to receive(:list_blobs).and_call_original
expect(blobs).to contain_exactly(kind_of(Gitlab::Git::Blob))
expect(blobs.first.path).to eq('')
end
expect(service).to contain_exactly(kind_of(Gitlab::Git::Blob))
context 'when with_paths option is passed' do
let(:service_params) { { with_paths: true } }
it 'populates the paths' do
expect(repository).to receive(:list_blobs).with(
['--not', '--all', '--not'] + revisions,
bytes_limit: 100,
with_paths: true,
dynamic_timeout: 60
).and_call_original
expect(blobs).to contain_exactly(kind_of(Gitlab::Git::Blob))
expect(blobs.first.path).to eq('files/pdf/test.pdf')
end
end
end
@ -46,8 +73,58 @@ RSpec.describe Gitlab::Checks::ChangedBlobs, feature_category: :source_code_mana
end
it 'returns the blobs' do
expect(service.size).to eq(1)
expect(service.first).to be_kind_of(Gitlab::Git::Blob)
expect(blobs.size).to eq(1)
expect(blobs.first).to be_kind_of(Gitlab::Git::Blob)
end
context 'when the same file with different paths is committed' do
let_it_be(:commits) do
project.repository.commit_files(
user,
branch_name: project.repository.root_ref,
message: 'Commit to root ref',
actions: [
{ action: :create, file_path: 'newfile', content: 'New' },
{ action: :create, file_path: 'modified', content: 'Before' }
]
)
project.repository.commit_files(
user,
branch_name: project.repository.root_ref,
message: 'Another commit to root ref',
actions: [
{ action: :create, file_path: 'samefile', content: 'New' },
{ action: :update, file_path: 'modified', content: 'After' }
]
)
project.repository.commits(project.repository.root_ref, limit: 3)
end
it 'returns the blobs' do
expect(blobs.map(&:data)).to contain_exactly(
'After', 'Before', 'New', 'test'
)
expect(blobs.map(&:path)).to all be_blank
end
context 'when with_paths option is passed' do
let(:service_params) { { with_paths: true } }
it 'populates the paths of the blobs' do
blobs_data = blobs.map { |blob| [blob.data, blob.path] }
expect(blobs_data).to contain_exactly(
%w[After modified],
%w[Before modified],
%w[New samefile],
%w[New newfile],
['test', 'test.txt']
)
end
end
end
end
@ -57,7 +134,7 @@ RSpec.describe Gitlab::Checks::ChangedBlobs, feature_category: :source_code_mana
end
it 'filters out the blobs' do
expect(service).to eq([])
expect(blobs).to eq([])
end
end
end

View File

@ -2,7 +2,7 @@
require 'spec_helper'
RSpec.describe Ci::PipelineVariable do
RSpec.describe Ci::PipelineVariable, feature_category: :continuous_integration do
subject { build(:ci_pipeline_variable) }
it_behaves_like "CI variable"
@ -38,4 +38,26 @@ RSpec.describe Ci::PipelineVariable do
end
end
end
describe 'routing table switch' do
context 'with ff disabled' do
before do
stub_feature_flags(ci_partitioning_use_ci_pipeline_variables_routing_table: false)
end
it 'uses the legacy table' do
expect(described_class.table_name).to eq('ci_pipeline_variables')
end
end
context 'with ff enabled' do
before do
stub_feature_flags(ci_partitioning_use_ci_pipeline_variables_routing_table: true)
end
it 'uses the routing table' do
expect(described_class.table_name).to eq('p_ci_pipeline_variables')
end
end
end
end

View File

@ -2,7 +2,7 @@
require 'spec_helper'
RSpec.describe Ci::Partitionable::Switch, :aggregate_failures do
RSpec.describe Ci::Partitionable::Switch, :aggregate_failures, feature_category: :continuous_integration do
let(:model) do
Class.new(Ci::ApplicationRecord) do
self.primary_key = :id
@ -71,7 +71,7 @@ RSpec.describe Ci::Partitionable::Switch, :aggregate_failures do
allow(Feature::Definition).to receive(:get).with(table_rollout_flag)
.and_return(
Feature::Definition.new("development/#{table_rollout_flag}.yml",
{ type: 'development', name: table_rollout_flag }
{ type: 'gitlab_com_derisk', name: table_rollout_flag }
)
)
end

View File

@ -91,13 +91,17 @@ RSpec.describe Packages::Nuget::Symbol, type: :model, feature_category: :package
end
describe '.with_file_sha256' do
subject(:with_file_sha256) { described_class.with_file_sha256(checksums) }
subject { described_class.with_file_sha256(checksum) }
let_it_be(:checksums) { OpenSSL::Digest.hexdigest('SHA256', 'checksums') }
let_it_be(:symbol) { create(:nuget_symbol, file_sha256: checksums) }
let_it_be(:checksum) { OpenSSL::Digest.hexdigest('SHA256', 'checksum') }
let_it_be(:symbol) { create(:nuget_symbol, file_sha256: checksum) }
it 'returns symbols with the given checksums' do
expect(with_file_sha256).to eq([symbol])
it { is_expected.to contain_exactly(symbol) }
context 'when checksum is in uppercase' do
subject { described_class.with_file_sha256(checksum.upcase) }
it { is_expected.to contain_exactly(symbol) }
end
end

View File

@ -216,6 +216,7 @@ RSpec.describe ProjectPolicy, feature_category: :system_access do
expect_allowed(:update_pipeline)
expect_allowed(:cancel_pipeline)
expect_allowed(:create_pipeline_schedule)
expect_allowed(:read_ci_pipeline_schedules_plan_limit)
end
end
@ -228,6 +229,7 @@ RSpec.describe ProjectPolicy, feature_category: :system_access do
expect_disallowed(:cancel_pipeline)
expect_disallowed(:destroy_pipeline)
expect_disallowed(:create_pipeline_schedule)
expect_disallowed(:read_ci_pipeline_schedules_plan_limit)
end
end

View File

@ -14,14 +14,14 @@ RSpec.describe Members::UnassignIssuablesService, feature_category: :groups_and_
describe '#execute' do
RSpec.shared_examples 'un-assigning issuables' do |issue_count, mr_count, open_issue_count, open_mr_count|
# :sidekiq_inline is used b/c unlike issues, assignee changes for MRs get handled asynchronously.
it 'removes issuable assignments', :sidekiq_inline, :aggregate_failures do
expect { subject }
.to change { user.assigned_issues.count }.from(issue_count).to(0)
.and change { user.assigned_merge_requests.count }.from(mr_count).to(0)
.and change { Note.where('note ILIKE ?', '%unassigned%').count }.by(
user.assigned_issues.count + user.assigned_merge_requests.count
)
it 'removes issuable assignments', :aggregate_failures do
expect(user.assigned_issues.count).to eq(issue_count)
expect(user.assigned_merge_requests.count).to eq(mr_count)
subject
expect(user.assigned_issues.count).to eq(0)
expect(user.assigned_merge_requests.count).to eq(0)
end
it 'invalidates user cache', :aggregate_failures, :clean_gitlab_redis_cache do

View File

@ -143,16 +143,6 @@ RSpec.describe MergeRequests::UpdateAssigneesService, feature_category: :code_re
expect { update_merge_request }
.not_to change { merge_request.reload.assignees.to_a }
end
context 'when skip_authorization is set' do
let(:opts) { { assignee_ids: [user2.id], skip_authorization: true } }
it 'updates the MR assignees' do
expect { update_merge_request }
.to change { merge_request.reload.assignees }
.from([user3]).to([user2])
end
end
end
end
end

View File

@ -300,12 +300,32 @@ RSpec.describe Projects::ImportService, feature_category: :importers do
stub_application_setting(allow_local_requests_from_web_hooks_and_services: false)
end
it 'returns an error' do
expect(project.repository).not_to receive(:import_repository)
expect(subject.execute).to include(
status: :error,
message: end_with('Requests to localhost are not allowed')
)
context 'when the IP is allow-listed' do
before do
stub_application_setting(outbound_local_requests_whitelist: ["127.0.0.1"])
end
it 'imports successfully' do
expect(project.repository)
.to receive(:import_repository)
.and_return(true)
expect(subject.execute[:status]).to eq(:success)
end
end
context 'when the IP is not allow-listed' do
before do
stub_application_setting(outbound_local_requests_whitelist: [])
end
it 'returns an error' do
expect(project.repository).not_to receive(:import_repository)
expect(subject.execute).to include(
status: :error,
message: end_with('Requests to localhost are not allowed')
)
end
end
end
end
@ -323,7 +343,8 @@ RSpec.describe Projects::ImportService, feature_category: :importers do
allow_local_network: false,
allow_localhost: false,
dns_rebind_protection: false,
deny_all_requests_except_allowed: false
deny_all_requests_except_allowed: false,
outbound_local_requests_allowlist: []
)
.and_return([Addressable::URI.parse("https://example.com/group/project"), nil])
end
@ -359,7 +380,8 @@ RSpec.describe Projects::ImportService, feature_category: :importers do
allow_local_network: false,
allow_localhost: false,
dns_rebind_protection: true,
deny_all_requests_except_allowed: false
deny_all_requests_except_allowed: false,
outbound_local_requests_allowlist: []
)
.and_return([Addressable::URI.parse("https://172.16.123.1/group/project"), 'example.com'])
end
@ -388,7 +410,8 @@ RSpec.describe Projects::ImportService, feature_category: :importers do
allow_local_network: false,
allow_localhost: false,
dns_rebind_protection: true,
deny_all_requests_except_allowed: false
deny_all_requests_except_allowed: false,
outbound_local_requests_allowlist: []
)
.and_return([Addressable::URI.parse('https://[2606:4700:90:0:f22e:fbec:5bed:a9b9]/gitlab-org/gitlab-development-kit'), 'gitlab.com'])
end
@ -419,7 +442,8 @@ RSpec.describe Projects::ImportService, feature_category: :importers do
allow_local_network: false,
allow_localhost: false,
dns_rebind_protection: true,
deny_all_requests_except_allowed: false
deny_all_requests_except_allowed: false,
outbound_local_requests_allowlist: []
)
.and_return([Addressable::URI.parse("http://172.16.123.1/group/project"), 'example.com'])
end
@ -449,7 +473,8 @@ RSpec.describe Projects::ImportService, feature_category: :importers do
allow_local_network: false,
allow_localhost: false,
dns_rebind_protection: true,
deny_all_requests_except_allowed: false
deny_all_requests_except_allowed: false,
outbound_local_requests_allowlist: []
)
.and_return([Addressable::URI.parse("git://172.16.123.1/group/project"), 'example.com'])
end

View File

@ -0,0 +1,20 @@
include:
- local: gems/gem.gitlab-ci.yml
inputs:
gem_name: "sidekiq-7.1.6"
gem_path_prefix: "vendor/gems/"
rspec:
script:
- bundle exec rake test
extends:
- .with_redis
parallel:
matrix:
- RUBY_VERSION: ["3.0", "3.1", "3.2"]
.with_redis:
services:
- redis:6.0-alpine
variables:
REDIS_URL: "redis://redis"

2112
vendor/gems/sidekiq-7.1.6/Changes.md vendored Normal file

File diff suppressed because it is too large

35
vendor/gems/sidekiq-7.1.6/Gemfile vendored Normal file
View File

@ -0,0 +1,35 @@
# frozen_string_literal: true
source "https://rubygems.org"
gemspec
gem "rake"
RAILS_VERSION = "~> 7.0.4"
gem "actionmailer", RAILS_VERSION
gem "actionpack", RAILS_VERSION
gem "activejob", RAILS_VERSION
gem "activerecord", RAILS_VERSION
gem "railties", RAILS_VERSION
gem "redis-client"
# gem "bumbler"
# gem "debug"
gem "sqlite3", platforms: :ruby
gem "activerecord-jdbcsqlite3-adapter", platforms: :jruby
gem "after_commit_everywhere", require: false
gem "yard"
group :test do
gem "maxitest"
gem "simplecov"
end
group :development, :test do
gem "standard", require: false
end
group :load_test do
gem "toxiproxy"
gem "ruby-prof"
end

195
vendor/gems/sidekiq-7.1.6/Gemfile.lock vendored Normal file
View File

@ -0,0 +1,195 @@
PATH
remote: .
specs:
sidekiq (7.1.6)
concurrent-ruby (< 2)
connection_pool (>= 2.3.0)
rack (>= 2.2.4)
redis-client (>= 0.14.0)
GEM
remote: https://rubygems.org/
specs:
actionmailer (7.0.8)
actionpack (= 7.0.8)
actionview (= 7.0.8)
activejob (= 7.0.8)
activesupport (= 7.0.8)
mail (~> 2.5, >= 2.5.4)
net-imap
net-pop
net-smtp
rails-dom-testing (~> 2.0)
actionpack (7.0.8)
actionview (= 7.0.8)
activesupport (= 7.0.8)
rack (~> 2.0, >= 2.2.4)
rack-test (>= 0.6.3)
rails-dom-testing (~> 2.0)
rails-html-sanitizer (~> 1.0, >= 1.2.0)
actionview (7.0.8)
activesupport (= 7.0.8)
builder (~> 3.1)
erubi (~> 1.4)
rails-dom-testing (~> 2.0)
rails-html-sanitizer (~> 1.1, >= 1.2.0)
activejob (7.0.8)
activesupport (= 7.0.8)
globalid (>= 0.3.6)
activemodel (7.0.8)
activesupport (= 7.0.8)
activerecord (7.0.8)
activemodel (= 7.0.8)
activesupport (= 7.0.8)
activesupport (7.0.8)
concurrent-ruby (~> 1.0, >= 1.0.2)
i18n (>= 1.6, < 2)
minitest (>= 5.1)
tzinfo (~> 2.0)
after_commit_everywhere (1.3.1)
activerecord (>= 4.2)
activesupport
ast (2.4.2)
builder (3.2.4)
concurrent-ruby (1.2.3)
connection_pool (2.4.1)
crass (1.0.6)
date (3.3.4)
docile (1.4.0)
erubi (1.12.0)
globalid (1.2.1)
activesupport (>= 6.1)
i18n (1.14.1)
concurrent-ruby (~> 1.0)
json (2.7.1)
language_server-protocol (3.17.0.3)
lint_roller (1.1.0)
loofah (2.22.0)
crass (~> 1.0.2)
nokogiri (>= 1.12.0)
mail (2.8.1)
mini_mime (>= 0.1.1)
net-imap
net-pop
net-smtp
maxitest (5.4.0)
minitest (>= 5.14.0, < 5.21.0)
method_source (1.0.0)
mini_mime (1.1.5)
mini_portile2 (2.8.5)
minitest (5.20.0)
net-imap (0.4.9.1)
date
net-protocol
net-pop (0.1.2)
net-protocol
net-protocol (0.2.2)
timeout
net-smtp (0.4.0.1)
net-protocol
nokogiri (1.16.0)
mini_portile2 (~> 2.8.2)
racc (~> 1.4)
nokogiri (1.16.0-arm64-darwin)
racc (~> 1.4)
parallel (1.24.0)
parser (3.3.0.5)
ast (~> 2.4.1)
racc
racc (1.7.3)
rack (2.2.8)
rack-test (2.1.0)
rack (>= 1.3)
rails-dom-testing (2.2.0)
activesupport (>= 5.0.0)
minitest
nokogiri (>= 1.6)
rails-html-sanitizer (1.6.0)
loofah (~> 2.21)
nokogiri (~> 1.14)
railties (7.0.8)
actionpack (= 7.0.8)
activesupport (= 7.0.8)
method_source
rake (>= 12.2)
thor (~> 1.0)
zeitwerk (~> 2.5)
rainbow (3.1.1)
rake (13.1.0)
redis-client (0.19.1)
connection_pool
regexp_parser (2.9.0)
rexml (3.2.6)
rubocop (1.59.0)
json (~> 2.3)
language_server-protocol (>= 3.17.0)
parallel (~> 1.10)
parser (>= 3.2.2.4)
rainbow (>= 2.2.2, < 4.0)
regexp_parser (>= 1.8, < 3.0)
rexml (>= 3.2.5, < 4.0)
rubocop-ast (>= 1.30.0, < 2.0)
ruby-progressbar (~> 1.7)
unicode-display_width (>= 2.4.0, < 3.0)
rubocop-ast (1.30.0)
parser (>= 3.2.1.0)
rubocop-performance (1.20.2)
rubocop (>= 1.48.1, < 2.0)
rubocop-ast (>= 1.30.0, < 2.0)
ruby-prof (1.7.0)
ruby-progressbar (1.13.0)
simplecov (0.22.0)
docile (~> 1.1)
simplecov-html (~> 0.11)
simplecov_json_formatter (~> 0.1)
simplecov-html (0.12.3)
simplecov_json_formatter (0.1.4)
sqlite3 (1.7.1)
mini_portile2 (~> 2.8.0)
sqlite3 (1.7.1-arm64-darwin)
standard (1.33.0)
language_server-protocol (~> 3.17.0.2)
lint_roller (~> 1.0)
rubocop (~> 1.59.0)
standard-custom (~> 1.0.0)
standard-performance (~> 1.3)
standard-custom (1.0.2)
lint_roller (~> 1.0)
rubocop (~> 1.50)
standard-performance (1.3.1)
lint_roller (~> 1.1)
rubocop-performance (~> 1.20.2)
thor (1.3.0)
timeout (0.4.1)
toxiproxy (2.0.2)
tzinfo (2.0.6)
concurrent-ruby (~> 1.0)
unicode-display_width (2.5.0)
yard (0.9.34)
zeitwerk (2.6.12)
PLATFORMS
arm64-darwin-23
ruby
DEPENDENCIES
actionmailer (~> 7.0.4)
actionpack (~> 7.0.4)
activejob (~> 7.0.4)
activerecord (~> 7.0.4)
activerecord-jdbcsqlite3-adapter
after_commit_everywhere
maxitest
railties (~> 7.0.4)
rake
redis-client
ruby-prof
sidekiq!
simplecov
sqlite3
standard
toxiproxy
yard
BUNDLED WITH
2.5.5

9
vendor/gems/sidekiq-7.1.6/LICENSE.txt vendored Normal file
View File

@ -0,0 +1,9 @@
Copyright (c) Contributed Systems LLC
Sidekiq is an Open Source project licensed under the terms of
the LGPLv3 license. Please see <http://www.gnu.org/licenses/lgpl-3.0.html>
for license text.
Sidekiq Pro and Sidekiq Enterprise have a commercial-friendly license.
You can find the commercial license in COMM-LICENSE.txt.
Please see https://sidekiq.org for purchasing options.

12
vendor/gems/sidekiq-7.1.6/NOTICE.txt vendored Normal file
View File

@ -0,0 +1,12 @@
Copyright (c) Contributed Systems LLC
This product includes software developed at
Contributed Systems LLC (https://contribsys.com/).
Modifications to the following files were made on 30 Jan 2024 by GitLab:
- lib/sidekiq/redis_client_adapter.rb
- lib/sidekiq/redis_connection.rb
- lib/sidekiq/api.rb
- lib/sidekiq/cli.rb
- lib/sidekiq/paginator.rb
- lib/sidekiq/scheduled.rb

106
vendor/gems/sidekiq-7.1.6/README.md vendored Normal file
View File

@ -0,0 +1,106 @@
Sidekiq
==============
[![Gem Version](https://badge.fury.io/rb/sidekiq.svg)](https://rubygems.org/gems/sidekiq)
![Build](https://github.com/sidekiq/sidekiq/workflows/CI/badge.svg)
Simple, efficient background processing for Ruby.
Sidekiq uses threads to handle many jobs at the same time in the
same process. It does not require Rails but will integrate tightly with
Rails to make background processing dead simple.
Requirements
-----------------
- Redis: 6.2+
- Ruby: MRI 2.7+ or JRuby 9.3+.
Sidekiq 7.0 supports Rails 6.0+ but does not require it.
Installation
-----------------
bundle add sidekiq
Getting Started
-----------------
See the [Getting Started wiki page](https://github.com/sidekiq/sidekiq/wiki/Getting-Started) and follow the simple setup process.
You can watch [this YouTube playlist](https://www.youtube.com/playlist?list=PLjeHh2LSCFrWGT5uVjUuFKAcrcj5kSai1) to learn all about
Sidekiq and see its features in action. Here's the Web UI:
![Web UI](https://github.com/sidekiq/sidekiq/raw/main/examples/web-ui.png)
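A minimal sketch of the setup the Getting Started guide walks through (the class name, queue, and arguments below are illustrative, not part of this repository):
# app/sidekiq/hard_job.rb
class HardJob
  include Sidekiq::Job

  def perform(name, count)
    # do the actual work here; arguments must be JSON-serializable
  end
end
# enqueue from anywhere in your app; the job runs asynchronously in the Sidekiq process
HardJob.perform_async('bob', 5)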
Performance
---------------
The benchmark in `bin/sidekiqload` creates 500,000 no-op jobs and drains them as fast as possible, assuming a fixed Redis network latency of 1ms.
This requires a lot of Redis network I/O and JSON parsing.
This benchmark is IO-bound so we increase the concurrency to 25.
If your application is sending lots of emails or performing other network-intensive work, you could see a similar benefit but be careful not to saturate the CPU.
Version | Time to process 500k jobs | Throughput (jobs/sec) | Ruby | Concurrency | Job Type
-----------------|------|---------|---------|------------------------|---
Sidekiq 7.0.3 | 21.3 sec| 23,500 | 3.2.0+yjit | 30 | Sidekiq::Job
Sidekiq 7.0.3 | 33.8 sec| 14,700 | 3.2.0+yjit | 30 | ActiveJob 7.0.4
Sidekiq 7.0.3 | 23.5 sec| 21,300 | 3.2.0 | 30 | Sidekiq::Job
Sidekiq 7.0.3 | 46.5 sec| 10,700 | 3.2.0 | 30 | ActiveJob 7.0.4
Sidekiq 7.0.3 | 23.0 sec| 21,700 | 2.7.5 | 30 | Sidekiq::Job
Sidekiq 7.0.3 | 46.5 sec| 10,850 | 2.7.5 | 30 | ActiveJob 7.0.4
Most of Sidekiq's overhead is Redis network I/O.
ActiveJob adds a notable amount of CPU overhead due to argument deserialization and callbacks.
Concurrency of 30 was determined experimentally to maximize one CPU without saturating it.
Want to Upgrade?
-------------------
Use `bundle up sidekiq` to upgrade Sidekiq and all its dependencies.
Upgrade notes between each major version can be found in the `docs/` directory.
I also sell Sidekiq Pro and Sidekiq Enterprise, extensions to Sidekiq which provide more
features, a commercial-friendly license and allow you to support high
quality open source development all at the same time. Please see the
[Sidekiq](https://sidekiq.org/) homepage for more detail.
Problems?
-----------------
**Please do not directly email any Sidekiq committers with questions or problems.**
A community is best served when discussions are held in public.
If you have a problem, please review the [FAQ](https://github.com/sidekiq/sidekiq/wiki/FAQ) and [Troubleshooting](https://github.com/sidekiq/sidekiq/wiki/Problems-and-Troubleshooting) wiki pages.
Searching the [issues](https://github.com/sidekiq/sidekiq/issues) for your problem is also a good idea.
Sidekiq Pro and Sidekiq Enterprise customers get private email support.
You can purchase at https://sidekiq.org; email support@contribsys.com for help.
Useful resources:
* Product documentation is in the [wiki](https://github.com/sidekiq/sidekiq/wiki).
* Occasional announcements are made to the [@sidekiq](https://ruby.social/@sidekiq) Mastodon account.
* The [Sidekiq tag](https://stackoverflow.com/questions/tagged/sidekiq) on Stack Overflow has lots of useful Q & A.
Every Friday morning is Sidekiq office hour: I video chat and answer questions.
See the [Sidekiq support page](https://sidekiq.org/support.html) for details.
Contributing
-----------------
Please see [the contributing guidelines](https://github.com/sidekiq/sidekiq/blob/main/.github/contributing.md).
License
-----------------
Please see [LICENSE.txt](https://github.com/sidekiq/sidekiq/blob/main/LICENSE.txt) for licensing details.
The license for Sidekiq Pro and Sidekiq Enterprise can be found in [COMM-LICENSE.txt](https://github.com/sidekiq/sidekiq/blob/main/COMM-LICENSE.txt).
Author
-----------------
Mike Perham, [@getajobmike](https://ruby.social/@getajobmike) / [@sidekiq](https://ruby.social/@sidekiq), [https://www.mikeperham.com](https://www.mikeperham.com) / [https://www.contribsys.com](https://www.contribsys.com)

19
vendor/gems/sidekiq-7.1.6/Rakefile vendored Normal file
View File

@ -0,0 +1,19 @@
require "bundler/gem_tasks"
require "rake/testtask"
require "standard/rake"
# If you want to generate API docs:
# gem install yard && yard && open doc/index.html
# YARD readme: https://rubydoc.info/gems/yard/file/README.md
# YARD tags: https://www.rubydoc.info/gems/yard/file/docs/Tags.md
# YARD cheatsheet: https://gist.github.com/phansch/db18a595d2f5f1ef16646af72fe1fb0e
# To check code coverage, include simplecov in the Gemfile and
# run `COVERAGE=1 bundle exec rake`
Rake::TestTask.new(:test) do |test|
test.warning = true
test.pattern = "test/**/*.rb"
end
task default: [:standard, :test]

37
vendor/gems/sidekiq-7.1.6/bin/sidekiq vendored Executable file
View File

@ -0,0 +1,37 @@
#!/usr/bin/env ruby
# Quiet some warnings we see when running in warning mode:
# RUBYOPT=-w bundle exec sidekiq
$TESTING = false
require_relative "../lib/sidekiq/cli"
def integrate_with_systemd
return unless ENV["NOTIFY_SOCKET"]
Sidekiq.configure_server do |config|
config.logger.info "Enabling systemd notification integration"
require "sidekiq/sd_notify"
config.on(:startup) do
Sidekiq::SdNotify.ready
end
config.on(:shutdown) do
Sidekiq::SdNotify.stopping
end
Sidekiq.start_watchdog if Sidekiq::SdNotify.watchdog?
end
end
begin
cli = Sidekiq::CLI.instance
cli.parse
integrate_with_systemd
cli.run
rescue => e
raise e if $DEBUG
warn e.message
warn e.backtrace.join("\n")
exit 1
end

247
vendor/gems/sidekiq-7.1.6/bin/sidekiqload vendored Executable file
View File

@ -0,0 +1,247 @@
#!/usr/bin/env ruby
#
# bin/sidekiqload is a helpful script to load test and
# performance tune Sidekiq's core. It creates 500,000 no-op
# jobs and executes them as fast as possible.
# Example Usage:
#
# > RUBY_YJIT_ENABLE=1 LATENCY=0 THREADS=10 bin/sidekiqload
# Result: Done, 500000 jobs in 20.264945 sec, 24673 jobs/sec
#
# Use LATENCY=1 to get a more real world network setup
# but you'll need to setup and start toxiproxy as noted below.
#
# Use AJ=1 to test ActiveJob instead of plain old Sidekiq::Jobs so
# you can see the runtime performance difference between the two APIs.
#
# None of this script is considered a public API and may change over time.
#
# Quiet some warnings we see when running in warning mode:
# RUBYOPT=-w bundle exec sidekiq
$TESTING = false
puts RUBY_DESCRIPTION
puts(%w[THREADS LATENCY AJ PROFILE].map { |x| "#{x}: #{ENV[x] || "nil"}" }.join(", "))
require "ruby-prof" if ENV["PROFILE"]
require "bundler/setup"
Bundler.require(:default, :load_test)
latency = Integer(ENV["LATENCY"] || 1)
if latency > 0
# brew tap shopify/shopify
# brew install toxiproxy
# run `toxiproxy-server` in a separate terminal window.
require "toxiproxy"
# simulate a non-localhost network for realer-world conditions.
# adding 1ms of network latency has an ENORMOUS impact on benchmarks
Toxiproxy.populate([{
name: "redis",
listen: "127.0.0.1:6380",
upstream: "127.0.0.1:6379"
}])
end
if ENV["AJ"]
require "active_job"
puts "Using ActiveJob #{ActiveJob::VERSION::STRING}"
ActiveJob::Base.queue_adapter = :sidekiq
ActiveJob::Base.logger.level = Logger::WARN
class LoadJob < ActiveJob::Base
def perform(idx, ts = nil)
puts(Time.now.to_f - ts) if !ts.nil?
end
end
end
class LoadWorker
include Sidekiq::Job
sidekiq_options retry: 1
sidekiq_retry_in do |x|
1
end
def perform(idx, ts = nil)
puts(Time.now.to_f - ts) if !ts.nil?
# raise idx.to_s if idx % 100 == 1
end
end
def Process.rss
`ps -o rss= -p #{Process.pid}`.chomp.to_i
end
class Loader
def initialize
@iter = ENV["GC"] ? 10 : 500
@count = Integer(ENV["COUNT"] || 1_000)
@latency = Integer(ENV["LATENCY"] || 1)
end
def configure
@x = Sidekiq.configure_embed do |config|
config.redis = {db: 13, port: ((@latency > 0) ? 6380 : 6379)}
config.concurrency = Integer(ENV.fetch("THREADS", "10"))
# config.redis = { db: 13, port: 6380, driver: :hiredis}
config.queues = %w[default]
config.logger.level = Logger::WARN
config.average_scheduled_poll_interval = 2
config.reliable! if defined?(Sidekiq::Pro)
end
@self_read, @self_write = IO.pipe
%w[INT TERM TSTP TTIN].each do |sig|
trap sig do
@self_write.puts(sig)
end
rescue ArgumentError
puts "Signal #{sig} not supported"
end
end
def handle_signal(sig)
launcher = @x
Sidekiq.logger.debug "Got #{sig} signal"
case sig
when "INT"
# Handle Ctrl-C in JRuby like MRI
# http://jira.codehaus.org/browse/JRUBY-4637
raise Interrupt
when "TERM"
# Heroku sends TERM and then waits 30 seconds for process to exit.
raise Interrupt
when "TSTP"
Sidekiq.logger.info "Received TSTP, no longer accepting new work"
launcher.quiet
when "TTIN"
Thread.list.each do |thread|
Sidekiq.logger.warn "Thread TID-#{(thread.object_id ^ ::Process.pid).to_s(36)} #{thread["label"]}"
if thread.backtrace
Sidekiq.logger.warn thread.backtrace.join("\n")
else
Sidekiq.logger.warn "<no backtrace available>"
end
end
end
end
def setup
Sidekiq.logger.error("Setup RSS: #{Process.rss}")
Sidekiq.redis { |c| c.flushdb }
start = Time.now
if ENV["AJ"]
@iter.times do
@count.times do |idx|
LoadJob.perform_later(idx)
end
end
else
@iter.times do
arr = Array.new(@count) { |idx| [idx] }
Sidekiq::Client.push_bulk("class" => LoadWorker, "args" => arr)
end
end
Sidekiq.logger.warn "Created #{@count * @iter} jobs in #{Time.now - start} sec"
end
def monitor
@monitor = Thread.new do
GC.start
loop do
sleep 0.2
qsize = Sidekiq.redis do |conn|
conn.llen "queue:default"
end
total = qsize
if total == 0
ending = Time.now - @start
size = @iter * @count
Sidekiq.logger.error("Done, #{size} jobs in #{ending} sec, #{(size / ending).to_i} jobs/sec")
Sidekiq.logger.error("Ending RSS: #{Process.rss}")
Sidekiq.logger.error("Now here's the latency for three jobs")
if ENV["AJ"]
LoadJob.perform_later(1, Time.now.to_f)
LoadJob.perform_later(2, Time.now.to_f)
LoadJob.perform_later(3, Time.now.to_f)
else
LoadWorker.perform_async(1, Time.now.to_f)
LoadWorker.perform_async(2, Time.now.to_f)
LoadWorker.perform_async(3, Time.now.to_f)
end
sleep 0.1
@x.stop
Process.kill("INT", $$)
break
end
end
end
end
def with_latency(latency, &block)
Sidekiq.logger.error "Simulating #{latency}ms of latency between Sidekiq and redis"
if latency > 0
Toxiproxy[:redis].downstream(:latency, latency: latency).apply(&block)
else
yield
end
end
def run(name)
Sidekiq.logger.warn("Starting #{name}")
monitor
if ENV["PROFILE"]
RubyProf.exclude_threads = [@monitor]
RubyProf.start
elsif ENV["GC"]
GC.start
GC.compact
GC.disable
Sidekiq.logger.error("GC Start RSS: #{Process.rss}")
end
@start = Time.now
with_latency(@latency) do
@x.run
while (readable_io = IO.select([@self_read]))
signal = readable_io.first[0].gets.strip
handle_signal(signal)
end
end
# normal
rescue Interrupt
rescue => e
raise e if $DEBUG
warn e.message
warn e.backtrace.join("\n")
exit 1
ensure
@x.stop
end
def done
Sidekiq.logger.error("GC End RSS: #{Process.rss}") if ENV["GC"]
if ENV["PROFILE"]
Sidekiq.logger.error("Profiling...")
result = RubyProf.stop
printer = RubyProf::GraphHtmlPrinter.new(result)
printer.print(File.new("output.html", "w"), min_percent: 1)
end
end
end
ll = Loader.new
ll.configure
if ENV["WARM"]
ll.setup
ll.run("warmup")
end
ll.setup
ll.run("load")
ll.done

11
vendor/gems/sidekiq-7.1.6/bin/sidekiqmon vendored Executable file
View File

@ -0,0 +1,11 @@
#!/usr/bin/env ruby
require "sidekiq/monitor"
# disable the Redis connection pool logging
Sidekiq.default_configuration.logger.level = :warn
section = "all"
section = ARGV[0] if ARGV.size == 1
Sidekiq::Monitor::Status.new.display(section)

View File

@ -0,0 +1,57 @@
require "rails/generators/named_base"
module Sidekiq
module Generators # :nodoc:
class JobGenerator < ::Rails::Generators::NamedBase # :nodoc:
desc "This generator creates a Sidekiq Job in app/sidekiq and a corresponding test"
check_class_collision suffix: "Job"
def self.default_generator_root
File.dirname(__FILE__)
end
def create_job_file
template "job.rb.erb", File.join("app/sidekiq", class_path, "#{file_name}_job.rb")
end
def create_test_file
return unless test_framework
if test_framework == :rspec
create_job_spec
else
create_job_test
end
end
private
def create_job_spec
template_file = File.join(
"spec/sidekiq",
class_path,
"#{file_name}_job_spec.rb"
)
template "job_spec.rb.erb", template_file
end
def create_job_test
template_file = File.join(
"test/sidekiq",
class_path,
"#{file_name}_job_test.rb"
)
template "job_test.rb.erb", template_file
end
def file_name
@_file_name ||= super.sub(/_?job\z/i, "")
end
def test_framework
::Rails.application.config.generators.options[:rails][:test_framework]
end
end
end
end

View File

@ -0,0 +1,9 @@
<% module_namespacing do -%>
class <%= class_name %>Job
include Sidekiq::Job
def perform(*args)
# Do something
end
end
<% end -%>

View File

@ -0,0 +1,6 @@
require 'rails_helper'
<% module_namespacing do -%>
RSpec.describe <%= class_name %>Job, type: :job do
pending "add some examples to (or delete) #{__FILE__}"
end
<% end -%>

View File

@ -0,0 +1,8 @@
require 'test_helper'
<% module_namespacing do -%>
class <%= class_name %>JobTest < Minitest::Test
def test_example
skip "add some examples to (or delete) #{__FILE__}"
end
end
<% end -%>

147
vendor/gems/sidekiq-7.1.6/lib/sidekiq.rb vendored Normal file
View File

@ -0,0 +1,147 @@
# frozen_string_literal: true
require "sidekiq/version"
fail "Sidekiq #{Sidekiq::VERSION} does not support Ruby versions below 2.7.0." if RUBY_PLATFORM != "java" && Gem::Version.new(RUBY_VERSION) < Gem::Version.new("2.7.0")
begin
require "sidekiq-ent/version"
fail <<~EOM if Gem::Version.new(Sidekiq::Enterprise::VERSION).segments[0] != Sidekiq::MAJOR
Sidekiq Enterprise #{Sidekiq::Enterprise::VERSION} does not work with Sidekiq #{Sidekiq::VERSION}.
Starting with Sidekiq 7, major versions are synchronized so Sidekiq Enterprise 7 works with Sidekiq 7.
Use `bundle up sidekiq-ent` to upgrade.
EOM
rescue LoadError
end
begin
require "sidekiq/pro/version"
fail <<~EOM if Gem::Version.new(Sidekiq::Pro::VERSION).segments[0] != Sidekiq::MAJOR
Sidekiq Pro #{Sidekiq::Pro::VERSION} does not work with Sidekiq #{Sidekiq::VERSION}.
Starting with Sidekiq 7, major versions are synchronized so Sidekiq Pro 7 works with Sidekiq 7.
Use `bundle up sidekiq-pro` to upgrade.
EOM
rescue LoadError
end
require "sidekiq/config"
require "sidekiq/logger"
require "sidekiq/client"
require "sidekiq/transaction_aware_client"
require "sidekiq/job"
require "sidekiq/worker_compatibility_alias"
require "sidekiq/redis_client_adapter"
require "json"
module Sidekiq
NAME = "Sidekiq"
LICENSE = "See LICENSE and the LGPL-3.0 for licensing details."
def self.❨╯°□°❩╯︵┻━┻
puts "Take a deep breath and count to ten..."
end
def self.server?
defined?(Sidekiq::CLI)
end
def self.load_json(string)
JSON.parse(string)
end
def self.dump_json(object)
JSON.generate(object)
end
def self.pro?
defined?(Sidekiq::Pro)
end
def self.ent?
defined?(Sidekiq::Enterprise)
end
def self.redis_pool
(Thread.current[:sidekiq_capsule] || default_configuration).redis_pool
end
def self.redis(&block)
(Thread.current[:sidekiq_capsule] || default_configuration).redis(&block)
end
def self.strict_args!(mode = :raise)
Sidekiq::Config::DEFAULTS[:on_complex_arguments] = mode
end
def self.default_job_options=(hash)
@default_job_options = default_job_options.merge(hash.transform_keys(&:to_s))
end
def self.default_job_options
@default_job_options ||= {"retry" => true, "queue" => "default"}
end
def self.default_configuration
@config ||= Sidekiq::Config.new
end
def self.logger
default_configuration.logger
end
def self.configure_server(&block)
(@config_blocks ||= []) << block
yield default_configuration if server?
end
def self.freeze!
@frozen = true
@config_blocks = nil
end
# Creates a Sidekiq::Config instance that is more tuned for embedding
# within an arbitrary Ruby process. Notably it reduces concurrency by
# default so there is less contention for CPU time with other threads.
#
# inst = Sidekiq.configure_embed do |config|
# config.queues = %w[critical default low]
# end
# inst.run
# sleep 10
# inst.terminate
#
# NB: it is really easy to overload a Ruby process with threads due to the GIL.
# I do not recommend setting concurrency higher than 2-3.
#
# NB: Sidekiq only supports one instance in memory. You will get undefined behavior
# if you try to embed Sidekiq twice in the same process.
def self.configure_embed(&block)
raise "Sidekiq global configuration is frozen, you must create all embedded instances BEFORE calling `run`" if @frozen
require "sidekiq/embedded"
cfg = default_configuration
cfg.concurrency = 2
@config_blocks&.each { |block| block.call(cfg) }
yield cfg
Sidekiq::Embedded.new(cfg)
end
def self.configure_client
yield default_configuration unless server?
end
# We are shutting down Sidekiq but what about threads that
# are working on some long job? This error is
# raised in jobs that have not finished within the hard
# timeout limit. This is needed to rollback db transactions,
# otherwise Ruby's Thread#kill will commit. See #377.
# DO NOT RESCUE THIS ERROR IN YOUR JOBS
class Shutdown < Interrupt; end
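# Illustrative sketch (not part of the original file): prefer `ensure` for cleanup
# so Shutdown can propagate and the job is re-run after the process restarts.
#
#   class CleanupJob
#     include Sidekiq::Job
#     def perform(id)
#       lock = acquire_lock(id) # hypothetical helper
#       do_work(id)             # hypothetical helper
#     ensure
#       lock&.release           # runs even when Sidekiq::Shutdown is raised
#     end
#   end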
end
require "sidekiq/rails" if defined?(::Rails::Engine)

File diff suppressed because it is too large

View File

@ -0,0 +1,127 @@
require "sidekiq/component"
module Sidekiq
# A Sidekiq::Capsule is the set of resources necessary to
# process one or more queues with a given concurrency.
# One "default" Capsule is started but the user may declare additional
# Capsules in their initializer.
#
# This capsule will pull jobs from the "single" queue and process
# the jobs with one thread, meaning the jobs will be processed serially.
#
# Sidekiq.configure_server do |config|
# config.capsule("single-threaded") do |cap|
# cap.concurrency = 1
# cap.queues = %w(single)
# end
# end
class Capsule
include Sidekiq::Component
attr_reader :name
attr_reader :queues
attr_accessor :concurrency
attr_reader :mode
attr_reader :weights
def initialize(name, config)
@name = name
@config = config
@queues = ["default"]
@weights = {"default" => 0}
@concurrency = config[:concurrency]
@mode = :strict
end
def fetcher
@fetcher ||= begin
inst = (config[:fetch_class] || Sidekiq::BasicFetch).new(self)
inst.setup(config[:fetch_setup]) if inst.respond_to?(:setup)
inst
end
end
def stop
fetcher&.bulk_requeue([])
end
# Sidekiq checks queues in three modes:
# - :strict - all queues have 0 weight and are checked strictly in order
# - :weighted - queues have arbitrary weight between 1 and N
# - :random - all queues have weight of 1
def queues=(val)
@weights = {}
@queues = Array(val).each_with_object([]) do |qstr, memo|
arr = qstr
arr = qstr.split(",") if qstr.is_a?(String)
name, weight = arr
@weights[name] = weight.to_i
[weight.to_i, 1].max.times do
memo << name
end
end
@mode = if @weights.values.all?(&:zero?)
:strict
elsif @weights.values.all? { |x| x == 1 }
:random
else
:weighted
end
end
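# Illustrative example (not part of the original file): with the assignment below,
# "critical" gets weight 5 and "default" weight 1, so the capsule runs in :weighted
# mode and the fetcher checks "critical" roughly five times as often.
#
#   Sidekiq.configure_server do |config|
#     config.capsule("bursty") do |cap|
#       cap.concurrency = 2
#       cap.queues = ["critical,5", "default,1"]
#     end
#   end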
# Allow the middleware to be different per-capsule.
# Avoid if possible and add middleware globally so all
# capsules share the same chains. Easier to debug that way.
def client_middleware
@client_chain ||= config.client_middleware.copy_for(self)
yield @client_chain if block_given?
@client_chain
end
def server_middleware
@server_chain ||= config.server_middleware.copy_for(self)
yield @server_chain if block_given?
@server_chain
end
def redis_pool
Thread.current[:sidekiq_redis_pool] || local_redis_pool
end
def local_redis_pool
# connection pool is lazy, it will not create connections unless you actually need them
# so don't be skimpy!
@redis ||= config.new_redis_pool(@concurrency, name)
end
def redis
raise ArgumentError, "requires a block" unless block_given?
redis_pool.with do |conn|
retryable = true
begin
yield conn
rescue RedisClientAdapter::BaseError => ex
# 2550 Failover can cause the server to become a replica, need
# to disconnect and reopen the socket to get back to the primary.
# 4495 Use the same logic if we have a "Not enough replicas" error from the primary
# 4985 Use the same logic when a blocking command is force-unblocked
# The same retry logic is also used in client.rb
if retryable && ex.message =~ /READONLY|NOREPLICAS|UNBLOCKED/
conn.close
retryable = false
retry
end
raise
end
end
end
def lookup(name)
config.lookup(name)
end
def logger
config.logger
end
end
end

View File

@ -0,0 +1,423 @@
# frozen_string_literal: true
$stdout.sync = true
require "yaml"
require "singleton"
require "optparse"
require "erb"
require "fileutils"
require "sidekiq"
require "sidekiq/config"
require "sidekiq/component"
require "sidekiq/capsule"
require "sidekiq/launcher"
module Sidekiq # :nodoc:
class CLI
include Sidekiq::Component
include Singleton unless $TESTING
attr_accessor :launcher
attr_accessor :environment
attr_accessor :config
def parse(args = ARGV.dup)
@config ||= Sidekiq.default_configuration
setup_options(args)
initialize_logger
validate!
end
def jruby?
defined?(::JRUBY_VERSION)
end
# Code within this method is not tested because it alters
# global process state irreversibly. PRs which improve the
# test coverage of Sidekiq::CLI are welcomed.
def run(boot_app: true)
boot_application if boot_app
if environment == "development" && $stdout.tty? && @config.logger.formatter.is_a?(Sidekiq::Logger::Formatters::Pretty)
print_banner
end
logger.info "Booted Rails #{::Rails.version} application in #{environment} environment" if rails_app?
self_read, self_write = IO.pipe
sigs = %w[INT TERM TTIN TSTP]
# USR1 and USR2 don't work on the JVM
sigs << "USR2" if Sidekiq.pro? && !jruby?
sigs.each do |sig|
old_handler = Signal.trap(sig) do
if old_handler.respond_to?(:call)
begin
old_handler.call
rescue Exception => exc
# signal handlers can't use Logger so puts only
puts ["Error in #{sig} handler", exc].inspect
end
end
self_write.puts(sig)
end
rescue ArgumentError
puts "Signal #{sig} not supported"
end
logger.info "Running in #{RUBY_DESCRIPTION}"
logger.info Sidekiq::LICENSE
logger.info "Upgrade to Sidekiq Pro for more features and support: https://sidekiq.org" unless defined?(::Sidekiq::Pro)
# touch the connection pool so it is created before we
# fire startup and start multithreading.
info = @config.redis_info
ver = Gem::Version.new(info["redis_version"])
logger.warn "You are connecting to Redis #{ver}, Sidekiq requires Redis 6.2.0 or greater" if ver < Gem::Version.new("6.2.0")
maxmemory_policy = info["maxmemory_policy"]
if maxmemory_policy != "noeviction" && maxmemory_policy != ""
# Redis Enterprise Cloud returns "" for their policy 😳
logger.warn <<~EOM
WARNING: Your Redis instance will evict Sidekiq data under heavy load.
The 'noeviction' maxmemory policy is recommended (current policy: '#{maxmemory_policy}').
See: https://github.com/sidekiq/sidekiq/wiki/Using-Redis#memory
EOM
end
# Since the user can pass us a connection pool explicitly in the initializer, we
# need to verify the size is large enough or else Sidekiq's performance is dramatically slowed.
@config.capsules.each_pair do |name, cap|
raise ArgumentError, "Pool size too small for #{name}" if cap.redis_pool.size < cap.concurrency
end
# cache process identity
@config[:identity] = identity
# Touch middleware so it isn't lazy loaded by multiple threads, #3043
@config.server_middleware
# Before this point, the process is initializing with just the main thread.
# Starting here the process will now have multiple threads running.
fire_event(:startup, reverse: false, reraise: true)
logger.debug { "Client Middleware: #{@config.default_capsule.client_middleware.map(&:klass).join(", ")}" }
logger.debug { "Server Middleware: #{@config.default_capsule.server_middleware.map(&:klass).join(", ")}" }
launch(self_read)
end
def launch(self_read)
if environment == "development" && $stdout.tty?
logger.info "Starting processing, hit Ctrl-C to stop"
end
@launcher = Sidekiq::Launcher.new(@config)
begin
launcher.run
while self_read.wait_readable
signal = self_read.gets.strip
handle_signal(signal)
end
rescue Interrupt
logger.info "Shutting down"
launcher.stop
logger.info "Bye!"
# Explicitly exit so busy Processor threads won't block process shutdown.
#
# NB: slow at_exit handlers will prevent a timely exit if they take
# a while to run. If Sidekiq is getting here but the process isn't exiting,
# use the TTIN signal to determine where things are stuck.
exit(0)
end
end
HOLIDAY_COLORS = {
# got other color-specific holidays from around the world?
# https://developer-book.com/post/definitive-guide-for-colored-text-in-terminal/#256-color-escape-codes
"3-17" => "\e[1;32m", # St. Patrick's Day green
"10-31" => "\e[38;5;208m" # Halloween orange
}
def self.day
@@day ||= begin
t = Date.today
"#{t.month}-#{t.day}"
end
end
def self.r
@@r ||= HOLIDAY_COLORS[day] || "\e[1;31m"
end
def self.b
@@b ||= HOLIDAY_COLORS[day] || "\e[30m"
end
def self.w
"\e[1;37m"
end
def self.reset
@@b = @@r = @@day = nil
"\e[0m"
end
def self.banner
%{
#{w} m,
#{w} `$b
#{w} .ss, $$: .,d$
#{w} `$$P,d$P' .,md$P"'
#{w} ,$$$$$b#{b}/#{w}md$$$P^'
#{w} .d$$$$$$#{b}/#{w}$$$P'
#{w} $$^' `"#{b}/#{w}$$$' #{r}____ _ _ _ _
#{w} $: #{b}'#{w},$$: #{r} / ___|(_) __| | ___| | _(_) __ _
#{w} `b :$$ #{r} \\___ \\| |/ _` |/ _ \\ |/ / |/ _` |
#{w} $$: #{r} ___) | | (_| | __/ <| | (_| |
#{w} $$ #{r}|____/|_|\\__,_|\\___|_|\\_\\_|\\__, |
#{w} .d$$ #{r} |_|
#{reset}}
end
SIGNAL_HANDLERS = {
# Ctrl-C in terminal
"INT" => ->(cli) { raise Interrupt },
# TERM is the signal that Sidekiq must exit.
# Heroku sends TERM and then waits 30 seconds for process to exit.
"TERM" => ->(cli) { raise Interrupt },
"TSTP" => ->(cli) {
cli.logger.info "Received TSTP, no longer accepting new work"
cli.launcher.quiet
},
"TTIN" => ->(cli) {
Thread.list.each do |thread|
cli.logger.warn "Thread TID-#{(thread.object_id ^ ::Process.pid).to_s(36)} #{thread.name}"
if thread.backtrace
cli.logger.warn thread.backtrace.join("\n")
else
cli.logger.warn "<no backtrace available>"
end
end
}
}
UNHANDLED_SIGNAL_HANDLER = ->(cli) { cli.logger.info "No signal handler registered, ignoring" }
SIGNAL_HANDLERS.default = UNHANDLED_SIGNAL_HANDLER
def handle_signal(sig)
logger.debug "Got #{sig} signal"
SIGNAL_HANDLERS[sig].call(self)
end
private
def print_banner
puts "\e[31m"
puts Sidekiq::CLI.banner
puts "\e[0m"
end
def set_environment(cli_env)
# See #984 for discussion.
# APP_ENV is now the preferred ENV term since it is not tech-specific.
# Both Sinatra 2.0+ and Sidekiq support this term.
# RAILS_ENV and RACK_ENV are there for legacy support.
@environment = cli_env || ENV["APP_ENV"] || ENV["RAILS_ENV"] || ENV["RACK_ENV"] || "development"
config[:environment] = @environment
end
def symbolize_keys_deep!(hash)
hash.keys.each do |k|
symkey = k.respond_to?(:to_sym) ? k.to_sym : k
hash[symkey] = hash.delete k
symbolize_keys_deep! hash[symkey] if hash[symkey].is_a? Hash
end
end
alias_method :die, :exit
alias_method :☠, :exit
def setup_options(args)
# parse CLI options
opts = parse_options(args)
set_environment opts[:environment]
# check config file presence
if opts[:config_file]
unless File.exist?(opts[:config_file])
raise ArgumentError, "No such file #{opts[:config_file]}"
end
else
config_dir = if File.directory?(opts[:require].to_s)
File.join(opts[:require], "config")
else
File.join(@config[:require], "config")
end
%w[sidekiq.yml sidekiq.yml.erb].each do |config_file|
path = File.join(config_dir, config_file)
opts[:config_file] ||= path if File.exist?(path)
end
end
# parse config file options
opts = parse_config(opts[:config_file]).merge(opts) if opts[:config_file]
# set defaults
opts[:queues] = ["default"] if opts[:queues].nil?
opts[:concurrency] = Integer(ENV["RAILS_MAX_THREADS"]) if opts[:concurrency].nil? && ENV["RAILS_MAX_THREADS"]
# merge with defaults
@config.merge!(opts)
@config.default_capsule.tap do |cap|
cap.queues = opts[:queues]
cap.concurrency = opts[:concurrency] || @config[:concurrency]
end
opts[:capsules]&.each do |name, cap_config|
@config.capsule(name.to_s) do |cap|
cap.queues = cap_config[:queues]
cap.concurrency = cap_config[:concurrency]
end
end
end
def boot_application
ENV["RACK_ENV"] = ENV["RAILS_ENV"] = environment
if File.directory?(@config[:require])
require "rails"
if ::Rails::VERSION::MAJOR < 6
warn "Sidekiq #{Sidekiq::VERSION} only supports Rails 6+"
end
require "sidekiq/rails"
require File.expand_path("#{@config[:require]}/config/environment.rb")
@config[:tag] ||= default_tag
else
require @config[:require]
end
end
def default_tag
dir = ::Rails.root
name = File.basename(dir)
prevdir = File.dirname(dir) # Capistrano release directory?
if name.to_i != 0 && prevdir
if File.basename(prevdir) == "releases"
return File.basename(File.dirname(prevdir))
end
end
name
end
def validate!
if !File.exist?(@config[:require]) ||
(File.directory?(@config[:require]) && !File.exist?("#{@config[:require]}/config/application.rb"))
logger.info "=================================================================="
logger.info " Please point Sidekiq to a Rails application or a Ruby file "
logger.info " to load your job classes with -r [DIR|FILE]."
logger.info "=================================================================="
logger.info @parser
die(1)
end
[:concurrency, :timeout].each do |opt|
raise ArgumentError, "#{opt}: #{@config[opt]} is not a valid value" if @config[opt].to_i <= 0
end
end
def parse_options(argv)
opts = {}
@parser = option_parser(opts)
@parser.parse!(argv)
opts
end
def option_parser(opts)
parser = OptionParser.new { |o|
o.on "-c", "--concurrency INT", "processor threads to use" do |arg|
opts[:concurrency] = Integer(arg)
end
o.on "-e", "--environment ENV", "Application environment" do |arg|
opts[:environment] = arg
end
o.on "-g", "--tag TAG", "Process tag for procline" do |arg|
opts[:tag] = arg
end
o.on "-q", "--queue QUEUE[,WEIGHT]", "Queues to process with optional weights" do |arg|
opts[:queues] ||= []
opts[:queues] << arg
end
o.on "-r", "--require [PATH|DIR]", "Location of Rails application with jobs or file to require" do |arg|
opts[:require] = arg
end
o.on "-t", "--timeout NUM", "Shutdown timeout" do |arg|
opts[:timeout] = Integer(arg)
end
o.on "-v", "--verbose", "Print more verbose output" do |arg|
opts[:verbose] = arg
end
o.on "-C", "--config PATH", "path to YAML config file" do |arg|
opts[:config_file] = arg
end
o.on "-V", "--version", "Print version and exit" do
puts "Sidekiq #{Sidekiq::VERSION}"
die(0)
end
}
parser.banner = "sidekiq [options]"
parser.on_tail "-h", "--help", "Show help" do
logger.info parser
die 1
end
parser
end
def initialize_logger
@config.logger.level = ::Logger::DEBUG if @config[:verbose]
end
def parse_config(path)
erb = ERB.new(File.read(path), trim_mode: "-")
erb.filename = File.expand_path(path)
opts = YAML.safe_load(erb.result, permitted_classes: [Symbol], aliases: true) || {}
if opts.respond_to? :deep_symbolize_keys!
opts.deep_symbolize_keys!
else
symbolize_keys_deep!(opts)
end
opts = opts.merge(opts.delete(environment.to_sym) || {})
opts.delete(:strict)
opts
end
def rails_app?
defined?(::Rails) && ::Rails.respond_to?(:application)
end
end
end
require "sidekiq/systemd"
require "sidekiq/metrics/tracking"

View File

@ -0,0 +1,268 @@
# frozen_string_literal: true
require "securerandom"
require "sidekiq/middleware/chain"
require "sidekiq/job_util"
module Sidekiq
class Client
include Sidekiq::JobUtil
##
# Define client-side middleware:
#
# client = Sidekiq::Client.new
# client.middleware do |chain|
# chain.use MyClientMiddleware
# end
# client.push('class' => 'SomeJob', 'args' => [1,2,3])
#
# All client instances default to the globally-defined
# Sidekiq.client_middleware but you can change as necessary.
#
def middleware(&block)
if block
@chain = @chain.dup
yield @chain
end
@chain
end
attr_accessor :redis_pool
# Sidekiq::Client is responsible for pushing job payloads to Redis.
# Requires the :pool or :config keyword argument.
#
# Sidekiq::Client.new(pool: Sidekiq::RedisConnection.create)
#
# Inside the Sidekiq process, you can reuse the configured resources:
#
# Sidekiq::Client.new(config: config)
#
# @param pool [ConnectionPool] explicit Redis pool to use
# @param config [Sidekiq::Config] use the pool and middleware from the given Sidekiq container
# @param chain [Sidekiq::Middleware::Chain] use the given middleware chain
def initialize(*args, **kwargs)
if args.size == 1 && kwargs.size == 0
warn "Sidekiq::Client.new(pool) is deprecated, please use Sidekiq::Client.new(pool: pool), #{caller(0..3)}"
# old calling method, accept 1 pool argument
@redis_pool = args[0]
@chain = Sidekiq.default_configuration.client_middleware
@config = Sidekiq.default_configuration
else
# new calling method: keyword arguments
@config = kwargs[:config] || Sidekiq.default_configuration
@redis_pool = kwargs[:pool] || Thread.current[:sidekiq_redis_pool] || @config&.redis_pool
@chain = kwargs[:chain] || @config&.client_middleware
raise ArgumentError, "No Redis pool available for Sidekiq::Client" unless @redis_pool
end
end
##
# The main method used to push a job to Redis. Accepts a number of options:
#
# queue - the named queue to use, default 'default'
# class - the job class to call, required
# args - an array of simple arguments to the perform method, must be JSON-serializable
# at - timestamp to schedule the job (optional), must be Numeric (e.g. Time.now.to_f)
# retry - whether to retry this job if it fails, default true or an integer number of retries
# retry_for - relative amount of time to retry this job if it fails, default nil
# backtrace - whether to save any error backtrace, default false
#
# If class is set to the class name, the jobs' options will be based on Sidekiq's default
# job options. Otherwise, they will be based on the job class's options.
#
# Any options valid for a job class's sidekiq_options are also available here.
#
# All keys must be strings, not symbols. NB: because we are serializing to JSON, all
# symbols in 'args' will be converted to strings. Note that +backtrace: true+ can take quite a bit of
# space in Redis; a large volume of failing jobs can start Redis swapping if you aren't careful.
#
# Returns a unique Job ID. If middleware stops the job, nil will be returned instead.
#
# Example:
# push('queue' => 'my_queue', 'class' => MyJob, 'args' => ['foo', 1, :bat => 'bar'])
#
def push(item)
normed = normalize_item(item)
payload = middleware.invoke(item["class"], normed, normed["queue"], @redis_pool) do
normed
end
if payload
verify_json(payload)
raw_push([payload])
payload["jid"]
end
end
##
# Push a large number of jobs to Redis. This method cuts out the redis
# network round trip latency. It pushes jobs in batches if more than
# `:batch_size` (1000 by default) of jobs are passed. I wouldn't recommend making `:batch_size`
# larger than 1000 but YMMV based on network quality, size of job args, etc.
# A large number of jobs can cause a bit of Redis command processing latency.
#
# Takes the same arguments as #push except that args is expected to be
# an Array of Arrays. All other keys are duplicated for each job. Each job
# is run through the client middleware pipeline and each job gets its own Job ID
# as normal.
#
# Returns an array of the pushed jobs' jids, may contain nils if any client middleware
# prevented a job push.
#
# Example (pushing jobs in batches):
# push_bulk('class' => MyJob, 'args' => (1..100_000).to_a, batch_size: 1_000)
#
def push_bulk(items)
batch_size = items.delete(:batch_size) || items.delete("batch_size") || 1_000
args = items["args"]
at = items.delete("at")
raise ArgumentError, "Job 'at' must be a Numeric or an Array of Numeric timestamps" if at && (Array(at).empty? || !Array(at).all? { |entry| entry.is_a?(Numeric) })
raise ArgumentError, "Job 'at' Array must have same size as 'args' Array" if at.is_a?(Array) && at.size != args.size
jid = items.delete("jid")
raise ArgumentError, "Explicitly passing 'jid' when pushing more than one job is not supported" if jid && args.size > 1
normed = normalize_item(items)
slice_index = 0
result = args.each_slice(batch_size).flat_map do |slice|
raise ArgumentError, "Bulk arguments must be an Array of Arrays: [[1], [2]]" unless slice.is_a?(Array) && slice.all?(Array)
break [] if slice.empty? # no jobs to push
payloads = slice.map.with_index { |job_args, index|
copy = normed.merge("args" => job_args, "jid" => SecureRandom.hex(12))
copy["at"] = (at.is_a?(Array) ? at[slice_index + index] : at) if at
result = middleware.invoke(items["class"], copy, copy["queue"], @redis_pool) do
verify_json(copy)
copy
end
result || nil
}
slice_index += batch_size
to_push = payloads.compact
raw_push(to_push) unless to_push.empty?
payloads.map { |payload| payload&.[]("jid") }
end
result.is_a?(Enumerator::Lazy) ? result.force : result
end
# Allows sharding of jobs across any number of Redis instances. All jobs
# defined within the block will use the given Redis connection pool.
#
# pool = ConnectionPool.new { Redis.new }
# Sidekiq::Client.via(pool) do
# SomeJob.perform_async(1,2,3)
# SomeOtherJob.perform_async(1,2,3)
# end
#
# Generally this is only needed for very large Sidekiq installs processing
# thousands of jobs per second. I do not recommend sharding unless
# you cannot scale any other way (e.g. splitting your app into smaller apps).
def self.via(pool)
raise ArgumentError, "No pool given" if pool.nil?
current_sidekiq_pool = Thread.current[:sidekiq_redis_pool]
Thread.current[:sidekiq_redis_pool] = pool
yield
ensure
Thread.current[:sidekiq_redis_pool] = current_sidekiq_pool
end
class << self
def push(item)
new.push(item)
end
def push_bulk(...)
new.push_bulk(...)
end
# Resque compatibility helpers. Note all helpers
# should go through Sidekiq::Job#client_push.
#
# Example usage:
# Sidekiq::Client.enqueue(MyJob, 'foo', 1, :bat => 'bar')
#
# Messages are enqueued to the 'default' queue.
#
def enqueue(klass, *args)
klass.client_push("class" => klass, "args" => args)
end
# Example usage:
# Sidekiq::Client.enqueue_to(:queue_name, MyJob, 'foo', 1, :bat => 'bar')
#
def enqueue_to(queue, klass, *args)
klass.client_push("queue" => queue, "class" => klass, "args" => args)
end
# Example usage:
# Sidekiq::Client.enqueue_to_in(:queue_name, 3.minutes, MyJob, 'foo', 1, :bat => 'bar')
#
def enqueue_to_in(queue, interval, klass, *args)
int = interval.to_f
now = Time.now.to_f
ts = ((int < 1_000_000_000) ? now + int : int)
item = {"class" => klass, "args" => args, "at" => ts, "queue" => queue}
item.delete("at") if ts <= now
klass.client_push(item)
end
# Example usage:
# Sidekiq::Client.enqueue_in(3.minutes, MyJob, 'foo', 1, :bat => 'bar')
#
def enqueue_in(interval, klass, *args)
klass.perform_in(interval, *args)
end
end
private
def raw_push(payloads)
@redis_pool.with do |conn|
retryable = true
begin
conn.pipelined do |pipeline|
atomic_push(pipeline, payloads)
end
rescue RedisClient::Error => ex
# 2550 Failover can cause the server to become a replica, need
# to disconnect and reopen the socket to get back to the primary.
# 4495 Use the same logic if we have a "Not enough replicas" error from the primary
# 4985 Use the same logic when a blocking command is force-unblocked
# The retry logic is copied from sidekiq.rb
if retryable && ex.message =~ /READONLY|NOREPLICAS|UNBLOCKED/
conn.close
retryable = false
retry
end
raise
end
end
true
end
def atomic_push(conn, payloads)
if payloads.first.key?("at")
conn.zadd("schedule", payloads.flat_map { |hash|
at = hash.delete("at").to_s
# ActiveJob sets this but the job has not been enqueued yet
hash.delete("enqueued_at")
[at, Sidekiq.dump_json(hash)]
})
else
queue = payloads.first["queue"]
now = Time.now.to_f
to_push = payloads.map { |entry|
entry["enqueued_at"] = now
Sidekiq.dump_json(entry)
}
conn.sadd("queues", [queue])
conn.lpush("queue:#{queue}", to_push)
end
end
end
end

View File

@@ -0,0 +1,68 @@
# frozen_string_literal: true
module Sidekiq
##
# Sidekiq::Component assumes a config instance is available at @config
module Component # :nodoc:
attr_reader :config
def watchdog(last_words)
yield
rescue Exception => ex
handle_exception(ex, {context: last_words})
raise ex
end
def safe_thread(name, &block)
Thread.new do
Thread.current.name = "sidekiq.#{name}"
watchdog(name, &block)
end
end
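# Hedged usage sketch (the thread body is illustrative): safe_thread names the
# thread and routes any raised exception through handle_exception via watchdog:
#
#   safe_thread("poller") do
#     loop { poll_once; sleep 5 }
#   end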
def logger
config.logger
end
def redis(&block)
config.redis(&block)
end
def tid
Thread.current["sidekiq_tid"] ||= (Thread.current.object_id ^ ::Process.pid).to_s(36)
end
def hostname
ENV["DYNO"] || Socket.gethostname
end
def process_nonce
@@process_nonce ||= SecureRandom.hex(6)
end
def identity
@@identity ||= "#{hostname}:#{::Process.pid}:#{process_nonce}"
end
def handle_exception(ex, ctx = {})
config.handle_exception(ex, ctx)
end
def fire_event(event, options = {})
oneshot = options.fetch(:oneshot, true)
reverse = options[:reverse]
reraise = options[:reraise]
logger.debug("Firing #{event} event") if oneshot
arr = config[:lifecycle_events][event]
arr.reverse! if reverse
arr.each do |block|
block.call
rescue => ex
handle_exception(ex, {context: "Exception during Sidekiq lifecycle event.", event: event})
raise ex if reraise
end
arr.clear if oneshot # once we've fired an event, we never fire it again
end
end
end

View File

@@ -0,0 +1,287 @@
require "forwardable"
require "set"
require "sidekiq/redis_connection"
module Sidekiq
# Sidekiq::Config represents the global configuration for an instance of Sidekiq.
class Config
extend Forwardable
DEFAULTS = {
labels: Set.new,
require: ".",
environment: nil,
concurrency: 5,
timeout: 25,
poll_interval_average: nil,
average_scheduled_poll_interval: 5,
on_complex_arguments: :raise,
error_handlers: [],
death_handlers: [],
lifecycle_events: {
startup: [],
quiet: [],
shutdown: [],
# triggers when we fire the first heartbeat on startup OR repairing a network partition
heartbeat: [],
# triggers on EVERY heartbeat call, every 10 seconds
beat: []
},
dead_max_jobs: 10_000,
dead_timeout_in_seconds: 180 * 24 * 60 * 60, # 6 months
reloader: proc { |&block| block.call },
backtrace_cleaner: ->(backtrace) { backtrace }
}
ERROR_HANDLER = ->(ex, ctx, cfg = Sidekiq.default_configuration) {
l = cfg.logger
l.warn(Sidekiq.dump_json(ctx)) unless ctx.empty?
l.warn("#{ex.class.name}: #{ex.message}")
unless ex.backtrace.nil?
backtrace = cfg[:backtrace_cleaner].call(ex.backtrace)
l.warn(backtrace.join("\n"))
end
}
def initialize(options = {})
@options = DEFAULTS.merge(options)
@options[:error_handlers] << ERROR_HANDLER if @options[:error_handlers].empty?
@directory = {}
@redis_config = {}
@capsules = {}
end
def_delegators :@options, :[], :[]=, :fetch, :key?, :has_key?, :merge!
attr_reader :capsules
def to_json(*)
Sidekiq.dump_json(@options)
end
# LEGACY: edits the default capsule
# config.concurrency = 5
def concurrency=(val)
default_capsule.concurrency = Integer(val)
end
def concurrency
default_capsule.concurrency
end
def total_concurrency
capsules.each_value.sum(&:concurrency)
end
# Edit the default capsule.
# config.queues = %w( high default low ) # strict
# config.queues = %w( high,3 default,2 low,1 ) # weighted
# config.queues = %w( feature1,1 feature2,1 feature3,1 ) # random
#
# With weighted priority, queue will be checked first (weight / total) of the time.
# high will be checked first (3/6) or 50% of the time.
# I'd recommend setting weights between 1-10. Weights in the hundreds or thousands
# are ridiculous and unnecessarily expensive. You can get random queue ordering
# by explicitly setting all weights to 1.
def queues=(val)
default_capsule.queues = val
end
def queues
default_capsule.queues
end
def client_middleware
@client_chain ||= Sidekiq::Middleware::Chain.new(self)
yield @client_chain if block_given?
@client_chain
end
def server_middleware
@server_chain ||= Sidekiq::Middleware::Chain.new(self)
yield @server_chain if block_given?
@server_chain
end
def default_capsule(&block)
capsule("default", &block)
end
# register a new queue processing subsystem
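# Hedged usage sketch (capsule name and queue are illustrative):
#
#   config.capsule("unsafe") do |cap|
#     cap.concurrency = 1
#     cap.queues = %w[unsafe]
#   end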
def capsule(name)
nm = name.to_s
cap = @capsules.fetch(nm) do
cap = Sidekiq::Capsule.new(nm, self)
@capsules[nm] = cap
end
yield cap if block_given?
cap
end
# All capsules must use the same Redis configuration
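# Hedged usage sketch (the URL is illustrative); this hash is merged into every
# pool created via new_redis_pool:
#
#   config.redis = { url: "redis://localhost:6379/0" }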
def redis=(hash)
@redis_config = @redis_config.merge(hash)
end
def redis_pool
Thread.current[:sidekiq_redis_pool] || Thread.current[:sidekiq_capsule]&.redis_pool || local_redis_pool
end
private def local_redis_pool
# this is our internal client/housekeeping pool. each capsule has its
# own pool for executing threads.
@redis ||= new_redis_pool(10, "internal")
end
def new_redis_pool(size, name = "unset")
# connection pool is lazy, it will not create connections unless you actually need them
# so don't be skimpy!
RedisConnection.create({size: size, logger: logger, pool_name: name}.merge(@redis_config))
end
def redis_info
redis do |conn|
conn.call("INFO") { |i| i.lines(chomp: true).map { |l| l.split(":", 2) }.select { |l| l.size == 2 }.to_h }
rescue RedisClientAdapter::CommandError => ex
# 2850 return fake version when INFO command has (probably) been renamed
raise unless /unknown command/.match?(ex.message)
{
"redis_version" => "9.9.9",
"uptime_in_days" => "9999",
"connected_clients" => "9999",
"used_memory_human" => "9P",
"used_memory_peak_human" => "9P"
}.freeze
end
end
def redis
raise ArgumentError, "requires a block" unless block_given?
redis_pool.with do |conn|
retryable = true
begin
yield conn
rescue RedisClientAdapter::BaseError => ex
# 2550 Failover can cause the server to become a replica, need
# to disconnect and reopen the socket to get back to the primary.
# 4495 Use the same logic if we have a "Not enough replicas" error from the primary
# 4985 Use the same logic when a blocking command is force-unblocked
# The same retry logic is also used in client.rb
if retryable && ex.message =~ /READONLY|NOREPLICAS|UNBLOCKED/
conn.close
retryable = false
retry
end
raise
end
end
end
# register global singletons which can be accessed elsewhere
def register(name, instance)
@directory[name] = instance
end
# find a singleton
def lookup(name, default_class = nil)
# JNDI is just a fancy name for a hash lookup
@directory.fetch(name) do |key|
return nil unless default_class
@directory[key] = default_class.new(self)
end
end
##
# Death handlers are called when all retries for a job have been exhausted and
# the job dies. It's the notification to your application
# that this job will not succeed without manual intervention.
#
# Sidekiq.configure_server do |config|
# config.death_handlers << ->(job, ex) do
# end
# end
def death_handlers
@options[:death_handlers]
end
# How frequently Redis should be checked by a random Sidekiq process for
# scheduled and retriable jobs. Each individual process will take turns by
# waiting some multiple of this value.
#
# See sidekiq/scheduled.rb for an in-depth explanation of this value
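#
# Hedged usage sketch: raise the average poll interval on very large clusters
# to reduce scheduled-set polling load (15 is an illustrative value):
#
#   Sidekiq.configure_server do |config|
#     config.average_scheduled_poll_interval = 15
#   end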
def average_scheduled_poll_interval=(interval)
@options[:average_scheduled_poll_interval] = interval
end
# Register a proc to handle any error which occurs within the Sidekiq process.
#
# Sidekiq.configure_server do |config|
# config.error_handlers << proc {|ex,ctx_hash| MyErrorService.notify(ex, ctx_hash) }
# end
#
# The default error handler logs errors to @logger.
def error_handlers
@options[:error_handlers]
end
# Register a block to run at a point in the Sidekiq lifecycle.
# :startup, :quiet or :shutdown are valid events.
#
# Sidekiq.configure_server do |config|
# config.on(:shutdown) do
# puts "Goodbye cruel world!"
# end
# end
def on(event, &block)
raise ArgumentError, "Symbols only please: #{event}" unless event.is_a?(Symbol)
raise ArgumentError, "Invalid event name: #{event}" unless @options[:lifecycle_events].key?(event)
@options[:lifecycle_events][event] << block
end
def logger
@logger ||= Sidekiq::Logger.new($stdout, level: :info).tap do |log|
log.level = Logger::INFO
log.formatter = if ENV["DYNO"]
Sidekiq::Logger::Formatters::WithoutTimestamp.new
else
Sidekiq::Logger::Formatters::Pretty.new
end
end
end
def logger=(logger)
if logger.nil?
self.logger.level = Logger::FATAL
return
end
@logger = logger
end
private def arity(handler)
return handler.arity if handler.is_a?(Proc)
handler.method(:call).arity
end
# INTERNAL USE ONLY
def handle_exception(ex, ctx = {})
if @options[:error_handlers].size == 0
p ["!!!!!", ex]
end
@options[:error_handlers].each do |handler|
if arity(handler) == 2
# TODO Remove in 8.0
logger.info { "DEPRECATION: Sidekiq exception handlers now take three arguments, see #{handler}" }
handler.call(ex, {_config: self}.merge(ctx))
else
handler.call(ex, ctx, self)
end
rescue Exception => e
l = logger
l.error "!!! ERROR HANDLER THREW AN ERROR !!!"
l.error e
l.error e.backtrace.join("\n") unless e.backtrace.nil?
end
end
end
end

View File

@@ -0,0 +1,62 @@
require "sidekiq/redis_connection"
require "time"
# This file is designed to be required within the user's
# deployment script; it should need a bare minimum of dependencies.
# Usage:
#
# require "sidekiq/deploy"
# Sidekiq::Deploy.mark!("Some change")
#
# If you do not pass a label, Sidekiq will try to use the latest
# git commit info.
#
module Sidekiq
class Deploy
MARK_TTL = 90 * 24 * 60 * 60 # 90 days
LABEL_MAKER = -> {
`git log -1 --format="%h %s"`.strip
}
def self.mark!(label = nil)
Sidekiq::Deploy.new.mark!(label: label)
end
def initialize(pool = Sidekiq::RedisConnection.create)
@pool = pool
end
def mark!(at: Time.now, label: nil)
label ||= LABEL_MAKER.call
# we need to round the timestamp so that we gracefully
# handle a very common error in marking deploys:
# having every process mark its deploy, leading
# to N marks for each deploy. Instead we round the time
# to the minute so that multiple marks within that minute
# will all naturally roll up into one mark per minute.
whence = at.utc
floor = Time.utc(whence.year, whence.month, whence.mday, whence.hour, whence.min, 0)
datecode = floor.strftime("%Y%m%d")
key = "#{datecode}-marks"
stamp = floor.iso8601
@pool.with do |c|
# only allow one deploy mark for a given label for the next minute
lock = c.set("deploylock-#{label}", stamp, nx: true, ex: 60)
if lock
c.multi do |pipe|
pipe.hsetnx(key, stamp, label)
pipe.expire(key, MARK_TTL)
end
end
end
end
def fetch(date = Time.now.utc.to_date)
datecode = date.strftime("%Y%m%d")
@pool.with { |c| c.hgetall("#{datecode}-marks") }
end
end
end

View File

@@ -0,0 +1,61 @@
require "sidekiq/component"
require "sidekiq/launcher"
require "sidekiq/metrics/tracking"
module Sidekiq
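# Hedged usage sketch, assuming the Sidekiq.configure_embed entry point
# (queue names and concurrency are illustrative):
#
#   embed = Sidekiq.configure_embed do |config|
#     config.queues = %w[critical default]
#     config.concurrency = 2
#   end
#   embed.run
#   # ... later, during process shutdown:
#   embed.stop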
class Embedded
include Sidekiq::Component
def initialize(config)
@config = config
end
def run
housekeeping
fire_event(:startup, reverse: false, reraise: true)
@launcher = Sidekiq::Launcher.new(@config, embedded: true)
@launcher.run
sleep 0.2 # pause to give threads time to spin up
logger.info "Sidekiq running embedded, total process thread count: #{Thread.list.size}"
logger.debug { Thread.list.map(&:name) }
end
def quiet
@launcher&.quiet
end
def stop
@launcher&.stop
end
private
def housekeeping
logger.info "Running in #{RUBY_DESCRIPTION}"
logger.info Sidekiq::LICENSE
logger.info "Upgrade to Sidekiq Pro for more features and support: https://sidekiq.org" unless defined?(::Sidekiq::Pro)
# touch the connection pool so it is created before we
# fire startup and start multithreading.
info = config.redis_info
ver = Gem::Version.new(info["redis_version"])
raise "You are connecting to Redis #{ver}, Sidekiq requires Redis 6.2.0 or greater" if ver < Gem::Version.new("6.2.0")
maxmemory_policy = info["maxmemory_policy"]
if maxmemory_policy != "noeviction"
logger.warn <<~EOM
WARNING: Your Redis instance will evict Sidekiq data under heavy load.
The 'noeviction' maxmemory policy is recommended (current policy: '#{maxmemory_policy}').
See: https://github.com/sidekiq/sidekiq/wiki/Using-Redis#memory
EOM
end
logger.debug { "Client Middleware: #{@config.default_capsule.client_middleware.map(&:klass).join(", ")}" }
logger.debug { "Server Middleware: #{@config.default_capsule.server_middleware.map(&:klass).join(", ")}" }
end
end
end

View File

@@ -0,0 +1,88 @@
# frozen_string_literal: true
require "sidekiq"
require "sidekiq/component"
require "sidekiq/capsule"
module Sidekiq # :nodoc:
class BasicFetch
include Sidekiq::Component
# We want the fetch operation to timeout every few seconds so the thread
# can check if the process is shutting down.
TIMEOUT = 2
UnitOfWork = Struct.new(:queue, :job, :config) {
def acknowledge
# nothing to do
end
def queue_name
queue.delete_prefix("queue:")
end
def requeue
config.redis do |conn|
conn.rpush(queue, job)
end
end
}
def initialize(cap)
raise ArgumentError, "missing queue list" unless cap.queues
@config = cap
@strictly_ordered_queues = cap.mode == :strict
@queues = config.queues.map { |q| "queue:#{q}" }
@queues.uniq! if @strictly_ordered_queues
end
def retrieve_work
qs = queues_cmd
# 4825 Sidekiq Pro with all queues paused will return an
# empty set of queues
if qs.size <= 0
sleep(TIMEOUT)
return nil
end
queue, job = redis { |conn| conn.blocking_call(conn.read_timeout + TIMEOUT, "brpop", *qs, TIMEOUT) }
UnitOfWork.new(queue, job, config) if queue
end
def bulk_requeue(inprogress)
return if inprogress.empty?
logger.debug { "Re-queueing terminated jobs" }
jobs_to_requeue = {}
inprogress.each do |unit_of_work|
jobs_to_requeue[unit_of_work.queue] ||= []
jobs_to_requeue[unit_of_work.queue] << unit_of_work.job
end
redis do |conn|
conn.pipelined do |pipeline|
jobs_to_requeue.each do |queue, jobs|
pipeline.rpush(queue, jobs)
end
end
end
logger.info("Pushed #{inprogress.size} jobs back to Redis")
rescue => ex
logger.warn("Failed to requeue #{inprogress.size} jobs: #{ex.message}")
end
# Creating the Redis#brpop command takes into account any
# configured queue weights. By default Redis#brpop returns
# data from the first queue that has pending elements. We
# recreate the queue command each time we invoke Redis#brpop
# to honor weights and avoid queue starvation.
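#
# Worked example (queue names illustrative): with weights "high,3 default,1"
# the capsule expands @queues to three "queue:high" entries and one
# "queue:default", so after shuffle + uniq "queue:high" leads the BRPOP
# argument list roughly 3 out of 4 times.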
def queues_cmd
if @strictly_ordered_queues
@queues
else
permute = @queues.shuffle
permute.uniq!
permute
end
end
end
end

View File

@@ -0,0 +1,374 @@
# frozen_string_literal: true
require "sidekiq/client"
module Sidekiq
##
# Include this module in your job class and you can easily create
# asynchronous jobs:
#
# class HardJob
# include Sidekiq::Job
# sidekiq_options queue: 'critical', retry: 5
#
# def perform(*args)
# # do some work
# end
# end
#
# Then in your Rails app, you can do this:
#
# HardJob.perform_async(1, 2, 3)
#
# Note that perform_async is a class method, perform is an instance method.
#
# Sidekiq::Job also includes several APIs to provide compatibility with
# ActiveJob.
#
# class SomeJob
# include Sidekiq::Job
# queue_as :critical
#
# def perform(...)
# end
# end
#
# SomeJob.set(wait_until: 1.hour).perform_async(123)
#
# Note that arguments passed to the job must still obey Sidekiq's
# best practice for simple, JSON-native data types. Sidekiq will not
# implement ActiveJob's more complex argument serialization. For
# this reason, we don't implement `perform_later` as our call semantics
# are very different.
#
module Job
##
# The Options module is extracted so we can include it in ActiveJob::Base
# and allow native AJs to configure Sidekiq features/internals.
module Options
def self.included(base)
base.extend(ClassMethods)
base.sidekiq_class_attribute :sidekiq_options_hash
base.sidekiq_class_attribute :sidekiq_retry_in_block
base.sidekiq_class_attribute :sidekiq_retries_exhausted_block
end
module ClassMethods
ACCESSOR_MUTEX = Mutex.new
##
# Allows customization for this type of Job.
# Legal options:
#
# queue - name of queue to use for this job type, default *default*
# retry - enable retries for this Job in case of error during execution,
# *true* to use the default or *Integer* count
# backtrace - whether to save any error backtrace in the retry payload to display in web UI,
# can be true, false or an integer number of lines to save, default *false*
#
# In practice, any option is allowed. This is the main mechanism to configure the
# options for a specific job.
def sidekiq_options(opts = {})
opts = opts.transform_keys(&:to_s) # stringify
self.sidekiq_options_hash = get_sidekiq_options.merge(opts)
end
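# Hedged usage sketch: the block receives (count, exception, jobhash) and may
# return a delay in seconds or a routing symbol understood by JobRetry
# (:discard, :kill, :default); the error class below is illustrative:
#
#   sidekiq_retry_in do |count, exception, _jobhash|
#     case exception
#     when Net::ReadTimeout then 60 * (count + 1)
#     else :default
#     end
#   end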
def sidekiq_retry_in(&block)
self.sidekiq_retry_in_block = block
end
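# Hedged usage sketch: called with (jobhash, exception) once retries run out,
# right before the job is sent to the morgue:
#
#   sidekiq_retries_exhausted do |jobhash, exception|
#     Sidekiq.logger.warn "Giving up on #{jobhash["class"]}: #{exception.message}"
#   end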
def sidekiq_retries_exhausted(&block)
self.sidekiq_retries_exhausted_block = block
end
def get_sidekiq_options # :nodoc:
self.sidekiq_options_hash ||= Sidekiq.default_job_options
end
def sidekiq_class_attribute(*attrs)
instance_reader = true
instance_writer = true
attrs.each do |name|
synchronized_getter = "__synchronized_#{name}"
singleton_class.instance_eval do
undef_method(name) if method_defined?(name) || private_method_defined?(name)
end
define_singleton_method(synchronized_getter) { nil }
singleton_class.class_eval do
private(synchronized_getter)
end
define_singleton_method(name) { ACCESSOR_MUTEX.synchronize { send synchronized_getter } }
ivar = "@#{name}"
singleton_class.instance_eval do
m = "#{name}="
undef_method(m) if method_defined?(m) || private_method_defined?(m)
end
define_singleton_method("#{name}=") do |val|
singleton_class.class_eval do
ACCESSOR_MUTEX.synchronize do
undef_method(synchronized_getter) if method_defined?(synchronized_getter) || private_method_defined?(synchronized_getter)
define_method(synchronized_getter) { val }
end
end
if singleton_class?
class_eval do
undef_method(name) if method_defined?(name) || private_method_defined?(name)
define_method(name) do
if instance_variable_defined? ivar
instance_variable_get ivar
else
singleton_class.send name
end
end
end
end
val
end
if instance_reader
undef_method(name) if method_defined?(name) || private_method_defined?(name)
define_method(name) do
if instance_variable_defined?(ivar)
instance_variable_get ivar
else
self.class.public_send name
end
end
end
if instance_writer
m = "#{name}="
undef_method(m) if method_defined?(m) || private_method_defined?(m)
attr_writer name
end
end
end
end
end
attr_accessor :jid
def self.included(base)
raise ArgumentError, "Sidekiq::Job cannot be included in an ActiveJob: #{base.name}" if base.ancestors.any? { |c| c.name == "ActiveJob::Base" }
base.include(Options)
base.extend(ClassMethods)
end
def logger
Sidekiq.logger
end
# This helper class encapsulates the set options for `set`, e.g.
#
# SomeJob.set(queue: 'foo').perform_async(....)
#
class Setter
include Sidekiq::JobUtil
def initialize(klass, opts)
@klass = klass
# NB: the internal hash always has stringified keys
@opts = opts.transform_keys(&:to_s)
# ActiveJob compatibility
interval = @opts.delete("wait_until") || @opts.delete("wait")
at(interval) if interval
end
def set(options)
hash = options.transform_keys(&:to_s)
interval = hash.delete("wait_until") || @opts.delete("wait")
@opts.merge!(hash)
at(interval) if interval
self
end
def perform_async(*args)
if @opts["sync"] == true
perform_inline(*args)
else
@klass.client_push(@opts.merge("args" => args, "class" => @klass))
end
end
# Explicit inline execution of a job. Returns nil if the job did not
# execute, true otherwise.
def perform_inline(*args)
raw = @opts.merge("args" => args, "class" => @klass)
# validate and normalize payload
item = normalize_item(raw)
queue = item["queue"]
# run client-side middleware
cfg = Sidekiq.default_configuration
result = cfg.client_middleware.invoke(item["class"], item, queue, cfg.redis_pool) do
item
end
return nil unless result
# round-trip the payload via JSON
msg = Sidekiq.load_json(Sidekiq.dump_json(item))
# prepare the job instance
klass = Object.const_get(msg["class"])
job = klass.new
job.jid = msg["jid"]
job.bid = msg["bid"] if job.respond_to?(:bid)
# run the job through server-side middleware
result = cfg.server_middleware.invoke(job, msg, msg["queue"]) do
# perform it
job.perform(*msg["args"])
true
end
return nil unless result
# jobs do not return a result. they should store any
# modified state.
true
end
alias_method :perform_sync, :perform_inline
def perform_bulk(args, batch_size: 1_000)
client = @klass.build_client
client.push_bulk(@opts.merge("class" => @klass, "args" => args, :batch_size => batch_size))
end
# +interval+ must be a timestamp, numeric or something that acts
# numeric (like an activesupport time interval).
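#
# Hedged usage sketch (job class and args are illustrative):
#
#   SomeJob.set(queue: "low").perform_in(300, "arg")       # run in ~5 minutes
#   SomeJob.set(queue: "low").perform_at(Time.now + 3600)  # run in ~1 hour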
def perform_in(interval, *args)
at(interval).perform_async(*args)
end
alias_method :perform_at, :perform_in
private
def at(interval)
int = interval.to_f
now = Time.now.to_f
ts = ((int < 1_000_000_000) ? now + int : int)
# Optimization to enqueue something now that is scheduled to go out now or in the past
@opts["at"] = ts if ts > now
self
end
end
module ClassMethods
def delay(*args)
raise ArgumentError, "Do not call .delay on a Sidekiq::Job class, call .perform_async"
end
def delay_for(*args)
raise ArgumentError, "Do not call .delay_for on a Sidekiq::Job class, call .perform_in"
end
def delay_until(*args)
raise ArgumentError, "Do not call .delay_until on a Sidekiq::Job class, call .perform_at"
end
def queue_as(q)
sidekiq_options("queue" => q.to_s)
end
def set(options)
Setter.new(self, options)
end
def perform_async(*args)
Setter.new(self, {}).perform_async(*args)
end
# Inline execution of job's perform method after passing through Sidekiq.client_middleware and Sidekiq.server_middleware
def perform_inline(*args)
Setter.new(self, {}).perform_inline(*args)
end
alias_method :perform_sync, :perform_inline
##
# Push a large number of jobs to Redis, while limiting the batch of
# each job payload to 1,000. This method helps cut down on the number
# of round trips to Redis, which can increase the performance of enqueueing
# large numbers of jobs.
#
# +items+ must be an Array of Arrays.
#
# For finer-grained control, use `Sidekiq::Client.push_bulk` directly.
#
# Example (3 Redis round trips):
#
# SomeJob.perform_async(1)
# SomeJob.perform_async(2)
# SomeJob.perform_async(3)
#
# Would instead become (1 Redis round trip):
#
# SomeJob.perform_bulk([[1], [2], [3]])
#
def perform_bulk(*args, **kwargs)
Setter.new(self, {}).perform_bulk(*args, **kwargs)
end
# +interval+ must be a timestamp, numeric or something that acts
# numeric (like an activesupport time interval).
def perform_in(interval, *args)
int = interval.to_f
now = Time.now.to_f
ts = ((int < 1_000_000_000) ? now + int : int)
item = {"class" => self, "args" => args}
# Optimization to enqueue something now that is scheduled to go out now or in the past
item["at"] = ts if ts > now
client_push(item)
end
alias_method :perform_at, :perform_in
##
# Allows customization for this type of Job.
# Legal options:
#
# queue - use a named queue for this Job, default 'default'
# retry - enable the RetryJobs middleware for this Job, *true* to use the default
# or *Integer* count
# backtrace - whether to save any error backtrace in the retry payload to display in web UI,
# can be true, false or an integer number of lines to save, default *false*
# pool - use the given Redis connection pool to push this type of job to a given shard.
#
# In practice, any option is allowed. This is the main mechanism to configure the
# options for a specific job.
def sidekiq_options(opts = {})
super
end
def client_push(item) # :nodoc:
raise ArgumentError, "Job payloads should contain no Symbols: #{item}" if item.any? { |k, v| k.is_a?(::Symbol) }
# allow the user to dynamically re-target jobs to another shard using the "pool" attribute
# FooJob.set(pool: SOME_POOL).perform_async
old = Thread.current[:sidekiq_redis_pool]
pool = item.delete("pool")
Thread.current[:sidekiq_redis_pool] = pool if pool
begin
build_client.push(item)
ensure
Thread.current[:sidekiq_redis_pool] = old
end
end
def build_client # :nodoc:
pool = Thread.current[:sidekiq_redis_pool] || get_sidekiq_options["pool"] || Sidekiq.default_configuration.redis_pool
client_class = get_sidekiq_options["client_class"] || Sidekiq::Client
client_class.new(pool: pool)
end
end
end
end

View File

@@ -0,0 +1,51 @@
# frozen_string_literal: true
module Sidekiq
class JobLogger
def initialize(logger)
@logger = logger
end
def call(item, queue)
start = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC)
@logger.info("start")
yield
Sidekiq::Context.add(:elapsed, elapsed(start))
@logger.info("done")
rescue Exception
Sidekiq::Context.add(:elapsed, elapsed(start))
@logger.info("fail")
raise
end
def prepare(job_hash, &block)
# If we're using a wrapper class, like ActiveJob, use the "wrapped"
# attribute to expose the underlying thing.
h = {
class: job_hash["display_class"] || job_hash["wrapped"] || job_hash["class"],
jid: job_hash["jid"]
}
h[:bid] = job_hash["bid"] if job_hash.has_key?("bid")
h[:tags] = job_hash["tags"] if job_hash.has_key?("tags")
Thread.current[:sidekiq_context] = h
level = job_hash["log_level"]
if level && @logger.respond_to?(:log_at)
@logger.log_at(level, &block)
else
yield
end
ensure
Thread.current[:sidekiq_context] = nil
end
private
def elapsed(start)
(::Process.clock_gettime(::Process::CLOCK_MONOTONIC) - start).round(3)
end
end
end

View File

@@ -0,0 +1,300 @@
# frozen_string_literal: true
require "zlib"
require "base64"
require "sidekiq/component"
module Sidekiq
##
# Automatically retry jobs that fail in Sidekiq.
# Sidekiq's retry support assumes a typical development lifecycle:
#
# 0. Push some code changes with a bug in it.
# 1. Bug causes job processing to fail, Sidekiq's middleware captures
# the job and pushes it onto a retry queue.
# 2. Sidekiq retries jobs in the retry queue multiple times with
# an exponential delay, the job continues to fail.
# 3. After a few days, a developer deploys a fix. The job is
# reprocessed successfully.
# 4. Once retries are exhausted, Sidekiq will give up and move the
# job to the Dead Job Queue (aka morgue) where it must be dealt with
# manually in the Web UI.
# 5. After 6 months on the DJQ, Sidekiq will discard the job.
#
# A job looks like:
#
# { 'class' => 'HardJob', 'args' => [1, 2, 'foo'], 'retry' => true }
#
# The 'retry' option also accepts a number (in place of 'true'):
#
# { 'class' => 'HardJob', 'args' => [1, 2, 'foo'], 'retry' => 5 }
#
# The job will be retried this number of times before giving up. (If simply
# 'true', Sidekiq retries 25 times)
#
# Relevant options for job retries:
#
# * 'queue' - the queue for the initial job
# * 'retry_queue' - if job retries should be pushed to a different (e.g. lower priority) queue
# * 'retry_count' - number of times we've retried so far.
# * 'error_message' - the message from the exception
# * 'error_class' - the exception class
# * 'failed_at' - the first time it failed
# * 'retried_at' - the last time it was retried
# * 'backtrace' - the number of lines of error backtrace to store
#
# We don't store the backtrace by default as that can add a lot of overhead
# to the job and everyone is using an error service, right?
#
# The default number of retries is 25 which works out to about 3 weeks
# You can change the default maximum number of retries in your initializer:
#
# Sidekiq.default_configuration[:max_retries] = 7
#
# or limit the number of retries for a particular job and send retries to
# a low priority queue with:
#
# class MyJob
# include Sidekiq::Job
# sidekiq_options retry: 10, retry_queue: 'low'
# end
#
class JobRetry
class Handled < ::RuntimeError; end
class Skip < Handled; end
include Sidekiq::Component
DEFAULT_MAX_RETRY_ATTEMPTS = 25
def initialize(capsule)
@config = @capsule = capsule
@max_retries = Sidekiq.default_configuration[:max_retries] || DEFAULT_MAX_RETRY_ATTEMPTS
@backtrace_cleaner = Sidekiq.default_configuration[:backtrace_cleaner]
end
# The global retry handler requires only the barest of data.
# We want to be able to retry as much as possible so we don't
# require the job to be instantiated.
def global(jobstr, queue)
yield
rescue Handled => ex
raise ex
rescue Sidekiq::Shutdown => ey
# ignore, will be pushed back onto queue during hard_shutdown
raise ey
rescue Exception => e
# ignore, will be pushed back onto queue during hard_shutdown
raise Sidekiq::Shutdown if exception_caused_by_shutdown?(e)
msg = Sidekiq.load_json(jobstr)
if msg["retry"]
process_retry(nil, msg, queue, e)
else
@capsule.config.death_handlers.each do |handler|
handler.call(msg, e)
rescue => handler_ex
handle_exception(handler_ex, {context: "Error calling death handler", job: msg})
end
end
raise Handled
end
# The local retry support means that any errors that occur within
# this block can be associated with the given job instance.
# This is required to support the `sidekiq_retries_exhausted` block.
#
# Note that any exception from the block is wrapped in the Skip
# exception so the global block does not reprocess the error. The
# Skip exception is unwrapped within Sidekiq::Processor#process before
# calling the handle_exception handlers.
def local(jobinst, jobstr, queue)
yield
rescue Handled => ex
raise ex
rescue Sidekiq::Shutdown => ey
# ignore, will be pushed back onto queue during hard_shutdown
raise ey
rescue Exception => e
# ignore, will be pushed back onto queue during hard_shutdown
raise Sidekiq::Shutdown if exception_caused_by_shutdown?(e)
msg = Sidekiq.load_json(jobstr)
if msg["retry"].nil?
msg["retry"] = jobinst.class.get_sidekiq_options["retry"]
end
raise e unless msg["retry"]
process_retry(jobinst, msg, queue, e)
# We've handled this error associated with this job, don't
# need to handle it at the global level
raise Skip
end
private
# Note that +jobinst+ can be nil here if an error is raised before we can
# instantiate the job instance. All access must be guarded and
# best effort.
def process_retry(jobinst, msg, queue, exception)
max_retry_attempts = retry_attempts_from(msg["retry"], @max_retries)
msg["queue"] = (msg["retry_queue"] || queue)
m = exception_message(exception)
if m.respond_to?(:scrub!)
m.force_encoding("utf-8")
m.scrub!
end
msg["error_message"] = m
msg["error_class"] = exception.class.name
count = if msg["retry_count"]
msg["retried_at"] = Time.now.to_f
msg["retry_count"] += 1
else
msg["failed_at"] = Time.now.to_f
msg["retry_count"] = 0
end
if msg["backtrace"]
backtrace = @backtrace_cleaner.call(exception.backtrace)
lines = if msg["backtrace"] == true
backtrace
else
backtrace[0...msg["backtrace"].to_i]
end
msg["error_backtrace"] = compress_backtrace(lines)
end
return retries_exhausted(jobinst, msg, exception) if count >= max_retry_attempts
rf = msg["retry_for"]
return retries_exhausted(jobinst, msg, exception) if rf && ((msg["failed_at"] + rf) < Time.now.to_f)
strategy, delay = delay_for(jobinst, count, exception, msg)
case strategy
when :discard
return # poof!
when :kill
return retries_exhausted(jobinst, msg, exception)
end
# Logging here can break retries if the logging device raises ENOSPC #3979
# logger.debug { "Failure! Retry #{count} in #{delay} seconds" }
jitter = rand(10) * (count + 1)
retry_at = Time.now.to_f + delay + jitter
payload = Sidekiq.dump_json(msg)
redis do |conn|
conn.zadd("retry", retry_at.to_s, payload)
end
end
# returns (strategy, seconds)
def delay_for(jobinst, count, exception, msg)
rv = begin
# sidekiq_retry_in can return two different things:
# 1. When to retry next, as an integer of seconds
# 2. A symbol which re-routes the job elsewhere, e.g. :discard, :kill, :default
block = jobinst&.sidekiq_retry_in_block
# the sidekiq_retry_in_block can be defined in a wrapped class (ActiveJob for instance)
unless msg["wrapped"].nil?
wrapped = Object.const_get(msg["wrapped"])
block = wrapped.respond_to?(:sidekiq_retry_in_block) ? wrapped.sidekiq_retry_in_block : nil
end
block&.call(count, exception, msg)
rescue Exception => e
handle_exception(e, {context: "Failure scheduling retry using the defined `sidekiq_retry_in` in #{jobinst.class.name}, falling back to default"})
nil
end
rv = rv.to_i if rv.respond_to?(:to_i)
delay = (count**4) + 15
if Integer === rv && rv > 0
delay = rv
elsif rv == :discard
return [:discard, nil] # do nothing, job goes poof
elsif rv == :kill
return [:kill, nil]
end
[:default, delay]
end
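# Worked example of the default backoff: for retry_count 3 the base delay is
# 3**4 + 15 = 96 seconds; process_retry then adds rand(10) * (count + 1)
# seconds of jitter (0-36 here), so the 4th attempt is scheduled roughly
# 96-132 seconds out.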
def retries_exhausted(jobinst, msg, exception)
begin
block = jobinst&.sidekiq_retries_exhausted_block
# the sidekiq_retries_exhausted_block can be defined in a wrapped class (ActiveJob for instance)
unless msg["wrapped"].nil?
wrapped = Object.const_get(msg["wrapped"])
block = wrapped.respond_to?(:sidekiq_retries_exhausted_block) ? wrapped.sidekiq_retries_exhausted_block : nil
end
block&.call(msg, exception)
rescue => e
handle_exception(e, {context: "Error calling retries_exhausted", job: msg})
end
send_to_morgue(msg) unless msg["dead"] == false
@capsule.config.death_handlers.each do |handler|
handler.call(msg, exception)
rescue => e
handle_exception(e, {context: "Error calling death handler", job: msg})
end
end
def send_to_morgue(msg)
logger.info { "Adding dead #{msg["class"]} job #{msg["jid"]}" }
payload = Sidekiq.dump_json(msg)
now = Time.now.to_f
redis do |conn|
conn.multi do |xa|
xa.zadd("dead", now.to_s, payload)
xa.zremrangebyscore("dead", "-inf", now - @capsule.config[:dead_timeout_in_seconds])
xa.zremrangebyrank("dead", 0, - @capsule.config[:dead_max_jobs])
end
end
end
def retry_attempts_from(msg_retry, default)
if msg_retry.is_a?(Integer)
msg_retry
else
default
end
end
def exception_caused_by_shutdown?(e, checked_causes = [])
return false unless e.cause
# Handle circular causes
checked_causes << e.object_id
return false if checked_causes.include?(e.cause.object_id)
e.cause.instance_of?(Sidekiq::Shutdown) ||
exception_caused_by_shutdown?(e.cause, checked_causes)
end
# Extract message from exception.
# Set a default if the message raises an error
def exception_message(exception)
# App code can stuff all sorts of crazy binary data into the error message
# that won't convert to JSON.
exception.message.to_s[0, 10_000]
rescue
+"!!! ERROR MESSAGE THREW AN ERROR !!!"
end
def compress_backtrace(backtrace)
serialized = Sidekiq.dump_json(backtrace)
compressed = Zlib::Deflate.deflate(serialized)
Base64.encode64(compressed)
end
end
end

View File

@@ -0,0 +1,107 @@
require "securerandom"
require "time"
module Sidekiq
module JobUtil
# These functions encapsulate various job utilities.
TRANSIENT_ATTRIBUTES = %w[]
def validate(item)
raise(ArgumentError, "Job must be a Hash with 'class' and 'args' keys: `#{item}`") unless item.is_a?(Hash) && item.key?("class") && item.key?("args")
raise(ArgumentError, "Job args must be an Array: `#{item}`") unless item["args"].is_a?(Array) || item["args"].is_a?(Enumerator::Lazy)
raise(ArgumentError, "Job class must be either a Class or String representation of the class name: `#{item}`") unless item["class"].is_a?(Class) || item["class"].is_a?(String)
raise(ArgumentError, "Job 'at' must be a Numeric timestamp: `#{item}`") if item.key?("at") && !item["at"].is_a?(Numeric)
raise(ArgumentError, "Job tags must be an Array: `#{item}`") if item["tags"] && !item["tags"].is_a?(Array)
raise(ArgumentError, "retry_for must be a relative amount of time, e.g. 48.hours `#{item}`") if item["retry_for"] && item["retry_for"] > 1_000_000_000
end
def verify_json(item)
job_class = item["wrapped"] || item["class"]
args = item["args"]
mode = Sidekiq::Config::DEFAULTS[:on_complex_arguments]
if mode == :raise || mode == :warn
if (unsafe_item = json_unsafe?(args))
msg = <<~EOM
Job arguments to #{job_class} must be native JSON types, but #{unsafe_item.inspect} is a #{unsafe_item.class}.
See https://github.com/sidekiq/sidekiq/wiki/Best-Practices
To disable this error, add `Sidekiq.strict_args!(false)` to your initializer.
EOM
if mode == :raise
raise(ArgumentError, msg)
else
warn(msg)
end
end
end
end
def normalize_item(item)
validate(item)
# merge in the default sidekiq_options for the item's class and/or wrapped element
# this allows ActiveJobs to control sidekiq_options too.
defaults = normalized_hash(item["class"])
defaults = defaults.merge(item["wrapped"].get_sidekiq_options) if item["wrapped"].respond_to?(:get_sidekiq_options)
item = defaults.merge(item)
raise(ArgumentError, "Job must include a valid queue name") if item["queue"].nil? || item["queue"] == ""
# remove job attributes which aren't necessary to persist into Redis
TRANSIENT_ATTRIBUTES.each { |key| item.delete(key) }
item["jid"] ||= SecureRandom.hex(12)
item["class"] = item["class"].to_s
item["queue"] = item["queue"].to_s
item["retry_for"] = item["retry_for"].to_i if item["retry_for"]
item["created_at"] ||= Time.now.to_f
item
end
def normalized_hash(item_class)
if item_class.is_a?(Class)
raise(ArgumentError, "Message must include a Sidekiq::Job class, not class name: #{item_class.ancestors.inspect}") unless item_class.respond_to?(:get_sidekiq_options)
item_class.get_sidekiq_options
else
Sidekiq.default_job_options
end
end
private
RECURSIVE_JSON_UNSAFE = {
Integer => ->(val) {},
Float => ->(val) {},
TrueClass => ->(val) {},
FalseClass => ->(val) {},
NilClass => ->(val) {},
String => ->(val) {},
Array => ->(val) {
val.each do |e|
unsafe_item = RECURSIVE_JSON_UNSAFE[e.class].call(e)
return unsafe_item unless unsafe_item.nil?
end
nil
},
Hash => ->(val) {
val.each do |k, v|
return k unless String === k
unsafe_item = RECURSIVE_JSON_UNSAFE[v.class].call(v)
return unsafe_item unless unsafe_item.nil?
end
nil
}
}
RECURSIVE_JSON_UNSAFE.default = ->(val) { val }
RECURSIVE_JSON_UNSAFE.compare_by_identity
private_constant :RECURSIVE_JSON_UNSAFE
def json_unsafe?(item)
RECURSIVE_JSON_UNSAFE[item.class].call(item)
end
end
end

View File

@@ -0,0 +1,271 @@
# frozen_string_literal: true
require "sidekiq/manager"
require "sidekiq/capsule"
require "sidekiq/scheduled"
require "sidekiq/ring_buffer"
module Sidekiq
# The Launcher starts the Capsule Managers, the Poller thread and provides the process heartbeat.
class Launcher
include Sidekiq::Component
STATS_TTL = 5 * 365 * 24 * 60 * 60 # 5 years
PROCTITLES = [
proc { "sidekiq" },
proc { Sidekiq::VERSION },
proc { |me, data| data["tag"] },
proc { |me, data| "[#{Processor::WORK_STATE.size} of #{me.config.total_concurrency} busy]" },
proc { |me, data| "stopping" if me.stopping? }
]
attr_accessor :managers, :poller
def initialize(config, embedded: false)
@config = config
@embedded = embedded
@managers = config.capsules.values.map do |cap|
Sidekiq::Manager.new(cap)
end
@poller = Sidekiq::Scheduled::Poller.new(@config)
@done = false
end
# Start this Sidekiq instance. If an embedding process already
# has a heartbeat thread, the caller can pass `async_beat: false`
# and have that thread call Launcher#heartbeat every N seconds instead.
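#
# Hedged sketch of that embedded path (the timer object is illustrative, not a
# Sidekiq API):
#
#   launcher = Sidekiq::Launcher.new(config, embedded: true)
#   launcher.run(async_beat: false)
#   my_timer.every(10) { launcher.heartbeat }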
def run(async_beat: true)
Sidekiq.freeze!
logger.debug { @config.merge!({}) }
@thread = safe_thread("heartbeat", &method(:start_heartbeat)) if async_beat
@poller.start
@managers.each(&:start)
end
# Stops this instance from processing any more jobs.
def quiet
return if @done
@done = true
@managers.each(&:quiet)
@poller.terminate
fire_event(:quiet, reverse: true)
end
# Shuts down this Sidekiq instance. Waits up to the deadline for all jobs to complete.
def stop
deadline = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC) + @config[:timeout]
quiet
stoppers = @managers.map do |mgr|
Thread.new do
mgr.stop(deadline)
end
end
fire_event(:shutdown, reverse: true)
stoppers.each(&:join)
clear_heartbeat
end
def stopping?
@done
end
# If embedding Sidekiq, you can have the process heartbeat
# call this method to regularly heartbeat rather than creating
# a separate thread.
def heartbeat
  ❤
end
private unless $TESTING
BEAT_PAUSE = 10
def start_heartbeat
loop do
beat
sleep BEAT_PAUSE
end
logger.info("Heartbeat stopping...")
end
def beat
  $0 = PROCTITLES.map { |proc| proc.call(self, to_data) }.compact.join(" ") unless @embedded
  ❤
end
def clear_heartbeat
flush_stats
# Remove record from Redis since we are shutting down.
# Note we don't stop the heartbeat thread; if the process
# doesn't actually exit, it'll reappear in the Web UI.
redis do |conn|
conn.pipelined do |pipeline|
pipeline.srem("processes", [identity])
pipeline.unlink("#{identity}:work")
end
end
rescue
# best effort, ignore network errors
end
def flush_stats
fails = Processor::FAILURE.reset
procd = Processor::PROCESSED.reset
return if fails + procd == 0
nowdate = Time.now.utc.strftime("%Y-%m-%d")
begin
redis do |conn|
conn.pipelined do |pipeline|
pipeline.incrby("stat:processed", procd)
pipeline.incrby("stat:processed:#{nowdate}", procd)
pipeline.expire("stat:processed:#{nowdate}", STATS_TTL)
pipeline.incrby("stat:failed", fails)
pipeline.incrby("stat:failed:#{nowdate}", fails)
pipeline.expire("stat:failed:#{nowdate}", STATS_TTL)
end
end
rescue => ex
logger.warn("Unable to flush stats: #{ex}")
end
end
def ❤
key = identity
fails = procd = 0
begin
flush_stats
curstate = Processor::WORK_STATE.dup
redis do |conn|
# work is the current set of executing jobs
work_key = "#{key}:work"
conn.pipelined do |transaction|
transaction.unlink(work_key)
curstate.each_pair do |tid, hash|
transaction.hset(work_key, tid, Sidekiq.dump_json(hash))
end
transaction.expire(work_key, 60)
end
end
rtt = check_rtt
fails = procd = 0
kb = memory_usage(::Process.pid)
_, exists, _, _, signal = redis { |conn|
conn.multi { |transaction|
transaction.sadd("processes", [key])
transaction.exists(key)
transaction.hset(key, "info", to_json,
"busy", curstate.size,
"beat", Time.now.to_f,
"rtt_us", rtt,
"quiet", @done.to_s,
"rss", kb)
transaction.expire(key, 60)
transaction.rpop("#{key}-signals")
}
}
# first heartbeat or recovering from an outage and need to reestablish our heartbeat
fire_event(:heartbeat) unless exists > 0
fire_event(:beat, oneshot: false)
::Process.kill(signal, ::Process.pid) if signal && !@embedded
rescue => e
# ignore all redis/network issues
logger.error("heartbeat: #{e}")
# don't lose the counts if there was a network issue
Processor::PROCESSED.incr(procd)
Processor::FAILURE.incr(fails)
end
end
# We run the heartbeat every ten seconds (BEAT_PAUSE).
# Capture five samples of RTT, log a warning if each sample
# is above our warning threshold.
RTT_READINGS = RingBuffer.new(5)
RTT_WARNING_LEVEL = 50_000
def check_rtt
a = b = 0
redis do |x|
a = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC, :microsecond)
x.ping
b = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC, :microsecond)
end
rtt = b - a
RTT_READINGS << rtt
# Ideal RTT for Redis is < 1000µs
# Workable is < 10,000µs
# Log a warning if it's a disaster.
if RTT_READINGS.all? { |x| x > RTT_WARNING_LEVEL }
logger.warn <<~EOM
Your Redis network connection is performing extremely poorly.
Last RTT readings were #{RTT_READINGS.buffer.inspect}, ideally these should be < 1000.
Ensure Redis is running in the same AZ or datacenter as Sidekiq.
If these values are close to 100,000, that means your Sidekiq process may be
CPU-saturated; reduce your concurrency and/or see https://github.com/sidekiq/sidekiq/discussions/5039
EOM
RTT_READINGS.reset
end
rtt
end
MEMORY_GRABBER = case RUBY_PLATFORM
when /linux/
->(pid) {
IO.readlines("/proc/#{$$}/status").each do |line|
next unless line.start_with?("VmRSS:")
break line.split[1].to_i
end
}
when /darwin|bsd/
->(pid) {
`ps -o pid,rss -p #{pid}`.lines.last.split.last.to_i
}
else
->(pid) { 0 }
end
def memory_usage(pid)
MEMORY_GRABBER.call(pid)
end
def to_data
@data ||= {
"hostname" => hostname,
"started_at" => Time.now.to_f,
"pid" => ::Process.pid,
"tag" => @config[:tag] || "",
"concurrency" => @config.total_concurrency,
"queues" => @config.capsules.values.flat_map { |cap| cap.queues }.uniq,
"weights" => to_weights,
"labels" => @config[:labels].to_a,
"identity" => identity,
"version" => Sidekiq::VERSION,
"embedded" => @embedded
}
end
def to_weights
@config.capsules.values.map(&:weights)
end
def to_json
# this data changes infrequently so dump it to a string
# now so we don't need to dump it every heartbeat.
@json ||= Sidekiq.dump_json(to_data)
end
end
end

View File

@@ -0,0 +1,131 @@
# frozen_string_literal: true
require "logger"
require "time"
module Sidekiq
module Context
def self.with(hash)
orig_context = current.dup
current.merge!(hash)
yield
ensure
Thread.current[:sidekiq_context] = orig_context
end
def self.current
Thread.current[:sidekiq_context] ||= {}
end
def self.add(k, v)
current[k] = v
end
end
module LoggingUtils
LEVELS = {
"debug" => 0,
"info" => 1,
"warn" => 2,
"error" => 3,
"fatal" => 4
}
LEVELS.default_proc = proc do |_, level|
puts("Invalid log level: #{level.inspect}")
nil
end
LEVELS.each do |level, numeric_level|
define_method("#{level}?") do
local_level.nil? ? super() : local_level <= numeric_level
end
end
def local_level
Thread.current[:sidekiq_log_level]
end
def local_level=(level)
case level
when Integer
Thread.current[:sidekiq_log_level] = level
when Symbol, String
Thread.current[:sidekiq_log_level] = LEVELS[level.to_s]
when nil
Thread.current[:sidekiq_log_level] = nil
else
raise ArgumentError, "Invalid log level: #{level.inspect}"
end
end
def level
local_level || super
end
# Change the thread-local level for the duration of the given block.
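#
# Hedged usage sketch:
#
#   Sidekiq.logger.log_at(:debug) do
#     Sidekiq.logger.debug "visible even if the global level is :info"
#   end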
def log_at(level)
old_local_level = local_level
self.local_level = level
yield
ensure
self.local_level = old_local_level
end
end
class Logger < ::Logger
include LoggingUtils
module Formatters
class Base < ::Logger::Formatter
def tid
Thread.current["sidekiq_tid"] ||= (Thread.current.object_id ^ ::Process.pid).to_s(36)
end
def ctx
Sidekiq::Context.current
end
def format_context
if ctx.any?
" " + ctx.compact.map { |k, v|
case v
when Array
"#{k}=#{v.join(",")}"
else
"#{k}=#{v}"
end
}.join(" ")
end
end
end
class Pretty < Base
def call(severity, time, program_name, message)
"#{time.utc.iso8601(3)} pid=#{::Process.pid} tid=#{tid}#{format_context} #{severity}: #{message}\n"
end
end
class WithoutTimestamp < Pretty
def call(severity, time, program_name, message)
"pid=#{::Process.pid} tid=#{tid}#{format_context} #{severity}: #{message}\n"
end
end
class JSON < Base
def call(severity, time, program_name, message)
hash = {
ts: time.utc.iso8601(3),
pid: ::Process.pid,
tid: tid,
lvl: severity,
msg: message
}
c = ctx
hash["ctx"] = c unless c.empty?
Sidekiq.dump_json(hash) << "\n"
end
end
end
end
end

View File

@@ -0,0 +1,134 @@
# frozen_string_literal: true
require "sidekiq/processor"
require "set"
module Sidekiq
##
# The Manager is the central coordination point in Sidekiq, controlling
# the lifecycle of the Processors.
#
# Tasks:
#
# 1. start: Spin up Processors.
# 2. processor_died: Handle job failure, throw away Processor, create new one.
# 3. quiet: shutdown idle Processors.
# 4. stop: hard stop the Processors by deadline.
#
# Note that only the last task requires its own Thread since it has to monitor
# the shutdown process. The other tasks are performed by other threads.
#
class Manager
include Sidekiq::Component
attr_reader :workers
attr_reader :capsule
def initialize(capsule)
@config = @capsule = capsule
@count = capsule.concurrency
raise ArgumentError, "Concurrency of #{@count} is not supported" if @count < 1
@done = false
@workers = Set.new
@plock = Mutex.new
@count.times do
@workers << Processor.new(@config, &method(:processor_result))
end
end
def start
@workers.each(&:start)
end
def quiet
return if @done
@done = true
logger.info { "Terminating quiet threads for #{capsule.name} capsule" }
@workers.each(&:terminate)
end
def stop(deadline)
quiet
# some of the shutdown events can be async,
# we don't have any way to know when they're done but
# give them a little time to take effect
sleep PAUSE_TIME
return if @workers.empty?
logger.info { "Pausing to allow jobs to finish..." }
wait_for(deadline) { @workers.empty? }
return if @workers.empty?
hard_shutdown
ensure
capsule.stop
end
def processor_result(processor, reason = nil)
@plock.synchronize do
@workers.delete(processor)
unless @done
p = Processor.new(@config, &method(:processor_result))
@workers << p
p.start
end
end
end
def stopped?
@done
end
private
def hard_shutdown
# We've reached the timeout and we still have busy threads.
# They must die but their jobs shall live on.
cleanup = nil
@plock.synchronize do
cleanup = @workers.dup
end
if cleanup.size > 0
jobs = cleanup.map { |p| p.job }.compact
logger.warn { "Terminating #{cleanup.size} busy threads" }
logger.debug { "Jobs still in progress #{jobs.inspect}" }
# Re-enqueue unfinished jobs
# NOTE: You may notice that we may push a job back to redis before
# the thread is terminated. This is ok because Sidekiq's
# contract says that jobs are run AT LEAST once. Process termination
# is delayed until we're certain the jobs are back in Redis because
# it is worse to lose a job than to run it twice.
capsule.fetcher.bulk_requeue(jobs)
end
cleanup.each do |processor|
processor.kill
end
# when this method returns, we immediately call `exit` which may not give
# the remaining threads time to run `ensure` blocks, etc. We pause here up
# to 3 seconds to give threads a minimal amount of time to run `ensure` blocks.
deadline = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC) + 3
wait_for(deadline) { @workers.empty? }
end
# hack for quicker development / testing environment #2774
PAUSE_TIME = $stdout.tty? ? 0.1 : 0.5
# Wait for the condition block to return true or the deadline to pass.
def wait_for(deadline, &condblock)
remaining = deadline - ::Process.clock_gettime(::Process::CLOCK_MONOTONIC)
while remaining > PAUSE_TIME
return if condblock.call
sleep PAUSE_TIME
remaining = deadline - ::Process.clock_gettime(::Process::CLOCK_MONOTONIC)
end
end
end
end

View File

@@ -0,0 +1,153 @@
require "sidekiq"
require "date"
require "set"
require "sidekiq/metrics/shared"
module Sidekiq
module Metrics
# Allows caller to query for Sidekiq execution metrics within Redis.
# Caller sets a set of attributes to act as filters. {#fetch} will call
# Redis and return a Hash of results.
#
# NB: all metrics and times/dates are UTC only. We specifically do not
# support timezones.
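#
# Hedged usage sketch (the job class name is illustrative):
#
#   q = Sidekiq::Metrics::Query.new
#   q.top_jobs(minutes: 30)           # execution counts/timings for all jobs
#   q.for_job("HardJob", minutes: 30) # per-job series plus histogram buckets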
class Query
def initialize(pool: nil, now: Time.now)
@time = now.utc
@pool = pool || Sidekiq.default_configuration.redis_pool
@klass = nil
end
# Get metric data for all jobs from the last hour
def top_jobs(minutes: 60)
result = Result.new
time = @time
redis_results = @pool.with do |conn|
conn.pipelined do |pipe|
minutes.times do |idx|
key = "j|#{time.strftime("%Y%m%d")}|#{time.hour}:#{time.min}"
pipe.hgetall key
result.prepend_bucket time
time -= 60
end
end
end
time = @time
redis_results.each do |hash|
hash.each do |k, v|
kls, metric = k.split("|")
result.job_results[kls].add_metric metric, time, v.to_i
end
time -= 60
end
result.marks = fetch_marks(result.starts_at..result.ends_at)
result
end
def for_job(klass, minutes: 60)
result = Result.new
time = @time
redis_results = @pool.with do |conn|
conn.pipelined do |pipe|
minutes.times do |idx|
key = "j|#{time.strftime("%Y%m%d")}|#{time.hour}:#{time.min}"
pipe.hmget key, "#{klass}|ms", "#{klass}|p", "#{klass}|f"
result.prepend_bucket time
time -= 60
end
end
end
time = @time
@pool.with do |conn|
redis_results.each do |(ms, p, f)|
result.job_results[klass].add_metric "ms", time, ms.to_i if ms
result.job_results[klass].add_metric "p", time, p.to_i if p
result.job_results[klass].add_metric "f", time, f.to_i if f
result.job_results[klass].add_hist time, Histogram.new(klass).fetch(conn, time).reverse
time -= 60
end
end
result.marks = fetch_marks(result.starts_at..result.ends_at)
result
end
class Result < Struct.new(:starts_at, :ends_at, :size, :buckets, :job_results, :marks)
def initialize
super
self.buckets = []
self.marks = []
self.job_results = Hash.new { |h, k| h[k] = JobResult.new }
end
def prepend_bucket(time)
buckets.unshift time.strftime("%H:%M")
self.ends_at ||= time
self.starts_at = time
end
end
class JobResult < Struct.new(:series, :hist, :totals)
def initialize
super
self.series = Hash.new { |h, k| h[k] = Hash.new(0) }
self.hist = Hash.new { |h, k| h[k] = [] }
self.totals = Hash.new(0)
end
def add_metric(metric, time, value)
totals[metric] += value
series[metric][time.strftime("%H:%M")] += value
# Include timing measurements in seconds for convenience
add_metric("s", time, value / 1000.0) if metric == "ms"
end
def add_hist(time, hist_result)
hist[time.strftime("%H:%M")] = hist_result
end
def total_avg(metric = "ms")
completed = totals["p"] - totals["f"]
totals[metric].to_f / completed
end
def series_avg(metric = "ms")
series[metric].each_with_object(Hash.new(0)) do |(bucket, value), result|
completed = series.dig("p", bucket) - series.dig("f", bucket)
result[bucket] = (completed == 0) ? 0 : value.to_f / completed
end
end
end
class MarkResult < Struct.new(:time, :label)
def bucket
time.strftime("%H:%M")
end
end
private
def fetch_marks(time_range)
[].tap do |result|
marks = @pool.with { |c| c.hgetall("#{@time.strftime("%Y%m%d")}-marks") }
marks.each do |timestamp, label|
time = Time.parse(timestamp)
if time_range.cover? time
result << MarkResult.new(time, label)
end
end
end
end
end
end
end

View File

@@ -0,0 +1,95 @@
require "concurrent"
module Sidekiq
module Metrics
# This is the only dependency on concurrent-ruby in Sidekiq but it's
# mandatory for thread-safety until MRI supports atomic operations on values.
Counter = ::Concurrent::AtomicFixnum
# Implements space-efficient but statistically useful histogram storage.
# A precise time histogram stores every time. Instead we break times into a set of
# known buckets and increment counts of the associated time bucket. Even if we call
# the histogram a million times, we'll still only store 26 buckets.
# NB: needs to be thread-safe or resilient to races.
#
# To store this data, we use Redis' BITFIELD command to store unsigned 16-bit counters
# per bucket per klass per minute. It's unlikely that most people will be executing more
# than 1,000 jobs/sec of a specific job type for a full minute.
class Histogram
include Enumerable
# This number represents the maximum milliseconds for this bucket.
# 20 means all job executions up to 20ms, e.g. if a job takes
# 280ms, it'll increment bucket[7]. Note we can track job executions
# up to about 5.5 minutes. After that, it's assumed you're probably
# not too concerned with its performance.
BUCKET_INTERVALS = [
20, 30, 45, 65, 100,
150, 225, 335, 500, 750,
1100, 1700, 2500, 3800, 5750,
8500, 13000, 20000, 30000, 45000,
65000, 100000, 150000, 225000, 335000,
1e20 # the "maybe your job is too long" bucket
].freeze
LABELS = [
"20ms", "30ms", "45ms", "65ms", "100ms",
"150ms", "225ms", "335ms", "500ms", "750ms",
"1.1s", "1.7s", "2.5s", "3.8s", "5.75s",
"8.5s", "13s", "20s", "30s", "45s",
"65s", "100s", "150s", "225s", "335s",
"Slow"
].freeze
FETCH = "GET u16 #0 GET u16 #1 GET u16 #2 GET u16 #3 \
GET u16 #4 GET u16 #5 GET u16 #6 GET u16 #7 \
GET u16 #8 GET u16 #9 GET u16 #10 GET u16 #11 \
GET u16 #12 GET u16 #13 GET u16 #14 GET u16 #15 \
GET u16 #16 GET u16 #17 GET u16 #18 GET u16 #19 \
GET u16 #20 GET u16 #21 GET u16 #22 GET u16 #23 \
GET u16 #24 GET u16 #25".split
HISTOGRAM_TTL = 8 * 60 * 60
def each
buckets.each { |counter| yield counter.value }
end
def label(idx)
LABELS[idx]
end
attr_reader :buckets
def initialize(klass)
@klass = klass
@buckets = Array.new(BUCKET_INTERVALS.size) { Counter.new }
end
def record_time(ms)
index_to_use = BUCKET_INTERVALS.each_index do |idx|
break idx if ms < BUCKET_INTERVALS[idx]
end
@buckets[index_to_use].increment
end
def fetch(conn, now = Time.now)
window = now.utc.strftime("%d-%H:%-M")
key = "#{@klass}-#{window}"
conn.bitfield_ro(key, *FETCH)
end
def persist(conn, now = Time.now)
buckets, @buckets = @buckets, []
window = now.utc.strftime("%d-%H:%-M")
key = "#{@klass}-#{window}"
cmd = [key, "OVERFLOW", "SAT"]
buckets.each_with_index do |counter, idx|
val = counter.value
cmd << "INCRBY" << "u16" << "##{idx}" << val.to_s if val > 0
end
conn.bitfield(*cmd) if cmd.size > 3
conn.expire(key, HISTOGRAM_TTL)
key
end
end
end
end

View File

@@ -0,0 +1,136 @@
# frozen_string_literal: true
require "time"
require "sidekiq"
require "sidekiq/metrics/shared"
# This file contains the components which track execution metrics within Sidekiq.
module Sidekiq
module Metrics
class ExecutionTracker
include Sidekiq::Component
def initialize(config)
@config = config
@jobs = Hash.new(0)
@totals = Hash.new(0)
@grams = Hash.new { |hash, key| hash[key] = Histogram.new(key) }
@lock = Mutex.new
end
def track(queue, klass)
start = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC, :millisecond)
time_ms = 0
begin
begin
yield
ensure
finish = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC, :millisecond)
time_ms = finish - start
end
# We don't track time for failed jobs as they can have very unpredictable
# execution times. It is more important to know the average time for successful
# jobs so we can better recognize when a perf regression is introduced.
@lock.synchronize {
@grams[klass].record_time(time_ms)
@jobs["#{klass}|ms"] += time_ms
@totals["ms"] += time_ms
}
rescue Exception
@lock.synchronize {
@jobs["#{klass}|f"] += 1
@totals["f"] += 1
}
raise
ensure
@lock.synchronize {
@jobs["#{klass}|p"] += 1
@totals["p"] += 1
}
end
end
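# After a few tracked executions the in-memory counters look roughly like
# this (class name and numbers illustrative):
#
#   @jobs   # => { "MyJob|ms" => 2540, "MyJob|p" => 3, "MyJob|f" => 1 }
#   @totals # => { "ms" => 2540, "p" => 3, "f" => 1 }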
# LONG_TERM = 90 * 24 * 60 * 60
# MID_TERM = 7 * 24 * 60 * 60
SHORT_TERM = 8 * 60 * 60
def flush(time = Time.now)
totals, jobs, grams = reset
procd = totals["p"]
fails = totals["f"]
return if procd == 0 && fails == 0
now = time.utc
# nowdate = now.strftime("%Y%m%d")
# nowhour = now.strftime("%Y%m%d|%-H")
nowmin = now.strftime("%Y%m%d|%-H:%-M")
count = 0
redis do |conn|
# persist fine-grained histogram data
if grams.size > 0
conn.pipelined do |pipe|
grams.each do |_, gram|
gram.persist(pipe, now)
end
end
end
# persist coarse-grained execution counts + execution millis.
# note that as of today we don't use or do anything with the
# daily or hourly rollups.
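# In Redis this becomes one hash per minute, keyed like "j|20240201|12:34"
# (see `nowmin` above), with fields such as "MyJob|ms", "MyJob|p" and
# "MyJob|f" (class name illustrative).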
[
# ["j", jobs, nowdate, LONG_TERM],
# ["j", jobs, nowhour, MID_TERM],
["j", jobs, nowmin, SHORT_TERM]
].each do |prefix, data, bucket, ttl|
conn.pipelined do |xa|
stats = "#{prefix}|#{bucket}"
data.each_pair do |key, value|
xa.hincrby stats, key, value
count += 1
end
xa.expire(stats, ttl)
end
end
logger.debug "Flushed #{count} metrics"
count
end
end
private
def reset
@lock.synchronize {
array = [@totals, @jobs, @grams]
@totals = Hash.new(0)
@jobs = Hash.new(0)
@grams = Hash.new { |hash, key| hash[key] = Histogram.new(key) }
array
}
end
end
class Middleware
include Sidekiq::ServerMiddleware
def initialize(options)
@exec = options
end
def call(_instance, hash, queue, &block)
@exec.track(queue, hash["wrapped"] || hash["class"], &block)
end
end
end
end
Sidekiq.configure_server do |config|
exec = Sidekiq::Metrics::ExecutionTracker.new(config)
config.server_middleware do |chain|
chain.add Sidekiq::Metrics::Middleware, exec
end
config.on(:beat) do
exec.flush
end
end

View File

@ -0,0 +1,207 @@
# frozen_string_literal: true
require "sidekiq/middleware/modules"
module Sidekiq
# Middleware is code configured to run before/after
# a job is processed. It is patterned after Rack
# middleware. Middleware exists for the client side
# (pushing jobs onto the queue) as well as the server
# side (when jobs are actually processed).
#
# Callers will register middleware Classes and Sidekiq will
# create new instances of the middleware for every job. This
# is important so that instance state is not shared accidentally
# between job executions.
#
# To add middleware for the client:
#
# Sidekiq.configure_client do |config|
# config.client_middleware do |chain|
# chain.add MyClientHook
# end
# end
#
# To modify middleware for the server, call
# `server_middleware` with another block:
#
# Sidekiq.configure_server do |config|
# config.server_middleware do |chain|
# chain.add MyServerHook
# chain.remove ActiveRecord
# end
# end
#
# To insert immediately preceding another entry:
#
# Sidekiq.configure_client do |config|
# config.client_middleware do |chain|
# chain.insert_before ActiveRecord, MyClientHook
# end
# end
#
# To insert immediately after another entry:
#
# Sidekiq.configure_client do |config|
# config.client_middleware do |chain|
# chain.insert_after ActiveRecord, MyClientHook
# end
# end
#
# This is an example of a minimal server middleware:
#
# class MyServerHook
# include Sidekiq::ServerMiddleware
#
# def call(job_instance, msg, queue)
# logger.info "Before job"
# redis {|conn| conn.get("foo") } # do something in Redis
# yield
# logger.info "After job"
# end
# end
#
# This is an example of a minimal client middleware; note that
# the method must return the result or the job will not be pushed
# to Redis:
#
# class MyClientHook
# include Sidekiq::ClientMiddleware
#
# def call(job_class, msg, queue, redis_pool)
# logger.info "Before push"
# result = yield
# logger.info "After push"
# result
# end
# end
#
module Middleware
class Chain
include Enumerable
# Iterate through each middleware in the chain
def each(&block)
entries.each(&block)
end
# @api private
def initialize(config = nil) # :nodoc:
@config = config
@entries = nil
yield self if block_given?
end
def entries
@entries ||= []
end
def copy_for(capsule)
chain = Sidekiq::Middleware::Chain.new(capsule)
chain.instance_variable_set(:@entries, entries.dup)
chain
end
# Remove all middleware matching the given Class
# @param klass [Class]
def remove(klass)
entries.delete_if { |entry| entry.klass == klass }
end
# Add the given middleware to the end of the chain.
# Sidekiq will call `klass.new(*args)` to create a clean
# copy of your middleware for every job executed.
#
# chain.add(Statsd::Metrics, { collector: "localhost:8125" })
#
# @param klass [Class] Your middleware class
# @param *args [Array<Object>] Set of arguments to pass to every instance of your middleware
def add(klass, *args)
remove(klass)
entries << Entry.new(@config, klass, *args)
end
# Identical to {#add} except the middleware is added to the front of the chain.
def prepend(klass, *args)
remove(klass)
entries.insert(0, Entry.new(@config, klass, *args))
end
# Inserts +newklass+ before +oldklass+ in the chain.
# Useful if one middleware must run before another middleware.
def insert_before(oldklass, newklass, *args)
i = entries.index { |entry| entry.klass == newklass }
new_entry = i.nil? ? Entry.new(@config, newklass, *args) : entries.delete_at(i)
i = entries.index { |entry| entry.klass == oldklass } || 0
entries.insert(i, new_entry)
end
# Inserts +newklass+ after +oldklass+ in the chain.
# Useful if one middleware must run after another middleware.
def insert_after(oldklass, newklass, *args)
i = entries.index { |entry| entry.klass == newklass }
new_entry = i.nil? ? Entry.new(@config, newklass, *args) : entries.delete_at(i)
i = entries.index { |entry| entry.klass == oldklass } || entries.count - 1
entries.insert(i + 1, new_entry)
end
# @return [Boolean] if the given class is already in the chain
def exists?(klass)
any? { |entry| entry.klass == klass }
end
alias_method :include?, :exists?
# @return [Boolean] if the chain contains no middleware
def empty?
@entries.nil? || @entries.empty?
end
def retrieve
map(&:make_new)
end
def clear
entries.clear
end
# Used by Sidekiq to execute the middleware at runtime
# @api private
def invoke(*args, &block)
return yield if empty?
chain = retrieve
traverse(chain, 0, args, &block)
end
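# For a chain containing middlewares A and B, invoke effectively nests
# the freshly built instances around the job block:
#
#   a.call(*args) { b.call(*args) { block.call } }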
private
def traverse(chain, index, args, &block)
if index >= chain.size
yield
else
chain[index].call(*args) do
traverse(chain, index + 1, args, &block)
end
end
end
end
# Represents each link in the middleware chain
# @api private
class Entry
attr_reader :klass
def initialize(config, klass, *args)
@config = config
@klass = klass
@args = args
end
def make_new
x = @klass.new(*@args)
x.config = @config if @config && x.respond_to?(:config=)
x
end
end
end
end

View File

@ -0,0 +1,95 @@
require "active_support/current_attributes"
module Sidekiq
##
# Automatically save and load any current attributes in the execution context
# so context attributes "flow" from Rails actions into any associated jobs.
# This can be useful for multi-tenancy, i18n locale, timezone, or any implicit
# per-request attribute. See +ActiveSupport::CurrentAttributes+.
#
# For multiple current attributes, pass an array of current attributes.
#
# @example
#
# # in your initializer
# require "sidekiq/middleware/current_attributes"
# Sidekiq::CurrentAttributes.persist("Myapp::Current")
# # or multiple current attributes
# Sidekiq::CurrentAttributes.persist(["Myapp::Current", "Myapp::OtherCurrent"])
#
module CurrentAttributes
class Save
include Sidekiq::ClientMiddleware
def initialize(cattrs)
@cattrs = cattrs
end
def call(_, job, _, _)
@cattrs.each do |(key, strklass)|
if !job.has_key?(key)
attrs = strklass.constantize.attributes
# Retries can push the job N times; we don't
# want retries to reset cattr. #5692, #5090
job[key] = attrs if attrs.any?
end
end
yield
end
end
class Load
include Sidekiq::ServerMiddleware
def initialize(cattrs)
@cattrs = cattrs
end
def call(_, job, _, &block)
cattrs_to_reset = []
@cattrs.each do |(key, strklass)|
if job.has_key?(key)
constklass = strklass.constantize
cattrs_to_reset << constklass
job[key].each do |(attribute, value)|
constklass.public_send("#{attribute}=", value)
end
end
end
yield
ensure
cattrs_to_reset.each(&:reset)
end
end
class << self
def persist(klass_or_array, config = Sidekiq.default_configuration)
cattrs = build_cattrs_hash(klass_or_array)
config.client_middleware.add Save, cattrs
config.server_middleware.add Load, cattrs
end
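# With the two-class example above, a pushed job payload ends up carrying
# something like (attribute values illustrative):
#
#   "cattr"   => { "tenant_id" => 42 }  # Myapp::Current.attributes
#   "cattr_1" => { "locale" => "fr" }   # Myapp::OtherCurrent.attributes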
private
def build_cattrs_hash(klass_or_array)
if klass_or_array.is_a?(Array)
{}.tap do |hash|
klass_or_array.each_with_index do |klass, index|
hash[key_at(index)] = klass.to_s
end
end
else
{key_at(0) => klass_or_array.to_s}
end
end
def key_at(index)
(index == 0) ? "cattr" : "cattr_#{index}"
end
end
end
end

View File

@ -0,0 +1,42 @@
# frozen_string_literal: true
#
# Simple middleware to save the current locale and restore it when the job executes.
# Use it by requiring it in your initializer:
#
# require 'sidekiq/middleware/i18n'
#
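# For example, assuming a job class SomeJob:
#
#   I18n.locale = :fr
#   SomeJob.perform_async(1) # the pushed job carries the :fr locale and the
#                            # server executes it inside I18n.with_locale(:fr)
#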
module Sidekiq::Middleware::I18n
# Get the current locale and store it in the message
# to be sent to Sidekiq.
class Client
include Sidekiq::ClientMiddleware
def call(_jobclass, job, _queue, _redis)
job["locale"] ||= I18n.locale
yield
end
end
# Pull the locale out of the job payload and set the current thread to use it.
class Server
include Sidekiq::ServerMiddleware
def call(_jobclass, job, _queue, &block)
I18n.with_locale(job.fetch("locale", I18n.default_locale), &block)
end
end
end
Sidekiq.configure_client do |config|
config.client_middleware do |chain|
chain.add Sidekiq::Middleware::I18n::Client
end
end
Sidekiq.configure_server do |config|
config.client_middleware do |chain|
chain.add Sidekiq::Middleware::I18n::Client
end
config.server_middleware do |chain|
chain.add Sidekiq::Middleware::I18n::Server
end
end

View File

@ -0,0 +1,21 @@
module Sidekiq
# Server-side middleware must include this module in order
# to get access to server resources during `call`.
module ServerMiddleware
attr_accessor :config
def redis_pool
config.redis_pool
end
def logger
config.logger
end
def redis(&block)
config.redis(&block)
end
end
# no difference for now
ClientMiddleware = ServerMiddleware
end

View File

@ -0,0 +1,146 @@
#!/usr/bin/env ruby
require "fileutils"
require "sidekiq/api"
class Sidekiq::Monitor
class Status
VALID_SECTIONS = %w[all version overview processes queues]
COL_PAD = 2
def display(section = nil)
section ||= "all"
unless VALID_SECTIONS.include? section
puts "I don't know how to check the status of '#{section}'!"
puts "Try one of these: #{VALID_SECTIONS.join(", ")}"
return
end
send(section)
end
def all
version
puts
overview
puts
processes
puts
queues
end
def version
puts "Sidekiq #{Sidekiq::VERSION}"
puts Time.now.utc
end
def overview
puts "---- Overview ----"
puts " Processed: #{delimit stats.processed}"
puts " Failed: #{delimit stats.failed}"
puts " Busy: #{delimit stats.workers_size}"
puts " Enqueued: #{delimit stats.enqueued}"
puts " Retries: #{delimit stats.retry_size}"
puts " Scheduled: #{delimit stats.scheduled_size}"
puts " Dead: #{delimit stats.dead_size}"
end
def processes
puts "---- Processes (#{process_set.size}) ----"
process_set.each_with_index do |process, index|
# Keep compatibility with legacy versions since we don't want to break sidekiqmon during rolling upgrades or downgrades.
#
# Before:
# ["default", "critical"]
#
# After:
# {"default" => 1, "critical" => 10}
queues =
if process["weights"]
process["weights"].sort_by { |queue| queue[0] }.map { |capsule| capsule.map { |name, weight| (weight > 0) ? "#{name}: #{weight}" : name }.join(", ") }
else
process["queues"].sort
end
puts "#{process["identity"]} #{tags_for(process)}"
puts " Started: #{Time.at(process["started_at"])} (#{time_ago(process["started_at"])})"
puts " Threads: #{process["concurrency"]} (#{process["busy"]} busy)"
puts " Queues: #{split_multiline(queues, pad: 11)}"
puts " Version: #{process["version"] || "Unknown"}" if process["version"] != Sidekiq::VERSION
puts "" unless (index + 1) == process_set.size
end
end
def queues
puts "---- Queues (#{queue_data.size}) ----"
columns = {
name: [:ljust, (["name"] + queue_data.map(&:name)).map(&:length).max + COL_PAD],
size: [:rjust, (["size"] + queue_data.map(&:size)).map(&:length).max + COL_PAD],
latency: [:rjust, (["latency"] + queue_data.map(&:latency)).map(&:length).max + COL_PAD]
}
columns.each { |col, (dir, width)| print col.to_s.upcase.public_send(dir, width) }
puts
queue_data.each do |q|
columns.each do |col, (dir, width)|
print q.send(col).public_send(dir, width)
end
puts
end
end
private
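# Formats a count with thousands separators, e.g.:
#
#   delimit(1234567) # => "1,234,567"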
def delimit(number)
number.to_s.reverse.scan(/.{1,3}/).join(",").reverse
end
def split_multiline(values, opts = {})
return "none" unless values
pad = opts[:pad] || 0
max_length = opts[:max_length] || (80 - pad)
out = []
line = ""
values.each do |value|
if (line.length + value.length) > max_length
out << line
line = " " * pad
end
line << value + ", "
end
out << line[0..-3]
out.join("\n")
end
def tags_for(process)
tags = [
process["tag"],
process["labels"],
((process["quiet"] == "true") ? "quiet" : nil)
].flatten.compact
tags.any? ? "[#{tags.join("] [")}]" : nil
end
def time_ago(timestamp)
seconds = Time.now - Time.at(timestamp)
return "just now" if seconds < 60
return "a minute ago" if seconds < 120
return "#{seconds.floor / 60} minutes ago" if seconds < 3600
return "an hour ago" if seconds < 7200
"#{seconds.floor / 60 / 60} hours ago"
end
QUEUE_STRUCT = Struct.new(:name, :size, :latency)
def queue_data
@queue_data ||= Sidekiq::Queue.all.map { |q|
QUEUE_STRUCT.new(q.name, q.size.to_s, sprintf("%#.2f", q.latency))
}
end
def process_set
@process_set ||= Sidekiq::ProcessSet.new
end
def stats
@stats ||= Sidekiq::Stats.new
end
end
end

View File

@ -0,0 +1,55 @@
# frozen_string_literal: true
module Sidekiq
module Paginator
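# Fetches one page of a Redis sorted set or list and returns
# [current_page, total_size, items]. For a zset the items are
# [payload, score] pairs. A rough usage sketch against the retry set
# (sizes illustrative):
#
#   page("retry", 2, 25) # => [2, 134, [[job_json, score], ...]]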
def page(key, pageidx = 1, page_size = 25, opts = nil)
current_page = (pageidx.to_i < 1) ? 1 : pageidx.to_i
pageidx = current_page - 1
total_size = 0
items = []
starting = pageidx * page_size
ending = starting + page_size - 1
Sidekiq.redis do |conn|
type = conn.type(key)
rev = opts && opts[:reverse]
case type
when "zset"
total_size, items = conn.multi { |transaction|
transaction.zcard(key)
if rev
transaction.zrevrange(key, starting, ending, withscores: true)
else
transaction.zrange(key, starting, ending, withscores: true)
end
}
[current_page, total_size, items]
when "list"
total_size, items = conn.multi { |transaction|
transaction.llen(key)
if rev
transaction.lrange(key, -ending - 1, -starting - 1)
else
transaction.lrange(key, starting, ending)
end
}
items.reverse! if rev
[current_page, total_size, items]
when "none"
[1, 0, []]
else
raise "can't page a #{type}"
end
end
end
def page_items(items, pageidx = 1, page_size = 25)
current_page = (pageidx.to_i < 1) ? 1 : pageidx.to_i
pageidx = current_page - 1
starting = pageidx * page_size
items = items.to_a
[current_page, items.size, items[starting, page_size]]
end
end
end

Some files were not shown because too many files have changed in this diff.