Add latest changes from gitlab-org/gitlab@master

parent 41c37d81e7
commit 548e9e6911
@@ -53,5 +53,5 @@ include:
       gem_path_prefix: "vendor/gems/"
   - local: .gitlab/ci/templates/gem.gitlab-ci.yml
     inputs:
-      gem_name: "sidekiq-7.2.4"
+      gem_name: "sidekiq"
       gem_path_prefix: "vendor/gems/"
@@ -1 +1 @@
-3d13cb66eadf8a4659adf0e19d362a78e0edc764
+89509d2a8ff4213622ca9c6c19de75f2307a594c
Gemfile
@@ -279,7 +279,7 @@ end
 gem 'state_machines-activerecord', '~> 0.8.0' # rubocop:todo Gemfile/MissingFeatureCategory
 
 # Background jobs
-gem 'sidekiq', path: 'vendor/gems/sidekiq-7.2.4', require: 'sidekiq', feature_category: :scalability
+gem 'sidekiq', path: 'vendor/gems/sidekiq', require: 'sidekiq', feature_category: :scalability
 gem 'sidekiq-cron', '~> 1.12.0', feature_category: :scalability
 gem 'gitlab-sidekiq-fetcher',
   path: 'vendor/gems/sidekiq-reliable-fetch',
@@ -604,7 +604,7 @@
 {"name":"rubocop-rails","version":"2.24.1","platform":"ruby","checksum":"03edf766954947468f3686cedb69142fae4f10e2007287f89cc0ea7072eeac19"},
 {"name":"rubocop-rspec","version":"2.27.1","platform":"ruby","checksum":"2f27ce04700be75db65afe83d7993a36e0fafd07ec062222f4b3cc10137a7a9e"},
 {"name":"ruby-fogbugz","version":"0.3.0","platform":"ruby","checksum":"5e04cde474648f498a71cf1e1a7ab42c66b953862fbe224f793ec0a7a1d5f657"},
-{"name":"ruby-lsp","version":"0.17.14","platform":"ruby","checksum":"02ee435f03aaf3f858dc1c1f8a001fbb85887a3291534c054e556340712fb1c9"},
+{"name":"ruby-lsp","version":"0.17.15","platform":"ruby","checksum":"c7c6f1dc793713bfefe2ed818ac3a5e4aae4a44c03978f7cc526b8c5002b7e57"},
 {"name":"ruby-lsp-rails","version":"0.3.13","platform":"ruby","checksum":"689bf2d0961cb73192d3c65c589000b9ed99275cf4abb5fe2c5243e0e42fe7fa"},
 {"name":"ruby-lsp-rspec","version":"0.1.12","platform":"ruby","checksum":"34fe775e27dc4c2f31df901f3d44ee885ed0806b05ba9be0ea564682dd4811e5"},
 {"name":"ruby-magic","version":"0.6.0","platform":"ruby","checksum":"7b2138877b7d23aff812c95564eba6473b74b815ef85beb0eb792e729a2b6101"},

Gemfile.lock
@@ -193,15 +193,6 @@ PATH
     nokogiri (>= 1.4.4)
     omniauth (~> 2.0)
 
-PATH
-  remote: vendor/gems/sidekiq-7.2.4
-  specs:
-    sidekiq (7.2.4)
-      concurrent-ruby (< 2)
-      connection_pool (>= 2.3.0)
-      rack (>= 2.2.4)
-      redis-client (>= 0.19.0)
-
 PATH
   remote: vendor/gems/sidekiq-reliable-fetch
   specs:
@@ -209,6 +200,16 @@ PATH
     json (>= 2.5)
     sidekiq (~> 7.0)
 
+PATH
+  remote: vendor/gems/sidekiq
+  specs:
+    sidekiq (7.3.1)
+      concurrent-ruby (< 2)
+      connection_pool (>= 2.3.0)
+      logger
+      rack (>= 2.2.4)
+      redis-client (>= 0.22.2)
+
 GEM
   remote: https://rubygems.org/
   specs:
@@ -1644,7 +1645,7 @@ GEM
     ruby-fogbugz (0.3.0)
       crack (~> 0.4)
       multipart-post (~> 2.0)
-    ruby-lsp (0.17.14)
+    ruby-lsp (0.17.15)
       language_server-protocol (~> 3.17.0)
       prism (>= 0.29.0, < 0.31)
       rbs (>= 3, < 4)
@@ -186,6 +186,7 @@
       "VulnerabilityDetailBase",
       "VulnerabilityDetailBoolean",
       "VulnerabilityDetailCode",
+      "VulnerabilityDetailCodeFlows",
       "VulnerabilityDetailCommit",
       "VulnerabilityDetailDiff",
       "VulnerabilityDetailFileLocation",
@@ -560,10 +560,13 @@ export default {
       :is-hidden="isHidden"
       :is-imported="isImported"
       :is-locked="isLocked"
-      :issuable-status="issuableStatus"
+      :issuable-state="issuableStatus"
       :issuable-type="issuableType"
       :show="isStickyHeaderShowing"
       :title="state.titleText"
+      :duplicated-to-issue-url="duplicatedToIssueUrl"
+      :moved-to-issue-url="movedToIssueUrl"
+      :promoted-to-epic-url="promotedToEpicUrl"
       @hide="hideStickyHeader"
       @show="showStickyHeader"
     />
@@ -1,10 +1,11 @@
 <script>
-import { GlBadge, GlIntersectionObserver, GlLink } from '@gitlab/ui';
+import { GlBadge, GlIntersectionObserver, GlLink, GlSprintf } from '@gitlab/ui';
 import HiddenBadge from '~/issuable/components/hidden_badge.vue';
 import LockedBadge from '~/issuable/components/locked_badge.vue';
-import { issuableStatusText, STATUS_CLOSED, WORKSPACE_PROJECT } from '~/issues/constants';
+import { STATUS_OPEN, STATUS_REOPENED, STATUS_CLOSED, WORKSPACE_PROJECT } from '~/issues/constants';
 import ConfidentialityBadge from '~/vue_shared/components/confidentiality_badge.vue';
 import ImportedBadge from '~/vue_shared/components/imported_badge.vue';
+import { __, s__ } from '~/locale';
 
 export default {
   WORKSPACE_PROJECT,
@@ -13,6 +14,7 @@ export default {
     GlBadge,
     GlIntersectionObserver,
     GlLink,
+    GlSprintf,
     HiddenBadge,
     ImportedBadge,
     LockedBadge,
@@ -38,7 +40,7 @@ export default {
       required: false,
       default: false,
     },
-    issuableStatus: {
+    issuableState: {
       type: String,
       required: true,
     },
@@ -55,16 +57,52 @@ export default {
       type: String,
       required: true,
     },
+    duplicatedToIssueUrl: {
+      type: String,
+      required: true,
+    },
+    movedToIssueUrl: {
+      type: String,
+      required: true,
+    },
+    promotedToEpicUrl: {
+      type: String,
+      required: true,
+    },
   },
   computed: {
+    isOpen() {
+      return this.issuableState === STATUS_OPEN || this.issuableState === STATUS_REOPENED;
+    },
     isClosed() {
-      return this.issuableStatus === STATUS_CLOSED;
+      return this.issuableState === STATUS_CLOSED;
     },
     statusIcon() {
      return this.isClosed ? 'issue-close' : 'issue-open-m';
     },
     statusText() {
-      return issuableStatusText[this.issuableStatus];
+      if (this.isOpen) {
+        return __('Open');
+      }
+      if (this.closedStatusLink) {
+        return s__('IssuableStatus|Closed (%{link})');
+      }
+      return s__('IssuableStatus|Closed');
    },
+    closedStatusLink() {
+      return this.duplicatedToIssueUrl || this.movedToIssueUrl || this.promotedToEpicUrl;
+    },
+    closedStatusText() {
+      if (this.duplicatedToIssueUrl) {
+        return s__('IssuableStatus|duplicated');
+      }
+      if (this.movedToIssueUrl) {
+        return s__('IssuableStatus|moved');
+      }
+      if (this.promotedToEpicUrl) {
+        return s__('IssuableStatus|promoted');
+      }
+      return '';
+    },
     statusVariant() {
       return this.isClosed ? 'info' : 'success';
@@ -83,7 +121,17 @@ export default {
     >
       <div class="issue-sticky-header-text gl-mx-auto gl-flex gl-items-center gl-gap-2">
         <gl-badge :variant="statusVariant" :icon="statusIcon" class="gl-shrink-0">
-          {{ statusText }}
+          <gl-sprintf v-if="closedStatusLink" :message="statusText">
+            <template #link>
+              <gl-link
+                data-testid="sticky-header-closed-status-link"
+                class="!gl-text-inherit gl-underline"
+                :href="closedStatusLink"
+                >{{ closedStatusText }}</gl-link
+              >
+            </template>
+          </gl-sprintf>
+          <template v-else>{{ statusText }}</template>
         </gl-badge>
         <confidentiality-badge
           v-if="isConfidential"
@@ -109,7 +109,7 @@ export default {
       />
     </div>
     <div
-      class="committer gl-basis-full"
+      class="committer gl-basis-full gl-truncate gl-text-sm"
       :class="{ 'gl-inline-flex': truncateAuthorName }"
       data-testid="committer"
     >
@@ -16,7 +16,7 @@ module Ci
    ignore_columns :partition, remove_never: true
 
    partitioned_by :partition, strategy: :sliding_list,
-      next_partition_if: ->(active_partition) { any_older_exist?(active_partition, PARTITION_DURATION) },
+      next_partition_if: ->(active_partition) { any_older_partitions_exist?(active_partition, PARTITION_DURATION) },
      detach_partition_if: ->(partition) { detach_partition?(partition) }
 
    validates :pipeline_id, presence: true
@@ -48,18 +48,18 @@ module Ci
      return true unless pending.for_partition(partition.value).exists?
 
      # or if there are pending events, they are outside the cleanup threshold
-      return true unless any_newer_exist?(partition, PARTITION_CLEANUP_THRESHOLD)
+      return true unless any_newer_partitions_exist?(partition, PARTITION_CLEANUP_THRESHOLD)
 
      false
    end
 
-    def self.any_older_exist?(partition, duration)
+    def self.any_older_partitions_exist?(partition, duration)
      for_partition(partition.value)
        .where(arel_table[:pipeline_finished_at].lteq(duration.ago))
        .exists?
    end
 
-    def self.any_newer_exist?(partition, duration)
+    def self.any_newer_partitions_exist?(partition, duration)
      for_partition(partition.value)
        .where(arel_table[:pipeline_finished_at].gt(duration.ago))
        .exists?
@@ -79,19 +79,6 @@ class UserPreference < ApplicationRecord
    self[notes_filter_field_for(resource)]
  end
 
-  def tab_width
-    read_attribute(:tab_width) || self.class.column_defaults['tab_width']
-  end
-
-  def tab_width=(value)
-    if value.nil?
-      default = self.class.column_defaults['tab_width']
-      super(default)
-    else
-      super(value)
-    end
-  end
-
  class << self
    def time_display_formats
      {
@@ -102,38 +89,6 @@ class UserPreference < ApplicationRecord
    end
  end
 
-  def time_display_relative
-    value = read_attribute(:time_display_relative)
-    return value unless value.nil?
-
-    self.class.column_defaults['time_display_relative']
-  end
-
-  def time_display_relative=(value)
-    if value.nil?
-      default = self.class.column_defaults['time_display_relative']
-      super(default)
-    else
-      super(value)
-    end
-  end
-
-  def render_whitespace_in_code
-    value = read_attribute(:render_whitespace_in_code)
-    return value unless value.nil?
-
-    self.class.column_defaults['render_whitespace_in_code']
-  end
-
-  def render_whitespace_in_code=(value)
-    if value.nil?
-      default = self.class.column_defaults['render_whitespace_in_code']
-      super(default)
-    else
-      super(value)
-    end
-  end
-
  def early_access_event_tracking?
    early_access_program_participant? && early_access_program_tracking?
  end
@@ -14,6 +14,16 @@ module VirtualRegistries
        validates :cache_validity_hours, numericality: { greater_than_or_equal_to: 0, only_integer: true }
 
        scope :for_group, ->(group) { where(group: group) }
+
+        before_destroy :destroy_upstream
+
+        private
+
+        # TODO: revisit this when we support multiple upstreams.
+        # https://gitlab.com/gitlab-org/gitlab/-/issues/480461
+        def destroy_upstream
+          upstream&.destroy!
+        end
      end
    end
  end
@@ -15,7 +15,7 @@
        = commit_data.commit_link
        = commit_data.project_blame_link
 
-      .light
+      .gl-text-sm.gl-text-subtle
        = commit_data.commit_author_link
        = _('committed')
        #{commit_data.time_ago_tooltip}
@@ -208,6 +208,10 @@ dast_scanner_profiles_builds:
  - table: p_ci_builds
    column: ci_build_id
    on_delete: async_delete
+dast_site_profiles:
+  - table: projects
+    column: project_id
+    on_delete: async_delete
 dast_site_profiles_builds:
  - table: ci_builds
    column: ci_build_id
@@ -338,6 +342,10 @@ p_ci_builds_metadata:
  - table: projects
    column: project_id
    on_delete: async_delete
+p_ci_finished_build_ch_sync_events:
+  - table: projects
+    column: project_id
+    on_delete: async_delete
 p_ci_finished_pipeline_ch_sync_events:
  - table: namespaces
    column: project_namespace_id
@@ -38,7 +38,7 @@ strict_args_mode = Gitlab.dev_or_test_env? ? :warn : false
 Sidekiq.strict_args!(strict_args_mode)
 
 # Perform version check before configuring server with the custome scheduled job enqueue class
-unless Gem::Version.new(Sidekiq::VERSION) == Gem::Version.new('7.2.4')
+unless Gem::Version.new(Sidekiq::VERSION) == Gem::Version.new('7.3.1')
  raise 'New version of Sidekiq detected, please either update the version for this check ' \
    'and update Gitlab::SidekiqSharding::ScheduledEnq is compatible.'
 end
@@ -7,7 +7,7 @@ feature_categories:
 description: A site profile describes the attributes of a web site to scan on demand with DAST
 introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/36659
 milestone: '13.2'
-gitlab_schema: gitlab_main_cell
+gitlab_schema: gitlab_sec
 allow_cross_foreign_keys:
 - gitlab_main_clusterwide
 sharding_key:
@@ -8,4 +8,5 @@ description: Holds references to finished CI builds ready to be synced to ClickH
 introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/131902
 milestone: '16.5'
 gitlab_schema: gitlab_ci
-sharding_key_issue_url: https://gitlab.com/gitlab-org/gitlab/-/issues/459997
+sharding_key:
+  project_id: projects
@@ -10,4 +10,12 @@ milestone: '13.4'
 gitlab_schema: gitlab_sec
 allow_cross_foreign_keys:
 - gitlab_main_clusterwide
-sharding_key_issue_url: https://gitlab.com/gitlab-org/gitlab/-/issues/454949
+desired_sharding_key:
+  project_id:
+    references: projects
+    backfill_via:
+      parent:
+        foreign_key: scanner_id
+        table: vulnerability_scanners
+        sharding_key: project_id
+        belongs_to: scanner
@@ -0,0 +1,15 @@
+# frozen_string_literal: true
+
+class AddProjectIdColumnToCiFinishedBuildChSyncEvent < Gitlab::Database::Migration[2.2]
+  milestone '17.4'
+
+  def change
+    # Normally, we'd make this a nullable column, and over the course of multiple milestones, make it a
+    # non-nullable column with a constraint.
+    # We can save time knowing that the records in this table have a maximum lifetime of 30 days
+    # (less in .com since the daily partition gets dropped after being processed by the worker).
+    # We'll start by adding -1 to all project_ids, knowing that this value is not used yet.
+    # Once all the rows have valid values, we can drop all the rows having a -1 project_id.
+    add_column :p_ci_finished_build_ch_sync_events, :project_id, :bigint, default: -1, null: false
+  end
+end
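The comment in this migration sketches a two-step plan: backfill real project IDs, then purge the -1 placeholders. A hypothetical follow-up migration could look like the sketch below; the class name, milestone, and batch size are assumptions, not part of this commit.

# Hypothetical follow-up, not part of this commit: once project_id is
# backfilled, delete any rows still carrying the -1 placeholder, in batches.
class DropPlaceholderCiFinishedBuildChSyncEvents < Gitlab::Database::Migration[2.2]
  milestone '17.5' # assumed

  disable_ddl_transaction!

  def up
    loop do
      # Delete in small batches to keep each statement short.
      result = execute(<<~SQL)
        DELETE FROM p_ci_finished_build_ch_sync_events
        WHERE build_id IN (
          SELECT build_id FROM p_ci_finished_build_ch_sync_events
          WHERE project_id = -1 LIMIT 1000
        )
      SQL
      break if result.cmd_tuples == 0
    end
  end

  def down
    # no-op: placeholder rows cannot be restored
  end
end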
@@ -0,0 +1,21 @@
+# frozen_string_literal: true
+
+class RemoveProjectsDastSiteProfilesProjectIdFk < Gitlab::Database::Migration[2.2]
+  milestone '17.3'
+  disable_ddl_transaction!
+
+  FOREIGN_KEY_NAME = "fk_rails_83e309d69e"
+
+  def up
+    with_lock_retries do
+      remove_foreign_key_if_exists(:dast_site_profiles, :projects,
+        name: FOREIGN_KEY_NAME, reverse_lock_order: true)
+    end
+  end
+
+  def down
+    add_concurrent_foreign_key(:dast_site_profiles, :projects,
+      name: FOREIGN_KEY_NAME, column: :project_id,
+      target_column: :id, on_delete: :cascade)
+  end
+end
@@ -0,0 +1,21 @@
+# frozen_string_literal: true
+
+class AddIndexForCiFinishedBuildChSyncEventOnProjectId < Gitlab::Database::Migration[2.2]
+  include Gitlab::Database::PartitioningMigrationHelpers
+
+  disable_ddl_transaction!
+
+  milestone '17.4'
+
+  TABLE_NAME = :p_ci_finished_build_ch_sync_events
+  INDEX_NAME = :index_p_ci_finished_build_ch_sync_events_on_project_id
+  COLUMN = :project_id
+
+  def up
+    add_concurrent_partitioned_index(TABLE_NAME, COLUMN, name: INDEX_NAME)
+  end
+
+  def down
+    remove_concurrent_partitioned_index_by_name(TABLE_NAME, INDEX_NAME)
+  end
+end
@@ -0,0 +1 @@
+3ad9103f070e812b9ec7246e78f6fc440189565afbf7adb8f67f84e1fc20d388

@@ -0,0 +1 @@
+197316872638bd735359f1adb3f8ef01d42162a48177ff22990cb7ebcd115a08

@@ -0,0 +1 @@
+df86994deaac3114e5ab544bfd7f7725ce9f552af2f3a2d3729e2b888d82def6
@@ -2743,7 +2743,8 @@ CREATE TABLE p_ci_finished_build_ch_sync_events (
    build_id bigint NOT NULL,
    partition bigint DEFAULT 1 NOT NULL,
    build_finished_at timestamp without time zone NOT NULL,
-    processed boolean DEFAULT false NOT NULL
+    processed boolean DEFAULT false NOT NULL,
+    project_id bigint DEFAULT '-1'::integer NOT NULL
 )
 PARTITION BY LIST (partition);
 
@@ -29280,6 +29281,8 @@ CREATE INDEX index_p_ci_builds_execution_configs_on_project_id ON ONLY p_ci_buil
 
 CREATE INDEX index_p_ci_finished_build_ch_sync_events_finished_at ON ONLY p_ci_finished_build_ch_sync_events USING btree (partition, build_finished_at);
 
+CREATE INDEX index_p_ci_finished_build_ch_sync_events_on_project_id ON ONLY p_ci_finished_build_ch_sync_events USING btree (project_id);
+
 CREATE UNIQUE INDEX index_p_ci_job_annotations_on_partition_id_job_id_name ON ONLY p_ci_job_annotations USING btree (partition_id, job_id, name);
 
 CREATE INDEX index_p_ci_runner_machine_builds_on_runner_machine_id ON ONLY p_ci_runner_machine_builds USING btree (runner_machine_id);
@@ -35517,9 +35520,6 @@ ALTER TABLE ONLY cluster_enabled_grants
 ALTER TABLE ONLY virtual_registries_packages_maven_registry_upstreams
    ADD CONSTRAINT fk_rails_838d054752 FOREIGN KEY (group_id) REFERENCES namespaces(id) ON DELETE CASCADE;
 
-ALTER TABLE ONLY dast_site_profiles
-    ADD CONSTRAINT fk_rails_83e309d69e FOREIGN KEY (project_id) REFERENCES projects(id) ON DELETE CASCADE;
-
 ALTER TABLE ONLY dependency_list_export_parts
    ADD CONSTRAINT fk_rails_83f26c0e6f FOREIGN KEY (dependency_list_export_id) REFERENCES dependency_list_exports(id) ON DELETE CASCADE;
 
@@ -33957,6 +33957,33 @@ Represents the vulnerability details code field.
 | <a id="vulnerabilitydetailcodename"></a>`name` | [`String`](#string) | Name of the field. |
 | <a id="vulnerabilitydetailcodevalue"></a>`value` | [`String!`](#string) | Source code. |
 
+### `VulnerabilityDetailCodeFlowNode`
+
+Represents the vulnerability details code flow node item.
+
+#### Fields
+
+| Name | Type | Description |
+| ---- | ---- | ----------- |
+| <a id="vulnerabilitydetailcodeflownodedescription"></a>`description` | [`String`](#string) | Description of the field. |
+| <a id="vulnerabilitydetailcodeflownodefieldname"></a>`fieldName` | [`String`](#string) | Name of the field. |
+| <a id="vulnerabilitydetailcodeflownodefilelocation"></a>`fileLocation` | [`VulnerabilityDetailFileLocation!`](#vulnerabilitydetailfilelocation) | Location of the file. |
+| <a id="vulnerabilitydetailcodeflownodename"></a>`name` | [`String`](#string) | Name of the field. |
+| <a id="vulnerabilitydetailcodeflownodenodetype"></a>`nodeType` | [`CodeFlowNodeType!`](#codeflownodetype) | Node Type. |
+
+### `VulnerabilityDetailCodeFlows`
+
+Represents the vulnerability details code flows item.
+
+#### Fields
+
+| Name | Type | Description |
+| ---- | ---- | ----------- |
+| <a id="vulnerabilitydetailcodeflowsdescription"></a>`description` | [`String`](#string) | Description of the field. |
+| <a id="vulnerabilitydetailcodeflowsfieldname"></a>`fieldName` | [`String`](#string) | Name of the field. |
+| <a id="vulnerabilitydetailcodeflowsitems"></a>`items` | [`[[VulnerabilityDetailCodeFlowNode!]!]!`](#vulnerabilitydetailcodeflownode) | List of flows represented by list of CodeFlowNodeItem. |
+| <a id="vulnerabilitydetailcodeflowsname"></a>`name` | [`String`](#string) | Name of the field. |
+
 ### `VulnerabilityDetailCommit`
 
 Represents the vulnerability details commit field.
@@ -35690,6 +35717,16 @@ Values for sorting variables.
 | <a id="civariabletypeenv_var"></a>`ENV_VAR` | Env var type. |
 | <a id="civariabletypefile"></a>`FILE` | File type. |
 
+### `CodeFlowNodeType`
+
+The code flow node type.
+
+| Value | Description |
+| ----- | ----------- |
+| <a id="codeflownodetypepropagation"></a>`PROPAGATION` | Propagation node. |
+| <a id="codeflownodetypesink"></a>`SINK` | Sink node. |
+| <a id="codeflownodetypesource"></a>`SOURCE` | Source node. |
+
 ### `CodeQualityDegradationSeverity`
 
 | Value | Description |
@@ -39648,6 +39685,7 @@ One of:
 - [`VulnerabilityDetailBase`](#vulnerabilitydetailbase)
 - [`VulnerabilityDetailBoolean`](#vulnerabilitydetailboolean)
 - [`VulnerabilityDetailCode`](#vulnerabilitydetailcode)
+- [`VulnerabilityDetailCodeFlows`](#vulnerabilitydetailcodeflows)
 - [`VulnerabilityDetailCommit`](#vulnerabilitydetailcommit)
 - [`VulnerabilityDetailDiff`](#vulnerabilitydetaildiff)
 - [`VulnerabilityDetailFileLocation`](#vulnerabilitydetailfilelocation)
@@ -7,7 +7,7 @@
 require 'sidekiq/version'
 require 'sidekiq/cron/version'
 
-if Gem::Version.new(Sidekiq::VERSION) != Gem::Version.new('7.2.4')
+if Gem::Version.new(Sidekiq::VERSION) != Gem::Version.new('7.3.1')
  raise 'New version of sidekiq detected, please remove or update this patch'
 end
 
@@ -1,6 +1,6 @@
 # frozen_string_literal: true
 
-if Gem::Version.new(Sidekiq::VERSION) != Gem::Version.new('7.2.4')
+if Gem::Version.new(Sidekiq::VERSION) != Gem::Version.new('7.3.1')
  raise 'New version of sidekiq detected, please remove or update this patch'
 end
 
@@ -1,4 +1,4 @@
-import { GlBadge, GlLink } from '@gitlab/ui';
+import { GlBadge, GlLink, GlSprintf } from '@gitlab/ui';
 import { shallowMountExtended } from 'helpers/vue_test_utils_helper';
 import HiddenBadge from '~/issuable/components/hidden_badge.vue';
 import LockedBadge from '~/issuable/components/locked_badge.vue';
@@ -23,21 +23,32 @@ describe('StickyHeader component', () => {
  const findImportedBadge = () => wrapper.findComponent(ImportedBadge);
  const findLockedBadge = () => wrapper.findComponent(LockedBadge);
  const findTitle = () => wrapper.findComponent(GlLink);
+  const findClosedStatusLink = () =>
+    wrapper.find('[data-testid="sticky-header-closed-status-link"]');
+  const findIssuableHeader = () => wrapper.findComponent(StickyHeader);
 
  const createComponent = (props = {}) => {
    wrapper = shallowMountExtended(StickyHeader, {
      propsData: {
-        issuableStatus: STATUS_OPEN,
+        issuableState: STATUS_OPEN,
        issuableType: TYPE_ISSUE,
+        movedToIssueUrl: '',
+        promotedToEpicUrl: '',
+        duplicatedToIssueUrl: '',
        show: true,
        title: 'A sticky issue',
        ...props,
      },
+      stubs: {
+        GlBadge,
+        GlSprintf,
+        GlLink,
+      },
    });
  };
 
  it.each`
-    issuableType     | issuableStatus   | statusIcon
+    issuableType     | issuableState    | statusIcon
    ${TYPE_INCIDENT} | ${STATUS_OPEN}   | ${'issue-open-m'}
    ${TYPE_INCIDENT} | ${STATUS_CLOSED} | ${'issue-close'}
    ${TYPE_ISSUE}    | ${STATUS_OPEN}   | ${'issue-open-m'}
@@ -45,23 +56,74 @@ describe('StickyHeader component', () => {
    ${TYPE_EPIC}     | ${STATUS_OPEN}   | ${'issue-open-m'}
    ${TYPE_EPIC}     | ${STATUS_CLOSED} | ${'issue-close'}
  `(
-    'shows with state icon "$statusIcon" for $issuableType when status is $issuableStatus',
-    ({ issuableType, issuableStatus, statusIcon }) => {
-      createComponent({ issuableType, issuableStatus });
+    'shows with state icon "$statusIcon" for $issuableType when status is $issuableState',
+    ({ issuableType, issuableState, statusIcon }) => {
+      createComponent({ issuableType, issuableState });
 
      expect(wrapper.findComponent(GlBadge).props('icon')).toBe(statusIcon);
    },
  );
 
  it.each`
-    title                                        | issuableStatus
+    title                                        | issuableState
    ${'shows with Open when status is opened'}   | ${STATUS_OPEN}
    ${'shows with Closed when status is closed'} | ${STATUS_CLOSED}
    ${'shows with Open when status is reopened'} | ${STATUS_REOPENED}
-  `('$title', ({ issuableStatus }) => {
-    createComponent({ issuableStatus });
+  `('$title', ({ issuableState }) => {
+    createComponent({ issuableState });
 
-    expect(wrapper.text()).toContain(issuableStatusText[issuableStatus]);
+    expect(wrapper.text()).toContain(issuableStatusText[issuableState]);
  });
 
+  describe('when status is closed', () => {
+    beforeEach(() => {
+      createComponent({ issuableState: STATUS_CLOSED });
+    });
+
+    describe('when issue is marked as duplicate', () => {
+      beforeEach(() => {
+        createComponent({
+          issuableState: STATUS_CLOSED,
+          duplicatedToIssueUrl: 'project/-/issue/5',
+        });
+      });
+
+      it('renders `Closed (duplicated)`', () => {
+        expect(findIssuableHeader().text()).toContain('Closed (duplicated)');
+      });
+
+      it('links to the duplicated issue', () => {
+        expect(findClosedStatusLink().attributes('href')).toBe('project/-/issue/5');
+      });
+    });
+
+    describe('when issue is marked as moved', () => {
+      beforeEach(() => {
+        createComponent({ issuableState: STATUS_CLOSED, movedToIssueUrl: 'project/-/issue/6' });
+      });
+
+      it('renders `Closed (moved)`', () => {
+        expect(findIssuableHeader().text()).toContain('Closed (moved)');
+      });
+
+      it('links to the moved issue', () => {
+        expect(findClosedStatusLink().attributes('href')).toBe('project/-/issue/6');
+      });
+    });
+
+    describe('when issue is marked as promoted', () => {
+      beforeEach(() => {
+        createComponent({ issuableState: STATUS_CLOSED, promotedToEpicUrl: 'group/-/epic/7' });
+      });
+
+      it('renders `Closed (promoted)`', () => {
+        expect(findIssuableHeader().text()).toContain('Closed (promoted)');
+      });
+
+      it('links to the promoted epic', () => {
+        expect(findClosedStatusLink().attributes('href')).toBe('group/-/epic/7');
+      });
+    });
+  });
+
  it.each`
@@ -53,6 +53,8 @@ describe('Repository last commit component', () => {
    expect(findCommitterWrapper().classes()).toEqual([
      'committer',
      'gl-basis-full',
+      'gl-truncate',
+      'gl-text-sm',
      'gl-inline-flex',
    ]);
    expect(findUserLink().classes()).toEqual([
@@ -233,7 +233,7 @@ RSpec.describe Gitlab::Database::LoadBalancing::SidekiqServerMiddleware, :clean_
      it 'raises an error and retries', :aggregate_failures do
        expect do
          Gitlab::SidekiqSharding::Validator.allow_unrouted_sidekiq_calls { process_job(job) }
-        end.to raise_error(Sidekiq::JobRetry::Skip)
+        end.to raise_error(Sidekiq::JobRetry::Handled)
 
        job_for_retry = Gitlab::SidekiqSharding::Validator.allow_unrouted_sidekiq_calls { Sidekiq::RetrySet.new.first }
        expect(job_for_retry['error_class']).to eq('Gitlab::Database::LoadBalancing::SidekiqServerMiddleware::JobReplicaNotUpToDate')
@@ -72,7 +72,8 @@ RSpec.describe 'cross-database foreign keys' do
      'dast_site_profiles.dast_site_id',
      'dast_site_profiles_builds.dast_site_profile_id',
      'dast_site_validations.dast_site_token_id',
-      'dast_sites.dast_site_validation_id'
+      'dast_sites.dast_site_validation_id',
+      'dast_site_profile_secret_variables.dast_site_profile_id'
    ]
  end
 
@@ -2,7 +2,7 @@
 
 require 'spec_helper'
 
-RSpec.describe Gitlab::SidekiqLogging::StructuredLogger do
+RSpec.describe Gitlab::SidekiqLogging::StructuredLogger, feature_category: :shared do
  before do
    # We disable a memory instrumentation feature
    # as this requires a special patched Ruby
@@ -492,7 +492,13 @@ RSpec.describe Gitlab::SidekiqLogging::StructuredLogger do
        'completed_at' => current_utc_time.to_i }
    end
 
-    subject { described_class.new(Sidekiq.logger) }
+    let(:config) do
+      config = Sidekiq::Config.new
+      config.logger = Sidekiq.logger
+      config
+    end
+
+    subject { described_class.new(config) }
 
    it 'update payload correctly' do
      travel_to(current_utc_time) do
@@ -198,69 +198,6 @@ RSpec.describe UserPreference, feature_category: :user_profile do
    end
  end
 
-  describe '#tab_width' do
-    it 'is set to 8 by default' do
-      # Intentionally not using factory here to test the constructor.
-      pref = described_class.new
-
-      expect(pref.tab_width).to eq(8)
-    end
-
-    it 'returns default value when assigning nil' do
-      pref = described_class.new(tab_width: nil)
-
-      expect(pref.tab_width).to eq(8)
-    end
-  end
-
-  describe '#tab_width=' do
-    it 'sets to default value when nil' do
-      pref = described_class.new(tab_width: nil)
-
-      expect(pref.read_attribute(:tab_width)).to eq(8)
-    end
-
-    it 'sets user values' do
-      pref = described_class.new(tab_width: 12)
-
-      expect(pref.read_attribute(:tab_width)).to eq(12)
-    end
-  end
-
-  describe '#time_display_relative' do
-    it 'is set to true by default' do
-      pref = described_class.new
-
-      expect(pref.time_display_relative).to eq(true)
-    end
-
-    it 'returns default value when assigning nil' do
-      pref = described_class.new(time_display_relative: nil)
-
-      expect(pref.time_display_relative).to eq(true)
-    end
-
-    it 'returns assigned value' do
-      pref = described_class.new(time_display_relative: false)
-
-      expect(pref.time_display_relative).to eq(false)
-    end
-  end
-
-  describe '#time_display_relative=' do
-    it 'sets to default value when nil' do
-      pref = described_class.new(time_display_relative: nil)
-
-      expect(pref.read_attribute(:time_display_relative)).to eq(true)
-    end
-
-    it 'sets user values' do
-      pref = described_class.new(time_display_relative: false)
-
-      expect(pref.read_attribute(:time_display_relative)).to eq(false)
-    end
-  end
-
  describe '#project_shortcut_buttons' do
    it 'is set to true by default' do
      pref = described_class.new
@@ -289,40 +226,6 @@ RSpec.describe UserPreference, feature_category: :user_profile do
    end
  end
 
-  describe '#render_whitespace_in_code' do
-    it 'is set to false by default' do
-      pref = described_class.new
-
-      expect(pref.render_whitespace_in_code).to eq(false)
-    end
-
-    it 'returns default value when assigning nil' do
-      pref = described_class.new(render_whitespace_in_code: nil)
-
-      expect(pref.render_whitespace_in_code).to eq(false)
-    end
-
-    it 'returns assigned value' do
-      pref = described_class.new(render_whitespace_in_code: true)
-
-      expect(pref.render_whitespace_in_code).to eq(true)
-    end
-  end
-
-  describe '#render_whitespace_in_code=' do
-    it 'sets to default value when nil' do
-      pref = described_class.new(render_whitespace_in_code: nil)
-
-      expect(pref.read_attribute(:render_whitespace_in_code)).to eq(false)
-    end
-
-    it 'sets user values' do
-      pref = described_class.new(render_whitespace_in_code: true)
-
-      expect(pref.read_attribute(:render_whitespace_in_code)).to eq(true)
-    end
-  end
-
  describe '#early_access_event_tracking?' do
    let(:participant) { true }
    let(:tracking) { true }
@@ -36,4 +36,21 @@ RSpec.describe VirtualRegistries::Packages::Maven::Registry, type: :model, featu
 
    it { is_expected.to eq([registry]) }
  end
+
+  describe 'callbacks' do
+    describe '.destroy_upstream' do
+      let(:upstream) { build(:virtual_registries_packages_maven_upstream) }
+
+      before do
+        allow(registry).to receive(:upstream).and_return(upstream)
+        allow(upstream).to receive(:destroy!)
+      end
+
+      it 'destroys the upstream' do
+        registry.destroy!
+
+        expect(upstream).to have_received(:destroy!)
+      end
+    end
+  end
 end
@@ -347,7 +347,7 @@ RSpec.describe 'Query.runners', feature_category: :fleet_visibility do
  end
 end
 
-RSpec.describe 'Group.runners' do
+RSpec.describe 'Group.runners', feature_category: :fleet_visibility do
  include GraphqlHelpers
 
  let_it_be(:group) { create(:group) }
@@ -19,7 +19,7 @@ RSpec.shared_context 'when handling retried jobs' do |url|
 
  begin
    Sidekiq::JobRetry.new(Sidekiq).local(klass, message, klass.queue) { raise 'boom' }
-  rescue Sidekiq::JobRetry::Skip
+  rescue Sidekiq::JobRetry::Handled
    # Sidekiq scheduled the retry
  end
 end
@@ -92,6 +92,12 @@ RSpec.shared_context 'structured_logger' do
    )
  end
 
+  let(:config) do
+    config = Sidekiq::Config.new
+    config.logger = logger
+    config
+  end
+
  before do
    allow(subject).to receive(:current_time).and_return(timestamp.to_f)
 
@@ -102,7 +108,7 @@ RSpec.shared_context 'structured_logger' do
    allow(Process).to receive(:clock_gettime).with(anything, :float_millisecond).and_call_original
  end
 
-  subject { described_class.new(logger) }
+  subject { described_class.new(config) }
 
  def call_subject(job, queue)
    # This structured logger strongly depends on execution of `InstrumentationLogger`
@@ -27,7 +27,11 @@ RSpec.shared_examples 'desired sharding key backfill job' do
      vulnerability_flags: {
        vulnerability_occurrences: 'https://gitlab.com/gitlab-org/gitlab/-/issues/480354'
      },
-      dast_site_validations: { dast_site_tokens: 'https://gitlab.com/gitlab-org/gitlab/-/issues/474985' }
+      dast_site_validations: { dast_site_tokens: 'https://gitlab.com/gitlab-org/gitlab/-/issues/474985' },
+      dast_site_profile_secret_variables: {
+        dast_site_profiles: 'https://gitlab.com/gitlab-org/gitlab/-/issues/480014'
+      },
+      dast_site_profiles_builds: { dast_site_profiles: 'https://gitlab.com/gitlab-org/gitlab/-/issues/480014' }
    }
  end
 
@@ -69,8 +69,8 @@ RSpec.shared_examples 'cleanup by a loose foreign key' do
  def find_model
    query = model.class
    # handle composite primary keys
-    connection = model.class.connection
-    connection.primary_keys(model.class.table_name).each do |primary_key|
+    primary_keys = model.class.connection.primary_keys(model.class.table_name) - model.class.ignored_columns
+    primary_keys.each do |primary_key|
      query = query.where(primary_key => model.public_send(primary_key))
    end
    query.first
@@ -1,7 +1,7 @@
 include:
  - local: gems/gem.gitlab-ci.yml
    inputs:
-      gem_name: "sidekiq-7.2.4"
+      gem_name: "sidekiq"
      gem_path_prefix: "vendor/gems/"
 
 rspec:
@@ -2,6 +2,49 @@
 
 [Sidekiq Changes](https://github.com/sidekiq/sidekiq/blob/main/Changes.md) | [Sidekiq Pro Changes](https://github.com/sidekiq/sidekiq/blob/main/Pro-Changes.md) | [Sidekiq Enterprise Changes](https://github.com/sidekiq/sidekiq/blob/main/Ent-Changes.md)
 
+7.3.1
+----------
+
+- Don't count job interruptions as failures in metrics [#6386]
+- Add frozen string literal to a number of .rb files.
+- Fix frozen string error with style_tag and script_tag [#6371]
+- Fix an error on Ruby 2.7 because of usage of `Hash#except` [#6376]
+
+7.3.0
+----------
+
+- **NEW FEATURE** Add `Sidekiq::IterableJob`, iteration support for long-running jobs. [#6286, fatkodima]
+  Iterable jobs are interruptible and can restart quickly if
+  running during a deploy. You must ensure that `each_iteration`
+  doesn't take more than Sidekiq's `-t` timeout (default: 25 seconds). Iterable jobs must not implement `perform`.
+```ruby
+class ProcessArrayJob
+  include Sidekiq::IterableJob
+  def build_enumerator(*args, **kwargs)
+    array_enumerator(args, **kwargs)
+  end
+  def each_iteration(arg)
+    puts arg
+  end
+end
+ProcessArrayJob.perform_async(1, 2, 3)
+```
+  See the [Iteration](//github.com/sidekiq/sidekiq/wiki/Iteration) wiki page and the RDoc in `Sidekiq::IterableJob`.
+  This feature should be considered BETA until the next minor release.
+- **SECURITY** The Web UI no longer allows extensions to use `<script>`.
+  Adjust CSP to disallow inline scripts within the Web UI. Please see
+  `examples/webui-ext` for how to register Web UI extensions and use
+  dynamic CSS and JS. This will make Sidekiq immune to XSS attacks. [#6270]
+- Add config option, `:skip_default_job_logging` to disable Sidekiq's default
+  start/finish job logging. [#6200]
+- Allow `Sidekiq::Limiter.redis` to use Redis Cluster [#6288]
+- Retain CurrentAttributes after inline execution [#6307]
+- Ignore non-existent CurrentAttributes attributes when restoring [#6341]
+- Raise default Redis {read,write,connect} timeouts from 1 to 3 seconds
+  to minimize ReadTimeoutErrors [#6162]
+- Add `logger` as a dependency since it will become bundled in Ruby 3.5 [#6320]
+- Ignore unsupported locales in the Web UI [#6313]
+
 7.2.4
 ----------
 
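The changelog entry above already shows the array form of `Sidekiq::IterableJob`. As a rough sketch (not from the changelog), the same contract applied to an ActiveRecord relation would look like the following, where `BackfillUserSettingsJob` and `User` are hypothetical names; the enumerator helper is defined in the vendored `sidekiq/job/iterable/enumerators.rb` later in this commit.

# Sketch only: resumable iteration over a relation. `User` is a
# hypothetical model; the helper comes from Sidekiq::Job::Iterable.
class BackfillUserSettingsJob
  include Sidekiq::IterableJob

  def build_enumerator(cursor:)
    # On interruption the saved cursor (the last processed id) is passed back in.
    active_record_records_enumerator(User.all, cursor: cursor)
  end

  def each_iteration(user)
    user.touch # placeholder per-record work
  end
end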
@@ -1,11 +1,12 @@
 PATH
  remote: .
  specs:
-    sidekiq (7.2.4)
+    sidekiq (7.3.1)
      concurrent-ruby (< 2)
      connection_pool (>= 2.3.0)
+      logger
      rack (>= 2.2.4)
-      redis-client (>= 0.19.0)
+      redis-client (>= 0.22.2)
 
 GEM
  remote: https://rubygems.org/
@@ -3,11 +3,10 @@ Copyright (c) Contributed Systems LLC
 This product includes software developed at
 Contributed Systems LLC(https://contribsys.com/).
 
-Modifications to the following files were made on 2024-08-22 by GitLab:
+Modifications to the following files were made on 2024-08-29 by GitLab:
-- lib/sidekiq/redis_client_adapter.rb
 - lib/sidekiq/api.rb
 - lib/sidekiq/cli.rb
 - lib/sidekiq/paginator.rb
 - lib/sidekiq/scheduled.rb
 - sidekiq.gemspec
 - test/web_test.rb
@@ -86,7 +86,7 @@ Useful resources:
 * Occasional announcements are made to the [@sidekiq](https://ruby.social/@sidekiq) Mastodon account.
 * The [Sidekiq tag](https://stackoverflow.com/questions/tagged/sidekiq) on Stack Overflow has lots of useful Q & A.
 
-Every Friday morning is Sidekiq office hour: I video chat and answer questions.
+Every Thursday morning is Sidekiq office hour: I video chat and answer questions.
 See the [Sidekiq support page](https://sidekiq.org/support.html) for details.
 
 Contributing
@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 require "rails/generators/named_base"
 
 module Sidekiq
@@ -32,6 +32,7 @@ require "sidekiq/logger"
 require "sidekiq/client"
 require "sidekiq/transaction_aware_client"
 require "sidekiq/job"
+require "sidekiq/iterable_job"
 require "sidekiq/worker_compatibility_alias"
 require "sidekiq/redis_client_adapter"
@@ -112,7 +113,7 @@ module Sidekiq
 #   end
 #   inst.run
 #   sleep 10
-#   inst.terminate
+#   inst.stop
 #
 # NB: it is really easy to overload a Ruby process with threads due to the GIL.
 # I do not recommend setting concurrency higher than 2-3.
@@ -813,6 +813,8 @@ module Sidekiq
 
    # Add the given job to the Dead set.
    # @param message [String] the job data as JSON
+    # @option opts :notify_failure [Boolean] (true) Whether death handlers should be called
+    # @option opts :ex [Exception] (RuntimeError) An exception to pass to the death handlers
    def kill(message, opts = {})
      now = Time.now.to_f
      Sidekiq.redis do |conn|
@@ -825,10 +827,14 @@ module Sidekiq
 
      if opts[:notify_failure] != false
        job = Sidekiq.load_json(message)
-        r = RuntimeError.new("Job killed by API")
-        r.set_backtrace(caller)
+        if opts[:ex]
+          ex = opt[:ex]
+        else
+          ex = RuntimeError.new("Job killed by API")
+          ex.set_backtrace(caller)
+        end
        Sidekiq.default_configuration.death_handlers.each do |handle|
-          handle.call(job, r)
+          handle.call(job, ex)
        end
      end
      true
@@ -1199,7 +1205,7 @@ module Sidekiq
      @hsh.send(*all)
    end
 
-    def respond_to_missing?(name)
+    def respond_to_missing?(name, *args)
      @hsh.respond_to?(name)
    end
  end
@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 require "sidekiq/component"
 
 module Sidekiq
@@ -17,6 +19,7 @@ module Sidekiq
  #   end
  class Capsule
    include Sidekiq::Component
+    extend Forwardable
 
    attr_reader :name
    attr_reader :queues
@@ -24,6 +27,8 @@ module Sidekiq
    attr_reader :mode
    attr_reader :weights
 
+    def_delegators :@config, :[], :[]=, :fetch, :key?, :has_key?, :merge!, :dig
+
    def initialize(name, config)
      @name = name
      @config = config
@@ -423,3 +423,4 @@ end
 
 require "sidekiq/systemd"
 require "sidekiq/metrics/tracking"
+require "sidekiq/job/interrupt_handler"
@@ -248,9 +248,12 @@ module Sidekiq
    def atomic_push(conn, payloads)
      if payloads.first.key?("at")
        conn.zadd("schedule", payloads.flat_map { |hash|
-          at = hash.delete("at").to_s
+          at = hash["at"].to_s
          # ActiveJob sets this but the job has not been enqueued yet
          hash.delete("enqueued_at")
+          # TODO: Use hash.except("at") when support for Ruby 2.7 is dropped
+          hash = hash.dup
+          hash.delete("at")
          [at, Sidekiq.dump_json(hash)]
        })
      else
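The `atomic_push` change above stops mutating the caller's payload: the scheduled time is now read with `hash["at"]` and `"at"` is removed from a copy, which the TODO notes could become `hash.except("at")` once Ruby 2.7 support is dropped. A rough sketch of the difference, with a made-up payload:

# Illustrative only; the payload contents are made up.
payload = { "class" => "SomeJob", "args" => [], "at" => Time.now.to_f + 60 }

# Old behavior: reading the schedule time also erased it from the caller's hash.
at = payload.delete("at").to_s # payload no longer carries "at"

# New behavior: read first, then strip "at" from a private copy.
payload = { "class" => "SomeJob", "args" => [], "at" => Time.now.to_f + 60 }
at = payload["at"].to_s
copy = payload.dup
copy.delete("at") # the caller's hash still schedules correctly if reused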
@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 require "forwardable"
 
 require "set"
@@ -17,6 +19,10 @@ module Sidekiq
    poll_interval_average: nil,
    average_scheduled_poll_interval: 5,
    on_complex_arguments: :raise,
+    iteration: {
+      max_job_runtime: nil,
+      retry_backoff: 0
+    },
    error_handlers: [],
    death_handlers: [],
    lifecycle_events: {
@@ -52,7 +58,7 @@ module Sidekiq
      @capsules = {}
    end
 
-    def_delegators :@options, :[], :[]=, :fetch, :key?, :has_key?, :merge!
+    def_delegators :@options, :[], :[]=, :fetch, :key?, :has_key?, :merge!, :dig
    attr_reader :capsules
 
    def to_json(*)
@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 require "sidekiq/redis_connection"
 require "time"
 
@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 require "sidekiq/component"
 require "sidekiq/launcher"
 require "sidekiq/metrics/tracking"
@@ -44,7 +44,7 @@ module Sidekiq # :nodoc:
        return nil
      end
 
-      queue, job = redis { |conn| conn.blocking_call(conn.read_timeout + TIMEOUT, "brpop", *qs, TIMEOUT) }
+      queue, job = redis { |conn| conn.blocking_call(TIMEOUT, "brpop", *qs, TIMEOUT) }
      UnitOfWork.new(queue, job, config) if queue
    end
 
@@ -0,0 +1,55 @@
+# frozen_string_literal: true
+
+require "sidekiq/job/iterable"
+
+# Iterable jobs are ones which provide a sequence to process using
+# `build_enumerator(*args, cursor: cursor)` and then process each
+# element of that sequence in `each_iteration(item, *args)`.
+#
+# The job is kicked off as normal:
+#
+#   ProcessUserSet.perform_async(123)
+#
+# but instead of calling `perform`, Sidekiq will call:
+#
+#   enum = ProcessUserSet#build_enumerator(123, cursor:nil)
+#
+# Your Enumerator must yield `(object, updated_cursor)` and
+# Sidekiq will call your `each_iteration` method:
+#
+#   ProcessUserSet#each_iteration(object, 123)
+#
+# After every iteration, Sidekiq will check for shutdown. If we are
+# stopping, the cursor will be saved to Redis and the job re-queued
+# to pick up the rest of the work upon restart. Your job will get
+# the updated_cursor so it can pick up right where it stopped.
+#
+#   enum = ProcessUserSet#build_enumerator(123, cursor: updated_cursor)
+#
+# The cursor object must be serializable to JSON.
+#
+# Note there are several APIs to help you build enumerators for
+# ActiveRecord Relations, CSV files, etc. See sidekiq/job/iterable/*.rb.
+module Sidekiq
+  module IterableJob
+    def self.included(base)
+      base.include Sidekiq::Job
+      base.include Sidekiq::Job::Iterable
+    end
+
+    # def build_enumerator(*args, cursor:)
+    # def each_iteration(item, *args)
+
+    # Your job can also define several callbacks during points
+    # in each job's lifecycle.
+    #
+    # def on_start
+    # def on_resume
+    # def on_stop
+    # def on_complete
+    # def around_iteration
+    #
+    # To keep things simple and compatible, this is the same
+    # API as the `sidekiq-iteration` gem.
+  end
+end
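A minimal sketch of the callback hooks listed in the comment above; the job class and log messages are illustrative, not from the source:

# Sketch only: exercises the documented hooks.
# Kick off with e.g. AuditedSweepJob.perform_async(%w[a b c]).
class AuditedSweepJob
  include Sidekiq::IterableJob

  def build_enumerator(ids, cursor:)
    # array_enumerator yields [item, index]; the index becomes the cursor
    array_enumerator(ids, cursor: cursor)
  end

  def each_iteration(item, _ids)
    # ... per-item work ...
  end

  def on_start
    logger.info("sweep started")
  end

  def on_resume
    logger.info("sweep resumed after an interruption")
  end

  def on_complete
    logger.info("sweep finished")
  end
end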
@@ -69,7 +69,11 @@ module Sidekiq
      # In practice, any option is allowed. This is the main mechanism to configure the
      # options for a specific job.
      def sidekiq_options(opts = {})
-        opts = opts.transform_keys(&:to_s) # stringify
+        # stringify 2 levels of keys
+        opts = opts.to_h do |k, v|
+          [k.to_s, (Hash === v) ? v.transform_keys(&:to_s) : v]
+        end
+
        self.sidekiq_options_hash = get_sidekiq_options.merge(opts)
      end
 
@@ -155,6 +159,9 @@ module Sidekiq
 
    attr_accessor :jid
 
+    # This attribute is implementation-specific and not a public API
+    attr_accessor :_context
+
    def self.included(base)
      raise ArgumentError, "Sidekiq::Job cannot be included in an ActiveJob: #{base.name}" if base.ancestors.any? { |c| c.name == "ActiveJob::Base" }
 
@@ -166,6 +173,10 @@ module Sidekiq
      Sidekiq.logger
    end
 
+    def interrupted?
+      @_context&.stopping?
+    end
+
    # This helper class encapsulates the set options for `set`, e.g.
    #
    #   SomeJob.set(queue: 'foo').perform_async(....)
@@ -366,7 +377,7 @@ module Sidekiq
 
      def build_client # :nodoc:
        pool = Thread.current[:sidekiq_redis_pool] || get_sidekiq_options["pool"] || Sidekiq.default_configuration.redis_pool
-        client_class = get_sidekiq_options["client_class"] || Sidekiq::Client
+        client_class = Thread.current[:sidekiq_client_class] || get_sidekiq_options["client_class"] || Sidekiq::Client
        client_class.new(pool: pool)
      end
    end
@@ -0,0 +1,24 @@
+# frozen_string_literal: true
+
+module Sidekiq
+  module Job
+    class InterruptHandler
+      include Sidekiq::ServerMiddleware
+
+      def call(instance, hash, queue)
+        yield
+      rescue Interrupted
+        logger.debug "Interrupted, re-queueing..."
+        c = Sidekiq::Client.new
+        c.push(hash)
+        raise Sidekiq::JobRetry::Skip
+      end
+    end
+  end
+end
+
+Sidekiq.configure_server do |config|
+  config.server_middleware do |chain|
+    chain.add Sidekiq::Job::InterruptHandler
+  end
+end
@@ -0,0 +1,231 @@
+# frozen_string_literal: true
+
+require_relative "iterable/enumerators"
+
+module Sidekiq
+  module Job
+    class Interrupted < ::RuntimeError; end
+
+    module Iterable
+      include Enumerators
+
+      # @api private
+      def self.included(base)
+        base.extend(ClassMethods)
+      end
+
+      # @api private
+      module ClassMethods
+        def method_added(method_name)
+          raise "#{self} is an iterable job and must not define #perform" if method_name == :perform
+          super
+        end
+      end
+
+      # @api private
+      def initialize
+        super
+
+        @_executions = 0
+        @_cursor = nil
+        @_start_time = nil
+        @_runtime = 0
+      end
+
+      # A hook to override that will be called when the job starts iterating.
+      #
+      # It is called only once, for the first time.
+      #
+      def on_start
+      end
+
+      # A hook to override that will be called around each iteration.
+      #
+      # Can be useful for some metrics collection, performance tracking etc.
+      #
+      def around_iteration
+        yield
+      end
+
+      # A hook to override that will be called when the job resumes iterating.
+      #
+      def on_resume
+      end
+
+      # A hook to override that will be called each time the job is interrupted.
+      #
+      # This can be due to interruption or sidekiq stopping.
+      #
+      def on_stop
+      end
+
+      # A hook to override that will be called when the job finished iterating.
+      #
+      def on_complete
+      end
+
+      # The enumerator to be iterated over.
+      #
+      # @return [Enumerator]
+      #
+      # @raise [NotImplementedError] with a message advising subclasses to
+      #   implement an override for this method.
+      #
+      def build_enumerator(*)
+        raise NotImplementedError, "#{self.class.name} must implement a '#build_enumerator' method"
+      end
+
+      # The action to be performed on each item from the enumerator.
+      #
+      # @return [void]
+      #
+      # @raise [NotImplementedError] with a message advising subclasses to
+      #   implement an override for this method.
+      #
+      def each_iteration(*)
+        raise NotImplementedError, "#{self.class.name} must implement an '#each_iteration' method"
+      end
+
+      def iteration_key
+        "it-#{jid}"
+      end
+
+      # @api private
+      def perform(*arguments)
+        fetch_previous_iteration_state
+
+        @_executions += 1
+        @_start_time = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC)
+
+        enumerator = build_enumerator(*arguments, cursor: @_cursor)
+        unless enumerator
+          logger.info("'#build_enumerator' returned nil, skipping the job.")
+          return
+        end
+
+        assert_enumerator!(enumerator)
+
+        if @_executions == 1
+          on_start
+        else
+          on_resume
+        end
+
+        completed = catch(:abort) do
+          iterate_with_enumerator(enumerator, arguments)
+        end
+
+        on_stop
+        completed = handle_completed(completed)
+
+        if completed
+          on_complete
+          cleanup
+        else
+          reenqueue_iteration_job
+        end
+      end
+
+      private
+
+      def fetch_previous_iteration_state
+        state = Sidekiq.redis { |conn| conn.hgetall(iteration_key) }
+
+        unless state.empty?
+          @_executions = state["ex"].to_i
+          @_cursor = Sidekiq.load_json(state["c"])
+          @_runtime = state["rt"].to_f
+        end
+      end
+
+      STATE_FLUSH_INTERVAL = 5 # seconds
+      # we need to keep the state around as long as the job
+      # might be retrying
+      STATE_TTL = 30 * 24 * 60 * 60 # one month
+
+      def iterate_with_enumerator(enumerator, arguments)
+        found_record = false
+        state_flushed_at = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC)
+
+        enumerator.each do |object, cursor|
+          found_record = true
+          @_cursor = cursor
+
+          is_interrupted = interrupted?
+          if ::Process.clock_gettime(::Process::CLOCK_MONOTONIC) - state_flushed_at >= STATE_FLUSH_INTERVAL || is_interrupted
+            flush_state
+            state_flushed_at = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC)
+          end
+
+          return false if is_interrupted
+
+          around_iteration do
+            each_iteration(object, *arguments)
+          end
+        end
+
+        logger.debug("Enumerator found nothing to iterate!") unless found_record
+        true
+      ensure
+        @_runtime += (::Process.clock_gettime(::Process::CLOCK_MONOTONIC) - @_start_time)
+      end
+
+      def reenqueue_iteration_job
+        flush_state
+        logger.debug { "Interrupting job (cursor=#{@_cursor.inspect})" }
+
+        raise Interrupted
+      end
+
+      def assert_enumerator!(enum)
+        unless enum.is_a?(Enumerator)
+          raise ArgumentError, <<~MSG
+            #build_enumerator must return an Enumerator, but returned #{enum.class}.
+            Example:
+              def build_enumerator(params, cursor:)
+                active_record_records_enumerator(
+                  Shop.find(params["shop_id"]).products,
+                  cursor: cursor
+                )
+              end
+          MSG
+        end
+      end
+
+      def flush_state
+        key = iteration_key
+        state = {
+          "ex" => @_executions,
+          "c" => Sidekiq.dump_json(@_cursor),
+          "rt" => @_runtime
+        }
+
+        Sidekiq.redis do |conn|
+          conn.multi do |pipe|
+            pipe.hset(key, state)
+            pipe.expire(key, STATE_TTL)
+          end
+        end
+      end
+
+      def cleanup
+        logger.debug {
+          format("Completed iteration. executions=%d runtime=%.3f", @_executions, @_runtime)
+        }
+        Sidekiq.redis { |conn| conn.unlink(iteration_key) }
+      end
+
+      def handle_completed(completed)
+        case completed
+        when nil, # someone aborted the job but wants to call the on_complete callback
+             true
+          true
+        when false
+          false
+        else
+          raise "Unexpected thrown value: #{completed.inspect}"
+        end
+      end
+    end
+  end
+end
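For reference, the resumable state that `flush_state` above writes to Redis is a small hash under `iteration_key` (`it-<jid>`). An illustrative read, with a made-up jid:

# Illustrative only: inspect the persisted iteration state of a running job.
Sidekiq.redis { |conn| conn.hgetall("it-2f9a1c0b55aa") }
# => {"ex"=>"3", "c"=>"1042", "rt"=>"12.73"}
#    ex: executions so far, c: JSON-encoded cursor, rt: accumulated runtime in seconds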
@ -0,0 +1,53 @@
# frozen_string_literal: true

module Sidekiq
  module Job
    module Iterable
      # @api private
      class ActiveRecordEnumerator
        def initialize(relation, cursor: nil, **options)
          @relation = relation
          @cursor = cursor
          @options = options
        end

        def records
          Enumerator.new(-> { @relation.count }) do |yielder|
            @relation.find_each(**@options, start: @cursor) do |record|
              yielder.yield(record, record.id)
            end
          end
        end

        def batches
          Enumerator.new(-> { @relation.count }) do |yielder|
            @relation.find_in_batches(**@options, start: @cursor) do |batch|
              yielder.yield(batch, batch.last.id)
            end
          end
        end

        def relations
          Enumerator.new(-> { relations_size }) do |yielder|
            # Convenience to use :batch_size for all the
            # ActiveRecord batching methods.
            options = @options.dup
            options[:of] ||= options.delete(:batch_size)

            @relation.in_batches(**options, start: @cursor) do |relation|
              last_record = relation.last
              yielder.yield(relation, last_record.id)
            end
          end
        end

        private

        def relations_size
          batch_size = @options[:batch_size] || 1000
          (@relation.count + batch_size - 1) / batch_size # ceiling division
        end
      end
    end
  end
end
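A quick sketch of the (value, cursor) contract these enumerators expose (the Product model is an assumption):

    enum = Sidekiq::Job::Iterable::ActiveRecordEnumerator
      .new(Product.all, batch_size: 500)
      .records

    # Each tick yields the record plus its id; that id is what gets
    # persisted as the cursor and fed back into find_each(start: cursor).
    record, cursor = enum.next
    enum.size # computed lazily by the -> { @relation.count } size block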
@ -0,0 +1,47 @@
# frozen_string_literal: true

module Sidekiq
  module Job
    module Iterable
      # @api private
      class CsvEnumerator
        def initialize(csv)
          unless defined?(CSV) && csv.instance_of?(CSV)
            raise ArgumentError, "CsvEnumerator.new takes CSV object"
          end

          @csv = csv
        end

        def rows(cursor:)
          @csv.lazy
            .each_with_index
            .drop(cursor || 0)
            .to_enum { count_of_rows_in_file }
        end

        def batches(cursor:, batch_size: 100)
          @csv.lazy
            .each_slice(batch_size)
            .with_index
            .drop(cursor || 0)
            .to_enum { (count_of_rows_in_file.to_f / batch_size).ceil }
        end

        private

        def count_of_rows_in_file
          filepath = @csv.path
          return unless filepath

          count = IO.popen(["wc", "-l", filepath]) do |out|
            out.read.strip.to_i
          end

          count -= 1 if @csv.headers
          count
        end
      end
    end
  end
end
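A usage sketch (the file path is a placeholder). Note that count_of_rows_in_file shells out to wc -l, so the lazy enumerator can report a total size without reading the whole file into memory:

    require "csv"

    csv = CSV.open("imports/products.csv", headers: true)
    enum = Sidekiq::Job::Iterable::CsvEnumerator.new(csv).batches(cursor: 0, batch_size: 50)

    enum.each do |batch, index|
      # batch is an Array of CSV::Row objects; index is the batch number,
      # which doubles as the cursor when an interrupted job resumes.
    end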
@ -0,0 +1,135 @@
# frozen_string_literal: true

require_relative "active_record_enumerator"
require_relative "csv_enumerator"

module Sidekiq
  module Job
    module Iterable
      module Enumerators
        # Builds Enumerator object from a given array, using +cursor+ as an offset.
        #
        # @param array [Array]
        # @param cursor [Integer] offset to start iteration from
        #
        # @return [Enumerator]
        #
        # @example
        #   array_enumerator(['build', 'enumerator', 'from', 'any', 'array'], cursor: cursor)
        #
        def array_enumerator(array, cursor:)
          raise ArgumentError, "array must be an Array" unless array.is_a?(Array)

          x = array.each_with_index.drop(cursor || 0)
          x.to_enum { x.size }
        end

        # Builds Enumerator from `ActiveRecord::Relation`.
        # Each Enumerator tick moves the cursor one row forward.
        #
        # @param relation [ActiveRecord::Relation] relation to iterate
        # @param cursor [Object] offset id to start iteration from
        # @param options [Hash] additional options that will be passed to relevant
        #   ActiveRecord batching methods
        #
        # @return [ActiveRecordEnumerator]
        #
        # @example
        #   def build_enumerator(cursor:)
        #     active_record_records_enumerator(User.all, cursor: cursor)
        #   end
        #
        #   def each_iteration(user)
        #     user.notify_about_something
        #   end
        #
        def active_record_records_enumerator(relation, cursor:, **options)
          ActiveRecordEnumerator.new(relation, cursor: cursor, **options).records
        end

        # Builds Enumerator from `ActiveRecord::Relation` and enumerates on batches of records.
        # Each Enumerator tick moves the cursor `:batch_size` rows forward.
        # @see #active_record_records_enumerator
        #
        # @example
        #   def build_enumerator(product_id, cursor:)
        #     active_record_batches_enumerator(
        #       Comment.where(product_id: product_id).select(:id),
        #       cursor: cursor,
        #       batch_size: 100
        #     )
        #   end
        #
        #   def each_iteration(batch_of_comments, product_id)
        #     comment_ids = batch_of_comments.map(&:id)
        #     CommentService.call(comment_ids: comment_ids)
        #   end
        #
        def active_record_batches_enumerator(relation, cursor:, **options)
          ActiveRecordEnumerator.new(relation, cursor: cursor, **options).batches
        end

        # Builds Enumerator from `ActiveRecord::Relation` and enumerates on batches,
        # yielding `ActiveRecord::Relation`s.
        # @see #active_record_records_enumerator
        #
        # @example
        #   def build_enumerator(product_id, cursor:)
        #     active_record_relations_enumerator(
        #       Product.find(product_id).comments,
        #       cursor: cursor,
        #       batch_size: 100,
        #     )
        #   end
        #
        #   def each_iteration(batch_of_comments, product_id)
        #     # batch_of_comments will be a Comment::ActiveRecord_Relation
        #     batch_of_comments.update_all(deleted: true)
        #   end
        #
        def active_record_relations_enumerator(relation, cursor:, **options)
          ActiveRecordEnumerator.new(relation, cursor: cursor, **options).relations
        end

        # Builds Enumerator from a CSV file.
        #
        # @param csv [CSV] an instance of CSV object
        # @param cursor [Integer] offset to start iteration from
        #
        # @example
        #   def build_enumerator(import_id, cursor:)
        #     import = Import.find(import_id)
        #     csv_enumerator(import.csv, cursor: cursor)
        #   end
        #
        #   def each_iteration(csv_row)
        #     # insert csv_row into database
        #   end
        #
        def csv_enumerator(csv, cursor:)
          CsvEnumerator.new(csv).rows(cursor: cursor)
        end

        # Builds Enumerator from a CSV file and enumerates on batches of records.
        #
        # @param csv [CSV] an instance of CSV object
        # @param cursor [Integer] offset to start iteration from
        # @option options :batch_size [Integer] (100) size of the batch
        #
        # @example
        #   def build_enumerator(import_id, cursor:)
        #     import = Import.find(import_id)
        #     csv_batches_enumerator(import.csv, cursor: cursor)
        #   end
        #
        #   def each_iteration(batch_of_csv_rows)
        #     # ...
        #   end
        #
        def csv_batches_enumerator(csv, cursor:, **options)
          CsvEnumerator.new(csv).batches(cursor: cursor, **options)
        end
      end
    end
  end
end
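To make the cursor arithmetic concrete, a small sketch of array_enumerator's resume behaviour (values are illustrative; the helper is called from inside an iterable job that mixes in these methods):

    enum = array_enumerator(%w[a b c d], cursor: 2)
    enum.next # => ["c", 2] -- each element is paired with its original index,
              #    and that index is the cursor saved if the job is interrupted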
@ -2,23 +2,34 @@

module Sidekiq
  class JobLogger
-    def initialize(logger)
-      @logger = logger
+    def initialize(config)
+      @config = config
+      @logger = @config.logger
    end

+    # If true we won't do any job logging out of the box.
+    # The user is responsible for any logging.
+    def skip_default_logging?
+      @config[:skip_default_job_logging]
+    end
+
    def call(item, queue)
-      start = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC)
-      @logger.info("start")
+      return yield if skip_default_logging?

-      yield
+      begin
+        start = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC)
+        @logger.info("start")

-      Sidekiq::Context.add(:elapsed, elapsed(start))
-      @logger.info("done")
-    rescue Exception
-      Sidekiq::Context.add(:elapsed, elapsed(start))
-      @logger.info("fail")
+        yield

-      raise
+        Sidekiq::Context.add(:elapsed, elapsed(start))
+        @logger.info("done")
+      rescue Exception
+        Sidekiq::Context.add(:elapsed, elapsed(start))
+        @logger.info("fail")
+
+        raise
+      end
    end

    def prepare(job_hash, &block)
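The new flag is read straight from the capsule config, so the built-in start/done/fail lines can be switched off like this (a sketch; the option name comes from skip_default_logging? above):

    Sidekiq.configure_server do |config|
      # The application becomes responsible for all job logging.
      config[:skip_default_job_logging] = true
    end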
@ -59,8 +59,13 @@ module Sidekiq
  # end
  #
  class JobRetry
+    # Handled means the job failed but has been dealt with
+    # (by creating a retry, rescheduling it, etc). It still
+    # needs to be logged and dispatched to error_handlers.
    class Handled < ::RuntimeError; end

+    # Skip means the job failed but Sidekiq does not need to
+    # create a retry, log it or send to error_handlers.
    class Skip < Handled; end

    include Sidekiq::Component
@ -129,7 +134,7 @@ module Sidekiq
        process_retry(jobinst, msg, queue, e)
        # We've handled this error associated with this job, don't
        # need to handle it at the global level
-        raise Skip
+        raise Handled
      end

      private
@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
require "securerandom"
require "time"
@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
require "sidekiq"
require "date"
require "set"
@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
require "concurrent"

module Sidekiq
@ -31,11 +31,11 @@ module Sidekiq
        # We don't track time for failed jobs as they can have very unpredictable
        # execution times. more important to know average time for successful jobs so we
        # can better recognize when a perf regression is introduced.
-        @lock.synchronize {
-          @grams[klass].record_time(time_ms)
-          @jobs["#{klass}|ms"] += time_ms
-          @totals["ms"] += time_ms
-        }
+        track_time(klass, time_ms)
+      rescue JobRetry::Skip
+        # This is raised when iterable job is interrupted.
+        track_time(klass, time_ms)
+        raise
      rescue Exception
        @lock.synchronize {
          @jobs["#{klass}|f"] += 1
@ -100,6 +100,14 @@ module Sidekiq

      private

+      def track_time(klass, time_ms)
+        @lock.synchronize {
+          @grams[klass].record_time(time_ms)
+          @jobs["#{klass}|ms"] += time_ms
+          @totals["ms"] += time_ms
+        }
+      end
+
      def reset
        @lock.synchronize {
          array = [@totals, @jobs, @grams]
@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
require "active_support/current_attributes"

module Sidekiq
@ -46,22 +48,38 @@ module Sidekiq
      end

      def call(_, job, _, &block)
-        cattrs_to_reset = []
+        klass_attrs = {}

        @cattrs.each do |(key, strklass)|
-          if job.has_key?(key)
-            constklass = strklass.constantize
-            cattrs_to_reset << constklass
+          next unless job.has_key?(key)

-            job[key].each do |(attribute, value)|
-              constklass.public_send(:"#{attribute}=", value)
-            end
-          end
+          klass_attrs[strklass.constantize] = job[key]
        end

-        yield
-      ensure
-        cattrs_to_reset.each(&:reset)
+        wrap(klass_attrs.to_a, &block)
      end
+
+      private
+
+      def wrap(klass_attrs, &block)
+        klass, attrs = klass_attrs.shift
+        return block.call unless klass
+
+        retried = false
+
+        begin
+          klass.set(attrs) do
+            wrap(klass_attrs, &block)
+          end
+        rescue NoMethodError
+          raise if retried
+
+          # It is possible that the `CurrentAttributes` definition
+          # was changed before the job started processing.
+          attrs = attrs.select { |attr| klass.respond_to?(attr) }
+          retried = true
+          retry
+        end
+      end
    end
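For context, this middleware is normally installed via Sidekiq::CurrentAttributes.persist; a sketch with a hypothetical CurrentAttributes subclass:

    require "active_support/current_attributes"
    require "sidekiq/middleware/current_attributes"

    class Current < ActiveSupport::CurrentAttributes
      attribute :tenant_id
    end

    # Copies Current's attributes onto each enqueued job's payload and
    # restores them around execution via the recursive wrap/klass.set
    # calls shown above.
    Sidekiq::CurrentAttributes.persist("Current")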
@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
module Sidekiq
  # Server-side middleware must import this Module in order
  # to get access to server resources during `call`.
@ -1,4 +1,5 @@
#!/usr/bin/env ruby
+# frozen_string_literal: true

require "fileutils"
require "sidekiq/api"
@ -98,7 +99,7 @@ class Sidekiq::Monitor
      pad = opts[:pad] || 0
      max_length = opts[:max_length] || (80 - pad)
      out = []
-      line = ""
+      line = +""
      values.each do |value|
        if (line.length + value.length) > max_length
          out << line
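The one-character change matters because the file now declares # frozen_string_literal: true: a bare "" literal is frozen, while unary plus returns a mutable copy that the later << appends need. A tiny illustration:

    # frozen_string_literal: true

    line = ""
    line << "x" # raises FrozenError: the literal is frozen

    line = +""
    line << "x" # fine: +"" is an unfrozen, mutable copy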
@ -36,7 +36,7 @@ module Sidekiq
      @job = nil
      @thread = nil
      @reloader = Sidekiq.default_configuration[:reloader]
-      @job_logger = (capsule.config[:job_logger] || Sidekiq::JobLogger).new(logger)
+      @job_logger = (capsule.config[:job_logger] || Sidekiq::JobLogger).new(capsule.config)
      @retrier = Sidekiq::JobRetry.new(capsule)
    end
@ -58,6 +58,10 @@ module Sidekiq
      @thread.value if wait
    end

+    def stopping?
+      @done
+    end
+
    def start
      @thread ||= safe_thread("#{config.name}/processor", &method(:run))
    end
@ -136,6 +140,7 @@ module Sidekiq
      klass = Object.const_get(job_hash["class"])
      inst = klass.new
      inst.jid = job_hash["jid"]
+      inst._context = self
      @retrier.local(inst, jobstr, queue) do
        yield inst
      end
@ -185,6 +190,11 @@ module Sidekiq
      # Had to force kill this job because it didn't finish
      # within the timeout. Don't acknowledge the work since
      # we didn't properly finish it.
+    rescue Sidekiq::JobRetry::Skip => s
+      # Skip means we handled this error elsewhere. We don't
+      # need to log or report the error.
+      ack = true
+      raise s
    rescue Sidekiq::JobRetry::Handled => h
      # this is the common case: job raised error and Sidekiq::JobRetry::Handled
      # signals that we created a retry successfully. We can acknowledge the job.
@ -64,6 +64,13 @@ module Sidekiq
      opts = client_opts(options)
      @config = if opts.key?(:sentinels)
        RedisClient.sentinel(**opts)
+      elsif opts.key?(:nodes)
+        # Sidekiq does not support Redis clustering but Sidekiq Enterprise's
+        # rate limiters are cluster-safe so we can scale to millions
+        # of rate limiters using a Redis cluster. This requires the
+        # `redis-cluster-client` gem.
+        # Sidekiq::Limiter.redis = { nodes: [...] }
+        RedisClient.cluster(**opts)
      else
        RedisClient.config(**opts)
      end
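As the inline comment notes, this branch exists for Sidekiq Enterprise's cluster-safe rate limiters; a configuration sketch (node URLs are placeholders, and the redis-cluster-client gem must be available):

    Sidekiq::Limiter.redis = {
      nodes: [
        "redis://cluster-node-1:6379",
        "redis://cluster-node-2:6379"
      ]
    }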
@ -90,13 +97,9 @@ module Sidekiq
        opts.delete(:network_timeout)
      end

-      if opts[:driver]
-        opts[:driver] = opts[:driver].to_sym
-      end
-
      opts[:name] = opts.delete(:master_name) if opts.key?(:master_name)
      opts[:role] = opts[:role].to_sym if opts.key?(:role)
      opts.delete(:url) if opts.key?(:sentinels)
+      opts[:driver] = opts[:driver].to_sym if opts.key?(:driver)

      # Issue #3303, redis-rb will silently retry an operation.
      # This can lead to duplicate jobs if Sidekiq::Client's LPUSH
@ -8,17 +8,28 @@ module Sidekiq
  module RedisConnection
    class << self
      def create(options = {})
-        symbolized_options = options.transform_keys(&:to_sym)
+        symbolized_options = deep_symbolize_keys(options)
        symbolized_options[:url] ||= determine_redis_provider

        logger = symbolized_options.delete(:logger)
        logger&.info { "Sidekiq #{Sidekiq::VERSION} connecting to Redis with options #{scrub(symbolized_options)}" }

        raise "Sidekiq 7+ does not support Redis protocol 2" if symbolized_options[:protocol] == 2

+        safe = !!symbolized_options.delete(:cluster_safe)
+        raise ":nodes not allowed, Sidekiq is not safe to run on Redis Cluster" if !safe && symbolized_options.key?(:nodes)
+
        size = symbolized_options.delete(:size) || 5
        pool_timeout = symbolized_options.delete(:pool_timeout) || 1
        pool_name = symbolized_options.delete(:pool_name)

+        # Default timeout in redis-client is 1 second, which can be too aggressive
+        # if the Sidekiq process is CPU-bound. With 10-15 threads and a thread quantum of 100ms,
+        # it can be easy to get the occasional ReadTimeoutError. You can still provide
+        # a smaller timeout explicitly:
+        # config.redis = { url: "...", timeout: 1 }
+        symbolized_options[:timeout] ||= 3
+
        redis_config = Sidekiq::RedisClientAdapter.new(symbolized_options)
        ConnectionPool.new(timeout: pool_timeout, size: size, name: pool_name) do
          redis_config.new_client
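The default network timeout is now 3 seconds; per the comment above, a tighter value can still be set explicitly (URL is a placeholder):

    Sidekiq.configure_server do |config|
      config.redis = { url: "redis://localhost:6379/0", timeout: 1 }
    end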
@ -27,6 +38,19 @@ module Sidekiq

      private

+      def deep_symbolize_keys(object)
+        case object
+        when Hash
+          object.each_with_object({}) do |(key, value), result|
+            result[key.to_sym] = deep_symbolize_keys(value)
+          end
+        when Array
+          object.map { |e| deep_symbolize_keys(e) }
+        else
+          object
+        end
+      end
+
      def scrub(options)
        redacted = "REDACTED"
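Deep symbolization matters for configs loaded from YAML or JSON, where nested hashes arrive with string keys; a sketch (hostnames are placeholders):

    # Previously only top-level keys were symbolized; now the nested
    # sentinel hashes are converted too, so this works as-is.
    Sidekiq.configure_server do |config|
      config.redis = {
        "name" => "mymaster",
        "sentinels" => [
          {"host" => "sentinel-1.internal", "port" => 26379}
        ]
      }
    end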
@ -42,7 +66,14 @@ module Sidekiq
      scrubbed_options[:password] = redacted if scrubbed_options[:password]
      scrubbed_options[:sentinel_password] = redacted if scrubbed_options[:sentinel_password]
      scrubbed_options[:sentinels]&.each do |sentinel|
-        sentinel[:password] = redacted if sentinel[:password]
+        if sentinel.is_a?(String)
+          if (uri = URI(sentinel)) && uri.password
+            uri.password = redacted
+            sentinel.replace(uri.to_s)
+          end
+        elsif sentinel[:password]
+          sentinel[:password] = redacted
+        end
      end
      scrubbed_options
    end
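The new branch covers sentinels given as URL strings, whose embedded passwords are now redacted before the connection options are logged; a sketch of the effect (scrub is a private helper, credentials are placeholders):

    Sidekiq::RedisConnection.send(:scrub, sentinels: ["redis://user:s3cret@sentinel-1:26379"])
    # => { sentinels: ["redis://user:REDACTED@sentinel-1:26379"] }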
@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
require "forwardable"

module Sidekiq
@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
#
# Sidekiq's systemd integration allows Sidekiq to inform systemd:
# 1. when it has successfully started