Add latest changes from gitlab-org/gitlab@master

This commit is contained in:
GitLab Bot 2021-09-13 18:11:46 +00:00
parent 85d19f5eac
commit 02c9272b54
43 changed files with 1090 additions and 537 deletions

View File

@ -78,8 +78,8 @@ export default {
v-if="resolveAllDiscussionsIssuePath && !allResolved"
v-gl-tooltip
:href="resolveAllDiscussionsIssuePath"
:title="s__('Resolve all threads in new issue')"
:aria-label="s__('Resolve all threads in new issue')"
:title="s__('Create issue to resolve all threads')"
:aria-label="s__('Create issue to resolve all threads')"
class="new-issue-for-discussion discussion-create-issue-btn"
icon="issue-new"
/>

View File

@ -4,7 +4,7 @@ import { s__ } from '~/locale';
export default {
i18n: {
buttonLabel: s__('MergeRequests|Resolve this thread in a new issue'),
buttonLabel: s__('MergeRequests|Create issue to resolve thread'),
},
name: 'ResolveWithIssueButton',
components: {

View File

@ -46,7 +46,7 @@ export default {
size="small"
icon="issue-new"
>
{{ s__('mrWidget|Resolve all threads in new issue') }}
{{ s__('mrWidget|Create issue to resolve all threads') }}
</gl-button>
</div>
</div>

View File

@ -176,6 +176,16 @@ module ApplicationSettingsHelper
"and the value is encrypted at rest.")
end
# Help text rendered under the "Limiting mode" select in
# Admin > Settings > Preferences (Sidekiq job size limits section).
# The full sentence is kept as one inline literal inside `_` so the
# gettext extractor can pick it up for translation.
def sidekiq_job_limiter_mode_help_text
  _("How the job limiter handles jobs exceeding the thresholds specified below. "\
    "The 'track' mode only logs the jobs. The 'compress' mode compresses the jobs and "\
    "raises an exception if the compressed size exceeds the limit.")
end
# Builds the options for the Sidekiq job limiter mode <select>:
# each enum key paired with its humanized label, e.g. ["Compress", "compress"].
def sidekiq_job_limiter_modes_for_select
  mode_keys = ApplicationSetting.sidekiq_job_limiter_modes.keys

  mode_keys.map { |mode_key| [mode_key.humanize, mode_key] }
end
def visible_attributes
[
:abuse_notification_email,
@ -387,7 +397,10 @@ module ApplicationSettingsHelper
:container_registry_cleanup_tags_service_max_list_size,
:keep_latest_artifact,
:whats_new_variant,
:user_deactivation_emails_enabled
:user_deactivation_emails_enabled,
:sidekiq_job_limiter_mode,
:sidekiq_job_limiter_compression_threshold_bytes,
:sidekiq_job_limiter_limit_bytes
].tap do |settings|
settings << :deactivate_dormant_users unless Gitlab.com?
end

View File

@ -571,6 +571,18 @@ class ApplicationSetting < ApplicationRecord
validates :floc_enabled,
inclusion: { in: [true, false], message: _('must be a boolean value') }
# Sidekiq job size limiter mode, stored as a smallint.
# Keys come from Gitlab::SidekiqMiddleware::SizeLimiter::Validator
# ('track' => 0, 'compress' => 1); the DB default is compress (1).
enum sidekiq_job_limiter_mode: {
  Gitlab::SidekiqMiddleware::SizeLimiter::Validator::TRACK_MODE => 0,
  Gitlab::SidekiqMiddleware::SizeLimiter::Validator::COMPRESS_MODE => 1 # The default
}

# Mode must be one of the enum keys defined above.
validates :sidekiq_job_limiter_mode,
  inclusion: { in: self.sidekiq_job_limiter_modes }
# Byte thresholds for compression and rejection; both are non-negative
# integers (0 disables rejection — see the admin settings form).
validates :sidekiq_job_limiter_compression_threshold_bytes,
  numericality: { only_integer: true, greater_than_or_equal_to: 0 }
validates :sidekiq_job_limiter_limit_bytes,
  numericality: { only_integer: true, greater_than_or_equal_to: 0 }
attr_encrypted :asset_proxy_secret_key,
mode: :per_attribute_iv,
key: Settings.attr_encrypted_db_key_base_truncated,

View File

@ -0,0 +1,111 @@
# frozen_string_literal: true

module Ci
  module StuckBuilds
    # Finds CI builds that have been sitting in a running, pending or
    # scheduled state for longer than the timeouts below and drops them
    # with an appropriate failure reason, logging each dropped build.
    class DropService
      # A running build with no updates for this long is considered hung.
      BUILD_RUNNING_OUTDATED_TIMEOUT = 1.hour
      # A pending build older than this is dropped unconditionally.
      BUILD_PENDING_OUTDATED_TIMEOUT = 1.day
      # A scheduled build whose scheduled_at passed this long ago is stale.
      BUILD_SCHEDULED_OUTDATED_TIMEOUT = 1.hour
      # A pending build older than this is dropped only when it is
      # actually stuck (see #drop_stuck).
      BUILD_PENDING_STUCK_TIMEOUT = 1.hour
      # Window used by the legacy pending-builds query (feature flag off).
      BUILD_LOOKBACK = 5.days

      # Entry point: drops each category of stuck build in turn.
      def execute
        Gitlab::AppLogger.info "#{self.class}: Cleaning stuck builds"

        drop(running_timed_out_builds, failure_reason: :stuck_or_timeout_failure)

        drop(
          pending_builds(BUILD_PENDING_OUTDATED_TIMEOUT.ago),
          failure_reason: :stuck_or_timeout_failure
        )

        drop(scheduled_timed_out_builds, failure_reason: :stale_schedule)

        drop_stuck(
          pending_builds(BUILD_PENDING_STUCK_TIMEOUT.ago),
          failure_reason: :stuck_or_timeout_failure
        )
      end

      private

      # rubocop: disable CodeReuse/ActiveRecord
      # We're adding the ordering clause by `created_at` and `project_id`
      # because we want to force the query planner to use the
      # `ci_builds_gitlab_monitor_metrics` index all the time.
      def pending_builds(timeout)
        if Feature.enabled?(:ci_new_query_for_pending_stuck_jobs)
          Ci::Build.pending.created_at_before(timeout).updated_at_before(timeout).order(created_at: :asc, project_id: :asc)
        else
          Ci::Build.pending.updated_before(lookback: BUILD_LOOKBACK.ago, timeout: timeout)
        end
      end
      # rubocop: enable CodeReuse/ActiveRecord

      # Scheduled builds whose scheduled_at is further in the past than
      # the scheduled timeout allows.
      def scheduled_timed_out_builds
        Ci::Build.where(status: :scheduled).where( # rubocop: disable CodeReuse/ActiveRecord
          'ci_builds.scheduled_at IS NOT NULL AND ci_builds.scheduled_at < ?',
          BUILD_SCHEDULED_OUTDATED_TIMEOUT.ago
        )
      end

      # Running builds that have not been updated within the running timeout.
      def running_timed_out_builds
        Ci::Build.running.where( # rubocop: disable CodeReuse/ActiveRecord
          'ci_builds.updated_at < ?',
          BUILD_RUNNING_OUTDATED_TIMEOUT.ago
        )
      end

      # Drops every build in the relation as :outdated.
      def drop(builds, failure_reason:)
        fetch(builds) do |build|
          drop_build :outdated, build, failure_reason
        end
      end

      # Drops builds as :stuck, stopping at the first build that is not
      # stuck. Note: `break` inside the block terminates the whole
      # `fetch` call, not just the current iteration.
      def drop_stuck(builds, failure_reason:)
        fetch(builds) do |build|
          break unless build.stuck?

          drop_build :stuck, build, failure_reason
        end
      end

      # rubocop: disable CodeReuse/ActiveRecord
      # Processes the relation in batches of 100. The loop relies on the
      # yielded block changing each build's status so that the same query
      # eventually returns no rows and the loop terminates.
      def fetch(builds)
        loop do
          jobs = builds.includes(:tags, :runner, project: [:namespace, :route])
            .limit(100)
            .to_a

          break if jobs.empty?

          jobs.each do |job|
            Gitlab::ApplicationContext.with_context(project: job.project) { yield(job) }
          end
        end
      end
      # rubocop: enable CodeReuse/ActiveRecord

      # Drops a single build under optimistic locking (3 retries). If the
      # drop itself raises, the build is doomed via `doom!` and the error
      # is reported to error tracking instead of aborting the sweep.
      def drop_build(type, build, reason)
        Gitlab::AppLogger.info "#{self.class}: Dropping #{type} build #{build.id} for runner #{build.runner_id} (status: #{build.status}, failure_reason: #{reason})"
        Gitlab::OptimisticLocking.retry_lock(build, 3, name: 'stuck_ci_jobs_worker_drop_build') do |b|
          b.drop(reason)
        end
      rescue StandardError => ex
        build.doom!
        track_exception_for_build(ex, build)
      end

      # Sends the exception to error tracking with build context attached.
      def track_exception_for_build(ex, build)
        Gitlab::ErrorTracking.track_exception(ex,
          build_id: build.id,
          build_name: build.name,
          build_stage: build.stage,
          pipeline_id: build.pipeline_id,
          project_id: build.project_id
        )
      end
    end
  end
end

View File

@ -3,10 +3,13 @@
%fieldset
.form-group
= f.label :notes_create_limit, _('Max requests per minute per user'), class: 'label-bold'
= f.label :notes_create_limit, _('Maximum requests per minute'), class: 'label-bold'
= f.number_field :notes_create_limit, class: 'form-control gl-form-input'
.form-group
= f.label :notes_create_limit_allowlist, _('List of users to be excluded from the limit'), class: 'label-bold'
= f.label :notes_create_limit_allowlist, _('Users to exclude from the rate limit'), class: 'label-bold'
= f.text_area :notes_create_limit_allowlist_raw, placeholder: 'username1, username2', class: 'form-control gl-form-input', rows: 5
.form-text.text-muted
= _('Comma-separated list of users allowed to exceed the rate limit.')
= f.submit _('Save changes'), class: "gl-button btn btn-confirm", data: { qa_selector: 'save_changes_button' }

View File

@ -0,0 +1,21 @@
-# Admin > Settings > Preferences form for the Sidekiq job size limiter:
-# limiting mode, compression threshold, and hard size limit.
-# Fix: user-facing string had a grammatical error
-# ("Set this to 0 to if you don't want..." -> "Set this to 0 if you don't want...").
= form_for @application_setting, url: preferences_admin_application_settings_path(anchor: 'js-sidekiq-job-limits-settings'), html: { class: 'fieldset-form' } do |f|
  = form_errors(@application_setting)

  %fieldset
    .form-group
      = f.label :sidekiq_job_limiter_mode, _('Limiting mode'), class: 'label-bold'
      = f.select :sidekiq_job_limiter_mode, sidekiq_job_limiter_modes_for_select, {}, class: 'form-control'
      .form-text.text-muted
        = sidekiq_job_limiter_mode_help_text
    .form-group
      = f.label :sidekiq_job_limiter_compression_threshold_bytes, _('Sidekiq job compression threshold (bytes)'), class: 'label-bold'
      = f.number_field :sidekiq_job_limiter_compression_threshold_bytes, class: 'form-control gl-form-input'
      .form-text.text-muted
        = _('Threshold in bytes at which to compress Sidekiq job arguments.')
    .form-group
      = f.label :sidekiq_job_limiter_limit_bytes, _('Sidekiq job size limit (bytes)'), class: 'label-bold'
      = f.number_field :sidekiq_job_limiter_limit_bytes, class: 'form-control gl-form-input'
      .form-text.text-muted
        = _("Threshold in bytes at which to reject Sidekiq jobs. Set this to 0 if you don't want to limit Sidekiq jobs.")

  = f.submit _('Save changes'), class: "gl-button btn btn-confirm"

View File

@ -99,11 +99,12 @@
%section.settings.as-note-limits.no-animate#js-note-limits-settings{ class: ('expanded' if expanded_by_default?) }
.settings-header
%h4
= _('Notes Rate Limits')
= _('Notes rate limit')
%button.btn.gl-button.btn-default.js-settings-toggle{ type: 'button' }
= expanded_by_default? ? _('Collapse') : _('Expand')
%p
= _('Configure limit for notes created per minute by web and API requests.')
= _('Set the per-user rate limit for notes created by web or API requests.')
= link_to _('Learn more.'), help_page_path('user/admin_area/settings/rate_limit_on_notes_creation.md'), target: '_blank', rel: 'noopener noreferrer'
.settings-content
= render 'note_limits'

View File

@ -82,3 +82,17 @@
= _('Configure the default first day of the week and time tracking units.')
.settings-content
= render 'localization'
%section.settings.as-sidekiq-job-limits.no-animate#js-sidekiq-job-limits-settings{ class: ('expanded' if expanded_by_default?) }
.settings-header
%h4
= _('Sidekiq job size limits')
%button.btn.gl-button.btn-default.js-settings-toggle{ type: 'button' }
= expanded_by_default? ? _('Collapse') : _('Expand')
%p
= _('Limit the size of Sidekiq jobs stored in Redis.')
%span
= link_to _('Learn more.'), help_page_path('user/admin_area/settings/sidekiq_job_limits.md'), target: '_blank', rel: 'noopener noreferrer'
.settings-content
= render 'sidekiq_job_limits'

View File

@ -27,6 +27,6 @@ class PurgeDependencyProxyCacheWorker
def valid?
return unless @group
can?(@current_user, :admin_group, @group) && @group.dependency_proxy_feature_available?
can?(@current_user, :admin_group, @group)
end
end

View File

@ -3,72 +3,29 @@
class StuckCiJobsWorker # rubocop:disable Scalability/IdempotentWorker
include ApplicationWorker
data_consistency :always
# rubocop:disable Scalability/CronWorkerContext
# This is an instance-wide cleanup query, so there's no meaningful
# scope to consider this in the context of.
include CronjobQueue
# rubocop:enable Scalability/CronWorkerContext
data_consistency :always
feature_category :continuous_integration
worker_resource_boundary :cpu
EXCLUSIVE_LEASE_KEY = 'stuck_ci_builds_worker_lease'
BUILD_RUNNING_OUTDATED_TIMEOUT = 1.hour
BUILD_PENDING_OUTDATED_TIMEOUT = 1.day
BUILD_SCHEDULED_OUTDATED_TIMEOUT = 1.hour
BUILD_PENDING_STUCK_TIMEOUT = 1.hour
BUILD_LOOKBACK = 5.days
def perform
return unless try_obtain_lease
Gitlab::AppLogger.info "#{self.class}: Cleaning stuck builds"
drop(running_timed_out_builds, failure_reason: :stuck_or_timeout_failure)
drop(
pending_builds(BUILD_PENDING_OUTDATED_TIMEOUT.ago),
failure_reason: :stuck_or_timeout_failure
)
drop(scheduled_timed_out_builds, failure_reason: :stale_schedule)
drop_stuck(
pending_builds(BUILD_PENDING_STUCK_TIMEOUT.ago),
failure_reason: :stuck_or_timeout_failure
)
Ci::StuckBuilds::DropService.new.execute
remove_lease
end
private
# rubocop: disable CodeReuse/ActiveRecord
# We're adding the ordering clause by `created_at` and `project_id`
# because we want to force the query planner to use the
# `ci_builds_gitlab_monitor_metrics` index all the time.
def pending_builds(timeout)
if Feature.enabled?(:ci_new_query_for_pending_stuck_jobs)
Ci::Build.pending.created_at_before(timeout).updated_at_before(timeout).order(created_at: :asc, project_id: :asc)
else
Ci::Build.pending.updated_before(lookback: BUILD_LOOKBACK.ago, timeout: timeout)
end
end
# rubocop: enable CodeReuse/ActiveRecord
def scheduled_timed_out_builds
Ci::Build.where(status: :scheduled).where( # rubocop: disable CodeReuse/ActiveRecord
'ci_builds.scheduled_at IS NOT NULL AND ci_builds.scheduled_at < ?',
BUILD_SCHEDULED_OUTDATED_TIMEOUT.ago
)
end
def running_timed_out_builds
Ci::Build.running.where( # rubocop: disable CodeReuse/ActiveRecord
'ci_builds.updated_at < ?',
BUILD_RUNNING_OUTDATED_TIMEOUT.ago
)
end
def try_obtain_lease
@uuid = Gitlab::ExclusiveLease.new(EXCLUSIVE_LEASE_KEY, timeout: 30.minutes).try_obtain
end
@ -76,55 +33,4 @@ class StuckCiJobsWorker # rubocop:disable Scalability/IdempotentWorker
def remove_lease
Gitlab::ExclusiveLease.cancel(EXCLUSIVE_LEASE_KEY, @uuid)
end
def drop(builds, failure_reason:)
fetch(builds) do |build|
drop_build :outdated, build, failure_reason
end
end
def drop_stuck(builds, failure_reason:)
fetch(builds) do |build|
break unless build.stuck?
drop_build :stuck, build, failure_reason
end
end
# rubocop: disable CodeReuse/ActiveRecord
def fetch(builds)
loop do
jobs = builds.includes(:tags, :runner, project: [:namespace, :route])
.limit(100)
.to_a
break if jobs.empty?
jobs.each do |job|
with_context(project: job.project) { yield(job) }
end
end
end
# rubocop: enable CodeReuse/ActiveRecord
def drop_build(type, build, reason)
Gitlab::AppLogger.info "#{self.class}: Dropping #{type} build #{build.id} for runner #{build.runner_id} (status: #{build.status}, failure_reason: #{reason})"
Gitlab::OptimisticLocking.retry_lock(build, 3, name: 'stuck_ci_jobs_worker_drop_build') do |b|
b.drop(reason)
end
rescue StandardError => ex
build.doom!
track_exception_for_build(ex, build)
end
def track_exception_for_build(ex, build)
Gitlab::ErrorTracking.track_exception(ex,
build_id: build.id,
build_name: build.name,
build_stage: build.stage,
pipeline_id: build.pipeline_id,
project_id: build.project_id
)
end
end

View File

@ -0,0 +1,21 @@
# frozen_string_literal: true

# Adds the three Sidekiq job size limiter columns to application_settings.
class AddSidekiqLimitsToApplicationSettings < Gitlab::Database::Migration[1.0]
  disable_ddl_transaction! # needed for now to avoid subtransactions

  # Column name => [column type, default value]; defaults match the
  # application-level behavior (compress mode, 100 KB threshold, no limit).
  SIDEKIQ_LIMIT_COLUMNS = {
    sidekiq_job_limiter_mode: [:smallint, 1],
    sidekiq_job_limiter_compression_threshold_bytes: [:integer, 100_000],
    sidekiq_job_limiter_limit_bytes: [:integer, 0]
  }.freeze

  def up
    with_lock_retries do
      SIDEKIQ_LIMIT_COLUMNS.each do |column_name, (column_type, default_value)|
        add_column :application_settings, column_name, column_type, default: default_value, null: false
      end
    end
  end

  def down
    with_lock_retries do
      SIDEKIQ_LIMIT_COLUMNS.each_key do |column_name|
        remove_column :application_settings, column_name
      end
    end
  end
end

View File

@ -0,0 +1,217 @@
# frozen_string_literal: true

# Finalizes the integer -> bigint conversion of ci_builds.id: rebuilds all
# indexes and referencing foreign keys against the already-backfilled
# id_convert_to_bigint column, then swaps the two columns (names, defaults,
# sequence ownership and primary key) inside a lock-retried block.
class FinalizeCiBuildsBigintConversion < Gitlab::Database::Migration[1.0]
  disable_ddl_transaction!

  TABLE_NAME = 'ci_builds'
  # Unique index over the converted column; renamed to ci_builds_pkey at swap time.
  PK_INDEX_NAME = 'index_ci_builds_on_converted_id'

  # Indexes that involve ci_builds.id; each is duplicated onto the converted
  # column under a temporary name, then renamed back after the column swap.
  SECONDARY_INDEXES = [
    {
      original_name: :index_ci_builds_on_commit_id_artifacts_expired_at_and_id,
      temporary_name: :index_ci_builds_on_commit_id_expire_at_and_converted_id,
      columns: [:commit_id, :artifacts_expire_at, :id_convert_to_bigint],
      options: {
        where: "type::text = 'Ci::Build'::text
          AND (retried = false OR retried IS NULL)
          AND (name::text = ANY (ARRAY['sast'::character varying::text,
            'secret_detection'::character varying::text,
            'dependency_scanning'::character varying::text,
            'container_scanning'::character varying::text,
            'dast'::character varying::text]))"
      }
    },
    {
      original_name: :index_ci_builds_on_project_id_and_id,
      temporary_name: :index_ci_builds_on_project_and_converted_id,
      columns: [:project_id, :id_convert_to_bigint],
      options: {}
    },
    {
      original_name: :index_ci_builds_on_runner_id_and_id_desc,
      temporary_name: :index_ci_builds_on_runner_id_and_converted_id_desc,
      columns: [:runner_id, :id_convert_to_bigint],
      options: { order: { id_convert_to_bigint: :desc } }
    },
    {
      original_name: :index_for_resource_group,
      temporary_name: :index_ci_builds_on_resource_group_and_converted_id,
      columns: [:resource_group_id, :id_convert_to_bigint],
      options: { where: 'resource_group_id IS NOT NULL' }
    },
    {
      original_name: :index_security_ci_builds_on_name_and_id_parser_features,
      temporary_name: :index_security_ci_builds_on_name_and_converted_id_parser,
      columns: [:name, :id_convert_to_bigint],
      options: {
        where: "(name::text = ANY (ARRAY['container_scanning'::character varying::text,
          'dast'::character varying::text,
          'dependency_scanning'::character varying::text,
          'license_management'::character varying::text,
          'sast'::character varying::text,
          'secret_detection'::character varying::text,
          'coverage_fuzzing'::character varying::text,
          'license_scanning'::character varying::text])
          ) AND type::text = 'Ci::Build'::text"
      }
    }
  ].freeze

  # This covering index uses an INCLUDE clause, which add_concurrent_index
  # cannot express, so it is created with raw SQL and tracked by name here.
  MANUAL_INDEX_NAMES = {
    original_name: :index_ci_builds_runner_id_pending_covering,
    temporary_name: :index_ci_builds_runner_id_and_converted_id_pending_covering
  }.freeze

  # [referencing table, column, ON DELETE action, FK name prefix] for every
  # foreign key that points at ci_builds.id and must be re-pointed at the
  # bigint column.
  REFERENCING_FOREIGN_KEYS = [
    [:ci_build_needs, :build_id, :cascade, 'fk_rails_'],
    [:ci_build_pending_states, :build_id, :cascade, 'fk_rails_'],
    [:ci_build_report_results, :build_id, :cascade, 'fk_rails_'],
    [:ci_build_trace_chunks, :build_id, :cascade, 'fk_rails_'],
    [:ci_build_trace_metadata, :build_id, :cascade, 'fk_rails_'],
    [:ci_builds_runner_session, :build_id, :cascade, 'fk_rails_'],
    [:ci_builds_metadata, :build_id, :cascade, 'fk_'],
    [:ci_job_artifacts, :job_id, :cascade, 'fk_rails_'],
    [:ci_job_variables, :job_id, :cascade, 'fk_rails_'],
    [:ci_pending_builds, :build_id, :cascade, 'fk_rails_'],
    [:ci_resources, :build_id, :nullify, 'fk_'],
    [:ci_running_builds, :build_id, :cascade, 'fk_rails_'],
    [:ci_sources_pipelines, :source_job_id, :cascade, 'fk_'],
    [:ci_unit_test_failures, :build_id, :cascade, 'fk_'],
    [:dast_scanner_profiles_builds, :ci_build_id, :cascade, 'fk_'],
    [:dast_site_profiles_builds, :ci_build_id, :cascade, 'fk_'],
    [:pages_deployments, :ci_build_id, :nullify, 'fk_rails_'],
    [:requirements_management_test_reports, :build_id, :nullify, 'fk_rails_'],
    [:security_scans, :build_id, :cascade, 'fk_rails_'],
    [:terraform_state_versions, :ci_build_id, :nullify, 'fk_']
  ].freeze

  def up
    # The swap is only safe once the background backfill of the bigint
    # columns has fully completed.
    ensure_batched_background_migration_is_finished(
      job_class_name: 'CopyColumnUsingBackgroundMigrationJob',
      table_name: TABLE_NAME,
      column_name: 'id',
      job_arguments: [%w[id stage_id], %w[id_convert_to_bigint stage_id_convert_to_bigint]]
    )

    # Remove this upfront since this table is being dropped, and doesn't need to be migrated
    if foreign_key_exists?(:dep_ci_build_trace_sections, TABLE_NAME, column: :build_id)
      remove_foreign_key(:dep_ci_build_trace_sections, TABLE_NAME, column: :build_id)
    end

    swap_columns
  end

  # The swap is symmetric, so rolling back simply performs it again.
  def down
    swap_columns
  end

  private

  # Orchestrates the column swap: prepares indexes and FKs concurrently,
  # then renames columns, resets the copy-trigger function's plan cache,
  # moves the sequence and primary key, and renames indexes — all inside
  # one lock-retried block.
  def swap_columns
    # Copy existing indexes from the original column to the new column
    create_indexes

    # Copy existing FKs from the original column to the new column
    create_referencing_foreign_keys

    # Remove existing FKs from the referencing tables, so we don't have to lock on them when we drop the existing PK
    replace_referencing_foreign_keys

    with_lock_retries(raise_on_exhaustion: true) do
      quoted_table_name = quote_table_name(TABLE_NAME)

      # Swap the original and new column names
      temporary_name = 'id_tmp'
      execute "ALTER TABLE #{quoted_table_name} RENAME COLUMN #{quote_column_name(:id)} TO #{quote_column_name(temporary_name)}"
      execute "ALTER TABLE #{quoted_table_name} RENAME COLUMN #{quote_column_name(:id_convert_to_bigint)} TO #{quote_column_name(:id)}"
      execute "ALTER TABLE #{quoted_table_name} RENAME COLUMN #{quote_column_name(temporary_name)} TO #{quote_column_name(:id_convert_to_bigint)}"

      # Reset the function so PG drops the plan cache for the incorrect integer type
      function_name = Gitlab::Database::UnidirectionalCopyTrigger.on_table(TABLE_NAME)
        .name([:id, :stage_id], [:id_convert_to_bigint, :stage_id_convert_to_bigint])
      execute "ALTER FUNCTION #{quote_table_name(function_name)} RESET ALL"

      # Swap defaults of the two columns, and change ownership of the sequence to the new id
      execute "ALTER SEQUENCE ci_builds_id_seq OWNED BY #{TABLE_NAME}.id"
      change_column_default TABLE_NAME, :id, -> { "nextval('ci_builds_id_seq'::regclass)" }
      change_column_default TABLE_NAME, :id_convert_to_bigint, 0

      # Swap the PK constraint from the original column to the new column
      # We deliberately don't CASCADE here because the old FKs should be removed already
      execute "ALTER TABLE #{quoted_table_name} DROP CONSTRAINT ci_builds_pkey"
      rename_index TABLE_NAME, PK_INDEX_NAME, 'ci_builds_pkey'
      execute "ALTER TABLE #{quoted_table_name} ADD CONSTRAINT ci_builds_pkey PRIMARY KEY USING INDEX ci_builds_pkey"

      # Remove old column indexes and change new column indexes to have the original names
      rename_secondary_indexes # rubocop:disable Migration/WithLockRetriesDisallowedMethod
    end
  end

  # Concurrently builds the future PK index, all secondary indexes, and the
  # raw-SQL covering index against the bigint column (idempotent re-runs:
  # the manual index is only created if its temporary name doesn't exist).
  def create_indexes
    add_concurrent_index TABLE_NAME, :id_convert_to_bigint, unique: true, name: PK_INDEX_NAME

    SECONDARY_INDEXES.each do |index_definition|
      options = index_definition[:options]
      options[:name] = index_definition[:temporary_name]

      add_concurrent_index(TABLE_NAME, index_definition[:columns], options)
    end

    unless index_name_exists?(TABLE_NAME, MANUAL_INDEX_NAMES[:temporary_name])
      execute(<<~SQL)
        CREATE INDEX CONCURRENTLY #{MANUAL_INDEX_NAMES[:temporary_name]}
        ON ci_builds (runner_id, id_convert_to_bigint) INCLUDE (project_id)
        WHERE status::text = 'pending'::text AND type::text = 'Ci::Build'::text
      SQL
    end
  end

  # Drops each index on the old column and renames its temporary
  # replacement back to the original name.
  def rename_secondary_indexes
    (SECONDARY_INDEXES + [MANUAL_INDEX_NAMES]).each do |index_definition|
      remove_index(TABLE_NAME, name: index_definition[:original_name]) # rubocop:disable Migration/RemoveIndex
      rename_index(TABLE_NAME, index_definition[:temporary_name], index_definition[:original_name])
    end
  end

  # Adds, under temporary `_tmp` names, foreign keys from each referencing
  # table to the bigint column (skipping any that already exist from a
  # previous partial run of this migration).
  def create_referencing_foreign_keys
    REFERENCING_FOREIGN_KEYS.each do |(from_table, column, on_delete, prefix)|
      # Don't attempt to create the FK if one already exists from the table to the new column
      # The check in `add_concurrent_foreign_key` already checks for this, but it looks for the foreign key
      # with the new name only (containing the `_tmp` suffix).
      #
      # Since we might partially rename FKs and re-run the migration, we also have to check and see if a FK exists
      # on those columns that might not match the `_tmp` name.
      next if foreign_key_exists?(from_table, TABLE_NAME, column: column, primary_key: :id_convert_to_bigint)

      temporary_name = "#{concurrent_foreign_key_name(from_table, column, prefix: prefix)}_tmp"

      add_concurrent_foreign_key(
        from_table,
        TABLE_NAME,
        column: column,
        target_column: :id_convert_to_bigint,
        name: temporary_name,
        on_delete: on_delete,
        reverse_lock_order: true)
    end
  end

  # Removes the FKs that point at the old integer column and renames the
  # temporary `_tmp` FKs so they take over the original names.
  def replace_referencing_foreign_keys
    REFERENCING_FOREIGN_KEYS.each do |(from_table, column, _, prefix)|
      existing_name = concurrent_foreign_key_name(from_table, column, prefix: prefix)

      # Don't attempt to replace the FK unless it exists and points at the original column.
      # This could happen if the migration is re-run due to failing midway.
      next unless foreign_key_exists?(from_table, TABLE_NAME, column: column, primary_key: :id, name: existing_name)

      with_lock_retries do
        # Explicitly lock table in order of parent, child to attempt to avoid deadlocks
        execute "LOCK TABLE #{TABLE_NAME}, #{from_table} IN ACCESS EXCLUSIVE MODE"

        temporary_name = "#{existing_name}_tmp"

        remove_foreign_key(from_table, TABLE_NAME, column: column, primary_key: :id, name: existing_name)
        rename_constraint(from_table, temporary_name, existing_name)
      end
    end
  end
end

View File

@ -0,0 +1 @@
387dcbda7c3b32050298d8a679361a17916a66d0ab686211f0d1a0dc708c4a74

View File

@ -0,0 +1 @@
a8dc6d1fecf7b26182dd89f4dae088fb315774ff4720c282f608bd0c45c75a41

View File

@ -10340,6 +10340,9 @@ CREATE TABLE application_settings (
throttle_unauthenticated_api_enabled boolean DEFAULT false NOT NULL,
throttle_unauthenticated_api_requests_per_period integer DEFAULT 3600 NOT NULL,
throttle_unauthenticated_api_period_in_seconds integer DEFAULT 3600 NOT NULL,
sidekiq_job_limiter_mode smallint DEFAULT 1 NOT NULL,
sidekiq_job_limiter_compression_threshold_bytes integer DEFAULT 100000 NOT NULL,
sidekiq_job_limiter_limit_bytes integer DEFAULT 0 NOT NULL,
CONSTRAINT app_settings_container_reg_cleanup_tags_max_list_size_positive CHECK ((container_registry_cleanup_tags_service_max_list_size >= 0)),
CONSTRAINT app_settings_ext_pipeline_validation_service_url_text_limit CHECK ((char_length(external_pipeline_validation_service_url) <= 255)),
CONSTRAINT app_settings_registry_exp_policies_worker_capacity_positive CHECK ((container_registry_expiration_policies_worker_capacity >= 0)),
@ -11303,7 +11306,7 @@ CREATE TABLE ci_build_trace_metadata (
);
CREATE TABLE ci_builds (
id integer NOT NULL,
id_convert_to_bigint integer DEFAULT 0 NOT NULL,
status character varying,
finished_at timestamp without time zone,
trace text,
@ -11348,7 +11351,7 @@ CREATE TABLE ci_builds (
waiting_for_resource_at timestamp with time zone,
processed boolean,
scheduling_type smallint,
id_convert_to_bigint bigint DEFAULT 0 NOT NULL,
id bigint NOT NULL,
stage_id bigint,
CONSTRAINT check_1e2fbd1b39 CHECK ((lock_version IS NOT NULL))
);
@ -27531,9 +27534,6 @@ ALTER TABLE ONLY releases
ALTER TABLE ONLY geo_event_log
ADD CONSTRAINT fk_4a99ebfd60 FOREIGN KEY (repositories_changed_event_id) REFERENCES geo_repositories_changed_events(id) ON DELETE CASCADE;
ALTER TABLE ONLY dep_ci_build_trace_sections
ADD CONSTRAINT fk_4ebe41f502 FOREIGN KEY (build_id) REFERENCES ci_builds(id) ON DELETE CASCADE;
ALTER TABLE ONLY alert_management_alerts
ADD CONSTRAINT fk_51ab4b6089 FOREIGN KEY (prometheus_alert_id) REFERENCES prometheus_alerts(id) ON DELETE CASCADE;

View File

@ -14,9 +14,6 @@ info: To determine the technical writer assigned to the Stage/Group associated w
Deletes the cached manifests and blobs for a group. This endpoint requires the [Owner role](../user/permissions.md)
for the group.
WARNING:
[A bug exists](https://gitlab.com/gitlab-org/gitlab/-/issues/277161) for this API.
```plaintext
DELETE /groups/:id/dependency_proxy/cache
```

View File

@ -386,6 +386,9 @@ listed in the descriptions of the relevant settings.
| `shared_runners_enabled` | boolean | no | (**If enabled, requires:** `shared_runners_text` and `shared_runners_minutes`) Enable shared runners for new projects. |
| `shared_runners_minutes` | integer | required by: `shared_runners_enabled` | **(PREMIUM)** Set the maximum number of pipeline minutes that a group can use on shared runners per month. |
| `shared_runners_text` | string | required by: `shared_runners_enabled` | Shared runners text. |
| `sidekiq_job_limiter_mode` | string | no | `track` or `compress`. Sets the behavior for [Sidekiq job size limits](../user/admin_area/settings/sidekiq_job_limits.md). Default: 'compress'. |
| `sidekiq_job_limiter_compression_threshold_bytes` | integer | no | The threshold in bytes at which Sidekiq jobs are compressed before being stored in Redis. Default: 100 000 bytes (100KB). |
| `sidekiq_job_limiter_limit_bytes` | integer | no | The threshold in bytes at which Sidekiq jobs are rejected. Default: 0 bytes (doesn't reject any job). |
| `sign_in_text` | string | no | Text on the login page. |
| `signin_enabled` | string | no | (Deprecated: Use `password_authentication_enabled_for_web` instead) Flag indicating if password authentication is enabled for the web interface. |
| `signup_enabled` | boolean | no | Enable registration. Default is `true`. |

View File

@ -603,8 +603,8 @@ For monitoring deployed apps, see [Jaeger tracing documentation](../operations/t
- Layer: Core Service
- Process: `logrotate`
GitLab is comprised of a large number of services that all log. We started bundling our own Logrotate
as of GitLab 7.4 to make sure we were logging responsibly. This is just a packaged version of the common open source offering.
GitLab is comprised of a large number of services that all log. We bundle our own Logrotate
to make sure we are logging responsibly. This is just a packaged version of the common open source offering.
#### Mattermost

View File

@ -141,8 +141,7 @@ Feature.disable(:reject_unsigned_commits_by_gitlab)
## Prevent pushing secrets to the repository
> - [Introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/385) in GitLab 8.12.
> - Moved to GitLab Premium in 13.9.
> Moved to GitLab Premium in 13.9.
Secrets such as credential files, SSH private keys, and other files containing secrets should never be committed to source control.
GitLab enables you to turn on a predefined denylist of files which can't be
@ -217,8 +216,7 @@ id_ecdsa
## Prohibited file names
> - Introduced in GitLab 7.10.
> - Moved to GitLab Premium in 13.9.
> Moved to GitLab Premium in 13.9.
Each filename contained in a Git push is compared to the regular expression in this field. Filenames in Git consist of both the file's name and any directory that may precede it. A singular regular expression can contain multiple independent matches used as exclusions. File names can be broadly matched to any location in the repository, or restricted to specific locations. Filenames can also be partial matches used to exclude file types by extension.

View File

@ -120,6 +120,7 @@ To access the default page for Admin Area settings:
| [Polling interval multiplier](../../../administration/polling.md) | Configure how frequently the GitLab UI polls for updates. |
| [Gitaly timeouts](gitaly_timeouts.md) | Configure Gitaly timeouts. |
| Localization | [Default first day of the week](../../profile/preferences.md) and [Time tracking](../../project/time_tracking.md#limit-displayed-units-to-hours). |
| [Sidekiq Job Limits](sidekiq_job_limits.md) | Limit the size of Sidekiq jobs stored in Redis. |
### Default first day of the week

View File

@ -9,15 +9,15 @@ info: To determine the technical writer assigned to the Stage/Group associated w
> [Introduced](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/53637) in GitLab 13.9.
This setting allows you to rate limit the requests to the note creation endpoint.
You can configure the per-user rate limit for requests to the note creation endpoint.
To change the note creation rate limit:
1. On the top bar, select **Menu > Admin**.
1. On the left sidebar, select **Settings > Network**.
1. Expand **Notes Rate Limits**.
1. Under **Max requests per minute per user**, enter the new value.
1. Optional. Under **List of users to be excluded from the limit**, list users to be excluded from the limit.
1. Expand **Notes rate limit**.
1. In the **Maximum requests per minute** box, enter the new value.
1. Optional. In the **Users to exclude from the rate limit** box, list users allowed to exceed the limit.
1. Select **Save changes**.
This limit is:

View File

@ -0,0 +1,36 @@
---
stage: none
group: unassigned
info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://about.gitlab.com/handbook/engineering/ux/technical-writing/#assignments
type: reference
---
# Sidekiq job size limits **(FREE SELF)**
> [Introduced](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/68982) in GitLab 14.3.
[Sidekiq](../../../administration/sidekiq.md) jobs get stored in
Redis. To avoid excessive memory usage in Redis, we:

- Compress job arguments before storing them in Redis.
- Reject jobs that exceed the specified threshold limit after compression.
To access Sidekiq job size limits:
1. On the top bar, select **Menu >** **{admin}** **Admin**.
1. On the left sidebar, select **Settings > Preferences**.
1. Expand **Sidekiq job size limits**.
1. Adjust the compression threshold or size limit. To disable compression,
   select **Track** mode.
## Available settings
| Setting | Default | Description |
|-------------------------------------------|------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| Limiting mode | Compress | This mode compresses the jobs at the specified threshold and rejects them if they exceed the specified limit after compression. |
| Sidekiq job compression threshold (bytes) | 100 000 (100 KB) | When the size of arguments exceeds this threshold, they are compressed before being stored in Redis. |
| Sidekiq job size limit (bytes) | 0 | The jobs exceeding this size after compression are rejected. This avoids excessive memory usage in Redis leading to instability. Setting it to 0 prevents rejecting jobs. |
After changing these values, [restart
Sidekiq](../../../administration/restart_gitlab.md).

View File

@ -122,15 +122,11 @@ email addresses to disallowed domains after sign up.
### Allowlist email domains
> [Introduced](https://gitlab.com/gitlab-org/gitlab-foss/-/merge_requests/598) in GitLab 7.11.0
You can restrict users only to sign up using email addresses matching the given
domains list.
### Denylist email domains
> [Introduced](https://gitlab.com/gitlab-org/gitlab-foss/-/merge_requests/5259) in GitLab 8.10.
You can block users from signing up when using an email address from specific domains. This can
reduce the risk of malicious users creating spam accounts with disposable email addresses.

View File

@ -25,8 +25,8 @@ Read more in the Helm documentation about these topics:
To authenticate to the Helm repository, you need either:
- A [personal access token](../../../api/index.md#personalproject-access-tokens).
- A [deploy token](../../project/deploy_tokens/index.md).
- A [personal access token](../../../api/index.md#personalproject-access-tokens) with the scope set to `api`.
- A [deploy token](../../project/deploy_tokens/index.md) with the scope set to `read_package_registry`, `write_package_registry`, or both.
- A [CI/CD job token](../../../ci/jobs/ci_job_token.md).
## Publish a package
@ -35,24 +35,35 @@ NOTE:
You can publish Helm charts with duplicate names or versions. If duplicates exist, GitLab always
returns the chart with the latest version.
Once built, a chart can be uploaded to the `stable` channel with `curl` or `helm-push`:
Once built, a chart can be uploaded to the desired channel with `curl` or `helm-push`:
- With `curl`:
```shell
curl --request POST \
--form 'chart=@mychart-0.1.0.tgz' \
--user <username>:<personal_access_token> \
https://gitlab.example.com/api/v4/projects/1/packages/helm/api/stable/charts
--user <username>:<access_token> \
https://gitlab.example.com/api/v4/projects/<project_id>/packages/helm/api/<channel>/charts
```
- `<username>`: the GitLab username or the deploy token username.
- `<access_token>`: the personal access token or the deploy token.
- `<project_id>`: the project ID (like `42`) or the
[URL-encoded](../../../api/index.md#namespaced-path-encoding) path of the project (like `group%2Fproject`).
- `<channel>`: the name of the channel (like `stable`).
- With the [`helm-push`](https://github.com/chartmuseum/helm-push/#readme) plugin:
```shell
helm repo add --username <username> --password <personal_access_token> project-1 https://gitlab.example.com/api/v4/projects/1/packages/helm/stable
helm repo add --username <username> --password <access_token> project-1 https://gitlab.example.com/api/v4/projects/<project_id>/packages/helm/<channel>
helm push mychart-0.1.0.tgz project-1
```
- `<username>`: the GitLab username or the deploy token username.
- `<access_token>`: the personal access token or the deploy token.
- `<project_id>`: the project ID (like `42`).
- `<channel>`: the name of the channel (like `stable`).
## Use CI/CD to publish a Helm package
To publish a Helm package automated through [GitLab CI/CD](../../../ci/index.md), you can use
@ -69,18 +80,27 @@ stages:
upload:
stage: upload
script:
- 'curl --request POST --user gitlab-ci-token:$CI_JOB_TOKEN --form "chart=@mychart-0.1.0.tgz" "${CI_API_V4_URL}/projects/${CI_PROJECT_ID}/packages/helm/api/stable/charts"'
- 'curl --request POST --user gitlab-ci-token:$CI_JOB_TOKEN --form "chart=@mychart-0.1.0.tgz" "${CI_API_V4_URL}/projects/${CI_PROJECT_ID}/packages/helm/api/<channel>/charts"'
```
- `<username>`: the GitLab username or the deploy token username.
- `<access_token>`: the personal access token or the deploy token.
- `<channel>`: the name of the channel (like `stable`).
## Install a package
To install the latest version of a chart, use the following command:
```shell
helm repo add --username <username> --password <personal_access_token> project-1 https://gitlab.example.com/api/v4/projects/1/packages/helm/stable
helm repo add --username <username> --password <access_token> project-1 https://gitlab.example.com/api/v4/projects/<project_id>/packages/helm/<channel>
helm install my-release project-1/mychart
```
- `<username>`: the GitLab username or the deploy token username.
- `<access_token>`: the personal access token or the deploy token.
- `<project_id>`: the project ID (like `42`).
- `<channel>`: the name of the channel (like `stable`).
If the repo has previously been added, you may need to run:
```shell

View File

@ -15,7 +15,7 @@ module API
end
end
before do
after_validation do
authorize! :admin_group, user_group
end
@ -35,6 +35,8 @@ module API
# rubocop:disable CodeReuse/Worker
PurgeDependencyProxyCacheWorker.perform_async(current_user.id, user_group.id)
# rubocop:enable CodeReuse/Worker
status :accepted
end
end
end

View File

@ -191,11 +191,7 @@ module Gitlab
def unsafe_archive!
raise ArchiveError, 'Job is not finished yet' unless job.complete?
if trace_artifact
unsafe_trace_cleanup!
raise AlreadyArchivedError, 'Could not archive again'
end
unsafe_trace_conditionally_cleanup_before_retry!
if job.trace_chunks.any?
Gitlab::Ci::Trace::ChunkedIO.new(job) do |stream|
@ -215,12 +211,19 @@ module Gitlab
end
end
def unsafe_trace_cleanup!
def already_archived?
# TODO check checksum to ensure archive completed successfully
# See https://gitlab.com/gitlab-org/gitlab/-/issues/259619
trace_artifact.archived_trace_exists?
end
def unsafe_trace_conditionally_cleanup_before_retry!
return unless trace_artifact
if trace_artifact.archived_trace_exists?
if already_archived?
# An archive already exists, so make sure to remove the trace chunks
erase_trace_chunks!
raise AlreadyArchivedError, 'Could not archive again'
else
# An archive already exists, but its associated file does not, so remove it
trace_artifact.destroy!

View File

@ -4,12 +4,12 @@ module Gitlab
module SidekiqMiddleware
module SizeLimiter
# Handle a Sidekiq job payload limit based on current configuration.
# This validator pulls the configuration from the environment variables:
# - GITLAB_SIDEKIQ_SIZE_LIMITER_MODE: the current mode of the size
# limiter. This must be either `track` or `compress`.
# - GITLAB_SIDEKIQ_SIZE_LIMITER_COMPRESSION_THRESHOLD_BYTES: the
# threshold before the input job payload is compressed.
# - GITLAB_SIDEKIQ_SIZE_LIMITER_LIMIT_BYTES: the size limit in bytes.
# This validator pulls the configuration from application settings:
# - limiter_mode: the current mode of the size
# limiter. This must be either `track` or `compress`.
# - compression_threshold_bytes: the threshold before the input job
# payload is compressed.
# - limit_bytes: the size limit in bytes.
#
# In track mode, if a job payload limit exceeds the size limit, an
# event is sent to Sentry and the job is scheduled like normal.
@ -18,12 +18,29 @@ module Gitlab
# then compressed. If the compressed payload still exceeds the limit, the
# job is discarded, and a ExceedLimitError exception is raised.
class Validator
def self.validate!(worker_class, job)
new(worker_class, job).validate!
# Avoid limiting the size of jobs for `BackgroundMigrationWorker` classes.
# We can't read the configuration from `ApplicationSetting` for those jobs
# when migrating a path that modifies the `application_settings` table.
# Reading the application settings through `ApplicationSetting#current`
# causes a `SELECT` with a list of column names, but that list of column
# names might not match what the table currently looks like causing
# an error when scheduling background migrations.
#
# The worker classes aren't constants here, because that would force
# Application Settings to be loaded earlier causing failures loading
# the environment in rake tasks
EXEMPT_WORKER_NAMES = ["BackgroundMigrationWorker", "Database::BatchedBackgroundMigrationWorker"].to_set
class << self
def validate!(worker_class, job)
return if EXEMPT_WORKER_NAMES.include?(worker_class.to_s)
new(worker_class, job).validate!
end
end
DEFAULT_SIZE_LIMIT = 0
DEFAULT_COMPRESION_THRESHOLD_BYTES = 100_000 # 100kb
DEFAULT_COMPRESSION_THRESHOLD_BYTES = 100_000 # 100kb
MODES = [
TRACK_MODE = 'track',
@ -34,9 +51,9 @@ module Gitlab
def initialize(
worker_class, job,
mode: ENV['GITLAB_SIDEKIQ_SIZE_LIMITER_MODE'],
compression_threshold: ENV['GITLAB_SIDEKIQ_SIZE_LIMITER_COMPRESSION_THRESHOLD_BYTES'],
size_limit: ENV['GITLAB_SIDEKIQ_SIZE_LIMITER_LIMIT_BYTES']
mode: Gitlab::CurrentSettings.sidekiq_job_limiter_mode,
compression_threshold: Gitlab::CurrentSettings.sidekiq_job_limiter_compression_threshold_bytes,
size_limit: Gitlab::CurrentSettings.sidekiq_job_limiter_limit_bytes
)
@worker_class = worker_class
@job = job
@ -72,10 +89,10 @@ module Gitlab
end
def set_compression_threshold(compression_threshold)
@compression_threshold = (compression_threshold || DEFAULT_COMPRESION_THRESHOLD_BYTES).to_i
@compression_threshold = (compression_threshold || DEFAULT_COMPRESSION_THRESHOLD_BYTES).to_i
if @compression_threshold <= 0
::Sidekiq.logger.warn "Invalid Sidekiq size limiter compression threshold: #{@compression_threshold}"
@compression_threshold = DEFAULT_COMPRESION_THRESHOLD_BYTES
@compression_threshold = DEFAULT_COMPRESSION_THRESHOLD_BYTES
end
end
@ -83,7 +100,7 @@ module Gitlab
@size_limit = (size_limit || DEFAULT_SIZE_LIMIT).to_i
if @size_limit < 0
::Sidekiq.logger.warn "Invalid Sidekiq size limiter limit: #{@size_limit}"
@size_limit = 0
@size_limit = DEFAULT_SIZE_LIMIT
end
end

View File

@ -8158,6 +8158,9 @@ msgstr ""
msgid "Comma-separated list of email addresses."
msgstr ""
msgid "Comma-separated list of users allowed to exceed the rate limit."
msgstr ""
msgid "Comma-separated, e.g. '1.1.1.1, 2.2.2.0/24'"
msgstr ""
@ -8520,9 +8523,6 @@ msgstr ""
msgid "Configure existing installation"
msgstr ""
msgid "Configure limit for notes created per minute by web and API requests."
msgstr ""
msgid "Configure limits for web and API requests."
msgstr ""
@ -9501,6 +9501,9 @@ msgstr ""
msgid "Create issue"
msgstr ""
msgid "Create issue to resolve all threads"
msgstr ""
msgid "Create iteration"
msgstr ""
@ -10429,6 +10432,9 @@ msgstr ""
msgid "DastProfiles|Scanner name"
msgstr ""
msgid "DastProfiles|Schedule"
msgstr ""
msgid "DastProfiles|Select branch"
msgstr ""
@ -13522,21 +13528,33 @@ msgstr ""
msgid "Every 3 months"
msgstr ""
msgid "Every 3 months on the %{day} at %{time} %{timezone}"
msgstr ""
msgid "Every 6 months"
msgstr ""
msgid "Every 6 months on the %{day} at %{time} %{timezone}"
msgstr ""
msgid "Every day"
msgstr ""
msgid "Every day (at %{time})"
msgstr ""
msgid "Every day at %{time} %{timezone}"
msgstr ""
msgid "Every month"
msgstr ""
msgid "Every month (Day %{day} at %{time})"
msgstr ""
msgid "Every month on the %{day} at %{time} %{timezone}"
msgstr ""
msgid "Every three months"
msgstr ""
@ -13551,9 +13569,15 @@ msgstr[1] ""
msgid "Every week (%{weekday} at %{time})"
msgstr ""
msgid "Every week on %{day} at %{time} %{timezone}"
msgstr ""
msgid "Every year"
msgstr ""
msgid "Every year on %{day} at %{time} %{timezone}"
msgstr ""
msgid "Everyone"
msgstr ""
@ -16754,6 +16778,9 @@ msgstr ""
msgid "How many seconds an IP will be counted towards the limit"
msgstr ""
msgid "How the job limiter handles jobs exceeding the thresholds specified below. The 'track' mode only logs the jobs. The 'compress' mode compresses the jobs and raises an exception if the compressed size exceeds the limit."
msgstr ""
msgid "I accept the %{terms_link}"
msgstr ""
@ -20181,11 +20208,17 @@ msgstr ""
msgid "Limit the number of issues and epics per minute a user can create through web and API requests."
msgstr ""
msgid "Limit the size of Sidekiq jobs stored in Redis."
msgstr ""
msgid "Limited to showing %d event at most"
msgid_plural "Limited to showing %d events at most"
msgstr[0] ""
msgstr[1] ""
msgid "Limiting mode"
msgstr ""
msgid "Line changes"
msgstr ""
@ -20255,9 +20288,6 @@ msgstr ""
msgid "List of all merge commits"
msgstr ""
msgid "List of users to be excluded from the limit"
msgstr ""
msgid "List options"
msgstr ""
@ -20675,9 +20705,6 @@ msgstr ""
msgid "Max file size is 200 KB."
msgstr ""
msgid "Max requests per minute per user"
msgstr ""
msgid "Max role"
msgstr ""
@ -20837,6 +20864,9 @@ msgstr ""
msgid "Maximum push size (MB)"
msgstr ""
msgid "Maximum requests per minute"
msgstr ""
msgid "Maximum running slices"
msgstr ""
@ -21200,10 +21230,10 @@ msgstr ""
msgid "MergeRequests|An error occurred while saving the draft comment."
msgstr ""
msgid "MergeRequests|Failed to squash. Should be done manually."
msgid "MergeRequests|Create issue to resolve thread"
msgstr ""
msgid "MergeRequests|Resolve this thread in a new issue"
msgid "MergeRequests|Failed to squash. Should be done manually."
msgstr ""
msgid "MergeRequests|Saving the comment failed"
@ -22998,7 +23028,7 @@ msgstr ""
msgid "NoteForm|Note"
msgstr ""
msgid "Notes Rate Limits"
msgid "Notes rate limit"
msgstr ""
msgid "Notes|Are you sure you want to cancel creating this comment?"
@ -28628,9 +28658,6 @@ msgstr ""
msgid "Resolve"
msgstr ""
msgid "Resolve all threads in new issue"
msgstr ""
msgid "Resolve conflicts"
msgstr ""
@ -30709,6 +30736,9 @@ msgstr ""
msgid "Set the milestone to %{milestone_reference}."
msgstr ""
msgid "Set the per-user rate limit for notes created by web or API requests."
msgstr ""
msgid "Set the timeout in seconds to send a secondary site status to the primary and IPs allowed for the secondary sites."
msgstr ""
@ -31049,6 +31079,15 @@ msgstr ""
msgid "Sidebar|Weight"
msgstr ""
msgid "Sidekiq job compression threshold (bytes)"
msgstr ""
msgid "Sidekiq job size limit (bytes)"
msgstr ""
msgid "Sidekiq job size limits"
msgstr ""
msgid "Sign in"
msgstr ""
@ -34772,6 +34811,12 @@ msgstr ""
msgid "ThreatMonitoring|View documentation"
msgstr ""
msgid "Threshold in bytes at which to compress Sidekiq job arguments."
msgstr ""
msgid "Threshold in bytes at which to reject Sidekiq jobs. Set this to 0 to if you don't want to limit Sidekiq jobs."
msgstr ""
msgid "Throughput"
msgstr ""
@ -36890,6 +36935,9 @@ msgstr ""
msgid "Users requesting access to"
msgstr ""
msgid "Users to exclude from the rate limit"
msgstr ""
msgid "Users were successfully added."
msgstr ""
@ -40102,6 +40150,9 @@ msgid_plural "mrWidget|Closes issues"
msgstr[0] ""
msgstr[1] ""
msgid "mrWidget|Create issue to resolve all threads"
msgstr ""
msgid "mrWidget|Delete source branch"
msgstr ""
@ -40212,9 +40263,6 @@ msgstr ""
msgid "mrWidget|Request to merge"
msgstr ""
msgid "mrWidget|Resolve all threads in new issue"
msgstr ""
msgid "mrWidget|Resolve conflicts"
msgstr ""

View File

@ -35,6 +35,7 @@ RSpec.describe 'Database schema' do
cluster_providers_gcp: %w[gcp_project_id operation_id],
compliance_management_frameworks: %w[group_id],
commit_user_mentions: %w[commit_id],
dep_ci_build_trace_sections: %w[build_id],
deploy_keys_projects: %w[deploy_key_id],
deployments: %w[deployable_id user_id],
draft_notes: %w[discussion_id commit_id],

View File

@ -27,7 +27,7 @@ RSpec.describe 'Resolving all open threads in a merge request from an issue', :j
it 'shows a button to resolve all threads by creating a new issue' do
within('.line-resolve-all-container') do
expect(page).to have_selector resolve_all_discussions_link_selector( title: "Resolve all threads in new issue" )
expect(page).to have_selector resolve_all_discussions_link_selector( title: "Create issue to resolve all threads" )
end
end
@ -38,7 +38,7 @@ RSpec.describe 'Resolving all open threads in a merge request from an issue', :j
it 'hides the link for creating a new issue' do
expect(page).not_to have_selector resolve_all_discussions_link_selector
expect(page).not_to have_content "Resolve all threads in new issue"
expect(page).not_to have_content "Create issue to resolve all threads"
end
end
@ -62,7 +62,7 @@ RSpec.describe 'Resolving all open threads in a merge request from an issue', :j
end
it 'does not show a link to create a new issue' do
expect(page).not_to have_link 'Resolve all threads in new issue'
expect(page).not_to have_link 'Create issue to resolve all threads'
end
end
@ -77,14 +77,14 @@ RSpec.describe 'Resolving all open threads in a merge request from an issue', :j
it 'has a link to resolve all threads by creating an issue' do
page.within '.mr-widget-body' do
expect(page).to have_link 'Resolve all threads in new issue', href: new_project_issue_path(project, merge_request_to_resolve_discussions_of: merge_request.iid)
expect(page).to have_link 'Create issue to resolve all threads', href: new_project_issue_path(project, merge_request_to_resolve_discussions_of: merge_request.iid)
end
end
context 'creating an issue for threads' do
before do
page.within '.mr-widget-body' do
page.click_link 'Resolve all threads in new issue', href: new_project_issue_path(project, merge_request_to_resolve_discussions_of: merge_request.iid)
page.click_link 'Create issue to resolve all threads', href: new_project_issue_path(project, merge_request_to_resolve_discussions_of: merge_request.iid)
wait_for_all_requests
end

View File

@ -9,7 +9,7 @@ RSpec.describe 'Resolve an open thread in a merge request by creating an issue',
let!(:discussion) { create(:diff_note_on_merge_request, noteable: merge_request, project: project).to_discussion }
def resolve_discussion_selector
title = 'Resolve this thread in a new issue'
title = 'Create issue to resolve thread'
url = new_project_issue_path(project, discussion_to_resolve: discussion.id, merge_request_to_resolve_discussions_of: merge_request.iid)
"a[title=\"#{title}\"][href=\"#{url}\"]"
end

View File

@ -45,7 +45,7 @@ describe('UnresolvedDiscussions', () => {
expect(wrapper.element.innerText).toContain(`Merge blocked: all threads must be resolved.`);
expect(wrapper.element.innerText).toContain('Jump to first unresolved thread');
expect(wrapper.element.innerText).toContain('Resolve all threads in new issue');
expect(wrapper.element.innerText).toContain('Create issue to resolve all threads');
expect(wrapper.element.querySelector('.js-create-issue').getAttribute('href')).toEqual(
TEST_HOST,
);
@ -57,7 +57,7 @@ describe('UnresolvedDiscussions', () => {
expect(wrapper.element.innerText).toContain(`Merge blocked: all threads must be resolved.`);
expect(wrapper.element.innerText).toContain('Jump to first unresolved thread');
expect(wrapper.element.innerText).not.toContain('Resolve all threads in new issue');
expect(wrapper.element.innerText).not.toContain('Create issue to resolve all threads');
expect(wrapper.element.querySelector('.js-create-issue')).toEqual(null);
});
});

View File

@ -284,4 +284,10 @@ RSpec.describe ApplicationSettingsHelper do
end
end
end
describe '#sidekiq_job_limiter_modes_for_select' do
subject { helper.sidekiq_job_limiter_modes_for_select }
it { is_expected.to eq([%w(Track track), %w(Compress compress)]) }
end
end

View File

@ -2,7 +2,7 @@
require 'spec_helper'
RSpec.describe Gitlab::SidekiqMiddleware::SizeLimiter::Validator do
RSpec.describe Gitlab::SidekiqMiddleware::SizeLimiter::Validator, :aggregate_failures do
let(:base_payload) do
{
"class" => "ARandomWorker",
@ -31,10 +31,35 @@ RSpec.describe Gitlab::SidekiqMiddleware::SizeLimiter::Validator do
end
before do
# Settings aren't in the database in specs, but stored in memory, this is fine
# for these tests.
allow(Gitlab::CurrentSettings).to receive(:current_application_settings?).and_return(true)
stub_const("TestSizeLimiterWorker", worker_class)
end
describe '#initialize' do
context 'configuration from application settings' do
let(:validator) { described_class.new(worker_class, job_payload) }
it 'has the right defaults' do
expect(validator.mode).to eq(described_class::COMPRESS_MODE)
expect(validator.compression_threshold).to eq(described_class::DEFAULT_COMPRESSION_THRESHOLD_BYTES)
expect(validator.size_limit).to eq(described_class::DEFAULT_SIZE_LIMIT)
end
it 'allows configuration through application settings' do
stub_application_setting(
sidekiq_job_limiter_mode: 'track',
sidekiq_job_limiter_compression_threshold_bytes: 1,
sidekiq_job_limiter_limit_bytes: 2
)
expect(validator.mode).to eq(described_class::TRACK_MODE)
expect(validator.compression_threshold).to eq(1)
expect(validator.size_limit).to eq(2)
end
end
context 'when the input mode is valid' do
it 'does not log a warning message' do
expect(::Sidekiq.logger).not_to receive(:warn)
@ -58,7 +83,7 @@ RSpec.describe Gitlab::SidekiqMiddleware::SizeLimiter::Validator do
it 'defaults to track mode' do
expect(::Sidekiq.logger).not_to receive(:warn)
validator = described_class.new(TestSizeLimiterWorker, job_payload)
validator = described_class.new(TestSizeLimiterWorker, job_payload, mode: nil)
expect(validator.mode).to eql('track')
end
@ -74,7 +99,7 @@ RSpec.describe Gitlab::SidekiqMiddleware::SizeLimiter::Validator do
end
context 'when the size input is invalid' do
it 'defaults to 0 and logs a warning message' do
it 'logs a warning message' do
expect(::Sidekiq.logger).to receive(:warn).with('Invalid Sidekiq size limiter limit: -1')
validator = described_class.new(TestSizeLimiterWorker, job_payload, size_limit: -1)
@ -87,9 +112,9 @@ RSpec.describe Gitlab::SidekiqMiddleware::SizeLimiter::Validator do
it 'defaults to 0' do
expect(::Sidekiq.logger).not_to receive(:warn)
validator = described_class.new(TestSizeLimiterWorker, job_payload)
validator = described_class.new(TestSizeLimiterWorker, job_payload, size_limit: nil)
expect(validator.size_limit).to be(0)
expect(validator.size_limit).to be(described_class::DEFAULT_SIZE_LIMIT)
end
end
@ -318,20 +343,30 @@ RSpec.describe Gitlab::SidekiqMiddleware::SizeLimiter::Validator do
end
end
describe '#validate!' do
context 'when calling SizeLimiter.validate!' do
let(:validate) { ->(worker_clas, job) { described_class.validate!(worker_class, job) } }
describe '.validate!' do
let(:validate) { ->(worker_class, job) { described_class.validate!(worker_class, job) } }
it_behaves_like 'validate limit job payload size' do
before do
stub_env('GITLAB_SIDEKIQ_SIZE_LIMITER_MODE', mode)
stub_env('GITLAB_SIDEKIQ_SIZE_LIMITER_LIMIT_BYTES', size_limit)
stub_env('GITLAB_SIDEKIQ_SIZE_LIMITER_COMPRESSION_THRESHOLD_BYTES', compression_threshold)
stub_application_setting(
sidekiq_job_limiter_mode: mode,
sidekiq_job_limiter_compression_threshold_bytes: compression_threshold,
sidekiq_job_limiter_limit_bytes: size_limit
)
end
it_behaves_like 'validate limit job payload size'
end
context 'when creating an instance with the related ENV variables' do
it "skips background migrations" do
expect(described_class).not_to receive(:new)
described_class::EXEMPT_WORKER_NAMES.each do |class_name|
validate.call(class_name.constantize, job_payload)
end
end
end
describe '#validate!' do
context 'when creating an instance with the related configuration variables' do
let(:validate) do
->(worker_clas, job) do
described_class.new(worker_class, job).validate!
@ -339,9 +374,11 @@ RSpec.describe Gitlab::SidekiqMiddleware::SizeLimiter::Validator do
end
before do
stub_env('GITLAB_SIDEKIQ_SIZE_LIMITER_MODE', mode)
stub_env('GITLAB_SIDEKIQ_SIZE_LIMITER_LIMIT_BYTES', size_limit)
stub_env('GITLAB_SIDEKIQ_SIZE_LIMITER_COMPRESSION_THRESHOLD_BYTES', compression_threshold)
stub_application_setting(
sidekiq_job_limiter_mode: mode,
sidekiq_job_limiter_compression_threshold_bytes: compression_threshold,
sidekiq_job_limiter_limit_bytes: size_limit
)
end
it_behaves_like 'validate limit job payload size'

View File

@ -956,6 +956,20 @@ RSpec.describe ApplicationSetting do
it { is_expected.not_to allow_value(nil).for(throttle_setting) }
end
end
context 'sidekiq job limiter settings' do
it 'has the right defaults', :aggregate_failures do
expect(setting.sidekiq_job_limiter_mode).to eq('compress')
expect(setting.sidekiq_job_limiter_compression_threshold_bytes)
.to eq(Gitlab::SidekiqMiddleware::SizeLimiter::Validator::DEFAULT_COMPRESSION_THRESHOLD_BYTES)
expect(setting.sidekiq_job_limiter_limit_bytes)
.to eq(Gitlab::SidekiqMiddleware::SizeLimiter::Validator::DEFAULT_SIZE_LIMIT)
end
it { is_expected.to allow_value('track').for(:sidekiq_job_limiter_mode) }
it { is_expected.to validate_numericality_of(:sidekiq_job_limiter_compression_threshold_bytes).only_integer.is_greater_than_or_equal_to(0) }
it { is_expected.to validate_numericality_of(:sidekiq_job_limiter_limit_bytes).only_integer.is_greater_than_or_equal_to(0) }
end
end
context 'restrict creating duplicates' do

View File

@ -13,60 +13,74 @@ RSpec.describe API::DependencyProxy, api: true do
group.add_owner(user)
stub_config(dependency_proxy: { enabled: true })
stub_last_activity_update
group.create_dependency_proxy_setting!(enabled: true)
end
describe 'DELETE /groups/:id/dependency_proxy/cache' do
subject { delete api("/groups/#{group.id}/dependency_proxy/cache", user) }
subject { delete api("/groups/#{group_id}/dependency_proxy/cache", user) }
context 'with feature available and enabled' do
let_it_be(:lease_key) { "dependency_proxy:delete_group_blobs:#{group.id}" }
shared_examples 'responding to purge requests' do
context 'with feature available and enabled' do
let_it_be(:lease_key) { "dependency_proxy:delete_group_blobs:#{group.id}" }
context 'an admin user' do
it 'deletes the blobs and returns no content' do
stub_exclusive_lease(lease_key, timeout: 1.hour)
expect(PurgeDependencyProxyCacheWorker).to receive(:perform_async)
subject
expect(response).to have_gitlab_http_status(:no_content)
end
context 'called multiple times in one hour', :clean_gitlab_redis_shared_state do
it 'returns 409 with an error message' do
stub_exclusive_lease_taken(lease_key, timeout: 1.hour)
context 'an admin user' do
it 'deletes the blobs and returns no content' do
stub_exclusive_lease(lease_key, timeout: 1.hour)
expect(PurgeDependencyProxyCacheWorker).to receive(:perform_async)
subject
expect(response).to have_gitlab_http_status(:conflict)
expect(response.body).to include('This request has already been made.')
expect(response).to have_gitlab_http_status(:accepted)
expect(response.body).to eq('202')
end
it 'executes service only for the first time' do
expect(PurgeDependencyProxyCacheWorker).to receive(:perform_async).once
context 'called multiple times in one hour', :clean_gitlab_redis_shared_state do
it 'returns 409 with an error message' do
stub_exclusive_lease_taken(lease_key, timeout: 1.hour)
2.times { subject }
subject
expect(response).to have_gitlab_http_status(:conflict)
expect(response.body).to include('This request has already been made.')
end
it 'executes service only for the first time' do
expect(PurgeDependencyProxyCacheWorker).to receive(:perform_async).once
2.times { subject }
end
end
end
context 'a non-admin' do
let(:user) { create(:user) }
before do
group.add_maintainer(user)
end
it_behaves_like 'returning response status', :forbidden
end
end
context 'a non-admin' do
let(:user) { create(:user) }
context 'depencency proxy is not enabled in the config' do
before do
group.add_maintainer(user)
stub_config(dependency_proxy: { enabled: false })
end
it_behaves_like 'returning response status', :forbidden
it_behaves_like 'returning response status', :not_found
end
end
context 'depencency proxy is not enabled' do
before do
stub_config(dependency_proxy: { enabled: false })
end
context 'with a group id' do
let(:group_id) { group.id }
it_behaves_like 'returning response status', :not_found
it_behaves_like 'responding to purge requests'
end
context 'with an url encoded group id' do
let(:group_id) { ERB::Util.url_encode(group.full_path) }
it_behaves_like 'responding to purge requests'
end
end
end

View File

@ -594,5 +594,20 @@ RSpec.describe API::Settings, 'Settings', :do_not_mock_admin_mode_setting do
expect(json_response['error']).to eq('whats_new_variant does not have a valid value')
end
end
context 'sidekiq job limit settings' do
it 'updates the settings' do
settings = {
sidekiq_job_limiter_mode: 'track',
sidekiq_job_limiter_compression_threshold_bytes: 1,
sidekiq_job_limiter_limit_bytes: 2
}.stringify_keys
put api("/application/settings", admin), params: settings
expect(response).to have_gitlab_http_status(:ok)
expect(json_response.slice(*settings.keys)).to eq(settings)
end
end
end
end

View File

@ -28,7 +28,7 @@ RSpec.describe Ci::ArchiveTraceService, '#execute' do
context 'when live trace chunks still exist' do
before do
create(:ci_build_trace_chunk, build: job)
create(:ci_build_trace_chunk, build: job, chunk_index: 0)
end
it 'removes the trace chunks' do
@ -40,8 +40,14 @@ RSpec.describe Ci::ArchiveTraceService, '#execute' do
job.job_artifacts_trace.file.remove!
end
it 'removes the trace artifact' do
expect { subject }.to change { job.reload.job_artifacts_trace }.to(nil)
it 'removes the trace artifact and builds a new one' do
existing_trace = job.job_artifacts_trace
expect(existing_trace).to receive(:destroy!).and_call_original
subject
expect(job.reload.job_artifacts_trace).to be_present
expect(job.reload.job_artifacts_trace.file.file).to be_present
end
end
end

View File

@ -0,0 +1,284 @@
# frozen_string_literal: true
require 'spec_helper'
# Specs for Ci::StuckBuilds::DropService, which scans for builds that appear
# stuck (pending/running/scheduled past their timeout windows) and fails them
# with an appropriate failure reason.
RSpec.describe Ci::StuckBuilds::DropService do
let!(:runner) { create :ci_runner }
let!(:job) { create :ci_build, runner: runner }
# Both timestamps default to nil; individual contexts override them to place
# the job inside or outside the various timeout/lookback windows.
let(:created_at) { }
let(:updated_at) { }
subject(:service) { described_class.new }
before do
# Only apply the attributes a context actually set, so nil defaults don't
# clobber the factory-assigned timestamps.
job_attributes = { status: status }
job_attributes[:created_at] = created_at if created_at
job_attributes[:updated_at] = updated_at if updated_at
job.update!(job_attributes)
end
# Asserts the service fails the job with a stuck-or-timeout failure.
# NOTE(review): the `drop` x3 / `drop_stuck` x1 expectations mirror the
# service's internal query batches — update these counts if the service's
# internals change.
shared_examples 'job is dropped' do
it 'changes status' do
expect(service).to receive(:drop).exactly(3).times.and_call_original
expect(service).to receive(:drop_stuck).exactly(:once).and_call_original
service.execute
job.reload
expect(job).to be_failed
expect(job).to be_stuck_or_timeout_failure
end
context 'when job have data integrity problem' do
it "does drop the job and logs the reason" do
# Corrupt yaml_variables directly in the DB (update_columns skips
# validation) to simulate bad data.
job.update_columns(yaml_variables: '[{"key" => "value"}]')
expect(Gitlab::ErrorTracking).to receive(:track_exception)
.with(anything, a_hash_including(build_id: job.id))
.once
.and_call_original
service.execute
job.reload
expect(job).to be_failed
expect(job).to be_data_integrity_failure
end
end
end
# Asserts the service leaves the job's status untouched.
shared_examples 'job is unchanged' do
it 'does not change status' do
expect(service).to receive(:drop).exactly(3).times.and_call_original
expect(service).to receive(:drop_stuck).exactly(:once).and_call_original
service.execute
job.reload
expect(job.status).to eq(status)
end
end
context 'when job is pending' do
let(:status) { 'pending' }
context 'when job is not stuck' do
before do
# Stub stuck? to false on any loaded build for this branch.
allow_next_found_instance_of(Ci::Build) do |build|
allow(build).to receive(:stuck?).and_return(false)
end
end
# Non-stuck pending jobs are dropped after roughly one day of inactivity.
context 'when job was updated_at more than 1 day ago' do
let(:updated_at) { 1.5.days.ago }
context 'when created_at is the same as updated_at' do
let(:created_at) { 1.5.days.ago }
it_behaves_like 'job is dropped'
end
context 'when created_at is before updated_at' do
let(:created_at) { 3.days.ago }
it_behaves_like 'job is dropped'
end
context 'when created_at is outside lookback window' do
# Jobs created before the BUILD_LOOKBACK horizon are not scanned.
let(:created_at) { described_class::BUILD_LOOKBACK - 1.day }
it_behaves_like 'job is unchanged'
end
end
context 'when job was updated less than 1 day ago' do
let(:updated_at) { 6.hours.ago }
context 'when created_at is the same as updated_at' do
let(:created_at) { 1.5.days.ago }
it_behaves_like 'job is unchanged'
end
context 'when created_at is before updated_at' do
let(:created_at) { 3.days.ago }
it_behaves_like 'job is unchanged'
end
context 'when created_at is outside lookback window' do
let(:created_at) { described_class::BUILD_LOOKBACK - 1.day }
it_behaves_like 'job is unchanged'
end
end
context 'when job was updated more than 1 hour ago' do
let(:updated_at) { 2.hours.ago }
context 'when created_at is the same as updated_at' do
let(:created_at) { 2.hours.ago }
it_behaves_like 'job is unchanged'
end
context 'when created_at is before updated_at' do
let(:created_at) { 3.days.ago }
it_behaves_like 'job is unchanged'
end
context 'when created_at is outside lookback window' do
let(:created_at) { described_class::BUILD_LOOKBACK - 1.day }
it_behaves_like 'job is unchanged'
end
end
end
context 'when job is stuck' do
before do
allow_next_found_instance_of(Ci::Build) do |build|
allow(build).to receive(:stuck?).and_return(true)
end
end
# Stuck pending jobs use the shorter 1-hour timeout.
# NOTE(review): stuck? is stubbed here; its real meaning lives in
# Ci::Build — confirm there if semantics matter.
context 'when job was updated_at more than 1 hour ago' do
let(:updated_at) { 1.5.hours.ago }
context 'when created_at is the same as updated_at' do
let(:created_at) { 1.5.hours.ago }
it_behaves_like 'job is dropped'
end
context 'when created_at is before updated_at' do
let(:created_at) { 3.days.ago }
it_behaves_like 'job is dropped'
end
context 'when created_at is outside lookback window' do
let(:created_at) { described_class::BUILD_LOOKBACK - 1.day }
it_behaves_like 'job is unchanged'
end
end
context 'when job was updated in less than 1 hour ago' do
let(:updated_at) { 30.minutes.ago }
context 'when created_at is the same as updated_at' do
let(:created_at) { 30.minutes.ago }
it_behaves_like 'job is unchanged'
end
context 'when created_at is before updated_at' do
let(:created_at) { 2.days.ago }
it_behaves_like 'job is unchanged'
end
context 'when created_at is outside lookback window' do
let(:created_at) { described_class::BUILD_LOOKBACK - 1.day }
it_behaves_like 'job is unchanged'
end
end
end
end
context 'when job is running' do
let(:status) { 'running' }
# Running jobs are considered stuck after 1 hour without an update.
# NOTE(review): presumably updated_at is refreshed by runner activity —
# confirm against DropService/Ci::Build.
context 'when job was updated_at more than an hour ago' do
let(:updated_at) { 2.hours.ago }
it_behaves_like 'job is dropped'
end
context 'when job was updated in less than 1 hour ago' do
let(:updated_at) { 30.minutes.ago }
it_behaves_like 'job is unchanged'
end
end
# Finished jobs must never be touched, regardless of age.
%w(success skipped failed canceled).each do |status|
context "when job is #{status}" do
let(:status) { status }
let(:updated_at) { 2.days.ago }
context 'when created_at is the same as updated_at' do
let(:created_at) { 2.days.ago }
it_behaves_like 'job is unchanged'
end
context 'when created_at is before updated_at' do
let(:created_at) { 3.days.ago }
it_behaves_like 'job is unchanged'
end
context 'when created_at is outside lookback window' do
let(:created_at) { described_class::BUILD_LOOKBACK - 1.day }
it_behaves_like 'job is unchanged'
end
end
end
context 'for deleted project' do
let(:status) { 'running' }
let(:updated_at) { 2.days.ago }
before do
# Mark the project as queued for deletion; its jobs should be dropped.
job.project.update!(pending_delete: true)
end
it_behaves_like 'job is dropped'
end
describe 'drop stale scheduled builds' do
let(:status) { 'scheduled' }
# Scheduled builds are keyed off scheduled_at, not updated_at.
let(:updated_at) { }
context 'when scheduled at 2 hours ago but it is not executed yet' do
let!(:job) { create(:ci_build, :scheduled, scheduled_at: 2.hours.ago) }
it 'drops the stale scheduled build' do
expect(Ci::Build.scheduled.count).to eq(1)
expect(job).to be_scheduled
service.execute
job.reload
expect(Ci::Build.scheduled.count).to eq(0)
expect(job).to be_failed
expect(job).to be_stale_schedule
end
end
context 'when scheduled at 30 minutes ago but it is not executed yet' do
let!(:job) { create(:ci_build, :scheduled, scheduled_at: 30.minutes.ago) }
it 'does not drop the stale scheduled build yet' do
expect(Ci::Build.scheduled.count).to eq(1)
expect(job).to be_scheduled
service.execute
expect(Ci::Build.scheduled.count).to eq(1)
expect(job).to be_scheduled
end
end
context 'when there are no stale scheduled builds' do
it 'does not drop the stale scheduled build yet' do
expect { service.execute }.not_to raise_error
end
end
end
end

View File

@ -11,14 +11,9 @@ RSpec.describe PurgeDependencyProxyCacheWorker do
subject { described_class.new.perform(user.id, group_id) }
before do
stub_config(dependency_proxy: { enabled: true })
group.create_dependency_proxy_setting!(enabled: true)
end
describe '#perform' do
shared_examples 'returns nil' do
it 'returns nil', :aggregate_failures do
shared_examples 'not removing blobs and manifests' do
it 'does not remove blobs and manifests', :aggregate_failures do
expect { subject }.not_to change { group.dependency_proxy_blobs.size }
expect { subject }.not_to change { group.dependency_proxy_manifests.size }
expect(subject).to be_nil
@ -43,26 +38,26 @@ RSpec.describe PurgeDependencyProxyCacheWorker do
end
context 'when admin mode is disabled' do
it_behaves_like 'returns nil'
it_behaves_like 'not removing blobs and manifests'
end
end
context 'a non-admin user' do
let(:user) { create(:user) }
it_behaves_like 'returns nil'
it_behaves_like 'not removing blobs and manifests'
end
context 'an invalid user id' do
let(:user) { double('User', id: 99999 ) }
it_behaves_like 'returns nil'
it_behaves_like 'not removing blobs and manifests'
end
context 'an invalid group' do
let(:group_id) { 99999 }
it_behaves_like 'returns nil'
it_behaves_like 'not removing blobs and manifests'
end
end
end

View File

@ -5,311 +5,50 @@ require 'spec_helper'
RSpec.describe StuckCiJobsWorker do
include ExclusiveLeaseHelpers
let!(:runner) { create :ci_runner }
let!(:job) { create :ci_build, runner: runner }
let(:worker_lease_key) { StuckCiJobsWorker::EXCLUSIVE_LEASE_KEY }
let(:worker_lease_key) { StuckCiJobsWorker::EXCLUSIVE_LEASE_KEY }
let(:worker_lease_uuid) { SecureRandom.uuid }
let(:created_at) { }
let(:updated_at) { }
let(:worker2) { described_class.new }
subject(:worker) { described_class.new }
before do
stub_exclusive_lease(worker_lease_key, worker_lease_uuid)
job_attributes = { status: status }
job_attributes[:created_at] = created_at if created_at
job_attributes[:updated_at] = updated_at if updated_at
job.update!(job_attributes)
end
shared_examples 'job is dropped' do
it "changes status" do
worker.perform
job.reload
expect(job).to be_failed
expect(job).to be_stuck_or_timeout_failure
end
context 'when job have data integrity problem' do
it "does drop the job and logs the reason" do
job.update_columns(yaml_variables: '[{"key" => "value"}]')
expect(Gitlab::ErrorTracking).to receive(:track_exception)
.with(anything, a_hash_including(build_id: job.id))
.once
.and_call_original
worker.perform
job.reload
expect(job).to be_failed
expect(job).to be_data_integrity_failure
end
end
end
shared_examples 'job is unchanged' do
before do
worker.perform
job.reload
end
it "doesn't change status" do
expect(job.status).to eq(status)
end
end
context 'when job is pending' do
let(:status) { 'pending' }
context 'when job is not stuck' do
before do
allow_any_instance_of(Ci::Build).to receive(:stuck?).and_return(false)
describe '#perform' do
it 'executes an instance of Ci::StuckBuildsDropService' do
expect_next_instance_of(Ci::StuckBuilds::DropService) do |service|
expect(service).to receive(:execute).exactly(:once)
end
context 'when job was updated_at more than 1 day ago' do
let(:updated_at) { 1.5.days.ago }
context 'when created_at is the same as updated_at' do
let(:created_at) { 1.5.days.ago }
it_behaves_like 'job is dropped'
end
context 'when created_at is before updated_at' do
let(:created_at) { 3.days.ago }
it_behaves_like 'job is dropped'
end
context 'when created_at is outside lookback window' do
let(:created_at) { described_class::BUILD_LOOKBACK - 1.day }
it_behaves_like 'job is unchanged'
end
end
context 'when job was updated less than 1 day ago' do
let(:updated_at) { 6.hours.ago }
context 'when created_at is the same as updated_at' do
let(:created_at) { 1.5.days.ago }
it_behaves_like 'job is unchanged'
end
context 'when created_at is before updated_at' do
let(:created_at) { 3.days.ago }
it_behaves_like 'job is unchanged'
end
context 'when created_at is outside lookback window' do
let(:created_at) { described_class::BUILD_LOOKBACK - 1.day }
it_behaves_like 'job is unchanged'
end
end
context 'when job was updated more than 1 hour ago' do
let(:updated_at) { 2.hours.ago }
context 'when created_at is the same as updated_at' do
let(:created_at) { 2.hours.ago }
it_behaves_like 'job is unchanged'
end
context 'when created_at is before updated_at' do
let(:created_at) { 3.days.ago }
it_behaves_like 'job is unchanged'
end
context 'when created_at is outside lookback window' do
let(:created_at) { described_class::BUILD_LOOKBACK - 1.day }
it_behaves_like 'job is unchanged'
end
end
end
context 'when job is stuck' do
before do
allow_any_instance_of(Ci::Build).to receive(:stuck?).and_return(true)
end
context 'when job was updated_at more than 1 hour ago' do
let(:updated_at) { 1.5.hours.ago }
context 'when created_at is the same as updated_at' do
let(:created_at) { 1.5.hours.ago }
it_behaves_like 'job is dropped'
end
context 'when created_at is before updated_at' do
let(:created_at) { 3.days.ago }
it_behaves_like 'job is dropped'
end
context 'when created_at is outside lookback window' do
let(:created_at) { described_class::BUILD_LOOKBACK - 1.day }
it_behaves_like 'job is unchanged'
end
end
context 'when job was updated in less than 1 hour ago' do
let(:updated_at) { 30.minutes.ago }
context 'when created_at is the same as updated_at' do
let(:created_at) { 30.minutes.ago }
it_behaves_like 'job is unchanged'
end
context 'when created_at is before updated_at' do
let(:created_at) { 2.days.ago }
it_behaves_like 'job is unchanged'
end
context 'when created_at is outside lookback window' do
let(:created_at) { described_class::BUILD_LOOKBACK - 1.day }
it_behaves_like 'job is unchanged'
end
end
end
end
context 'when job is running' do
let(:status) { 'running' }
context 'when job was updated_at more than an hour ago' do
let(:updated_at) { 2.hours.ago }
it_behaves_like 'job is dropped'
end
context 'when job was updated in less than 1 hour ago' do
let(:updated_at) { 30.minutes.ago }
it_behaves_like 'job is unchanged'
end
end
%w(success skipped failed canceled).each do |status|
context "when job is #{status}" do
let(:status) { status }
let(:updated_at) { 2.days.ago }
context 'when created_at is the same as updated_at' do
let(:created_at) { 2.days.ago }
it_behaves_like 'job is unchanged'
end
context 'when created_at is before updated_at' do
let(:created_at) { 3.days.ago }
it_behaves_like 'job is unchanged'
end
context 'when created_at is outside lookback window' do
let(:created_at) { described_class::BUILD_LOOKBACK - 1.day }
it_behaves_like 'job is unchanged'
end
end
end
context 'for deleted project' do
let(:status) { 'running' }
let(:updated_at) { 2.days.ago }
before do
job.project.update!(pending_delete: true)
end
it 'does drop job' do
expect_any_instance_of(Ci::Build).to receive(:drop).and_call_original
worker.perform
end
end
describe 'drop stale scheduled builds' do
let(:status) { 'scheduled' }
let(:updated_at) { }
context 'when scheduled at 2 hours ago but it is not executed yet' do
let!(:job) { create(:ci_build, :scheduled, scheduled_at: 2.hours.ago) }
it 'drops the stale scheduled build' do
expect(Ci::Build.scheduled.count).to eq(1)
expect(job).to be_scheduled
worker.perform
job.reload
expect(Ci::Build.scheduled.count).to eq(0)
expect(job).to be_failed
expect(job).to be_stale_schedule
end
end
context 'when scheduled at 30 minutes ago but it is not executed yet' do
let!(:job) { create(:ci_build, :scheduled, scheduled_at: 30.minutes.ago) }
it 'does not drop the stale scheduled build yet' do
expect(Ci::Build.scheduled.count).to eq(1)
expect(job).to be_scheduled
context 'with an exclusive lease' do
it 'does not execute concurrently' do
expect(worker).to receive(:remove_lease).exactly(:once)
expect(worker2).not_to receive(:remove_lease)
worker.perform
expect(Ci::Build.scheduled.count).to eq(1)
expect(job).to be_scheduled
stub_exclusive_lease_taken(worker_lease_key)
worker2.perform
end
end
context 'when there are no stale scheduled builds' do
it 'does not drop the stale scheduled build yet' do
expect { worker.perform }.not_to raise_error
it 'can execute in sequence' do
expect(worker).to receive(:remove_lease).at_least(:once)
expect(worker2).to receive(:remove_lease).at_least(:once)
worker.perform
worker2.perform
end
end
end
describe 'exclusive lease' do
let(:status) { 'running' }
let(:updated_at) { 2.days.ago }
let(:worker2) { described_class.new }
it 'cancels exclusive leases after worker perform' do
expect_to_cancel_exclusive_lease(worker_lease_key, worker_lease_uuid)
it 'is guard by exclusive lease when executed concurrently' do
expect(worker).to receive(:drop).at_least(:once).and_call_original
expect(worker2).not_to receive(:drop)
worker.perform
stub_exclusive_lease_taken(worker_lease_key)
worker2.perform
end
it 'can be executed in sequence' do
expect(worker).to receive(:drop).at_least(:once).and_call_original
expect(worker2).to receive(:drop).at_least(:once).and_call_original
worker.perform
worker2.perform
end
it 'cancels exclusive leases after worker perform' do
expect_to_cancel_exclusive_lease(worker_lease_key, worker_lease_uuid)
worker.perform
worker.perform
end
end
end
end