Add latest changes from gitlab-org/gitlab@master

GitLab Bot 2025-06-10 18:11:48 +00:00
parent 4c61af12af
commit 0c84203069
92 changed files with 2171 additions and 498 deletions

View File

@@ -418,7 +418,9 @@ Dangerfile
/ee/app/helpers/analytics/analytics_dashboards_helper.rb
/ee/app/helpers/analytics/analytics_settings_helper.rb
/ee/app/helpers/projects/analytics_dashboard_helper.rb
/ee/app/models/product_analytics/
/ee/app/models/analytics/dashboard.rb
/ee/app/models/analytics/panel.rb
/ee/app/models/analytics/visualization.rb
^[Analytics Dashboards frontend] @gitlab-org/analytics-section/platform-insights/engineers/frontend @gitlab-org/plan-stage/optimize-group/engineers/frontend
/app/assets/javascripts/vue_shared/components/customizable_dashboard/

View File

@@ -2710,7 +2710,6 @@ Gitlab/BoundedContexts:
- 'ee/app/models/concerns/member_roles/member_role_relation.rb'
- 'ee/app/models/concerns/mirror_configuration.rb'
- 'ee/app/models/concerns/password_complexity.rb'
- 'ee/app/models/concerns/product_analytics/schema_validator.rb'
- 'ee/app/models/concerns/product_analytics_helpers.rb'
- 'ee/app/models/concerns/scim_paginatable.rb'
- 'ee/app/models/concerns/singleton_record.rb'
@@ -2902,11 +2901,8 @@ Gitlab/BoundedContexts:
- 'ee/app/models/preloaders/user_member_roles_for_admin_preloader.rb'
- 'ee/app/models/preloaders/user_member_roles_in_groups_preloader.rb'
- 'ee/app/models/preloaders/user_member_roles_in_projects_preloader.rb'
- 'ee/app/models/product_analytics/dashboard.rb'
- 'ee/app/models/product_analytics/funnel.rb'
- 'ee/app/models/product_analytics/funnel_step.rb'
- 'ee/app/models/product_analytics/panel.rb'
- 'ee/app/models/product_analytics/visualization.rb'
- 'ee/app/models/productivity_analytics.rb'
- 'ee/app/models/project_alias.rb'
- 'ee/app/models/project_security_setting.rb'

View File

@@ -1,7 +1,6 @@
---
# Cop supports --autocorrect.
InternalAffairs/CopDescriptionWithExample:
Details: grace period
Exclude:
- 'rubocop/cop/active_model_errors_direct_manipulation.rb'
- 'rubocop/cop/active_record_association_reload.rb'

View File

@@ -193,8 +193,6 @@ Layout/ClassStructure:
- 'ee/app/models/namespaces/free_user_cap/enforcement.rb'
- 'ee/app/models/package_metadata/advisory.rb'
- 'ee/app/models/package_metadata/package.rb'
- 'ee/app/models/product_analytics/dashboard.rb'
- 'ee/app/models/product_analytics/visualization.rb'
- 'ee/app/models/projects/saved_reply.rb'
- 'ee/app/models/protected_branch/unprotect_access_level.rb'
- 'ee/app/models/sbom/occurrence.rb'

View File

@@ -214,7 +214,6 @@ Layout/LineEndStringConcatenationIndentation:
- 'ee/spec/mailers/emails/block_seat_overages_spec.rb'
- 'ee/spec/models/dependency_proxy/packages/setting_spec.rb'
- 'ee/spec/models/members/member_role_spec.rb'
- 'ee/spec/models/product_analytics/visualization_spec.rb'
- 'ee/spec/models/sbom/occurrence_spec.rb'
- 'ee/spec/presenters/ci/build_presenter_spec.rb'
- 'ee/spec/requests/api/gitlab_subscriptions/add_on_purchases_spec.rb'

View File

@@ -6,13 +6,9 @@ Lint/MissingCopEnableDirective:
- 'app/graphql/types/base_enum.rb'
- 'app/graphql/types/boards/board_issue_input_base_type.rb'
- 'app/graphql/types/ci/build_need_type.rb'
- 'app/graphql/types/ci/config/config_type.rb'
- 'app/graphql/types/ci/config/group_type.rb'
- 'app/graphql/types/ci/config/include_type.rb'
- 'app/graphql/types/ci/config/job_restriction_type.rb'
- 'app/graphql/types/ci/config/job_type.rb'
- 'app/graphql/types/ci/config/need_type.rb'
- 'app/graphql/types/ci/config/stage_type.rb'
- 'app/graphql/types/ci/group_type.rb'
- 'app/graphql/types/ci/group_variable_type.rb'
- 'app/graphql/types/ci/instance_variable_type.rb'

View File

@@ -569,8 +569,6 @@ RSpec/NamedSubject:
- 'ee/spec/models/namespaces/free_user_cap/root_size_spec.rb'
- 'ee/spec/models/namespaces/storage/root_size_spec.rb'
- 'ee/spec/models/packages/package_file_spec.rb'
- 'ee/spec/models/product_analytics/dashboard_spec.rb'
- 'ee/spec/models/product_analytics/visualization_spec.rb'
- 'ee/spec/models/projects/all_protected_branches_rule_spec.rb'
- 'ee/spec/models/projects/target_branch_rule_spec.rb'
- 'ee/spec/models/protected_environment_spec.rb'

View File

@@ -180,13 +180,9 @@ Style/InlineDisableAnnotation:
- 'app/graphql/types/ci/build_need_type.rb'
- 'app/graphql/types/ci/catalog/resource_type.rb'
- 'app/graphql/types/ci/code_quality_report_summary_type.rb'
- 'app/graphql/types/ci/config/config_type.rb'
- 'app/graphql/types/ci/config/group_type.rb'
- 'app/graphql/types/ci/config/include_type.rb'
- 'app/graphql/types/ci/config/job_restriction_type.rb'
- 'app/graphql/types/ci/config/job_type.rb'
- 'app/graphql/types/ci/config/need_type.rb'
- 'app/graphql/types/ci/config/stage_type.rb'
- 'app/graphql/types/ci/config_variable_type.rb'
- 'app/graphql/types/ci/detailed_status_type.rb'
- 'app/graphql/types/ci/group_environment_scope_connection_type.rb'

View File

@@ -42,7 +42,7 @@ export const VERIFICATION_LEVEL_VERIFIED_CREATOR_SELF_MANAGED = 'VERIFIED_CREATO
export const VERIFICATION_LEVEL_VERIFIED_CREATOR_SELF_MANAGED_BADGE_TEXT = s__(
'CiCatalog|Verified creator',
);
export const VERIFICATION_LEVEL_VERIFIED_CREATOR_SELF_MANAGED_ICON = 'check';
export const VERIFICATION_LEVEL_VERIFIED_CREATOR_SELF_MANAGED_ICON = 'partner-verified';
export const VERIFICATION_LEVEL_VERIFIED_CREATOR_SELF_MANAGED_POPOVER_TEXT = s__(
'CiCatalog|Created and maintained by a %{boldStart}verified creator%{boldEnd}',
);

View File

@@ -69,8 +69,15 @@ export default {
type: String,
required: true,
},
status: {
type: String,
required: true,
},
},
computed: {
environment() {
return this.approvalEnvironment.environment;
},
isRollbackAvailable() {
return Boolean(this.rollback?.lastDeployment);
},
@@ -80,9 +87,6 @@
isActionsShown() {
return this.actions.length > 0;
},
environment() {
return this.approvalEnvironment.environment;
},
rollbackButtonTitle() {
return this.rollback.lastDeployment?.isLast
? translations.redeployButtonTitle
@@ -115,9 +119,10 @@
/>
<environment-approval
v-if="approvalEnvironment.isApprovalActionAvailable"
:required-approval-count="environment.requiredApprovalCount"
:deployment-web-path="deploymentWebPath"
:required-approval-count="environment.requiredApprovalCount"
:show-text="false"
:status="status"
/>
</div>
</template>

View File

@@ -68,6 +68,7 @@ export default {
:rollback="item.rollback"
:approval-environment="item.deploymentApproval"
:deployment-web-path="item.webPath"
:status="item.status"
/>
</template>
</gl-table-lite>

View File

@@ -2,7 +2,7 @@ import { __ } from '~/locale';
export const PLACEHOLDER_USER_EXTERNAL_DEFAULT_TRUE = __('Regex pattern');
export const PLACEHOLDER_USER_EXTERNAL_DEFAULT_FALSE = __(
'To define internal users, first enable new users set to external',
'Regex pattern. To use, select external by default setting',
);
function setUserInternalRegexPlaceholder(checkbox) {

View File

@@ -52,6 +52,8 @@ export const ExpandLinesAdapter = {
hunkHeaderRow.nextElementSibling.querySelector('[data-line-number]').focus();
}
hunkHeaderRow.remove();
const totalRows = this.diffElement.querySelectorAll('[data-file-body] tbody tr').length;
this.diffElement.style.setProperty('--total-rows', totalRows);
},
},
};

View File

@@ -251,6 +251,7 @@ module IssuableActions
:state_event,
:subscription_event,
:confidential,
:status,
{ assignee_ids: [],
add_label_ids: [],
remove_label_ids: [] }

View File

@@ -19,6 +19,16 @@ module Issues
return issues if user_can_see_all_confidential_issues?
# Since the CTE is used in access_to_parent_exists only if @related_groups is not null, we can skip the CTE if
# it's null
if Feature.enabled?(:use_cte_optimization_for_confidentiality_filter,
parent&.root_ancestor) && !@related_groups.nil?
issues = issues.with_accessible_sub_namespace_ids_cte(Group.groups_user_can(@related_groups,
current_user,
:read_confidential_issues,
same_root: true).select('id'))
end
issues.public_only.or(
issues.confidential_only.merge(
issues.authored(current_user)
@@ -50,13 +60,19 @@ module Issues
return access_to_project_level_issue_exists if @related_groups.nil?
access_to_project_level_issue_exists.project_level.or(
issues.group_level.in_namespaces(
Group.id_in(
Group.groups_user_can(@related_groups, current_user, :read_confidential_issues, same_root: true)
if Feature.enabled?(:use_cte_optimization_for_confidentiality_filter, parent&.root_ancestor)
access_to_project_level_issue_exists.project_level.or(
issues.group_level.in_namespaces(Group.in_accessible_sub_namespaces)
)
else
access_to_project_level_issue_exists.project_level.or(
issues.group_level.in_namespaces(
Group.id_in(
Group.groups_user_can(@related_groups, current_user, :read_confidential_issues, same_root: true)
)
)
)
)
end
end
end
end

View File

@@ -31,14 +31,12 @@ module Mutations
required: false,
description: 'Run pipeline creation simulation, or only do static check.'
field :config, Types::Ci::Config::ConfigType, null: true, description: 'Linted CI config and metadata.'
field :config, Types::Ci::ConfigType, null: true, description: 'Linted CI config and metadata.'
def resolve(project_path:, content:, ref: nil, dry_run: false)
project = authorized_find!(project_path: project_path)
ref ||= project.default_branch
return feature_unfinished_error unless Feature.enabled?(:ci_lint_mutation, project)
result = ::Gitlab::Ci::Lint
.new(project: project, current_user: context[:current_user])
.validate(content, dry_run: dry_run, ref: ref)
@@ -53,16 +51,6 @@ module Mutations
private
def feature_unfinished_error
unfinished_error = "This mutation is unfinished and not yet available for use. " \
"Track its progress in https://gitlab.com/gitlab-org/gitlab/-/issues/540764."
{
config: nil,
errors: [unfinished_error]
}
end
def response(result)
{
status: result.status,

View File

@@ -6,7 +6,7 @@ module Resolvers
include Gitlab::Graphql::Authorize::AuthorizeResource
include ResolvesProject
type Types::Ci::Config::ConfigType, null: true
type Types::Ci::LegacyConfig::ConfigType, null: true
description <<~MD
Linted and processed contents of a CI config.
Should not be requested more than once per request.

View File

@@ -1,25 +0,0 @@
# frozen_string_literal: true
module Types
module Ci
# rubocop: disable Graphql/AuthorizeTypes
module Config
class ConfigType < BaseObject
graphql_name 'CiConfig'
field :errors, [GraphQL::Types::String], null: true,
description: 'Linting errors.'
field :includes, [Types::Ci::Config::IncludeType], null: true,
description: 'List of included files.'
field :merged_yaml, GraphQL::Types::String, null: true,
description: 'Merged CI configuration YAML.'
field :stages, Types::Ci::Config::StageType.connection_type, null: true,
description: 'Stages of the pipeline.'
field :status, Types::Ci::Config::StatusEnum, null: true,
description: 'Status of linting, can be either valid or invalid.'
field :warnings, [GraphQL::Types::String], null: true,
description: 'Linting warnings.'
end
end
end
end

View File

@@ -2,18 +2,19 @@
module Types
module Ci
# rubocop: disable Graphql/AuthorizeTypes
module Config
# rubocop:disable Graphql/AuthorizeTypes -- Authorization handled in the CiLint mutation
class GroupType < BaseObject
graphql_name 'CiConfigGroup'
graphql_name 'CiConfigGroupV2'
field :jobs, Types::Ci::Config::JobType.connection_type, null: true,
field :jobs, [Types::Ci::Config::JobType], null: true,
description: 'Jobs in group.'
field :name, GraphQL::Types::String, null: true,
description: 'Name of the job group.'
field :size, GraphQL::Types::Int, null: true,
description: 'Size of the job group.'
end
# rubocop:enable Graphql/AuthorizeTypes
end
end
end

View File

@@ -2,10 +2,10 @@
module Types
module Ci
# rubocop: disable Graphql/AuthorizeTypes
module Config
# rubocop:disable Graphql/AuthorizeTypes -- Authorization handled in the CiLint mutation
class JobType < BaseObject
graphql_name 'CiConfigJob'
graphql_name 'CiConfigJobV2'
field :after_script,
[GraphQL::Types::String],
@@ -26,7 +26,7 @@ module Types
field :name, GraphQL::Types::String, null: true,
description: 'Name of the job.'
field :needs,
Types::Ci::Config::NeedType.connection_type,
[Types::Ci::Config::NeedType],
null: true,
description: 'Builds that must complete before the jobs run.'
field :only,
@@ -47,6 +47,7 @@
object[:when]
end
end
# rubocop:enable Graphql/AuthorizeTypes
end
end
end

View File

@@ -2,16 +2,17 @@
module Types
module Ci
# rubocop: disable Graphql/AuthorizeTypes
module Config
# rubocop:disable Graphql/AuthorizeTypes -- Authorization handled in the CiLint mutation
class StageType < BaseObject
graphql_name 'CiConfigStage'
graphql_name 'CiConfigStageV2'
field :groups, Types::Ci::Config::GroupType.connection_type, null: true,
field :groups, [Types::Ci::Config::GroupType], null: true,
description: 'Groups of jobs for the stage.'
field :name, GraphQL::Types::String, null: true,
description: 'Name of the stage.'
end
# rubocop:enable Graphql/AuthorizeTypes
end
end
end

View File

@@ -0,0 +1,24 @@
# frozen_string_literal: true
module Types
module Ci
# rubocop: disable Graphql/AuthorizeTypes -- Authorization handled by the CiLint mutation
class ConfigType < BaseObject
graphql_name 'CiConfigV2'
field :errors, [GraphQL::Types::String], null: true,
description: 'Linting errors.'
field :includes, [Types::Ci::Config::IncludeType], null: true,
description: 'List of included files.'
field :merged_yaml, GraphQL::Types::String, null: true,
description: 'Merged CI configuration YAML.'
field :stages, [Types::Ci::Config::StageType], null: true,
description: 'Stages of the pipeline.'
field :status, Types::Ci::Config::StatusEnum, null: true,
description: 'Status of linting, can be either valid or invalid.'
field :warnings, [GraphQL::Types::String], null: true,
description: 'Linting warnings.'
end
# rubocop: enable Graphql/AuthorizeTypes
end
end

View File

@@ -0,0 +1,16 @@
# frozen_string_literal: true
module Types
module Ci
# rubocop: disable Graphql/AuthorizeTypes -- Authorization handled by the ConfigResolver
module LegacyConfig
class ConfigType < ::Types::Ci::ConfigType
graphql_name 'CiConfig'
field :stages, Types::Ci::LegacyConfig::StageType.connection_type, null: true,
description: 'Stages of the pipeline.'
end
end
# rubocop: enable Graphql/AuthorizeTypes
end
end

View File

@@ -0,0 +1,16 @@
# frozen_string_literal: true
module Types
module Ci
# rubocop: disable Graphql/AuthorizeTypes -- Authorization handled by the ConfigResolver
module LegacyConfig
class GroupType < ::Types::Ci::Config::GroupType
graphql_name 'CiConfigGroup'
field :jobs, Types::Ci::LegacyConfig::JobType.connection_type, null: true,
description: 'Jobs in group.'
end
end
# rubocop: enable Graphql/AuthorizeTypes
end
end

View File

@@ -0,0 +1,18 @@
# frozen_string_literal: true
module Types
module Ci
# rubocop: disable Graphql/AuthorizeTypes -- Authorization handled by the ConfigResolver
module LegacyConfig
class JobType < ::Types::Ci::Config::JobType
graphql_name 'CiConfigJob'
field :needs,
Types::Ci::Config::NeedType.connection_type,
null: true,
description: 'Builds that must complete before the jobs run.'
end
end
# rubocop: enable Graphql/AuthorizeTypes
end
end

View File

@@ -0,0 +1,16 @@
# frozen_string_literal: true
module Types
module Ci
# rubocop: disable Graphql/AuthorizeTypes -- Authorization handled by the ConfigResolver
module LegacyConfig
class StageType < ::Types::Ci::Config::StageType
graphql_name 'CiConfigStage'
field :groups, Types::Ci::LegacyConfig::GroupType.connection_type, null: true,
description: 'Groups of jobs for the stage.'
end
end
# rubocop: enable Graphql/AuthorizeTypes
end
end

View File

@@ -13,7 +13,12 @@ module Types
field :ci_application_settings, Types::Ci::ApplicationSettingType,
null: true,
description: 'CI related settings that apply to the entire instance.'
field :ci_config, resolver: Resolvers::Ci::ConfigResolver, complexity: 126 # AUTHENTICATED_MAX_COMPLEXITY / 2 + 1
field :ci_config, resolver: Resolvers::Ci::ConfigResolver, complexity: 126, # AUTHENTICATED_MAX_COMPLEXITY / 2 + 1
deprecated: {
reason: 'Use CiLint mutation: <https://docs.gitlab.com/api/graphql/reference/#mutationcilint>',
milestone: '18.1'
}
field :ci_pipeline_stage, ::Types::Ci::StageType,
null: true, description: 'Stage belonging to a CI pipeline.' do

View File

@@ -27,6 +27,14 @@ module Types
description: 'Supported conversion types for the work item type.',
experiment: { milestone: '17.8' }
field :unavailable_widgets_on_conversion, [::Types::WorkItems::WidgetDefinitionInterface],
null: true,
description: 'Widgets that will be lost when converting from source work item type to target work item type.' do
argument :target, ::Types::GlobalIDType[::WorkItems::Type],
required: true,
description: 'Target work item type to convert to.'
end
def widget_definitions
object.widgets(context[:resource_parent])
end
@@ -34,6 +42,15 @@ module Types
def supported_conversion_types
object.supported_conversion_types(context[:resource_parent], current_user)
end
def unavailable_widgets_on_conversion(target:)
source_type = object
target_type = GitlabSchema.find_by_gid(target).sync
return [] unless source_type && target_type
source_type.unavailable_widgets_on_conversion(target_type, context[:resource_parent])
end
end
end
end

View File

@@ -23,6 +23,8 @@ class DiffNote < Note
validate :positions_complete
validate :verify_supported, unless: :importing?
validate :validate_diff_file_and_line, on: :create, if: :requires_diff_file_validation_during_import?
before_validation :set_line_code, if: :on_text?, unless: :importing?
after_save :keep_around_commits, unless: -> { importing? || skip_keep_around_commits }
@@ -37,10 +39,33 @@ class DiffNote < Note
DiffDiscussion
end
def validate_diff_file_and_line
diff_file = diff_file(create_missing_diff_file: false)
unless diff_file
errors.add(:base, :missing_diff_file, message: DIFF_FILE_NOT_FOUND_MESSAGE)
return
end
diff_line = diff_file.line_for_position(self.original_position)
return if diff_line
errors.add(:base, :missing_diff_line, message: DIFF_LINE_NOT_FOUND_MESSAGE % {
file_path: diff_file.file_path,
old_line: original_position.old_line,
new_line: original_position.new_line
})
end
def requires_diff_file_validation_during_import?
importing? && should_create_diff_file?
end
def create_diff_file
return unless should_create_diff_file?
diff_file = fetch_diff_file
diff_file = diff_file(create_missing_diff_file: false)
raise NoteDiffFileCreationError, DIFF_FILE_NOT_FOUND_MESSAGE unless diff_file
diff_line = diff_file.line_for_position(self.original_position)
@@ -57,6 +82,7 @@ class DiffNote < Note
.merge(diff: diff_file.diff_hunk(diff_line))
create_note_diff_file(creation_params)
clear_memoization(:diff_file)
end
# Returns the diff file from `position`
@@ -69,11 +95,11 @@ class DiffNote < Note
end
# Returns the diff file from `original_position`
def diff_file
def diff_file(create_missing_diff_file: true)
strong_memoize(:diff_file) do
next if for_design?
enqueue_diff_file_creation_job if should_create_diff_file?
enqueue_diff_file_creation_job if create_missing_diff_file && should_create_diff_file?
fetch_diff_file
end
@@ -160,7 +186,7 @@ class DiffNote < Note
def fetch_diff_file
return note_diff_file.raw_diff_file if note_diff_file && !note_diff_file.raw_diff_file.has_renderable?
if created_at_diff?(noteable.diff_refs)
if noteable && created_at_diff?(noteable.diff_refs)
# We're able to use the already persisted diffs (Postgres) if we're
# presenting a "current version" of the MR discussion diff.
# So no need to make an extra Gitaly diff request for it.

View File

@@ -232,6 +232,10 @@ class Group < Namespace
scope :with_non_invite_group_members, -> { includes(:non_invite_group_members) }
scope :with_request_group_members, -> { includes(:request_group_members) }
scope :in_accessible_sub_namespaces, -> do
where('id IN (SELECT id FROM accessible_sub_namespace_ids)')
end
scope :by_id, ->(groups) { where(id: groups) }
scope :by_ids_or_paths, ->(ids, paths) do

View File

@@ -336,6 +336,13 @@ class Issue < ApplicationRecord
where('issues.namespace_id IN (SELECT id FROM namespace_ids)').with(cte.to_arel)
end
def with_accessible_sub_namespace_ids_cte(namespace_ids)
# Using materialized: true to ensure the CTE is computed once and reused, which significantly improves performance
# for complex queries. See: https://gitlab.com/gitlab-org/gitlab/-/issues/548094
accessible_sub_namespace_ids = Gitlab::SQL::CTE.new(:accessible_sub_namespace_ids, namespace_ids, materialized: true)
with(accessible_sub_namespace_ids.to_arel)
end
override :order_upvotes_desc
def order_upvotes_desc
reorder(upvotes_count: :desc)

View File

@@ -42,7 +42,10 @@ class UserDetail < ApplicationRecord
( #
[0-9]{4}- # 4 digits spaced by dash
){3} # 3 times
[0-9]{4} # end with 4 digits
( #
[0-9]{3} # end with 3 digits
) #
[0-9X] # followed by a fourth digit or an X
\z # end of string
/x
@@ -119,7 +122,7 @@ end
def orcid_format
return if orcid.blank? || orcid =~ UserDetail::ORCID_VALIDATION_REGEX
errors.add(:orcid, _('must contain only a orcid ID.'))
errors.add(:orcid, _('must contain only a valid ORCID.'))
end
UserDetail.prepend_mod_with('UserDetail')
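For reference, the last character of an ORCID iD is a checksum that can be a digit or `X`, which the updated pattern now accepts. A quick sketch of the behavior change, using a compact reconstruction of the free-spacing pattern above (the `\A` anchor is assumed from the unchanged part of the regex):

```ruby
orcid_regex = /\A([0-9]{4}-){3}[0-9]{3}[0-9X]\z/

orcid_regex.match?('0000-0002-1825-0097') # => true
orcid_regex.match?('0000-0002-1694-233X') # => true; the old pattern rejected the trailing X
```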

View File

@@ -157,6 +157,13 @@ module WorkItems
WorkItems::Type.by_type(type_names).order_by_name_asc
end
def unavailable_widgets_on_conversion(target_type, resource_parent)
source_widgets = widgets(resource_parent)
target_widgets = target_type.widgets(resource_parent)
target_widget_types = target_widgets.map(&:widget_type).to_set
source_widgets.reject { |widget| target_widget_types.include?(widget.widget_type) }
end
def allowed_child_types(cache: false, authorize: false, resource_parent: nil)
cached_data = cache ? with_reactive_cache { |query_data| query_data[:allowed_child_types_by_name] } : nil

View File

@@ -7,6 +7,13 @@ module Ci
RUNNER_REMOTE_TAG_PREFIX = 'refs/tags/'
RUNNER_REMOTE_BRANCH_PREFIX = 'refs/remotes/origin/'
attr_reader :queue_size, :queue_depth
def set_queue_metrics(size:, depth:)
@queue_size = [0, size.to_i].max
@queue_depth = [0, depth.to_i].max
end
def artifacts
return unless options[:artifacts]

View File

@@ -65,7 +65,7 @@ module Ci
valid = true
depth = 0
each_build(params) do |build|
each_build(params) do |build, queue_size|
depth += 1
@metrics.increment_queue_operation(:queue_iteration)
@@ -93,7 +93,7 @@
end
result = @logger.instrument(:process_build) do
process_build(build, params)
process_build(build, params, queue_size: queue_size, queue_depth: depth)
end
next unless result
@@ -123,11 +123,12 @@
builds = queue.build_candidates
build_and_partition_ids = retrieve_queue(-> { queue.execute(builds) })
queue_size = build_and_partition_ids.size
@metrics.observe_queue_size(-> { build_and_partition_ids.size }, @runner.runner_type)
@metrics.observe_queue_size(-> { queue_size }, @runner.runner_type)
build_and_partition_ids.each do |build_id, partition_id|
yield Ci::Build.find_by!(partition_id: partition_id, id: build_id)
yield Ci::Build.find_by!(partition_id: partition_id, id: build_id), queue_size
end
end
# rubocop: enable CodeReuse/ActiveRecord
@@ -146,7 +147,7 @@
end
end
def process_build(build, params)
def process_build(build, params, queue_size:, queue_depth:)
return remove_from_queue!(build) unless build.pending?
if runner_matched?(build)
@@ -164,7 +165,7 @@
# In case when 2 runners try to assign the same build, second runner will be declined
# with StateMachines::InvalidTransition or StaleObjectError when doing run! or save method.
if assign_runner_with_instrumentation!(build, params)
present_build_with_instrumentation!(build)
present_build_with_instrumentation!(build, queue_size: queue_size, queue_depth: queue_depth)
end
rescue ActiveRecord::StaleObjectError
# We are looping to find another build that is not conflicting
@@ -216,17 +217,18 @@
end
end
def present_build_with_instrumentation!(build)
def present_build_with_instrumentation!(build, queue_size:, queue_depth:)
@logger.instrument(:process_build_present_build) do
present_build!(build)
present_build!(build, queue_size: queue_size, queue_depth: queue_depth)
end
end
# Force variables evaluation to occur now
def present_build!(build)
def present_build!(build, queue_size:, queue_depth:)
# We need to use the presenter here because Gitaly calls in the presenter
# may fail, and we need to ensure the response has been generated.
presented_build = ::Ci::BuildRunnerPresenter.new(build) # rubocop:disable CodeReuse/Presenter -- old code
presented_build.set_queue_metrics(size: queue_size, depth: queue_depth)
@logger.instrument(:present_build_logs) do
log_artifacts_context(build)

View File

@@ -64,14 +64,14 @@
= f.gitlab_ui_checkbox_component :ropc_without_client_credentials, _('Allow users to use resource owner password credentials flow without oauth client credentials')
.form-group
= f.label :user_default_external, _('New users set to external'), class: 'label-bold'
= f.gitlab_ui_checkbox_component :user_default_external, _('Newly-registered users are external by default')
= f.label :user_default_external, _('External users'), class: 'label-bold'
= f.gitlab_ui_checkbox_component :user_default_external, _('Make new users external by default')
.gl-mt-3
= _('Internal users')
= f.text_field :user_default_internal_regex, placeholder: _('Regex pattern'), class: 'form-control gl-form-input gl-mt-2'
= _('Email exclusion pattern')
= f.text_field :user_default_internal_regex, class: 'form-control gl-form-input gl-mt-2'
.form-text.gl-text-subtle
= _('Specify an email address regex pattern to identify default internal users.')
#{link_to _('Learn more'), help_page_path('administration/external_users.md', anchor: 'set-a-new-user-to-external'), target: '_blank', rel: 'noopener noreferrer'}.
= _('Specify a regular expression for emails. New users with matching emails are not made external users.')
#{link_to _('Learn more'), help_page_path('administration/external_users.md', anchor: 'make-new-users-external-by-default'), target: '_blank', rel: 'noopener noreferrer'}.
- unless Gitlab.com?
.form-group
= f.label :deactivate_dormant_users, _('Dormant users'), class: 'label-bold'

View File

@@ -516,6 +516,16 @@
:idempotent: true
:tags: []
:queue_namespace: :cronjob
- :name: cronjob:counters_flush_stale_counter_increments_cron
:worker_name: Gitlab::Counters::FlushStaleCounterIncrementsCronWorker
:feature_category: :continuous_integration
:has_external_dependencies: false
:urgency: :low
:resource_boundary: :unknown
:weight: 1
:idempotent: true
:tags: []
:queue_namespace: :cronjob
- :name: cronjob:database_batched_background_migration
:worker_name: Database::BatchedBackgroundMigrationWorker
:feature_category: :database
@@ -3518,6 +3528,16 @@
:idempotent: true
:tags: []
:queue_namespace:
- :name: counters_flush_stale_counter_increments
:worker_name: Gitlab::Counters::FlushStaleCounterIncrementsWorker
:feature_category: :continuous_integration
:has_external_dependencies: false
:urgency: :throttled
:resource_boundary: :unknown
:weight: 1
:idempotent: true
:tags: []
:queue_namespace:
- :name: create_commit_signature
:worker_name: CreateCommitSignatureWorker
:feature_category: :source_code_management
@@ -3780,7 +3800,7 @@
:queue_namespace:
- :name: flush_counter_increments
:worker_name: FlushCounterIncrementsWorker
:feature_category: :not_owned
:feature_category: :continuous_integration
:has_external_dependencies: false
:urgency: :low
:resource_boundary: :unknown

View File

@@ -12,10 +12,10 @@ class FlushCounterIncrementsWorker
sidekiq_options retry: 3
loggable_arguments 0, 2
defer_on_database_health_signal :gitlab_main, [:project_daily_statistics], 1.minute
# The increments in `ProjectStatistics` are owned by several teams depending
# on the counter
feature_category :not_owned # rubocop:disable Gitlab/AvoidFeatureCategoryNotOwned
feature_category :continuous_integration
urgency :low
deduplicate :until_executed, including_scheduled: true, if_deduplicated: :reschedule_once

View File

@@ -0,0 +1,25 @@
# frozen_string_literal: true
module Gitlab
module Counters
class FlushStaleCounterIncrementsCronWorker
include ApplicationWorker
# rubocop:disable Scalability/CronWorkerContext -- This is an instance-wide worker and it's not scoped to any context.
include CronjobQueue
# rubocop:enable Scalability/CronWorkerContext
data_consistency :sticky
feature_category :continuous_integration
idempotent!
def perform
return unless ::Gitlab.com_except_jh? # rubocop:disable Gitlab/AvoidGitlabInstanceChecks -- we need to check on which instance this happens
FlushStaleCounterIncrementsWorker.perform_with_capacity
end
end
end
end

View File

@@ -0,0 +1,98 @@
# frozen_string_literal: true
# Periodically flushes stale counter increments for specific models in batches.
# Tracks progress using Redis to resume from the last processed ID.
# Currently limited to a predefined ID range and ensures only one job runs at a time.
# See: https://gitlab.com/gitlab-com/gl-infra/production/-/issues/19461
module Gitlab
module Counters
class FlushStaleCounterIncrementsWorker
include ApplicationWorker
include LimitedCapacity::Worker
MAX_RUNNING_JOBS = 1
BATCH_LIMIT = 1000
# We hardcoded these IDs here because the FlushCounterIncrementsWorker
# was disabled in September 2024 after an incident.
# In March 2025, we reenabled the worker. These are the leftover entries
# on gitlab.com that still need to be flushed. Afterwards, we can remove this job.
ID_RANGES = {
ProjectDailyStatistic => {
initial_start_id: 3847138140,
end_id: 4074016739
}
}.freeze
data_consistency :sticky
feature_category :continuous_integration
urgency :throttled
idempotent!
deduplicate :until_executed
sidekiq_options retry: true
def perform_work
return unless ::Gitlab.com_except_jh? # rubocop:disable Gitlab/AvoidGitlabInstanceChecks -- we need to check on which instance this happens
ID_RANGES.each do |model, attributes|
min_id = start_id(model)
last_id = flush_stale_for_model(model, min_id, attributes[:end_id])
# We need to add +1 here because otherwise we'd process the last record twice.
update_start_id(model, last_id + 1)
end
end
def remaining_work_count
# Iterate through all models and check whether there is still work to do
remaining_work = 0
ID_RANGES.each do |model, attributes|
return remaining_work if remaining_work > 0
remaining_work = [(attributes[:end_id] - start_id(model)), 0].max
end
remaining_work
end
def max_running_jobs
MAX_RUNNING_JOBS
end
private
def flush_stale_for_model(model, min_id, end_id)
scope = model.where(id: min_id..end_id).order(:id).limit(BATCH_LIMIT) # rubocop:disable CodeReuse/ActiveRecord -- best bet to look for an id > x
# If the batch contains no records, we still need a next start ID. Since there is no
# record ID to take it from, our best bet is min_id + BATCH_LIMIT, so the next batch
# has somewhere to start from.
return min_id + BATCH_LIMIT if scope.none?
Gitlab::Counters::FlushStaleCounterIncrements
.new(scope)
.execute
scope.last.id
end
def start_id(model)
with_redis do |redis|
(redis.get(redis_key(model)) || ID_RANGES[model][:initial_start_id]).to_i
end
end
def update_start_id(model, start_id)
with_redis do |redis|
redis.set(redis_key(model), start_id, ex: 1.week)
end
end
def with_redis(&)
Gitlab::Redis::SharedState.with(&) # rubocop:disable CodeReuse/ActiveRecord -- not AR
end
def redis_key(model)
"flush_stale_counters:last_id:#{model.name}"
end
end
end
end

View File

@@ -0,0 +1,10 @@
---
name: use_cte_optimization_for_confidentiality_filter
description: Use a CTE to optimize confidentiality checks on Epic WorkItems List
feature_issue_url: https://gitlab.com/gitlab-org/gitlab/-/issues/548094
introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/193857
rollout_issue_url:
milestone: '18.1'
group: group::product planning
type: gitlab_com_derisk
default_enabled: false

View File

@@ -1,10 +0,0 @@
---
name: ci_lint_mutation
description: Used to hide the CiLint GraphQL mutation until we finish updating its structure
feature_issue_url: https://gitlab.com/gitlab-org/gitlab/-/issues/540764
introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/191207
rollout_issue_url: https://gitlab.com/gitlab-org/gitlab/-/issues/545316
milestone: '18.1'
group: group::pipeline authoring
type: wip
default_enabled: false

View File

@@ -501,6 +501,9 @@ if Gitlab.ee? && Settings['ee_cron_jobs']
end
Settings.cron_jobs['poll_interval'] ||= ENV["GITLAB_CRON_JOBS_POLL_INTERVAL"] ? ENV["GITLAB_CRON_JOBS_POLL_INTERVAL"].to_i : nil
Settings.cron_jobs['flush_stale_counter_increments_cron_worker'] ||= {}
Settings.cron_jobs['flush_stale_counter_increments_cron_worker']['cron'] ||= '0 */1 * * *'
Settings.cron_jobs['flush_stale_counter_increments_cron_worker']['job_class'] = 'Gitlab::Counters::FlushStaleCounterIncrementsCronWorker'
Settings.cron_jobs['stuck_ci_jobs_worker'] ||= {}
Settings.cron_jobs['stuck_ci_jobs_worker']['cron'] ||= '0 * * * *'
Settings.cron_jobs['stuck_ci_jobs_worker']['job_class'] = 'StuckCiJobsWorker'

View File

@@ -315,6 +315,8 @@
- 1
- - counters_cleanup_refresh
- 1
- - counters_flush_stale_counter_increments
- 1
- - create_commit_signature
- 2
- - create_github_webhook

View File

@@ -5,4 +5,4 @@ feature_category: importers
introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/187337
milestone: '17.11'
queued_migration_version: 20250408122860
finalized_by: # version of the migration that finalized this BBM
finalized_by: '20250609161211'

View File

@@ -8,14 +8,6 @@ description: Used to store relation export files location
introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/90624
milestone: '15.2'
gitlab_schema: gitlab_main_cell
desired_sharding_key:
project_id:
references: projects
backfill_via:
parent:
foreign_key: project_relation_export_id
table: project_relation_exports
sharding_key: project_id
belongs_to: relation_export
sharding_key:
project_id: projects
table_size: small
desired_sharding_key_migration_job_name: BackfillProjectRelationExportUploadsProjectId

View File

@@ -0,0 +1,14 @@
# frozen_string_literal: true
class AddProjectRelationExportUploadsProjectIdNotNull < Gitlab::Database::Migration[2.3]
milestone '18.1'
disable_ddl_transaction!
def up
add_not_null_constraint :project_relation_export_uploads, :project_id
end
def down
remove_not_null_constraint :project_relation_export_uploads, :project_id
end
end

View File

@@ -0,0 +1,20 @@
# frozen_string_literal: true
class FinalizeBackfillBulkImportConfigurationsOrganizationId < Gitlab::Database::Migration[2.3]
milestone '18.1'
disable_ddl_transaction!
restrict_gitlab_migration gitlab_schema: :gitlab_main_cell
def up
ensure_batched_background_migration_is_finished(
job_class_name: 'BackfillBulkImportConfigurationsOrganizationId',
table_name: :bulk_import_configurations,
column_name: :id,
job_arguments: [:organization_id, :bulk_imports, :organization_id, :bulk_import_id],
finalize: true
)
end
def down; end
end

View File

@@ -0,0 +1 @@
8eb1df283276a339d834b4deed3a83175ffd56c739677d4491650e63a7d1ea82

View File

@@ -0,0 +1 @@
224c16feb551d494edb99e84857d38ae922c1f4b77bc701a13d971c40191a3cf

View File

@@ -21240,7 +21240,8 @@ CREATE TABLE project_relation_export_uploads (
updated_at timestamp with time zone NOT NULL,
export_file text NOT NULL,
project_id bigint,
CONSTRAINT check_d8ee243e9e CHECK ((char_length(export_file) <= 255))
CONSTRAINT check_d8ee243e9e CHECK ((char_length(export_file) <= 255)),
CONSTRAINT check_f8d6cd1562 CHECK ((project_id IS NOT NULL))
);
CREATE SEQUENCE project_relation_export_uploads_id_seq

View File

@@ -56,7 +56,7 @@ After you sign in to Switchboard, follow these steps to create your instance:
- **Primary region**: Select the primary AWS region to use for data storage. Note the
[available AWS regions](../../../subscriptions/gitlab_dedicated/data_residency_and_high_availability.md#available-aws-regions).
- **Secondary region**: Select a secondary AWS region to use for data storage and [disaster recovery](../../../subscriptions/gitlab_dedicated/data_residency_and_high_availability.md#disaster-recovery). This field does not appear for Geo migrations from an existing GitLab Self-Managed instance. Some regions have [limited support](../../../subscriptions/gitlab_dedicated/data_residency_and_high_availability.md#secondary-regions-with-limited-support).
- **Secondary region**: Select a secondary AWS region to use for data storage and [disaster recovery](../disaster_recovery.md). This field does not appear for Geo migrations from an existing GitLab Self-Managed instance. Some regions have [limited support](../../../subscriptions/gitlab_dedicated/data_residency_and_high_availability.md#secondary-regions-with-limited-support).
- **Backup region**: Select a region to replicate and store your primary data backups.
You can use the same option as your primary or secondary regions, or choose a different region for [increased redundancy](../../../subscriptions/gitlab_dedicated/data_residency_and_high_availability.md#disaster-recovery).

View File

@@ -0,0 +1,143 @@
---
stage: GitLab Dedicated
group: Environment Automation
info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments
title: Disaster recovery for GitLab Dedicated
---
{{< details >}}
- Tier: Ultimate
- Offering: GitLab Dedicated
{{< /details >}}
The disaster recovery process ensures your GitLab Dedicated instance
can be restored if a disaster affects your primary region.
GitLab can deploy your instance in these AWS regions:
- A primary region where your instance runs.
- If selected, a secondary region that serves as a backup if the primary region fails.
- A backup region where your data backups are replicated for additional protection.
If your primary region becomes unavailable due to an outage or critical system failure,
GitLab initiates a failover to your secondary region. If no secondary region is configured,
recovery uses backup restoration from the backup region.
## Prerequisites
To be eligible for the full recovery objectives, you must:
- Specify both a primary and secondary region during [onboarding](create_instance/_index.md). If you don't specify a secondary region, recovery is limited to [backup restoration](#automated-backups).
- Make sure both regions are [supported by GitLab Dedicated](../../subscriptions/gitlab_dedicated/data_residency_and_high_availability.md#available-aws-regions). If you select a secondary region with [limited support](../../subscriptions/gitlab_dedicated/data_residency_and_high_availability.md#secondary-regions-with-limited-support), the recovery time and point objectives do not apply.
## Recovery objectives
GitLab Dedicated provides disaster recovery with these recovery objectives:
- Recovery Time Objective (RTO): Service is restored to your secondary region in eight hours or less.
- Recovery Point Objective (RPO): Data loss is limited to a maximum of four hours of the most recent changes, depending on when the disaster occurs relative to the last backup.
## Components
GitLab Dedicated leverages two key components to meet disaster recovery commitments:
- Geo replication
- Automated backups
### Geo replication
When you onboard to GitLab Dedicated, you select a primary region and a secondary region for
your environment. Geo continuously replicates data between these regions, including:
- Database content
- Repository storage
- Object storage
### Automated backups
GitLab performs automated backups of the database and repositories every four hours
(six times daily) by creating snapshots. Backups are retained for 30 days
and are geographically replicated by AWS for additional protection.
Database backups:
- Use continuous log-based backups in the primary region for point-in-time recovery.
- Stream replication to the secondary region to provide a near-real-time copy.
Object storage backups:
- Use geographical replication and versioning to provide backup protection.
These backups serve as recovery points during disaster recovery operations.
The four-hour backup frequency supports the Recovery Point Objective (RPO) to ensure
you lose no more than four hours of data.
## Disaster coverage
Disaster recovery covers these scenarios with guaranteed recovery objectives:
- Partial region outage (for example, availability zone failure)
- Complete outage of your primary region
Disaster recovery covers these scenarios on a best-effort basis without guaranteed recovery objectives:
- Loss of both primary and secondary regions
- Global internet outages
- Data corruption issues
Disaster recovery has these service limitations:
- Advanced search indexes are not continuously replicated. After failover, these indexes are rebuilt when the secondary region is promoted. Basic search remains available during rebuilding.
- ClickHouse Cloud is provisioned only in the primary region. Features that require this service might be unavailable if the primary region is completely down.
- Production preview environments do not have secondary instances.
- Hosted runners are supported only in the primary region and cannot be rebuilt in the secondary instance.
- Some secondary regions have limited support and are not covered by the RPO and RTO targets. These regions have limited email functionality and resilience in your failover instance because of AWS limitations. For more information, see [secondary regions with limited support](../../subscriptions/gitlab_dedicated/data_residency_and_high_availability.md#secondary-regions-with-limited-support).
GitLab does not provide:
- Programmatic monitoring of failover events
- Customer-initiated disaster recovery testing
## Disaster recovery workflow
Disaster recovery is initiated when your instance becomes unavailable to most users due to:
- Complete region failure (for example, an AWS region outage).
- Critical component failure in the GitLab service or infrastructure that cannot be quickly recovered.
### Failover process
When your instance becomes unavailable, the GitLab Dedicated team:
1. Gets alerted by monitoring systems.
1. Investigates if failover is required.
1. If failover is required:
1. Notifies you that failover is in progress.
1. Promotes the secondary region to primary.
1. Updates DNS records for `<customer>.gitlab-dedicated.com` to point to the newly promoted
region.
1. Notifies you when failover completes.
If you use PrivateLink, you must update your internal networking configuration
to target the PrivateLink endpoint for the secondary region. To minimize downtime,
configure equivalent PrivateLink endpoints in your secondary region before a disaster occurs.
The failover process typically completes in 90 minutes or less.
### Communication during a disaster
During a disaster event, GitLab communicates with you through one or more of:
- Your operational contact information in Switchboard
- Slack
- Support tickets
GitLab may establish a temporary Slack channel and Zoom bridge to coordinate with
your team throughout the recovery process.
## Related topics
- [Data residency and high availability](../../subscriptions/gitlab_dedicated/data_residency_and_high_availability.md)
- [GitLab Dedicated architecture](architecture.md)

View File

@@ -55,32 +55,39 @@ Additionally, users can be set as external users using:
- [LDAP groups](auth/ldap/ldap_synchronization.md#external-groups).
- the [External providers list](../integration/omniauth.md#create-an-external-providers-list).
## Set a new user to external
## Make new users external by default
By default, new users are not set as external users. This behavior can be changed
by an administrator:
You can configure your instance to make all new users external by default. You can modify these user
accounts later to remove the external designation.
When you configure this feature, you can also define a regular expression used to identify email
addresses. New users with a matching email address are excluded and are not marked as external users. This
regular expression must:
- Use the Ruby format.
- Be convertible to JavaScript.
- Have the ignore case flag set (`/regex pattern/i`).
For example:
- `\.int@example\.com$`: Matches email addresses that end with `.int@example.com`.
- `^(?:(?!\.ext@example\.com).)*$\r?`: Matches email addresses that don't include `.ext@example.com`.
{{< alert type="warning" >}}
Adding a regular expression can increase the risk of a regular expression denial of service (ReDoS) attack.
{{< /alert >}}
Prerequisites:
- You must be an administrator for the GitLab Self-Managed instance.
To make new users external by default:
1. On the left sidebar, at the bottom, select **Admin**.
1. Select **Settings > General**.
1. Expand the **Account and limit** section.
If you change the default behavior of creating new users as external, you
have the option to narrow it down by defining a set of internal users.
The **Internal users** field allows specifying an email address regex pattern to
identify default internal users. New users whose email address matches the regex
pattern are set to internal by default rather than an external collaborator.
The regex pattern format is in Ruby, but it needs to be convertible to JavaScript,
and the ignore case flag is set (`/regex pattern/i`). Here are some examples:
- Use `\.internal@domain\.com$` to mark email addresses ending with
`.internal@domain.com` as internal.
- Use `^(?:(?!\.ext@domain\.com).)*$\r?` to mark users with email addresses
not including `.ext@domain.com` as internal.
{{< alert type="warning" >}}
Be aware that this regex could lead to a
[regular expression denial of service (ReDoS) attack](https://en.wikipedia.org/wiki/ReDoS).
{{< /alert >}}
1. Select the **Make new users external by default** checkbox.
1. Optional. In the **Email exclusion pattern** field, enter a regular expression.
1. Select **Save changes**.
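To sanity-check a pattern before saving it, you can evaluate it against sample addresses in a Rails console. A minimal sketch, assuming the `\.int@example\.com$` pattern from the examples above:

```ruby
# The setting is applied with the ignore-case flag, so build the pattern the same way.
pattern = Regexp.new('\.int@example\.com$', Regexp::IGNORECASE)

pattern.match?('dev.int@example.com') # => true: excluded, so the user stays internal
pattern.match?('guest@example.com')   # => false: the new user is made external
```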

View File

@@ -416,6 +416,11 @@ four standard [pagination arguments](#pagination-arguments):
Linted and processed contents of a CI config.
Should not be requested more than once per request.
{{< details >}}
**Deprecated** in GitLab 18.1.
Use CiLint mutation: <https://docs.gitlab.com/api/graphql/reference/#mutationcilint>.
{{< /details >}}
Returns [`CiConfig`](#ciconfig).
#### Arguments
@@ -3747,7 +3752,7 @@ Input type: `CiLintInput`
| Name | Type | Description |
| ---- | ---- | ----------- |
| <a id="mutationcilintclientmutationid"></a>`clientMutationId` | [`String`](#string) | A unique identifier for the client performing the mutation. |
| <a id="mutationcilintconfig"></a>`config` | [`CiConfig`](#ciconfig) | Linted CI config and metadata. |
| <a id="mutationcilintconfig"></a>`config` | [`CiConfigV2`](#ciconfigv2) | Linted CI config and metadata. |
| <a id="mutationcilinterrors"></a>`errors` | [`[String!]!`](#string) | Errors encountered during the mutation. |
### `Mutation.clusterAgentDelete`
@@ -23003,6 +23008,16 @@ Represents a component usage in a project.
| <a id="ciconfiggroupname"></a>`name` | [`String`](#string) | Name of the job group. |
| <a id="ciconfiggroupsize"></a>`size` | [`Int`](#int) | Size of the job group. |
### `CiConfigGroupV2`
#### Fields
| Name | Type | Description |
| ---- | ---- | ----------- |
| <a id="ciconfiggroupv2jobs"></a>`jobs` | [`[CiConfigJobV2!]`](#ciconfigjobv2) | Jobs in group. |
| <a id="ciconfiggroupv2name"></a>`name` | [`String`](#string) | Name of the job group. |
| <a id="ciconfiggroupv2size"></a>`size` | [`Int`](#int) | Size of the job group. |
### `CiConfigInclude`
#### Fields
@@ -23045,6 +23060,26 @@ Represents a component usage in a project.
| ---- | ---- | ----------- |
| <a id="ciconfigjobrestrictionrefs"></a>`refs` | [`[String!]`](#string) | Git refs the job restriction applies to. |
### `CiConfigJobV2`
#### Fields
| Name | Type | Description |
| ---- | ---- | ----------- |
| <a id="ciconfigjobv2afterscript"></a>`afterScript` | [`[String!]`](#string) | Override a set of commands that are executed after the job. |
| <a id="ciconfigjobv2allowfailure"></a>`allowFailure` | [`Boolean`](#boolean) | Allow job to fail. |
| <a id="ciconfigjobv2beforescript"></a>`beforeScript` | [`[String!]`](#string) | Override a set of commands that are executed before the job. |
| <a id="ciconfigjobv2environment"></a>`environment` | [`String`](#string) | Name of an environment to which the job deploys. |
| <a id="ciconfigjobv2except"></a>`except` | [`CiConfigJobRestriction`](#ciconfigjobrestriction) | Limit when jobs are not created. |
| <a id="ciconfigjobv2groupname"></a>`groupName` | [`String`](#string) | Name of the job group. |
| <a id="ciconfigjobv2name"></a>`name` | [`String`](#string) | Name of the job. |
| <a id="ciconfigjobv2needs"></a>`needs` | [`[CiConfigNeed!]`](#ciconfigneed) | Builds that must complete before the jobs run. |
| <a id="ciconfigjobv2only"></a>`only` | [`CiConfigJobRestriction`](#ciconfigjobrestriction) | Jobs are created when these conditions do not apply. |
| <a id="ciconfigjobv2script"></a>`script` | [`[String!]`](#string) | Shell script that is executed by a runner. |
| <a id="ciconfigjobv2stage"></a>`stage` | [`String`](#string) | Name of the job stage. |
| <a id="ciconfigjobv2tags"></a>`tags` | [`[String!]`](#string) | List of tags that are used to select a runner. |
| <a id="ciconfigjobv2when"></a>`when` | [`String`](#string) | When to run the job. |
### `CiConfigNeed`
#### Fields
@@ -23062,6 +23097,28 @@ Represents a component usage in a project.
| <a id="ciconfigstagegroups"></a>`groups` | [`CiConfigGroupConnection`](#ciconfiggroupconnection) | Groups of jobs for the stage. (see [Connections](#connections)) |
| <a id="ciconfigstagename"></a>`name` | [`String`](#string) | Name of the stage. |
### `CiConfigStageV2`
#### Fields
| Name | Type | Description |
| ---- | ---- | ----------- |
| <a id="ciconfigstagev2groups"></a>`groups` | [`[CiConfigGroupV2!]`](#ciconfiggroupv2) | Groups of jobs for the stage. |
| <a id="ciconfigstagev2name"></a>`name` | [`String`](#string) | Name of the stage. |
### `CiConfigV2`
#### Fields
| Name | Type | Description |
| ---- | ---- | ----------- |
| <a id="ciconfigv2errors"></a>`errors` | [`[String!]`](#string) | Linting errors. |
| <a id="ciconfigv2includes"></a>`includes` | [`[CiConfigInclude!]`](#ciconfiginclude) | List of included files. |
| <a id="ciconfigv2mergedyaml"></a>`mergedYaml` | [`String`](#string) | Merged CI configuration YAML. |
| <a id="ciconfigv2stages"></a>`stages` | [`[CiConfigStageV2!]`](#ciconfigstagev2) | Stages of the pipeline. |
| <a id="ciconfigv2status"></a>`status` | [`CiConfigStatus`](#ciconfigstatus) | Status of linting, can be either valid or invalid. |
| <a id="ciconfigv2warnings"></a>`warnings` | [`[String!]`](#string) | Linting warnings. |
### `CiConfigVariable`
CI/CD config variables.
@@ -42471,6 +42528,20 @@ Represents status.
| <a id="workitemtypesupportedconversiontypes"></a>`supportedConversionTypes` {{< icon name="warning-solid" >}} | [`[WorkItemType!]`](#workitemtype) | **Introduced** in GitLab 17.8. **Status**: Experiment. Supported conversion types for the work item type. |
| <a id="workitemtypewidgetdefinitions"></a>`widgetDefinitions` {{< icon name="warning-solid" >}} | [`[WorkItemWidgetDefinition!]`](#workitemwidgetdefinition) | **Introduced** in GitLab 16.7. **Status**: Experiment. Available widgets for the work item type. |
#### Fields with arguments
##### `WorkItemType.unavailableWidgetsOnConversion`
Widgets that will be lost when converting from source work item type to target work item type.
Returns [`[WorkItemWidgetDefinition!]`](#workitemwidgetdefinition).
###### Arguments
| Name | Type | Description |
| ---- | ---- | ----------- |
| <a id="workitemtypeunavailablewidgetsonconversiontarget"></a>`target` | [`WorkItemsTypeID!`](#workitemstypeid) | Target work item type to convert to. |
### `WorkItemTypeCountsByState`
Represents work item counts for the work item type.

View File

@@ -235,10 +235,10 @@ To create a built-in analytics dashboard:
The `gridAttributes` position the panel within a 12x12 dashboard grid, powered by [gridstack](https://github.com/gridstack/gridstack.js/tree/master/doc#item-options).
1. Register the dashboard by adding it to `builtin_dashboards` in [ee/app/models/product_analytics/dashboard.rb](https://gitlab.com/gitlab-org/gitlab/-/blob/master/ee/app/models/product_analytics/dashboard.rb).
1. Register the dashboard by adding it to `builtin_dashboards` in [ee/app/models/analytics/dashboard.rb](https://gitlab.com/gitlab-org/gitlab/-/blob/master/ee/app/models/analytics/dashboard.rb).
Here you can make your dashboard available at the project level, the group level, or both, and restrict access based on feature flags, license, user role, and so on.
1. Optional. Register visualization templates by adding them to `get_path_for_visualization` in [ee/app/models/product_analytics/visualization.rb](https://gitlab.com/gitlab-org/gitlab/-/blob/master/ee/app/models/product_analytics/visualization.rb).
1. Optional. Register visualization templates by adding them to `get_path_for_visualization` in [ee/app/models/analytics/visualization.rb](https://gitlab.com/gitlab-org/gitlab/-/blob/master/ee/app/models/analytics/visualization.rb).
For a complete example, refer to the AI Impact [dashboard config](https://gitlab.com/gitlab-org/gitlab/-/blob/master/ee/lib/gitlab/analytics/ai_impact_dashboard/dashboard.yaml).
@@ -281,7 +281,7 @@ See [`value_stream.js`](https://gitlab.com/gitlab-org/gitlab/-/blob/master/ee/ap
While developing new visualizations we can use [feature flags](../feature_flags/_index.md#create-a-new-feature-flag) to mitigate risks of disruptions or incorrect data for users.
The [`from_data`](https://gitlab.com/gitlab-org/gitlab/-/blob/master/ee/app/models/product_analytics/panel.rb) method builds the panel objects for a dashboard. Using the `filter_map` method, we can add a condition to skip rendering panels that include the visualization we are developing.
The [`from_data`](https://gitlab.com/gitlab-org/gitlab/-/blob/master/ee/app/models/analytics/panel.rb) method builds the panel objects for a dashboard. Using the `filter_map` method, we can add a condition to skip rendering panels that include the visualization we are developing.
For example, here we have added the `enable_usage_overview_visualization` feature flag and can check its current state to determine whether panels using the `usage_overview` visualization should be rendered:
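The snippet below is a minimal sketch of what that guard could look like inside `from_data`. Apart from `filter_map` and the feature flag, the method and attribute names are illustrative, not the verbatim implementation:

```ruby
def self.from_data(panel_yaml, project)
  panel_yaml.filter_map do |panel|
    # Skip panels that use the in-development visualization while the flag is off.
    next if panel['visualization'] == 'usage_overview' &&
      Feature.disabled?(:enable_usage_overview_visualization, project)

    new(title: panel['title'], project: project, visualization: panel['visualization'])
  end
end
```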

View File

@@ -7,6 +7,9 @@ title: Job token permission development guidelines
## Background
Job token permissions allow fine-grained access control for CI/CD job tokens that access GitLab API endpoints.
When enabled, the job token can only perform actions allowed for the project.
Historically, job tokens have provided broad access to resources by default. With the introduction of
fine-grained permissions for job tokens, we can enable granular access controls while adhering to the
principle of least privilege.
@@ -22,3 +25,130 @@ Before being accepted, all new job token permissions must:
- Tag `@gitlab-com/gl-security/product-security/appsec` for review
These requirements ensure that new permissions allow users to maintain explicit control over their security configuration, prevent unintended privilege escalation, and adhere to the principle of least privilege.
## Add a job token permission
Job token permissions are defined in several locations. When adding new permissions, ensure the following files are updated:
- **Backend permission definitions**: [`lib/ci/job_token/policies.rb`](https://gitlab.com/gitlab-org/gitlab/-/blob/master/lib/ci/job_token/policies.rb) - Lists the available permissions (see the sketch after this list).
- **JSON schema validation**: [`app/validators/json_schemas/ci_job_token_policies.json`](https://gitlab.com/gitlab-org/gitlab/-/blob/master/app/validators/json_schemas/ci_job_token_policies.json) - Defines the validation schema for the `job_token_policies` attribute of the `Ci::JobToken::GroupScopeLink` and `Ci::JobToken::ProjectScopeLink` models.
- **Frontend constants**: [`app/assets/javascripts/token_access/constants.js`](https://gitlab.com/gitlab-org/gitlab/-/blob/master/app/assets/javascripts/token_access/constants.js) - Lists the permission definitions for the UI.
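A sketch of what registering a new permission in the backend list might look like. The file's exact structure may differ, and `:read_tags` is a hypothetical permission used only for illustration:

```ruby
# lib/ci/job_token/policies.rb (illustrative sketch, not the verbatim file)
module Ci
  module JobToken
    module Policies
      POLICIES = %i[
        read_repository
        admin_repository
        read_tags # hypothetical new permission registered alongside the existing ones
      ].freeze
    end
  end
end
```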
## Add an API endpoint to a job token permission scope
### Route settings
To add job token policy support to an API endpoint, you need to configure two route settings:
#### `route_setting :authentication`
This setting controls which authentication methods are allowed for the endpoint.
**Parameters**:
- `job_token_allowed: true` - Enables CI/CD job tokens to authenticate against this endpoint.
#### `route_setting :authorization`
This setting defines the permission level and access controls for job token access.
**Parameters**:
- `job_token_policies`: The required permission level. Available policies are listed in [lib/ci/job_token/policies.rb](https://gitlab.com/gitlab-org/gitlab/-/blob/master/lib/ci/job_token/policies.rb).
- `allow_public_access_for_enabled_project_features`: Optional. Allows access based on the visibility settings of the project feature. See [public access configuration](#public-access-configuration).
#### Example usage
This example shows how to add job token policy support to the `tags` API endpoints, using the policies for the `repository` resource:
```ruby
# In lib/api/tags.rb
resource :projects do
  # Enable job token authentication for this endpoint
  route_setting :authentication, job_token_allowed: true

  # Require the `read_repository` policy for reading tags
  route_setting :authorization, job_token_policies: :read_repository,
    allow_public_access_for_enabled_project_features: :repository

  get ':id/repository/tags' do
    # ... existing endpoint implementation
  end

  # Enable job token authentication for this endpoint
  route_setting :authentication, job_token_allowed: true

  # Require the `admin_repository` policy for creating tags
  route_setting :authorization, job_token_policies: :admin_repository

  post ':id/repository/tags' do
    # ... existing endpoint implementation
  end
end
```
### Key considerations
#### Permission level selection
Choose the appropriate permission level based on the operation:
- **Read operations** (GET requests): Use `:read_*` permissions
- **Write/Delete operations** (POST, PUT, DELETE requests): Use `:admin_*` permissions
#### Public access configuration
The `allow_public_access_for_enabled_project_features` parameter allows job tokens to access endpoints when:
- The project has appropriate visibility.
- The project feature is enabled.
- The project feature has appropriate visibility.
- Job token permissions are not explicitly configured for the resource.
This provides backward compatibility while enabling fine-grained control when the project feature is not publicly accessible.
### Testing
When implementing job token permissions for API endpoints, use the shared RSpec example `'enforcing job token policies'` to test the authorization behavior. This shared example provides comprehensive coverage for all job token policy scenarios.
#### Usage
Add the shared example to your API endpoint tests by including it with the required parameters:
```ruby
describe 'GET /projects/:id/repository/tags' do
  let(:route) { "/projects/#{project.id}/repository/tags" }

  it_behaves_like 'enforcing job token policies', :read_repository,
    allow_public_access_for_enabled_project_features: :repository do
    let(:user) { developer }
    let(:request) do
      get api(route), params: { job_token: target_job.token }
    end
  end

  # Your other endpoint-specific tests...
end
```
#### Parameters
The shared example takes the following parameters:
- The job token policy that should be enforced (for example, `:read_repository`)
- `allow_public_access_for_enabled_project_features` - (Optional) The project feature that the endpoint controls (for example, `:repository`)
- `expected_success_status` - (Optional) The expected success status of the request (defaults to `:success`)
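For a creation endpoint that responds with `201 Created`, the optional status parameter can be combined with the policy in the same way (a sketch; the route, parameters, and policy are illustrative):

```ruby
describe 'POST /projects/:id/repository/tags' do
  let(:route) { "/projects/#{project.id}/repository/tags" }

  it_behaves_like 'enforcing job token policies', :admin_repository,
    expected_success_status: :created do
    let(:user) { developer }
    let(:request) do
      post api(route), params: { tag_name: 'v1.0.0', ref: 'main', job_token: target_job.token }
    end
  end
end
```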
#### What the shared example tests
The `'enforcing job token policies'` shared example automatically tests:
1. **Access granted**: Job tokens can access the endpoint when the required permissions are configured for the accessed project.
1. **Access denied**: Job tokens cannot access the endpoint when the required permissions are not configured for the accessed project.
1. **Public access fallback**: Access falls back to the `allow_public_access_for_enabled_project_features` behavior when permissions are not configured.
### Documentation
After you add job token support for a new API endpoint, you must update the [fine-grained permissions for CI/CD job tokens](../../ci/jobs/fine_grained_permissions.md#available-api-endpoints) documentation.
Run the following command to regenerate this topic:
```shell
bundle exec rake ci:job_tokens:compile_docs
```

View File

@ -258,6 +258,29 @@ When a feature becomes generally available and the flag is enabled or removed, t
GitLab Dedicated in the same GitLab version. GitLab Dedicated follows its own
[release schedule](maintenance.md) for version deployments.
## Service level availability
GitLab Dedicated maintains a monthly service level objective of 99.5% availability, which corresponds to roughly 3.6 hours of allowed unavailability in a 30-day month (720 hours × 0.5%).
Service level availability measures the percentage of time that GitLab Dedicated is available for use during a calendar month. GitLab calculates availability based on the following core services:
| Service area | Included features |
|--|--|
| Web interface | GitLab issues, merge requests, CI job logs, GitLab API, Git operations over HTTPS |
| Container Registry | Registry HTTPS requests |
| Git operations | Git push, pull, and clone operations over SSH |
### Service level exclusions
The following are not included in service level availability calculations:
- Service interruptions caused by customer misconfigurations
- Issues with customer or cloud provider infrastructure outside of GitLab control
- Scheduled maintenance windows
- Emergency maintenance for critical security or data issues
- Service disruptions caused by natural disasters, widespread internet outages, datacenter failures, or other events outside of GitLab control
## Migrate to GitLab Dedicated
To migrate your data to GitLab Dedicated:

View File

@ -128,3 +128,5 @@ Some secondary regions have [limited support](#secondary-regions-with-limited-su
{{< /alert >}}
You can also opt to store backup copies in a separate cloud region for increased redundancy.
For more information, see [disaster recovery for GitLab Dedicated](../../administration/dedicated/disaster_recovery.md).

View File

@ -235,6 +235,9 @@ User-defined variables can affect the behavior of any policy jobs in the pipelin
When the `variables_override` option is not specified, the "highest precedence" behavior is maintained. For more information about this behavior, see [precedence of variables in pipeline execution policies](#precedence-of-variables-in-pipeline-execution-policies).
When the pipeline execution policy controls variable precedence, the job logs include the configured `variables_override` options and the policy name.
To view these logs, `gitlab-runner` must be updated to version 18.1 or later.
#### Example `variables_override` configuration
Add the `variables_override` option to your pipeline execution policy configuration:

View File

@ -23,7 +23,7 @@ title: GitLab Duo with Amazon Q
{{< alert type="note" >}}
If you have a GitLab Duo Pro or Duo Enterprise add-on, this feature is not available.
GitLab Duo with Amazon Q cannot be combined with other GitLab Duo add-ons.
{{< /alert >}}

View File

@ -24,7 +24,7 @@ title: Set up GitLab Duo with Amazon Q
{{< alert type="note" >}}
If you have a GitLab Duo Pro or Duo Enterprise add-on, this feature is not available.
GitLab Duo with Amazon Q cannot be combined with other GitLab Duo add-ons.
{{< /alert >}}

View File

@ -7,8 +7,10 @@ module API
class JobInfo < Grape::Entity
expose :id, :name, :stage
expose :project_id, :project_name
expose :time_in_queue_seconds
expose :project_jobs_running_on_instance_runners_count
expose :queue_size, :queue_depth
end
end
end

View File

@ -173,7 +173,7 @@ module Gitlab
end
def key
project_id = counter_record.project.id
project_id = counter_record.project_id
record_name = counter_record.class
record_id = counter_record.id

View File

@ -0,0 +1,52 @@
# frozen_string_literal: true
module Gitlab
module Counters
class FlushStaleCounterIncrements
def initialize(collection)
@collection = collection
@logger = Gitlab::AppLogger
@counter_attributes = collection.counter_attributes.pluck(:attribute)
end
def execute
collection_min_id = collection.minimum(:id)
counter_attributes.each do |attribute|
logger.info(
class: self.class,
attribute: attribute,
collection_min_id: collection_min_id
)
counters = filtered_counters(collection, attribute)
counters.each_value(&:commit_increment!)
end
end
private
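# Fetch pending increments for all records in one Redis round trip and
# return only the counters that still have a value to flush.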
def filtered_counters(scope, attribute)
counters = {}
keys = scope.map { |counter_record| counter_record.counter(attribute).key }
values = Gitlab::Redis::SharedState.with do |redis|
if Gitlab::Redis::ClusterUtil.cluster?(redis)
Gitlab::Redis::ClusterUtil.batch_get(keys, redis)
else
redis.mget(*keys)
end
end
values.each_with_index do |value, index|
next if value.nil?
key = keys[index]
counter_record = scope[index]
counters[key] = counter_record.counter(attribute)
end
counters
end
attr_reader :collection, :logger, :counter_attributes
end
end
end
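A sketch of how this class is driven, mirroring the spec added later in this commit (the `ProjectDailyStatistic` scope comes from that spec; it is not part of this file):

```ruby
# Flush any stale Redis increments for fetch counts recorded in the past year.
collection = ProjectDailyStatistic.where(date: 1.year.ago..)
Gitlab::Counters::FlushStaleCounterIncrements.new(collection).execute
```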

View File

@ -92,6 +92,8 @@ module Gitlab
def save_batch_with_retry(relation_name, batch, retry_count = 0)
valid_records, invalid_records = batch.partition { |record| record.valid? }
invalid_records.map! { |record| ::Import::ImportRecordPreparer.recover_invalid_record(record) }
save_valid_records(relation_name, valid_records)
save_potentially_invalid_records(relation_name, invalid_records)

View File

@ -6,6 +6,7 @@ module Gitlab
module Runner
class RunnerFleetPipelineSeeder
DEFAULT_JOB_COUNT = 400
DEFAULT_USERNAME = 'root'
MAX_QUEUE_TIME_IN_SECONDS = 5.minutes.to_i
PIPELINE_CREATION_RANGE_MIN_IN_SECONDS = 2.hours.to_i
@ -27,8 +28,10 @@ module Gitlab
# @param [Gitlab::Logger] logger
# @param [Integer] job_count the number of jobs to create across the runners
# @param [Array<Hash>] projects_to_runners list of project IDs to respective runner IDs
def initialize(logger = Gitlab::AppLogger, projects_to_runners:, job_count:)
# @param [String] username the user that will create the pipelines
def initialize(logger = Gitlab::AppLogger, projects_to_runners:, job_count:, username:)
@logger = logger
@user = User.find_by_username!(username.presence || DEFAULT_USERNAME)
@projects_to_runners = projects_to_runners.map do |v|
{ project_id: v[:project_id], runners: ::Ci::Runner.id_in(v[:runner_ids]).to_a }
end
@ -47,7 +50,8 @@ module Gitlab
remaining_job_count -= create_pipeline(
job_count: remaining_job_count,
**@projects_to_runners[PROJECT_JOB_DISTRIBUTION.length],
status: random_pipeline_status
status: random_pipeline_status,
name: "Mock pipeline #{@job_count - remaining_job_count}"
)
end
@ -78,20 +82,21 @@ module Gitlab
pipeline_count = [1, total_jobs / pipeline_job_count].max
(1..pipeline_count).each do
(1..pipeline_count).each do |pipeline_index|
remaining_job_count -= create_pipeline(
job_count: pipeline_job_count,
project_id: project_id,
runners: runners,
status: random_pipeline_status
status: random_pipeline_status,
name: "Mock pipeline #{pipeline_index}"
)
end
remaining_job_count
end
def create_pipeline(job_count:, runners:, project_id:, status: 'success', **attrs)
logger.info(message: 'Creating pipeline with builds on project',
def create_pipeline(job_count:, runners:, project_id:, name:, status: 'success', **attrs)
logger.info(message: 'Creating pipeline with builds on project', name: name,
status: status, job_count: job_count, project_id: project_id, **attrs)
raise ArgumentError, 'runners' unless runners
@ -125,6 +130,10 @@ module Gitlab
pipeline.ensure_project_iid! # allocate an internal_id outside of pipeline creation transaction
pipeline.save!
::Ci::Pipelines::UpdateMetadataService.new(pipeline, current_user: @user, params: { name: name }).execute
# This seeds ci_finished_pipeline_ch_sync_events, which is used to sync finished pipelines to ClickHouse.
::Ci::PipelineFinishedWorker.perform_async(pipeline.id) if pipeline.complete?
if created_at.present?
(1..job_count).each do |index|
create_build(pipeline, runners.sample, job_status(pipeline.status, index, job_count), index)

View File

@ -0,0 +1,56 @@
# frozen_string_literal: true
module Import
class ImportRecordPreparer
DIFF_NOTE_TO_DISCUSSION_NOTE_EXCLUDED_ATTRS = %w[
original_position change_position position line_code type commit_id
].freeze
DIFF_NOTE_RECOVERABLE_ERRORS = %i[missing_diff_file missing_diff_line].freeze
SUPPORTED_TYPES = [DiffNote].freeze
def self.recover_invalid_record(record)
return record unless SUPPORTED_TYPES.include?(record.class)
new(record).recover_invalid_record
end
def initialize(record)
@record = record
end
# If we notice this is being used for many models in the future we should consider refactoring,
# so each model has its own preparer. We can use metaprogramming to infer the preparer class.
def recover_invalid_record
create_discussion_note_on_missing_diff || record
# As we support more types, we can start to follow this pattern:
# case record
# when DiffNote
# create_discussion_note_on_missing_diff
# when Issue
# prepare_issue
# end || record
end
private
attr_reader :record
def create_discussion_note_on_missing_diff
return unless record.errors.details[:base].any? { |error| DIFF_NOTE_RECOVERABLE_ERRORS.include?(error[:error]) }
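# Preserve the lost diff position (old/new path and line) as text at the top of the recovered note.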
new_note = "*Comment on"
new_note += " #{record.position.old_path}:#{record.position.old_line} -->" if record.position.old_line
new_note += " #{record.position.new_path}:#{record.position.new_line}" if record.position.new_line
new_note += "*\n\n#{record.note}"
DiscussionNote.new(record.attributes.except(*DIFF_NOTE_TO_DISCUSSION_NOTE_EXCLUDED_ATTRS)).tap do |note|
note.note = new_note
note.importing = record.importing
end
end
end
end
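A sketch of the recovery path under the stated assumptions (`build_invalid_diff_note` is a hypothetical helper producing a `DiffNote` whose diff file is missing):

```ruby
note = build_invalid_diff_note # hypothetical helper, not part of this commit
note.validate                  # the preparer expects a validated record with errors

recovered = Import::ImportRecordPreparer.recover_invalid_record(note)
recovered.class # => DiscussionNote, with the lost diff position summarized in the note body
```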

View File

@ -32,7 +32,8 @@ namespace :gitlab do
if projects_to_runners
Gitlab::Seeders::Ci::Runner::RunnerFleetPipelineSeeder.new(
projects_to_runners: projects_to_runners,
job_count: args.job_count&.to_i
job_count: args.job_count&.to_i,
username: args.username
).seed
end
end

View File

@ -23994,6 +23994,9 @@ msgstr ""
msgid "Email display name"
msgstr ""
msgid "Email exclusion pattern"
msgstr ""
msgid "Email must be provided."
msgstr ""
@ -25884,6 +25887,9 @@ msgstr ""
msgid "External storage for repository static objects"
msgstr ""
msgid "External users"
msgstr ""
msgid "ExternalAuthorizationService|Classification label"
msgstr ""
@ -33282,9 +33288,6 @@ msgstr ""
msgid "Internal note"
msgstr ""
msgid "Internal users"
msgstr ""
msgid "Internal users cannot be deactivated"
msgstr ""
@ -36954,6 +36957,9 @@ msgstr ""
msgid "Make everyone on your team more productive regardless of their location. GitLab Geo creates read-only mirrors of your GitLab instance so you can reduce the time it takes to clone and fetch large repos."
msgstr ""
msgid "Make new users external by default"
msgstr ""
msgid "Make protected CI/CD variables and runners available in merge request pipelines. Protected resources will only be available in merge request pipelines if both the source and target branches of the merge request are protected."
msgstr ""
@ -40670,9 +40676,6 @@ msgstr ""
msgid "New topic"
msgstr ""
msgid "New users set to external"
msgstr ""
msgid "New work item"
msgstr ""
@ -40739,9 +40742,6 @@ msgstr ""
msgid "Newest first"
msgstr ""
msgid "Newly-registered users are external by default"
msgstr ""
msgid "Next"
msgstr ""
@ -50704,6 +50704,9 @@ msgstr ""
msgid "Regex pattern"
msgstr ""
msgid "Regex pattern. To use, select external by default setting"
msgstr ""
msgid "Region"
msgstr ""
@ -59643,7 +59646,7 @@ msgstr ""
msgid "Specify IP ranges that are always allowed for inbound traffic, for use with group-level IP restrictions. Runner and Pages daemon internal IPs should be listed here so that they can access project artifacts."
msgstr ""
msgid "Specify an email address regex pattern to identify default internal users."
msgid "Specify a regular expression for emails. New users with matching emails are not made external users."
msgstr ""
msgid "Specify the input values to use in this pipeline. Any inputs left unselected will use their default values."
@ -59873,6 +59876,9 @@ msgstr ""
msgid "Status lists not available with your current license"
msgstr ""
msgid "Status not found for given params"
msgstr ""
msgid "Status not supported"
msgstr ""
@ -63922,9 +63928,6 @@ msgstr ""
msgid "To continue, you need to select the link in the confirmation email we sent to verify your email address. If you didn't get our email, select %{strongStart}Resend confirmation email.%{strongEnd}"
msgstr ""
msgid "To define internal users, first enable new users set to external"
msgstr ""
msgid "To disable the setting, set this value to 0."
msgstr ""
@ -74130,7 +74133,7 @@ msgstr ""
msgid "must contain only a mastodon handle."
msgstr ""
msgid "must contain only a orcid ID."
msgid "must contain only a valid ORCID."
msgstr ""
msgid "must have a repository"

View File

@ -145,7 +145,7 @@ RSpec.describe 'Admin updates settings', feature_category: :shared do
user_internal_regex = find('#application_setting_user_default_internal_regex', visible: :all)
expect(user_internal_regex).to be_readonly
expect(user_internal_regex['placeholder']).to eq 'To define internal users, first enable new users set to external'
expect(user_internal_regex['placeholder']).to eq 'Regex pattern. To use, select external by default setting'
check 'application_setting_user_default_external'

View File

@ -44,7 +44,7 @@ describe('~/environments/environment_details/index.vue', () => {
${'job'} | ${DeploymentJob} | ${{ job: deployment.job }}
${'created date'} | ${'[data-testid="deployment-created-at"]'} | ${{ time: deployment.created }}
${'finished date'} | ${'[data-testid="deployment-finished-at"]'} | ${{ time: deployment.finished }}
${'deployment actions'} | ${DeploymentActions} | ${{ actions: deployment.actions, rollback: deployment.rollback, approvalEnvironment: deployment.deploymentApproval, deploymentWebPath: deployment.webPath }}
${'deployment actions'} | ${DeploymentActions} | ${{ actions: deployment.actions, rollback: deployment.rollback, approvalEnvironment: deployment.deploymentApproval, deploymentWebPath: deployment.webPath, status: deployment.status }}
`('should show the correct component for $cell', ({ component, props }) => {
expect(row.findComponent(component).props()).toMatchObject(props);
});

View File

@ -10,14 +10,19 @@ jest.mock('~/rapid_diffs/expand_lines/get_lines');
describe('ExpandLinesAdapter', () => {
const getExpandButton = (direction = 'up') =>
document.querySelector(`[data-expand-direction="${direction}"]`);
const getResultingHtml = () => document.querySelector('[data-hunk-lines="3"]');
const getFirstInsertedRow = () => document.querySelector('[data-hunk-lines="3"]');
const getLastInsertedRow = () => document.querySelector('[data-hunk-lines="4"]');
const getDiffElement = () => document.querySelector('#diffElement');
const getSurroundingLines = (direction) => {
const prev = getExpandButton(direction).closest('tr').previousElementSibling;
const next = getExpandButton(direction).closest('tr').nextElementSibling;
return [prev ? new DiffLineRow(prev) : null, next ? new DiffLineRow(next) : null];
};
const getDiffFileContext = () => {
return { data: { diffLinesPath: '/lines', viewer: 'text_parallel' } };
return {
data: { diffLinesPath: '/lines', viewer: 'text_parallel' },
diffElement: getDiffElement(),
};
};
const click = (direction) => {
return ExpandLinesAdapter.clicks.expandLines.call(
@ -28,37 +33,44 @@ describe('ExpandLinesAdapter', () => {
};
// tabindex="0" makes document.activeElement actually work in JSDOM
const createLinesResponse = () =>
'<tr data-hunk-lines="3"><td><a data-line-number="5" tabindex="0"></a></td></tr>';
`
<tr data-hunk-lines="3"><td><a data-line-number="5" tabindex="0"></a></td></tr>
<tr data-hunk-lines="4"><td><a data-line-number="6" tabindex="0"></a></td></tr>
`.trim();
beforeEach(() => {
setHTMLFixture(`
<table>
<tbody>
<tr>
<td>
<button data-click="expandLines" data-expand-direction="up"></button>
</td>
</tr>
<tr data-hunk-lines="1">
<td>
</td>
</tr>
<tr>
<td>
<button data-click="expandLines" data-expand-direction="both"></button>
</td>
</tr>
<tr data-hunk-lines="2">
<td>
</td>
</tr>
<tr>
<td>
<button data-click="expandLines" data-expand-direction="down"></button>
</td>
</tr>
</tbody>
</table>
<div id="diffElement">
<div data-file-body>
<table>
<tbody>
<tr>
<td>
<button data-click="expandLines" data-expand-direction="up"></button>
</td>
</tr>
<tr data-hunk-lines="1">
<td>
</td>
</tr>
<tr>
<td>
<button data-click="expandLines" data-expand-direction="both"></button>
</td>
</tr>
<tr data-hunk-lines="2">
<td>
</td>
</tr>
<tr>
<td>
<button data-click="expandLines" data-expand-direction="down"></button>
</td>
</tr>
</tbody>
</table>
</div>
</div>
`);
});
@ -72,20 +84,26 @@ describe('ExpandLinesAdapter', () => {
diffLinesPath: '/lines',
view: 'parallel',
});
expect(getResultingHtml()).not.toBe(null);
expect(getFirstInsertedRow()).not.toBe(null);
expect(getLastInsertedRow()).not.toBe(null);
expect(getExpandButton(direction)).toBe(null);
expect(getDiffElement().style.getPropertyValue('--total-rows')).toBe('6');
});
it('focuses first inserted line number', async () => {
getLines.mockResolvedValueOnce(createLinesResponse());
await click('down');
expect(document.activeElement).toEqual(getResultingHtml().querySelector('[data-line-number]'));
expect(document.activeElement).toEqual(
getFirstInsertedRow().querySelector('[data-line-number]'),
);
});
it('focuses last inserted line number', async () => {
getLines.mockResolvedValueOnce(createLinesResponse());
await click();
expect(document.activeElement).toEqual(getResultingHtml().querySelector('[data-line-number]'));
expect(document.activeElement).toEqual(
getLastInsertedRow().querySelector('[data-line-number]'),
);
});
it('prevents expansion while processing another expansion', () => {

View File

@ -2,7 +2,7 @@
require 'spec_helper'
RSpec.describe Types::Ci::Config::ConfigType do
RSpec.describe Types::Ci::LegacyConfig::ConfigType, feature_category: :pipeline_composition do
specify { expect(described_class.graphql_name).to eq('CiConfig') }
it 'exposes the expected fields' do

View File

@ -2,7 +2,7 @@
require 'spec_helper'
RSpec.describe Types::Ci::Config::GroupType do
RSpec.describe Types::Ci::LegacyConfig::GroupType, feature_category: :pipeline_composition do
specify { expect(described_class.graphql_name).to eq('CiConfigGroup') }
it 'exposes the expected fields' do

View File

@ -2,7 +2,7 @@
require 'spec_helper'
RSpec.describe Types::Ci::Config::JobType do
RSpec.describe Types::Ci::LegacyConfig::JobType, feature_category: :pipeline_composition do
specify { expect(described_class.graphql_name).to eq('CiConfigJob') }
it 'exposes the expected fields' do

View File

@ -2,7 +2,7 @@
require 'spec_helper'
RSpec.describe Types::Ci::Config::StageType do
RSpec.describe Types::Ci::LegacyConfig::StageType, feature_category: :pipeline_composition do
specify { expect(described_class.graphql_name).to eq('CiConfigStage') }
it 'exposes the expected fields' do

View File

@ -4,7 +4,7 @@ require 'spec_helper'
RSpec.describe Types::WorkItems::TypeType, feature_category: :team_planning do
let(:fields) do
%i[id icon_name name widget_definitions supported_conversion_types]
%i[id icon_name name widget_definitions supported_conversion_types unavailable_widgets_on_conversion]
end
specify { expect(described_class.graphql_name).to eq('WorkItemType') }
@ -12,4 +12,17 @@ RSpec.describe Types::WorkItems::TypeType, feature_category: :team_planning do
specify { expect(described_class).to have_graphql_fields(fields) }
specify { expect(described_class).to require_graphql_authorizations(:read_work_item_type) }
describe 'unavailable_widgets_on_conversion field' do
it 'has the correct arguments' do
field = described_class.fields['unavailableWidgetsOnConversion']
expect(field).to be_present
expect(field.arguments.keys).to contain_exactly('target')
target_arg = field.arguments['target']
expect(target_arg.type.to_type_signature).to eq('WorkItemsTypeID!')
end
end
end

View File

@ -0,0 +1,86 @@
# frozen_string_literal: true
require 'spec_helper'
RSpec.describe Gitlab::Counters::FlushStaleCounterIncrements, :clean_gitlab_redis_shared_state, feature_category: :continuous_integration do
let(:date) { 1.year.ago }
let(:collection) { ProjectDailyStatistic.where(date: date..) }
let(:service) { described_class.new(collection) }
let_it_be(:project) { create :project }
let!(:project_daily_statistic) do
create(:project_daily_statistic, date: Date.new(2025, 2, 1), fetch_count: 5, project: project)
end
let!(:project_daily_statistic_two) do
create(:project_daily_statistic, date: Date.new(2025, 2, 2), fetch_count: 0, project: project)
end
let!(:project_daily_statistic_three) do
create(:project_daily_statistic, date: Date.new(2025, 2, 3), fetch_count: 10, project: project)
end
let(:keys) do
[
project_daily_statistic.counter('fetch_count').key,
project_daily_statistic_two.counter('fetch_count').key,
project_daily_statistic_three.counter('fetch_count').key
]
end
before do
Gitlab::Redis::SharedState.with do |redis|
redis.set(keys[0], 5)
redis.set(keys[2], 10)
end
end
def expect_initial_counts
expect(project_daily_statistic.fetch_count).to eq(5)
expect(project_daily_statistic_two.fetch_count).to eq(0)
expect(project_daily_statistic_three.fetch_count).to eq(10)
end
def expect_flushed_counts
expect(project_daily_statistic.reload.fetch_count).to eq(10)
expect(project_daily_statistic_two.reload.fetch_count).to eq(0)
expect(project_daily_statistic_three.reload.fetch_count).to eq(20)
end
shared_examples 'flushes counters correctly' do
it 'flushes and calls commit_increment!' do
expect_initial_counts
Gitlab::Redis::SharedState.with do |redis|
if Gitlab::Redis::ClusterUtil.cluster?(redis)
expect(Gitlab::Redis::ClusterUtil).to receive(:batch_get).with(keys, redis).and_return(["5", nil, 10])
else
expect(redis).to receive(:mget).and_return(["5", nil, 10])
end
end
service.execute
expect_flushed_counts
end
end
describe '#execute' do
context 'when Redis is in cluster mode' do
before do
allow(Gitlab::Redis::ClusterUtil).to receive(:cluster?).and_return(true)
end
it_behaves_like 'flushes counters correctly'
end
context 'when Redis is not in cluster mode' do
before do
allow(Gitlab::Redis::ClusterUtil).to receive(:cluster?).and_return(false)
end
it_behaves_like 'flushes counters correctly'
end
end
end

View File

@ -93,6 +93,59 @@ RSpec.describe Gitlab::ImportExport::Base::RelationObjectSaver, feature_category
end
end
describe 'ImportRecordPreparer' do
let_it_be(:position) do
Gitlab::Diff::Position.new(
base_sha: "ae73cb07c9eeaf35924a10f713b364d32b2dd34f",
head_sha: "b83d6e391c22777fca1ed3012fce84f633d7fed0",
ignore_whitespace_change: false,
line_range: nil,
new_line: 9,
new_path: 'lib/ruby/popen.rb',
old_line: 8,
old_path: "files/ruby/popen.rb",
position_type: "text",
start_sha: "0b4bc9a49b562e85de7cc9e834518ea6828729b9"
)
end
let(:relation_key) { 'merge_requests' }
let(:relation_definition) { { 'notes' => {} } }
let(:relation_object) { build(:merge_request, source_project: project, target_project: project, notes: [note]) }
let(:diff_note) { create(:diff_note_on_commit) }
let(:note_diff_file) { diff_note.note_diff_file }
let(:note) do
build(
:diff_note_on_merge_request, project: project, importing: true,
line_code: "8ec9a00bfd09b3190ac6b22251dbb1aa95a0579d_4_7",
position: position, original_position: position,
note_diff_file: note_diff_file
)
end
context 'when records need preparing by ImportRecordPreparer' do
let(:note_diff_file) { nil }
it 'prepares the records' do
saver.execute
notes = project.reload.merge_requests.first.notes
expect(notes.count).to eq(1)
expect(notes.first).to be_a(DiscussionNote)
end
end
context 'when record does not need preparing by ImportRecordPreparer' do
it 'does not change the record' do
saver.execute
notes = project.reload.merge_requests.first.notes
expect(notes.count).to eq(1)
expect(notes.first).to be_a(DiffNote)
end
end
end
context 'when importable is group' do
let(:relation_key) { 'labels' }
let(:relation_definition) { { 'priorities' => {} } }

View File

@ -6,15 +6,8 @@ NULL_LOGGER = Gitlab::JsonLogger.new('/dev/null')
TAG_LIST = Gitlab::Seeders::Ci::Runner::RunnerFleetSeeder::TAG_LIST.to_set
RSpec.describe ::Gitlab::Seeders::Ci::Runner::RunnerFleetPipelineSeeder, feature_category: :fleet_visibility do
subject(:seeder) do
described_class.new(NULL_LOGGER, projects_to_runners: projects_to_runners, job_count: job_count)
end
def runner_ids_for_project(runner_count, project)
create_list(:ci_runner, runner_count, :project, projects: [project], tag_list: TAG_LIST.to_a.sample(5)).map(&:id)
end
let_it_be(:projects) { create_list(:project, 4) }
let_it_be(:admin) { create(:admin, owner_of: projects) }
let_it_be(:projects_to_runners) do
[
{ project_id: projects[0].id, runner_ids: runner_ids_for_project(2, projects[0]) },
@ -24,6 +17,15 @@ RSpec.describe ::Gitlab::Seeders::Ci::Runner::RunnerFleetPipelineSeeder, feature
]
end
subject(:seeder) do
described_class.new(NULL_LOGGER, projects_to_runners: projects_to_runners, job_count: job_count,
username: admin.username)
end
def runner_ids_for_project(runner_count, project)
create_list(:ci_runner, runner_count, :project, projects: [project], tag_list: TAG_LIST.to_a.sample(5)).map(&:id)
end
describe '#seed' do
context 'with job_count specified' do
let(:job_count) { 20 }
@ -55,6 +57,32 @@ RSpec.describe ::Gitlab::Seeders::Ci::Runner::RunnerFleetPipelineSeeder, feature
.and change { Ci::Pipeline.count }.by(2)
expect(Ci::Build.last(2).map(&:tag_list).map(&:to_set)).to all satisfy { |r| r.subset?(TAG_LIST) }
end
it 'creates pipeline meta with each pipeline it creates' do
expect { seeder.seed }.to change { ::Ci::PipelineMetadata.count }.by(2)
expect(Ci::PipelineMetadata.last(2).map(&:name)).to all(start_with('Mock pipeline'))
end
context 'when the seeded pipelines have completed statuses' do
before do
allow(seeder).to receive(:random_pipeline_status).and_return(Ci::Pipeline::COMPLETED_STATUSES.sample)
end
it 'asynchronously triggers PipelineFinishedWorker for each pipeline' do
expect(Ci::PipelineFinishedWorker).to receive(:perform_async).twice
seeder.seed
end
end
end
context 'when an invalid username is provided' do
it 'raises a record not found error' do
expect do
described_class.new(NULL_LOGGER, projects_to_runners: projects_to_runners, job_count: 2,
username: 'nonexistentuser')
end.to raise_error(ActiveRecord::RecordNotFound)
end
end
end
end

View File

@ -0,0 +1,189 @@
# frozen_string_literal: true
require 'spec_helper'
RSpec.describe Import::ImportRecordPreparer, feature_category: :importers do
describe '.recover_invalid_record' do
subject(:recover_invalid_record) do
# the preparer expects a validated record with errors
record.validate
described_class.recover_invalid_record(record)
end
let(:returned_record) { recover_invalid_record }
context 'when record is a DiffNote' do
let_it_be(:merge_request) { create(:merge_request) }
let_it_be(:project) { create(:project) }
let_it_be(:author) { create(:user) }
let_it_be(:discussion_id) { 'some-discussion-id' }
let_it_be(:resolved_at) { Time.zone.now }
let(:record) do
build(
:diff_note_on_merge_request, noteable: merge_request, importing: true, note: 'About this line...',
position: position, discussion_id: discussion_id, author_id: author.id, project_id: project.id,
resolved_at: resolved_at, commit_id: 'some-short-id', original_position: position,
line_code: "8ec9a00bfd09b3190ac6b22251dbb1aa95a0579d_4_7"
)
end
let_it_be(:position) do
Gitlab::Diff::Position.new(
base_sha: "ae73cb07c9eeaf35924a10f713b364d32b2dd34f",
head_sha: "b83d6e391c22777fca1ed3012fce84f633d7fed0",
ignore_whitespace_change: false,
line_range: nil,
new_line: 9,
new_path: 'lib/ruby/popen.rb',
old_line: 8,
old_path: "files/ruby/popen.rb",
position_type: "text",
start_sha: "0b4bc9a49b562e85de7cc9e834518ea6828729b9"
)
end
context 'when diff file is not found' do
before do
allow(record).to receive(:fetch_diff_file).and_return(nil)
end
it 'builds a new DiscussionNote based on the provided DiffNote', :aggregate_failures do
recover_invalid_record
# Ensure the context is correct
expect(record.errors[:base]).to include(DiffNote::DIFF_FILE_NOT_FOUND_MESSAGE)
expect(record.errors[:base]).not_to include(/Failed to find diff line for.*/)
expect(returned_record).to be_a(DiscussionNote)
expect(returned_record.noteable_id).to eq(merge_request.id)
expect(returned_record.discussion_id).to eq(discussion_id)
expect(returned_record.author_id).to eq(author.id)
expect(returned_record.project_id).to eq(project.id)
expect(returned_record.resolved_at).to be(resolved_at)
expect(returned_record.importing).to be(true)
expect(returned_record.commit_id).to be_nil
expect(returned_record.line_code).to be_nil
expect(returned_record.position).to be_nil
expect(returned_record.original_position).to be_nil
end
it 'adds fallback position text before the comment' do
expect(returned_record.note).to eq(<<~COMMENT.strip)
*Comment on files/ruby/popen.rb:8 --> lib/ruby/popen.rb:9*
About this line...
COMMENT
end
context 'when the old path and position do not exist' do
let_it_be(:position) do
Gitlab::Diff::Position.new(
old_path: nil,
new_path: "lib/ruby/popen.rb",
old_line: nil,
new_line: 9
)
end
it 'only shows the new path and position in the note' do
expect(returned_record.note).to eq(<<~COMMENT.strip)
*Comment on lib/ruby/popen.rb:9*
About this line...
COMMENT
end
end
context 'when the new path and position do not exist' do
let_it_be(:position) do
Gitlab::Diff::Position.new(
old_path: "files/ruby/popen.rb",
new_path: nil,
old_line: 8,
new_line: nil
)
end
it 'only shows the old path and position in the note' do
expect(returned_record.note).to eq(<<~COMMENT.strip)
*Comment on files/ruby/popen.rb:8 -->*
About this line...
COMMENT
end
end
end
context 'when diff line is not found' do
before do
diff_file_stub = instance_double(Gitlab::Diff::File)
allow(diff_file_stub).to receive_messages(line_for_position: nil, file_path: 'lib/ruby/popen.rb')
allow(record).to receive(:fetch_diff_file).and_return(diff_file_stub)
end
it 'builds a new DiscussionNote' do
recover_invalid_record
# Ensure the context is correct
expect(record.errors[:base]).to include(/Failed to find diff line for.*/)
expect(record.errors[:base]).not_to include(DiffNote::DIFF_FILE_NOT_FOUND_MESSAGE)
expect(returned_record).not_to eq(record)
expect(returned_record).to be_a(DiscussionNote)
end
end
context 'when the diff note is valid' do
let(:record) do
build(
:diff_note_on_merge_request, position: position, original_position: position,
project: merge_request.project, noteable: merge_request,
line_code: "8ec9a00bfd09b3190ac6b22251dbb1aa95a0579d_4_7"
)
end
it 'returns the same record' do
expect(record).to be_valid
expect(returned_record).to eq(record)
end
end
context 'when the diff note is invalid due to a reason other than missing diff_file/diff_line' do
let(:record) { build(:diff_note_on_merge_request, noteable_type: User) } # Not a valid noteable type
it 'returns the same record' do
expect(record).not_to be_valid
expect(returned_record).to eq(record)
end
end
it 'instantiates a preparer' do
expect(described_class).to receive(:new).with(record).and_call_original
recover_invalid_record
end
end
context 'when record is not a supported type' do
let(:record) { build(:issue) }
it 'returns the provided record' do
expect(record).to be_valid
expect(returned_record).to eq(record)
end
it 'does not instantiate a preparer' do
expect(described_class).not_to receive(:new)
recover_invalid_record
end
end
end
end

View File

@ -8,6 +8,10 @@ RSpec.describe DiffNote do
let_it_be(:merge_request) { create(:merge_request) }
let_it_be(:project) { merge_request.project }
let_it_be(:commit) { project.commit(sample_commit.id) }
let_it_be(:diff_project) { create(:project, :repository) }
let_it_be(:diff_mr) do
create(:merge_request, source_project: diff_project, target_project: diff_project)
end
let_it_be(:path) { "files/ruby/popen.rb" }
@ -110,7 +114,7 @@ RSpec.describe DiffNote do
context 'when importing' do
it "does not check if it's supported" do
note = build(:diff_note_on_merge_request, project: project, noteable: nil)
note = build(:diff_note_on_merge_request, project: project, noteable: merge_request)
note.importing = true
note.valid?
@ -384,6 +388,14 @@ RSpec.describe DiffNote do
expect(diff_note.diff_file).to be_nil
end
end
context 'when noteable is nil' do
it 'does not return a diff file' do
diff_note = build(:diff_note_on_commit, noteable: nil)
expect(diff_note.diff_file).to be_nil
end
end
end
describe '#latest_diff_file' do
@ -659,4 +671,140 @@ RSpec.describe DiffNote do
expect(diff_note.raw_truncated_diff_lines).to eq("+line 1\n+line 2\n-line 3")
end
end
describe '#validate_diff_file_and_line' do
let(:modified_file_path) do
"bar/branch-test.txt"
end
let(:valid_position) do
Gitlab::Diff::Position.new(
old_path: modified_file_path,
new_path: modified_file_path,
old_line: nil,
new_line: 1,
base_sha: diff_project.repository.commit.parent_id,
start_sha: diff_project.repository.commit.parent_id,
head_sha: diff_project.repository.commit.id
)
end
let(:invalid_line_position) do
Gitlab::Diff::Position.new(
old_path: modified_file_path,
new_path: modified_file_path,
old_line: nil,
new_line: 9999,
base_sha: diff_project.repository.commit.parent_id,
start_sha: diff_project.repository.commit.parent_id,
head_sha: diff_project.repository.commit.id
)
end
let(:diff_note) do
build(:diff_note_on_merge_request,
project: diff_project,
noteable: diff_mr,
importing: true,
position: valid_position,
original_position: valid_position
)
end
context 'when diff file exists' do
before do
diff_file = valid_position.diff_file(diff_project.repository)
allow(diff_note).to receive(:diff_file).and_return(diff_file)
end
it 'does not add errors when diff line exists' do
diff_note.validate_diff_file_and_line
expect(diff_note.errors).to be_empty
end
it 'adds an error when diff line does not exist' do
diff_note.original_position = invalid_line_position
diff_note.validate_diff_file_and_line
expect(diff_note.errors[:base]).to include(
"Failed to find diff line for: #{modified_file_path}, old_line: #{invalid_line_position.old_line}, "\
"new_line: #{invalid_line_position.new_line}"
)
expect(diff_note.errors.details[:base]).to contain_exactly(error: :missing_diff_line)
end
end
context 'when diff file does not exist' do
before do
allow(diff_note).to receive_messages(diff_file: nil, fetch_diff_file: nil)
end
it 'adds an error about the missing file' do
diff_note.validate_diff_file_and_line
expect(diff_note.errors[:base]).to include("Failed to find diff file")
expect(diff_note.errors.details[:base]).to contain_exactly(error: :missing_diff_file)
end
end
end
describe '#requires_diff_file_validation_during_import?' do
let(:modified_file_path) do
"bar/branch-test.txt"
end
let(:position) do
Gitlab::Diff::Position.new(
old_path: modified_file_path,
new_path: modified_file_path,
old_line: nil,
new_line: 1,
base_sha: diff_project.repository.commit.parent_id,
start_sha: diff_project.repository.commit.parent_id,
head_sha: diff_project.repository.commit.id
)
end
let(:diff_note) do
build(:diff_note_on_merge_request,
project: diff_project,
noteable: diff_mr,
position: position,
original_position: position
)
end
before do
diff_note.importing = true
allow(diff_note).to receive(:should_create_diff_file?).and_return(true)
end
it 'returns true when both importing? and should_create_diff_file? are true' do
expect(diff_note.requires_diff_file_validation_during_import?).to be(true)
end
it 'returns false when importing? is false' do
diff_note.importing = false
expect(diff_note.requires_diff_file_validation_during_import?).to be(false)
end
it 'returns false when should_create_diff_file? is false' do
allow(diff_note).to receive(:should_create_diff_file?).and_return(false)
expect(diff_note.requires_diff_file_validation_during_import?).to be(false)
end
it 'returns false when both importing? and should_create_diff_file? are false' do
diff_note.importing = false
allow(diff_note).to receive(:should_create_diff_file?).and_return(false)
expect(diff_note.requires_diff_file_validation_during_import?).to be(false)
end
end
end

View File

@ -395,29 +395,47 @@ RSpec.describe UserDetail, feature_category: :system_access do
expect(user_detail).to be_valid
end
context 'when orcid id is wrong' do
it 'throws an error when orcid username format is too long' do
user_detail.orcid = '1234-1234-1234-1234-1234'
it 'accepts a valid orcid username' do
user_detail.orcid = '1234-1234-1234-123X'
expect(user_detail).not_to be_valid
expect(user_detail.errors.full_messages)
.to match_array([_('Orcid must contain only a orcid ID.')])
expect(user_detail).to be_valid
end
context 'when orcid is wrong' do
shared_examples 'throws an error' do
before do
user_detail.orcid = orcid
end
it 'throws an error' do
expect(user_detail).not_to be_valid
expect(user_detail.errors.full_messages)
.to match_array([_('Orcid must contain only a valid ORCID.')])
end
end
it 'throws an error when orcid username format is too short' do
user_detail.orcid = '1234-1234'
context 'when the format is too long' do
let(:orcid) { '1234-1234-1234-1234-1234' }
expect(user_detail).not_to be_valid
expect(user_detail.errors.full_messages)
.to match_array([_('Orcid must contain only a orcid ID.')])
it_behaves_like 'throws an error'
end
it 'throws an error when orcid username format is letters' do
user_detail.orcid = 'abcd-abcd-abcd-abcd'
context 'when the format is too short' do
let(:orcid) { '1234-1234' }
expect(user_detail).not_to be_valid
expect(user_detail.errors.full_messages)
.to match_array([_('Orcid must contain only a orcid ID.')])
it_behaves_like 'throws an error'
end
context 'when the format is letters' do
let(:orcid) { 'abcd-abcd-abcd-abcd' }
it_behaves_like 'throws an error'
end
context 'when the format ends with a letter other than X' do
let(:orcid) { '1234-1234-1234-123Y' }
it_behaves_like 'throws an error'
end
end
end

View File

@ -15,6 +15,32 @@ RSpec.describe Ci::BuildRunnerPresenter do
}
end
describe '#set_queue_metrics' do
let(:build) { create(:ci_build) }
let(:size) { 10 }
let(:depth) { 2 }
subject(:executed) do
presenter.set_queue_metrics(size: size, depth: depth)
presenter
end
it 'tracks information about queue size and depth' do
expect(executed.queue_size).to eq(10)
expect(executed.queue_depth).to eq(2)
end
context 'when queue size or depth is negative' do
let(:size) { -1 }
let(:depth) { -1 }
it 'sets queue size and depth to 0' do
expect(executed.queue_size).to eq(0)
expect(executed.queue_depth).to eq(0)
end
end
end
describe '#artifacts' do
context "when option contains archive-type artifacts" do
let(:build) { create(:ci_build, options: { artifacts: archive }) }

View File

@ -36,37 +36,29 @@ RSpec.describe 'ciLint', feature_category: :pipeline_composition do
status
warnings
stages {
nodes {
name
groups {
name
groups {
nodes {
size
jobs {
name
groupName
stage
script
beforeScript
afterScript
allowFailure
only {
refs
}
when
except {
refs
}
environment
tags
needs {
name
size
jobs {
nodes {
name
groupName
stage
script
beforeScript
afterScript
allowFailure
only {
refs
}
when
except {
refs
}
environment
tags
needs {
nodes {
name
}
}
}
}
}
}
}
@ -80,23 +72,6 @@ RSpec.describe 'ciLint', feature_category: :pipeline_composition do
post_graphql_mutation(mutation, current_user: user)
end
context 'when ci_lint_mutation is disabled' do
before do
stub_feature_flags(ci_lint_mutation: false)
end
it 'does not lint the config' do
expect(::Gitlab::Ci::Lint).not_to receive(:new)
post_mutation
expect(graphql_mutation_response(:ci_lint)['config']).to be_nil
expect(graphql_mutation_response(:ci_lint)['errors'].first).to include(
'This mutation is unfinished and not yet available for use'
)
end
end
it_behaves_like 'a working graphql query' do
before do
post_mutation
@ -112,150 +87,120 @@ RSpec.describe 'ciLint', feature_category: :pipeline_composition do
"warnings" => [],
"includes" => [],
"mergedYaml" => a_kind_of(String),
"stages" =>
{
"nodes" =>
[
{
"name" => "build",
"groups" =>
"stages" => [
{
"name" => "build",
"groups" => [
{
"nodes" =>
[
"name" => "rspec",
"size" => 2,
"jobs" => [
{
"name" => "rspec",
"size" => 2,
"jobs" =>
{
"nodes" =>
[
{
"name" => "rspec 0 1",
"groupName" => "rspec",
"stage" => "build",
"script" => ["rake spec"],
"beforeScript" => ["bundle install", "bundle exec rake db:create"],
"afterScript" => ["echo 'run this after'"],
"allowFailure" => false,
"only" => { "refs" => %w[branches master] },
"when" => "on_success",
"except" => nil,
"environment" => nil,
"tags" => %w[ruby postgres],
"needs" => { "nodes" => [] }
},
{
"name" => "rspec 0 2",
"groupName" => "rspec",
"stage" => "build",
"script" => ["rake spec"],
"beforeScript" => ["bundle install", "bundle exec rake db:create"],
"afterScript" => ["echo 'run this after'"],
"allowFailure" => true,
"only" => { "refs" => %w[branches tags] },
"when" => "on_failure",
"except" => nil,
"environment" => nil,
"tags" => [],
"needs" => { "nodes" => [] }
}
]
}
"name" => "rspec 0 1",
"groupName" => "rspec",
"stage" => "build",
"script" => ["rake spec"],
"beforeScript" => ["bundle install", "bundle exec rake db:create"],
"afterScript" => ["echo 'run this after'"],
"allowFailure" => false,
"only" => { "refs" => %w[branches master] },
"when" => "on_success",
"except" => nil,
"environment" => nil,
"tags" => %w[ruby postgres],
"needs" => []
},
{
"name" => "spinach", "size" => 1, "jobs" =>
{
"nodes" =>
[
{
"name" => "spinach",
"groupName" => "spinach",
"stage" => "build",
"script" => ["rake spinach"],
"beforeScript" => ["bundle install", "bundle exec rake db:create"],
"afterScript" => ["echo 'run this after'"],
"allowFailure" => false,
"only" => { "refs" => %w[branches tags] },
"when" => "on_success",
"except" => { "refs" => ["tags"] },
"environment" => nil,
"tags" => [],
"needs" => { "nodes" => [] }
}
]
}
"name" => "rspec 0 2",
"groupName" => "rspec",
"stage" => "build",
"script" => ["rake spec"],
"beforeScript" => ["bundle install", "bundle exec rake db:create"],
"afterScript" => ["echo 'run this after'"],
"allowFailure" => true,
"only" => { "refs" => %w[branches tags] },
"when" => "on_failure",
"except" => nil,
"environment" => nil,
"tags" => [],
"needs" => []
}
]
},
{
"name" => "spinach", "size" => 1, "jobs" => [
{
"name" => "spinach",
"groupName" => "spinach",
"stage" => "build",
"script" => ["rake spinach"],
"beforeScript" => ["bundle install", "bundle exec rake db:create"],
"afterScript" => ["echo 'run this after'"],
"allowFailure" => false,
"only" => { "refs" => %w[branches tags] },
"when" => "on_success",
"except" => { "refs" => ["tags"] },
"environment" => nil,
"tags" => [],
"needs" => []
}
]
}
},
{
"name" => "test",
"groups" =>
]
},
{
"name" => "test",
"groups" => [
{
"nodes" =>
[
"name" => "docker",
"size" => 1,
"jobs" => [
{
"name" => "docker",
"size" => 1,
"jobs" =>
{
"nodes" => [
{
"name" => "docker",
"groupName" => "docker",
"stage" => "test",
"script" => ["curl http://dockerhub/URL"],
"beforeScript" => ["bundle install", "bundle exec rake db:create"],
"afterScript" => ["echo 'run this after'"],
"allowFailure" => true,
"only" => { "refs" => %w[branches tags] },
"when" => "manual",
"except" => { "refs" => ["branches"] },
"environment" => nil,
"tags" => [],
"needs" => { "nodes" => [{ "name" => "spinach" }, { "name" => "rspec 0 1" }] }
}
]
}
"groupName" => "docker",
"stage" => "test",
"script" => ["curl http://dockerhub/URL"],
"beforeScript" => ["bundle install", "bundle exec rake db:create"],
"afterScript" => ["echo 'run this after'"],
"allowFailure" => true,
"only" => { "refs" => %w[branches tags] },
"when" => "manual",
"except" => { "refs" => ["branches"] },
"environment" => nil,
"tags" => [],
"needs" => [{ "name" => "spinach" }, { "name" => "rspec 0 1" }]
}
]
}
},
{
"name" => "deploy",
"groups" =>
]
},
{
"name" => "deploy",
"groups" => [
{
"nodes" =>
[
"name" => "deploy_job",
"size" => 1,
"jobs" => [
{
"name" => "deploy_job",
"size" => 1,
"jobs" =>
{
"nodes" => [
{
"name" => "deploy_job",
"groupName" => "deploy_job",
"stage" => "deploy",
"script" => ["echo 'done'"],
"beforeScript" => ["bundle install", "bundle exec rake db:create"],
"afterScript" => ["echo 'run this after'"],
"allowFailure" => false,
"only" => { "refs" => %w[branches tags] },
"when" => "on_success",
"except" => nil,
"environment" => "production",
"tags" => [],
"needs" => { "nodes" => [] }
}
]
}
"groupName" => "deploy_job",
"stage" => "deploy",
"script" => ["echo 'done'"],
"beforeScript" => ["bundle install", "bundle exec rake db:create"],
"afterScript" => ["echo 'run this after'"],
"allowFailure" => false,
"only" => { "refs" => %w[branches tags] },
"when" => "on_success",
"except" => nil,
"environment" => "production",
"tags" => [],
"needs" => []
}
]
}
}
]
}
]
}
]
)
end
@ -307,69 +252,55 @@ RSpec.describe 'ciLint', feature_category: :pipeline_composition do
}
],
"mergedYaml" => "---\nbuild:\n script: build\nrspec:\n script: rspec\n",
"stages" =>
{
"nodes" =>
[
{
"name" => "test",
"groups" =>
"stages" => [
{
"name" => "test",
"groups" => [
{
"nodes" =>
[
"name" => "build",
"size" => 1,
"jobs" => [
{
"name" => "build",
"size" => 1,
"jobs" =>
{
"nodes" =>
[
{
"name" => "build",
"stage" => "test",
"groupName" => "build",
"script" => ["build"],
"afterScript" => [],
"beforeScript" => [],
"allowFailure" => false,
"environment" => nil,
"except" => nil,
"only" => { "refs" => %w[branches tags] },
"when" => "on_success",
"tags" => [],
"needs" => { "nodes" => [] }
}
]
}
},
"stage" => "test",
"groupName" => "build",
"script" => ["build"],
"afterScript" => [],
"beforeScript" => [],
"allowFailure" => false,
"environment" => nil,
"except" => nil,
"only" => { "refs" => %w[branches tags] },
"when" => "on_success",
"tags" => [],
"needs" => []
}
]
},
{
"name" => "rspec",
"size" => 1,
"jobs" => [
{
"name" => "rspec",
"size" => 1,
"jobs" =>
{
"nodes" =>
[
{ "name" => "rspec",
"stage" => "test",
"groupName" => "rspec",
"script" => ["rspec"],
"afterScript" => [],
"beforeScript" => [],
"allowFailure" => false,
"environment" => nil,
"except" => nil,
"only" => { "refs" => %w[branches tags] },
"when" => "on_success",
"tags" => [],
"needs" => { "nodes" => [] } }
]
}
"stage" => "test",
"groupName" => "rspec",
"script" => ["rspec"],
"afterScript" => [],
"beforeScript" => [],
"allowFailure" => false,
"environment" => nil,
"except" => nil,
"only" => { "refs" => %w[branches tags] },
"when" => "on_success",
"tags" => [],
"needs" => []
}
]
}
}
]
}
]
}
]
)
end
end
@ -499,9 +430,9 @@ RSpec.describe 'ciLint', feature_category: :pipeline_composition do
post_mutation
response_config = graphql_mutation_response(:ci_lint)['config']
response_job_names = response_config.dig('stages', 'nodes')
.flat_map { |stage| stage.dig('groups', 'nodes') }
.flat_map { |group| group.dig('jobs', 'nodes') }
response_job_names = response_config['stages']
.flat_map { |stage| stage['groups'] }
.flat_map { |group| group['jobs'] }
.pluck('name')
# The spinach job does not run for tags, so it makes a good test that the ref is being properly applied.

View File

@ -0,0 +1,117 @@
# frozen_string_literal: true
require 'spec_helper'
RSpec.describe 'Query.project.workItemTypes.unavailableWidgetsOnConversion', feature_category: :team_planning do
include GraphqlHelpers
let_it_be(:group) { create(:group) }
let_it_be(:project) { create(:project, group: group) }
let_it_be(:current_user) { create(:user, developer_of: group) }
let_it_be(:source_type) { create(:work_item_type, :non_default) }
let_it_be(:target_type) { create(:work_item_type, :non_default) }
let_it_be(:shared_widget1) do
create(:widget_definition, work_item_type: source_type, widget_type: 'description', name: 'Description')
end
let_it_be(:shared_widget2) do
create(:widget_definition, work_item_type: target_type, widget_type: 'description', name: 'Description')
end
let_it_be(:source_only_widget1) do
create(:widget_definition, work_item_type: source_type, widget_type: 'labels', name: 'Labels')
end
let_it_be(:source_only_widget2) do
create(:widget_definition, work_item_type: source_type, widget_type: 'assignees', name: 'Assignees')
end
let_it_be(:target_only_widget) do
create(:widget_definition, work_item_type: target_type, widget_type: 'milestone', name: 'Milestone')
end
let(:query) do
<<~QUERY
query {
project(fullPath: "#{project.full_path}") {
workItemTypes {
nodes {
id
name
unavailableWidgetsOnConversion(
target: "#{target_type.to_gid}"
) {
type
}
}
}
}
}
QUERY
end
before do
post_graphql(query, current_user: current_user)
end
it_behaves_like 'a working graphql query'
it 'returns widgets lost on conversion for work item types' do
work_item_types = graphql_data.dig('project', 'workItemTypes', 'nodes')
expect(work_item_types).to be_present
# Find the source type that has the widgets we expect to be lost
source_type_node = work_item_types.find { |type| type['id'] == source_type.to_gid.to_s }
expect(source_type_node).to be_present
widgets_lost = source_type_node['unavailableWidgetsOnConversion']
expect(widgets_lost).to be_an(Array)
expect(widgets_lost.size).to eq(2)
widget_types = widgets_lost.pluck('type')
expect(widget_types).to contain_exactly('LABELS', 'ASSIGNEES')
expect(widgets_lost).to all(have_key('type'))
end
context 'when user does not have permission' do
let(:current_user) { create(:user) }
it 'returns null for project' do
expect(graphql_data['project']).to be_nil
end
end
context 'with invalid target work item type' do
let(:query) do
<<~QUERY
query {
project(fullPath: "#{project.full_path}") {
workItemTypes {
nodes {
id
name
unavailableWidgetsOnConversion(
target: "gid://gitlab/WorkItems::Type/999999"
) {
type
}
}
}
}
}
QUERY
end
it 'returns empty array for widgets lost on conversion' do
work_item_types = graphql_data.dig('project', 'workItemTypes', 'nodes')
work_item_types.each do |type|
expect(type['unavailableWidgetsOnConversion']).to be_empty
end
end
end
end

View File

@ -946,6 +946,10 @@ module Ci
let!(:attempt_counter) { double('Gitlab::Metrics::NullMetric') }
let!(:job_queue_duration_seconds) { double('Gitlab::Metrics::NullMetric') }
let!(:pending_job_2) { create(:ci_build, :pending, :queued, :protected, pipeline: pipeline, tag_list: %w[tag2]) }
let(:expected_time_in_queue_seconds) { 1800 }
before do
allow(Time).to receive(:now).and_return(current_time)
# Stub tested metrics
@ -958,7 +962,7 @@ module Ci
.and_return(job_queue_duration_seconds)
project.update!(shared_runners_enabled: true)
pending_job.update!(created_at: current_time - 3600, queued_at: current_time - 1800)
pending_job_2.update!(created_at: current_time - 3600, queued_at: current_time - 1800)
end
shared_examples 'attempt counter collector' do
@ -1008,32 +1012,55 @@ module Ci
it_behaves_like 'jobs queueing time histogram collector'
end
context 'when shared runner is used' do
shared_examples 'queue metrics presenter' do
let!(:pending_job_3) { create(:ci_build, :pending, :queued, :protected, pipeline: pipeline, tag_list: %w[tag3]) }
let!(:pending_job_4) { create(:ci_build, :pending, :queued, :protected, pipeline: pipeline, tag_list: %w[tag2]) }
subject(:execute) { described_class.new(runner, nil).execute }
before do
pending_job.reload
pending_job.create_queuing_entry!
allow(job_queue_duration_seconds).to receive(:observe)
allow(attempt_counter).to receive(:increment)
end
let(:runner) { create(:ci_runner, :instance, tag_list: %w[tag1 tag2]) }
it 'presents queue metrics' do
expect(execute.build_presented.queue_size).to eq(2)
expect(execute.build_presented.queue_depth).to eq(1)
expect(execute.build_presented.time_in_queue_seconds).to eq(expected_time_in_queue_seconds)
expect(execute.build_presented.project_jobs_running_on_instance_runners_count).to eq(expected_project_jobs_running_on_instance_runners_count)
end
end
context 'when shared runner is used' do
before do
pending_job_2.reload
pending_job_2.create_queuing_entry!
end
let(:runner) { create(:ci_runner, :instance, :ref_protected, tag_list: %w[tag1 tag2]) }
let(:expected_shared_runner) { true }
let(:expected_shard) { ::Gitlab::Ci::Queue::Metrics::DEFAULT_METRICS_SHARD }
let(:expected_jobs_running_for_project_first_job) { '0' }
let(:expected_jobs_running_for_project_third_job) { '2' }
let(:expected_project_jobs_running_on_instance_runners_count) { '0' }
it_behaves_like 'metrics collector'
it_behaves_like 'queue metrics presenter'
context 'when metrics_shard tag is defined' do
let(:runner) { create(:ci_runner, :instance, tag_list: %w[tag1 metrics_shard::shard_tag tag2]) }
let(:runner) { create(:ci_runner, :instance, :ref_protected, tag_list: %w[tag1 metrics_shard::shard_tag tag2]) }
let(:expected_shard) { 'shard_tag' }
it_behaves_like 'metrics collector'
it_behaves_like 'queue metrics presenter'
end
context 'when multiple metrics_shard tag is defined' do
let(:runner) { create(:ci_runner, :instance, tag_list: %w[tag1 metrics_shard::shard_tag metrics_shard::shard_tag_2 tag2]) }
let(:runner) { create(:ci_runner, :instance, :ref_protected, tag_list: %w[tag1 metrics_shard::shard_tag metrics_shard::shard_tag_2 tag2]) }
let(:expected_shard) { 'shard_tag' }
it_behaves_like 'metrics collector'
it_behaves_like 'queue metrics presenter'
end
context 'when max running jobs bucket size is exceeded' do
@ -1044,14 +1071,18 @@ module Ci
let(:expected_jobs_running_for_project_third_job) { '1+' }
it_behaves_like 'metrics collector'
it_behaves_like 'queue metrics presenter'
end
context 'when pending job with queued_at=nil is used' do
let(:expected_time_in_queue_seconds) { nil }
before do
pending_job.update!(queued_at: nil)
pending_job_2.update!(queued_at: nil)
end
it_behaves_like 'attempt counter collector'
it_behaves_like 'queue metrics presenter'
it "doesn't count job queuing time histogram" do
allow(attempt_counter).to receive(:increment)
@ -1063,13 +1094,15 @@ module Ci
end
context 'when project runner is used' do
let(:runner) { create(:ci_runner, :project, projects: [project], tag_list: %w[tag1 metrics_shard::shard_tag tag2]) }
let(:runner) { create(:ci_runner, :project, :ref_protected, projects: [project], tag_list: %w[tag1 metrics_shard::shard_tag tag2]) }
let(:expected_shared_runner) { false }
let(:expected_shard) { ::Gitlab::Ci::Queue::Metrics::DEFAULT_METRICS_SHARD }
let(:expected_jobs_running_for_project_first_job) { '+Inf' }
let(:expected_jobs_running_for_project_third_job) { '+Inf' }
let(:expected_project_jobs_running_on_instance_runners_count) { '+Inf' }
it_behaves_like 'metrics collector'
it_behaves_like 'queue metrics presenter'
end
end
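
Editor's note: the `queue metrics presenter` shared examples above depend on each including context overriding `let` definitions such as `runner` and the `expected_*` values. For readers unfamiliar with that RSpec technique, here is a minimal, self-contained sketch; `QueuePresenter` and its fields are illustrative stand-ins, not GitLab classes:

# frozen_string_literal: true
require 'rspec/autorun'

# Hypothetical presenter, used only to demonstrate the pattern.
QueuePresenter = Struct.new(:queue_size, :queue_depth)

RSpec.describe 'let-overridden shared examples' do
  shared_examples 'queue metrics presenter' do
    # `presenter` and `expected_queue_size` come from the including context.
    it 'presents queue metrics' do
      expect(presenter.queue_size).to eq(expected_queue_size)
    end
  end

  context 'when two jobs are queued' do
    let(:presenter) { QueuePresenter.new(2, 1) }
    let(:expected_queue_size) { 2 }

    it_behaves_like 'queue metrics presenter'
  end
end

Each `it_behaves_like` call re-evaluates the shared example in the caller's scope, which is why the contexts above only need to redefine `runner` and the expected values.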

View File

@@ -7,15 +7,38 @@ RSpec.shared_examples 'a valid diff note with after commit callback' do
end
context 'when diff_line is not found' do
it 'raises an error' do
allow(diff_file_from_repository).to receive(:line_for_position).with(position).and_return(nil)
context 'when importing' do
before do
subject.importing = true
subject.line_code = line_code
end
expect { subject.save! }.to raise_error(
::DiffNote::NoteDiffFileCreationError,
"Failed to find diff line for: #{diff_file_from_repository.file_path}, "\
"old_line: #{position.old_line}"\
", new_line: #{position.new_line}"
)
it 'raises validation error' do
allow(diff_file_from_repository).to receive(:line_for_position).with(position).and_return(nil)
expect { subject.save! }.to raise_error(
ActiveRecord::RecordInvalid,
"Validation failed: Failed to find diff line for: #{diff_file_from_repository.file_path}, "\
"old_line: #{position.old_line}"\
", new_line: #{position.new_line}"
)
end
end
context 'when not importing' do
before do
subject.importing = false
end
it 'raises an error' do
allow(diff_file_from_repository).to receive(:line_for_position).with(position).and_return(nil)
expect { subject.save! }.to raise_error(
::DiffNote::NoteDiffFileCreationError,
"Failed to find diff line for: #{diff_file_from_repository.file_path}, "\
"old_line: #{position.old_line}"\
", new_line: #{position.new_line}"
)
end
end
end
@@ -39,10 +62,28 @@ RSpec.shared_examples 'a valid diff note with after commit callback' do
end
context 'when diff file is not found in repository' do
it 'raises an error' do
allow_any_instance_of(::Gitlab::Diff::Position).to receive(:diff_file).with(project.repository).and_return(nil)
context 'when importing' do
before do
subject.importing = true
subject.line_code = line_code
end
expect { subject.save! }.to raise_error(::DiffNote::NoteDiffFileCreationError, 'Failed to find diff file')
it 'raises validation error' do
allow(subject).to receive(:diff_file).and_return(nil)
expect { subject.save! }.to raise_error(ActiveRecord::RecordInvalid, /Validation failed: Failed to find diff file/)
end
end
context 'when not importing' do
before do
subject.importing = false
end
it 'raises an error' do
allow(subject).to receive(:diff_file).and_return(nil)
expect { subject.save! }.to raise_error(::DiffNote::NoteDiffFileCreationError, 'Failed to find diff file')
end
end
end
end
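
Editor's note: the rewritten shared examples split on `importing`. During an import the failure surfaces as a validation error (so `save!` raises `ActiveRecord::RecordInvalid`), while the interactive path keeps raising `DiffNote::NoteDiffFileCreationError`. A condensed ActiveModel sketch of that shape, with hypothetical names (the real `DiffNote` is considerably more involved):

# Condensed sketch of importing-aware error handling. All names here
# are illustrative, not GitLab's actual implementation.
require 'active_model'

class SketchDiffNote
  include ActiveModel::Model

  class NoteDiffFileCreationError < StandardError; end

  attr_accessor :importing, :diff_line

  # Only the import path reports the problem as a validation error,
  # so bulk importers can collect failures instead of aborting.
  validate :diff_line_found, if: :importing

  def save!
    raise ActiveModel::ValidationError, self if invalid?
    raise NoteDiffFileCreationError, 'Failed to find diff line' if diff_line.nil?

    true
  end

  private

  def diff_line_found
    errors.add(:base, 'Failed to find diff line') if diff_line.nil?
  end
end

With this sketch, `SketchDiffNote.new(importing: true).save!` raises a validation error while `SketchDiffNote.new(importing: false).save!` raises `NoteDiffFileCreationError`, mirroring the two contexts in the shared examples.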

View File

@@ -12,7 +12,7 @@ RSpec.describe FlushCounterIncrementsWorker, :counter_attribute, feature_categor
let(:attribute) { model.class.counter_attributes.first }
let(:worker) { described_class.new }
subject { worker.perform(model.class.name, model.id, attribute) }
subject(:service) { worker.perform(model.class.name, model.id, attribute) }
it 'commits increments to database' do
expect(model.class).to receive(:find_by_id).and_return(model)
@@ -20,26 +20,26 @@ RSpec.describe FlushCounterIncrementsWorker, :counter_attribute, feature_categor
expect(service).to receive(:commit_increment!)
end
subject
service
end
context 'when model class does not exist' do
subject { worker.perform('NonExistentModel', 1, attribute) }
subject(:service) { worker.perform('NonExistentModel', 1, attribute) }
it 'does nothing' do
expect(Gitlab::Counters::BufferedCounter).not_to receive(:new)
subject
service
end
end
context 'when record does not exist' do
subject { worker.perform(model.class.name, non_existing_record_id, attribute) }
subject(:service) { worker.perform(model.class.name, non_existing_record_id, attribute) }
it 'does nothing' do
expect(Gitlab::Counters::BufferedCounter).not_to receive(:new)
subject
service
end
end
end
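
Editor's note: the only change in this file is renaming the bare `subject` to `subject(:service)`, in line with the common RSpec/NamedSubject guidance that a named subject reads as plain English at the call site. In isolation:

require 'rspec/autorun'

RSpec.describe 'named subjects' do
  # `subject(:sum)` both defines the subject and gives it a readable name.
  subject(:sum) { [1, 2, 3].sum }

  it 'can be referenced by name instead of the implicit `subject`' do
    expect(sum).to eq(6)
  end
end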

View File

@@ -0,0 +1,32 @@
# frozen_string_literal: true
require 'spec_helper'
RSpec.describe ::Gitlab::Counters::FlushStaleCounterIncrementsCronWorker, feature_category: :continuous_integration do
describe '#perform' do
subject(:worker) { described_class.new }
context 'when we are on gitlab.com' do
before do
allow(Gitlab).to receive(:com_except_jh?).and_return(true)
allow(::Gitlab::Counters::FlushStaleCounterIncrementsWorker).to receive(:perform_with_capacity)
end
it 'calls FlushStaleCounterIncrementsWorker.perform_with_capacity' do
expect(::Gitlab::Counters::FlushStaleCounterIncrementsWorker).to receive(:perform_with_capacity)
worker.perform
end
end
context 'when we are not on gitlab.com' do
before do
allow(Gitlab).to receive(:com_except_jh?).and_return(false)
end
it 'does not call FlushStaleCounterIncrementsWorker.perform_with_capacity' do
expect(::Gitlab::Counters::FlushStaleCounterIncrementsWorker).not_to receive(:perform_with_capacity)
worker.perform
end
end
end
end
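
Editor's note: the cron worker under test is a thin dispatcher that fans out to the limited-capacity worker only on GitLab.com. A stripped-down sketch of that shape; the calls to `Gitlab.com_except_jh?` and `perform_with_capacity` mirror exactly what the spec stubs, while everything else (Sidekiq mixins, scheduling) is omitted:

# Sketch of a cron job that delegates to a capacity-limited worker.
class FlushStaleCounterIncrementsCronWorker
  def perform
    # Guard: this cleanup only runs on GitLab.com (excluding JiHu).
    return unless Gitlab.com_except_jh?

    # The limited-capacity worker decides how many parallel jobs to enqueue.
    FlushStaleCounterIncrementsWorker.perform_with_capacity
  end
end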

View File

@@ -0,0 +1,116 @@
# frozen_string_literal: true
require 'spec_helper'
RSpec.describe ::Gitlab::Counters::FlushStaleCounterIncrementsWorker, :saas, :clean_gitlab_redis_shared_state, feature_category: :continuous_integration do
let(:worker) { described_class.new }
let(:redis_key) { "flush_stale_counters:last_id:#{ProjectDailyStatistic.name}" }
let(:batch_limit) { described_class::BATCH_LIMIT }
let_it_be(:project) { create :project }
let!(:project_daily_statistic) do
create(:project_daily_statistic, date: Date.new(2025, 2, 1), fetch_count: 5, project: project)
end
let!(:project_daily_statistic_two) do
create(:project_daily_statistic, date: Date.new(2025, 2, 2), fetch_count: 0, project: project)
end
let!(:project_daily_statistic_three) do
create(:project_daily_statistic, date: Date.new(2025, 2, 3), fetch_count: 10, project: project)
end
let(:keys) do
[
project_daily_statistic.counter('fetch_count').key,
project_daily_statistic_two.counter('fetch_count').key,
project_daily_statistic_three.counter('fetch_count').key
]
end
before do
Gitlab::Redis::SharedState.with do |redis|
redis.set(keys[0], 5)
redis.set(keys[2], 10)
end
end
describe '#remaining_work_count' do
context 'when there is work' do
before do
Gitlab::Redis::SharedState.with do |redis|
redis.set(redis_key, project_daily_statistic.id)
end
stub_const("#{described_class}::ID_RANGES", { ProjectDailyStatistic => {
end_id: project_daily_statistic_three.id
} })
end
it 'has work to do' do
expect(worker.remaining_work_count).to eq(2)
end
end
context 'when there is no more work' do
before do
Gitlab::Redis::SharedState.with do |redis|
redis.set(redis_key, project_daily_statistic_three.id)
end
stub_const("#{described_class}::ID_RANGES", { ProjectDailyStatistic => {
end_id: project_daily_statistic_three.id
} })
end
it 'has no more work to do' do
expect(worker.remaining_work_count).to eq(0)
end
end
end
describe '#max_running_jobs' do
it 'has only one concurrently running job' do
expect(worker.max_running_jobs).to eq(1)
end
end
describe '#perform_work' do
context 'when there is remaining work' do
before do
Gitlab::Redis::SharedState.with do |redis|
redis.set(redis_key, project_daily_statistic.id)
end
allow(Gitlab::Saas).to receive(:feature_available?).with(:purchases_additional_minutes).and_return(true)
end
it "flushes stale counters and updates the redis start id" do
Gitlab::Redis::SharedState.with do |redis|
expect(redis.get(redis_key).to_i).to eq(project_daily_statistic.id)
end
expect_next_instance_of(Gitlab::Counters::FlushStaleCounterIncrements) do |service|
expect(service).to receive(:execute)
.and_call_original
end
expect_initial_counts
worker.perform_work
expect_flushed_counts
Gitlab::Redis::SharedState.with do |redis|
expect(redis.get(redis_key).to_i).to eq(1 + project_daily_statistic_three.id)
end
end
def expect_initial_counts
expect(project_daily_statistic.fetch_count).to eq(5)
expect(project_daily_statistic_two.fetch_count).to eq(0)
expect(project_daily_statistic_three.fetch_count).to eq(10)
end
def expect_flushed_counts
expect(project_daily_statistic.reload.fetch_count).to eq(10)
expect(project_daily_statistic_two.reload.fetch_count).to eq(0)
expect(project_daily_statistic_three.reload.fetch_count).to eq(20)
end
end
end
end
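
Editor's note: the expectations above pin down a Redis-cursor batching pattern. The worker reads the last flushed id from `flush_stale_counters:last_id:<Model>`, flushes one batch, then advances the cursor one past the batch's last id (hence the `1 + project_daily_statistic_three.id` expectation), and `remaining_work_count` compares the cursor against a hard-coded `end_id`. A minimal sketch of that cursor loop, with illustrative names and a no-op flush step:

require 'redis'

# Minimal Redis-cursor batching sketch. `redis` is any client responding
# to get/set (for example, the redis gem); `flush_range` stands in for
# the real FlushStaleCounterIncrements service.
class StaleCounterFlusher
  BATCH_LIMIT = 1_000

  def initialize(redis:, cursor_key:, end_id:)
    @redis = redis
    @cursor_key = cursor_key
    @end_id = end_id
  end

  def remaining_work?
    @redis.get(@cursor_key).to_i <= @end_id
  end

  def perform_work
    return unless remaining_work?

    start_id = @redis.get(@cursor_key).to_i
    stop_id = [start_id + BATCH_LIMIT - 1, @end_id].min
    flush_range(start_id..stop_id)

    # Advance the cursor one past the last processed id, so the next run
    # resumes exactly where this one stopped.
    @redis.set(@cursor_key, stop_id + 1)
  end

  private

  def flush_range(range)
    # The real service would move stale Redis counter increments for ids
    # in `range` into the database here.
  end
end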