Add latest changes from gitlab-org/gitlab@master

This commit is contained in:
GitLab Bot 2025-07-16 09:13:34 +00:00
parent f7a689b906
commit d8cf97d559
101 changed files with 739 additions and 725 deletions

View File

@ -59,7 +59,9 @@ secret_detection:
dependency-scanning:
extends:
- .ds-analyzer
- .reports:rules:dependency_scanning
rules:
# Use !reference to prevent rule merging issues with template's dependency-scanning job
- !reference [.reports:rules:dependency_scanning, rules]
# Analyze dependencies for malicious behavior
# See https://gitlab.com/gitlab-com/gl-security/security-research/package-hunter

View File

@ -1227,8 +1227,9 @@
- <<: *if-schedule-maintenance
- <<: *if-dot-com-gitlab-org-default-branch
changes: *assets-compilation-patterns
- <<: *if-tag
# push assets for stable branches (canonical & security)
- <<: *if-sync-changes-on-stable-branches
changes: *assets-compilation-patterns
- <<: *if-dot-com-gitlab-org-merge-request
changes:
- ".gitlab/ci/caching.gitlab-ci.yml"
@ -1239,7 +1240,6 @@
- if: '$ENABLE_CACHE_ASSETS == "true"'
when: manual
allow_failure: true
- !reference [".releases:rules:canonical-dot-com-security-gitlab-stable-branch-only", rules]
.caching:rules:packages-cleanup:
rules:

View File

@ -168,10 +168,6 @@ detect-tests:
echoinfo 'Related EE RSpec tests:'
echoinfo "$(tr ' ' '\n' < $RSPEC_MATCHING_TESTS_EE_PATH)"
echo ""
echoinfo 'Related JS files:'
echoinfo "$(tr ' ' '\n' < $RSPEC_MATCHING_JS_FILES_PATH)"
echo ""
fi
artifacts:
expire_in: 7d

View File

@ -3200,7 +3200,6 @@ Gitlab/BoundedContexts:
- 'ee/app/services/epics/transfer_service.rb'
- 'ee/app/services/epics/tree_reorder_service.rb'
- 'ee/app/services/epics/update_dates_service.rb'
- 'ee/app/services/epics/update_service.rb'
- 'ee/app/services/external_status_checks/base_service.rb'
- 'ee/app/services/external_status_checks/create_service.rb'
- 'ee/app/services/external_status_checks/destroy_service.rb'

View File

@ -673,7 +673,6 @@ Layout/LineLength:
- 'ee/app/services/ee/resource_access_tokens/create_service.rb'
- 'ee/app/services/ee/system_note_service.rb'
- 'ee/app/services/ee/users/update_service.rb'
- 'ee/app/services/epics/update_service.rb'
- 'ee/app/services/geo/blob_upload_service.rb'
- 'ee/app/services/geo/file_registry_removal_service.rb'
- 'ee/app/services/geo/framework_repository_sync_service.rb'
@ -1633,7 +1632,6 @@ Layout/LineLength:
- 'ee/spec/services/ee/users/destroy_service_spec.rb'
- 'ee/spec/services/ee/users/update_service_spec.rb'
- 'ee/spec/services/epics/issue_promote_service_spec.rb'
- 'ee/spec/services/epics/update_service_spec.rb'
- 'ee/spec/services/external_status_checks/update_service_spec.rb'
- 'ee/spec/services/geo/blob_download_service_spec.rb'
- 'ee/spec/services/geo/container_repository_sync_spec.rb'

View File

@ -89,7 +89,6 @@ Rails/Date:
- 'ee/spec/services/ee/personal_access_tokens/create_service_spec.rb'
- 'ee/spec/services/ee/work_items/callbacks/start_and_due_date_spec.rb'
- 'ee/spec/services/epics/reopen_service_spec.rb'
- 'ee/spec/services/epics/update_service_spec.rb'
- 'ee/spec/services/groups/seat_usage_export_service_spec.rb'
- 'ee/spec/services/issuable/callbacks/time_tracking_spec.rb'
- 'ee/spec/services/iterations/cadences/destroy_service_spec.rb'

View File

@ -392,7 +392,6 @@ RSpec/BeEq:
- 'ee/spec/services/epic_issues/destroy_service_spec.rb'
- 'ee/spec/services/epics/issue_promote_service_spec.rb'
- 'ee/spec/services/epics/update_dates_service_spec.rb'
- 'ee/spec/services/epics/update_service_spec.rb'
- 'ee/spec/services/geo/node_create_service_spec.rb'
- 'ee/spec/services/geo/node_update_service_spec.rb'
- 'ee/spec/services/gitlab_subscriptions/add_on_purchases/self_managed/expire_service_spec.rb'

View File

@ -549,7 +549,6 @@ RSpec/BeforeAllRoleAssignment:
- 'ee/spec/services/epics/reopen_service_spec.rb'
- 'ee/spec/services/epics/transfer_service_spec.rb'
- 'ee/spec/services/epics/tree_reorder_service_spec.rb'
- 'ee/spec/services/epics/update_service_spec.rb'
- 'ee/spec/services/external_status_checks/retry_service_spec.rb'
- 'ee/spec/services/gitlab_subscriptions/preview_billable_user_change_service_spec.rb'
- 'ee/spec/services/gitlab_subscriptions/reconciliations/calculate_seat_count_data_service_spec.rb'

View File

@ -628,7 +628,6 @@ RSpec/ContextWording:
- 'ee/spec/services/epic_issues/update_service_spec.rb'
- 'ee/spec/services/epics/epic_links/list_service_spec.rb'
- 'ee/spec/services/epics/related_epic_links/destroy_service_spec.rb'
- 'ee/spec/services/epics/update_service_spec.rb'
- 'ee/spec/services/external_status_checks/dispatch_service_spec.rb'
- 'ee/spec/services/geo/container_repository_sync_service_spec.rb'
- 'ee/spec/services/geo/container_repository_sync_spec.rb'

View File

@ -875,7 +875,6 @@ RSpec/NamedSubject:
- 'ee/spec/services/epics/related_epic_links/destroy_service_spec.rb'
- 'ee/spec/services/epics/related_epic_links/list_service_spec.rb'
- 'ee/spec/services/epics/reopen_service_spec.rb'
- 'ee/spec/services/epics/update_service_spec.rb'
- 'ee/spec/services/external_status_checks/dispatch_service_spec.rb'
- 'ee/spec/services/external_status_checks/retry_service_spec.rb'
- 'ee/spec/services/geo/base_file_service_spec.rb'

View File

@ -264,7 +264,6 @@ Style/GuardClause:
- 'ee/app/services/ee/projects/gitlab_projects_import_service.rb'
- 'ee/app/services/ee/protected_branches/loggable.rb'
- 'ee/app/services/epics/tree_reorder_service.rb'
- 'ee/app/services/epics/update_service.rb'
- 'ee/app/services/geo/metrics_update_service.rb'
- 'ee/app/services/groups/update_repository_storage_service.rb'
- 'ee/app/services/incident_management/oncall_rotations/remove_participant_service.rb'

View File

@ -289,7 +289,6 @@ Style/IfUnlessModifier:
- 'ee/app/services/ee/protected_branches/create_service.rb'
- 'ee/app/services/ee/users/update_service.rb'
- 'ee/app/services/epics/epic_links/update_service.rb'
- 'ee/app/services/epics/update_service.rb'
- 'ee/app/services/geo/file_registry_removal_service.rb'
- 'ee/app/services/geo/metrics_update_service.rb'
- 'ee/app/services/geo/prune_event_log_service.rb'

View File

@ -5,7 +5,6 @@ module RapidDiffs
extend ActiveSupport::Concern
def diff_files_metadata
return render_404 unless rapid_diffs_enabled?
return render_404 unless diffs_resource.present?
render json: {
@ -14,7 +13,6 @@ module RapidDiffs
end
def diffs_stats
return render_404 unless rapid_diffs_enabled?
return render_404 unless diffs_resource.present?
render json: RapidDiffs::DiffsStatsEntity.represent(
@ -27,7 +25,6 @@ module RapidDiffs
end
def diff_file
return render_404 unless rapid_diffs_enabled?
return render_404 unless diffs_resource.present?
old_path = diff_file_params[:old_path]
@ -52,10 +49,6 @@ module RapidDiffs
private
def rapid_diffs_enabled?
::Feature.enabled?(:rapid_diffs, current_user, type: :beta)
end
def diffs_resource(options = {})
raise NotImplementedError
end

View File

@ -7,8 +7,6 @@ module RapidDiffs
include DiffHelper
def diffs
return render_404 unless rapid_diffs_enabled?
streaming_start_time = Process.clock_gettime(Process::CLOCK_MONOTONIC)
stream_headers
@ -42,10 +40,6 @@ module RapidDiffs
private
def rapid_diffs_enabled?
::Feature.enabled?(:rapid_diffs, current_user, type: :beta)
end
def resource
raise NotImplementedError
end

View File

@ -146,8 +146,6 @@ class Projects::BlobController < Projects::ApplicationController
end
def diff_lines
return render_404 unless rapid_diffs_enabled?
params.require([:since, :to, :offset])
bottom = diff_lines_params[:bottom] == 'true'
@ -178,10 +176,6 @@ class Projects::BlobController < Projects::ApplicationController
attr_reader :branch_name
def rapid_diffs_enabled?
::Feature.enabled?(:rapid_diffs, current_user, type: :beta)
end
def blob
return unless commit

View File

@ -147,8 +147,7 @@ class Projects::CommitController < Projects::ApplicationController
end
def rapid_diffs
return render_404 unless ::Feature.enabled?(:rapid_diffs, current_user, type: :beta) &&
::Feature.enabled?(:rapid_diffs_on_commit_show, current_user, type: :wip)
return render_404 unless ::Feature.enabled?(:rapid_diffs_on_commit_show, current_user, type: :wip)
@rapid_diffs_presenter = RapidDiffs::CommitPresenter.new(
@commit,

View File

@ -214,8 +214,7 @@ class Projects::CompareController < Projects::ApplicationController
end
def rapid_diffs_enabled?
::Feature.enabled?(:rapid_diffs, current_user, type: :beta) &&
::Feature.enabled?(:rapid_diffs_on_compare_show, current_user, type: :wip) &&
::Feature.enabled?(:rapid_diffs_on_compare_show, current_user, type: :wip) &&
!rapid_diffs_force_disabled? &&
params.permit(:format)[:format].blank?
end

View File

@ -158,8 +158,7 @@ class Projects::MergeRequests::CreationsController < Projects::MergeRequests::Ap
end
def rapid_diffs?
::Feature.enabled?(:rapid_diffs, current_user, type: :beta) &&
::Feature.enabled?(:rapid_diffs_on_mr_creation, current_user, type: :beta) &&
::Feature.enabled?(:rapid_diffs_on_mr_creation, current_user, type: :beta) &&
!rapid_diffs_disabled?
end

View File

@ -705,8 +705,7 @@ class Projects::MergeRequestsController < Projects::MergeRequests::ApplicationCo
end
def rapid_diffs_page_enabled?
::Feature.enabled?(:rapid_diffs, current_user, type: :beta) &&
::Feature.enabled?(:rapid_diffs_on_mr_show, current_user, type: :wip) &&
::Feature.enabled?(:rapid_diffs_on_mr_show, current_user, type: :wip) &&
params[:rapid_diffs] == 'true'
end

View File

@ -35,7 +35,6 @@ class User < ApplicationRecord
include Todoable
include Gitlab::InternalEventsTracking
ignore_column :last_access_from_pipl_country_at, remove_after: '2024-11-17', remove_with: '17.7'
ignore_column %i[role skype], remove_after: '2025-09-18', remove_with: '18.4'
DEFAULT_NOTIFICATION_LEVEL = :participating

View File

@ -50,18 +50,23 @@ module Users
params[:search] && users_relation.size >= SEARCH_LIMIT
end
def groups
return [] unless current_user
def groups(organization: nil)
return [] unless organization.nil? || organization.is_a?(Organizations::Organization)
relation = current_user.authorized_groups
strong_memoize_with(:groups, organization) do
break [] unless current_user
if params[:search]
relation.gfm_autocomplete_search(params[:search]).limit(SEARCH_LIMIT).to_a
else
relation.with_route.sort_by(&:full_path)
relation = current_user.authorized_groups
relation = relation.in_organization(organization) if organization
if params[:search]
relation.gfm_autocomplete_search(params[:search]).limit(SEARCH_LIMIT).to_a
else
relation.with_route.sort_by(&:full_path)
end
end
end
strong_memoize_attr :groups
def render_participants_as_hash(participants)
participants.map { |participant| participant_as_hash(participant) }

View File

@ -56,7 +56,7 @@ module Issuable
def update_issuables(type, ids)
model_class = type.classify.constantize
update_class = type.classify.pluralize.constantize::UpdateService
update_class = update_class(type)
items = find_issuables(parent, model_class, ids)
authorized_issuables = []
@ -74,6 +74,11 @@ module Issuable
authorized_issuables
end
# overridden in EE
def update_class(type)
type.classify.pluralize.constantize::UpdateService
end
def find_issuables(parent, model_class, ids)
issuables = model_class.id_in(ids)

View File

@ -14,7 +14,7 @@ module Projects
all_members +
project_members
participants += groups unless relation_at_search_limit?(project_members)
participants += groups(organization: organization) unless relation_at_search_limit?(project_members)
participants = organization_user_details_for_participants(participants.uniq)
render_participants_as_hash(participants)

View File

@ -7,6 +7,7 @@ module Ci
data_consistency :sticky
sidekiq_options retry: 3
max_concurrency_limit_percentage 0.32
include PipelineBackgroundQueue
def perform(job_id)

View File

@ -20,7 +20,6 @@ module ApplicationWorker
prefer_calling_context_feature_category false
set_queue
after_set_class_attribute { set_queue }
set_default_concurrency_limit
def structured_payload(payload = {})
context = Gitlab::ApplicationContext.current.merge(
@ -252,9 +251,5 @@ module ApplicationWorker
schedule_at
end
def set_default_concurrency_limit
concurrency_limit -> { 0 }
end
end
end

View File

@ -41,6 +41,12 @@ module WorkerAttributes
DEFAULT_DEFER_DELAY = 5.seconds
DEFAULT_CONCURRENCY_LIMIT_PERCENTAGE_BY_URGENCY = {
high: 0.35,
low: 0.25,
throttled: 0.15
}.freeze
class_methods do
def feature_category(value, *extras)
set_class_attribute(:feature_category, value)
@ -204,6 +210,21 @@ module WorkerAttributes
)
end
# If concurrency_limit attribute is not defined, this sets the maximum percentage of fleet
# that this worker can use.
# For example, 0.3 means the worker class is allowed to use 30% of the fleet's threads concurrently.
def max_concurrency_limit_percentage(value)
unless value.is_a?(Numeric) && value.between?(0, 1)
raise ArgumentError, "max_concurrency_limit_percentage must be a number between 0 and 1, got: #{value.inspect}"
end
set_class_attribute(:max_concurrency_limit_percentage, value)
end
def get_max_concurrency_limit_percentage
get_class_attribute(:max_concurrency_limit_percentage) || DEFAULT_CONCURRENCY_LIMIT_PERCENTAGE_BY_URGENCY.fetch(get_urgency)
end
def get_weight
get_class_attribute(:weight) ||
NAMESPACE_WEIGHTS[queue_namespace] ||

View File

@ -22,6 +22,8 @@ class FlushCounterIncrementsWorker
idempotent!
max_concurrency_limit_percentage 0.5
def perform(model_name, model_id, attribute)
return unless self.class.const_defined?(model_name)

View File

@ -9,6 +9,7 @@ module WebHooks
urgency :low
sidekiq_options retry: 3
loggable_arguments 0, 2, 3
max_concurrency_limit_percentage 0.53
idempotent!

View File

@ -5,5 +5,5 @@ introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/194685
rollout_issue_url: https://gitlab.com/gitlab-org/gitlab/-/issues/549452
milestone: '18.2'
group: group::organizations
type: wip
type: beta
default_enabled: false

View File

@ -1,9 +0,0 @@
---
name: rapid_diffs
feature_issue_url: https://gitlab.com/groups/gitlab-org/-/epics/11559
introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/147686
rollout_issue_url: https://gitlab.com/gitlab-org/gitlab/-/issues/502705
milestone: '16.11'
group: group::code review
type: beta
default_enabled: true

View File

@ -6,3 +6,4 @@ milestone: '13.2'
type: development
group: group::source code
default_enabled: false
intended_to_rollout_by: "2025-11-01"

View File

@ -0,0 +1,10 @@
---
name: use_max_concurrency_limit_percentage_as_default_limit
description: Set default concurrency limit for workers based on max_concurrency_limit_percentage (based on urgency and sidekiq shard's capacity)
feature_issue_url: https://gitlab.com/gitlab-com/gl-infra/data-access/durability/team/-/issues/215
introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/194881
rollout_issue_url: https://gitlab.com/gitlab-org/gitlab/-/issues/553604
milestone: '18.2'
group: group::durability
type: gitlab_com_derisk
default_enabled: false

View File

@ -0,0 +1,16 @@
- title: "User setting to disable exact code search"
removal_milestone: "18.6"
announcement_milestone: "18.3"
breaking_change: false
reporter: changzhengliu
stage: ai-powered
issue_url: https://gitlab.com/gitlab-org/gitlab/-/issues/554933
impact: low
impact_offering: [GitLab.com]
scope: instance
resolution_role: Developer
manual_task: false
body: | # (required) Don't change this line.
The user setting to disable exact code search is now deprecated. On GitLab.com, you can no longer disable exact code search in profile preferences.
Exact code search provides a better user experience and is compatible with existing search APIs. This user setting is planned for removal in GitLab 18.6 to ensure all users benefit from improved search functionality.

View File

@ -3,7 +3,9 @@
class CreateAiUsageEvents < Gitlab::Database::Migration[2.3]
milestone '18.2'
def change
def up
return if table_exists?(:ai_usage_events)
# rubocop:disable Migration/Datetime -- "timestamp" is a column name
create_table :ai_usage_events,
options: 'PARTITION BY RANGE (timestamp)',
@ -21,4 +23,8 @@ class CreateAiUsageEvents < Gitlab::Database::Migration[2.3]
end
# rubocop:enable Migration/Datetime
end
def down
drop_table :ai_usage_events, if_exists: true, force: :cascade
end
end

View File

@ -41,7 +41,7 @@ GitLab team members with edit access can update the [source](https://lucid.app/l
### Gitaly setup
GitLab Dedicated deploys Gitaly [in a sharded setup](../gitaly/praefect/_index.md#before-deploying-gitaly-cluster), not a Gitaly Cluster. In this setup:
GitLab Dedicated deploys Gitaly [in a sharded setup](../gitaly/praefect/_index.md#before-deploying-gitaly-cluster-praefect), not a Gitaly Cluster. In this setup:
- Customer repositories are spread across multiple virtual machines.
- GitLab manages [storage weights](../repository_storage_paths.md#configure-where-new-repositories-are-stored) on behalf of the customer.

View File

@ -38,8 +38,8 @@ repository storage is either:
- A Gitaly storage with direct access to repositories using [storage paths](../repository_storage_paths.md),
where each repository is stored on a single Gitaly node. All requests are routed to this node.
- A [virtual storage](praefect/_index.md#virtual-storage) provided by [Gitaly Cluster](praefect/_index.md), where each
repository can be stored on multiple Gitaly nodes for fault tolerance. In a Gitaly Cluster:
- A [virtual storage](praefect/_index.md#virtual-storage) provided by [Gitaly Cluster (Praefect)](praefect/_index.md),
where each repository can be stored on multiple Gitaly nodes for fault tolerance. With Gitaly Cluster (Praefect):
- Read requests are distributed between multiple Gitaly nodes, which can improve performance.
- Write requests are broadcast to repository replicas.
@ -56,7 +56,7 @@ In this example:
## Disk requirements
Gitaly and Gitaly Cluster require fast local storage to perform effectively because they are heavy
Gitaly and Gitaly Cluster (Praefect) require fast local storage to perform effectively because they are heavy
I/O-based processes. Therefore, we strongly recommend that all Gitaly nodes use solid-state drives
(SSDs). These SSDs should have high read and write throughput as Gitaly operates on many small files
concurrently.
@ -65,7 +65,7 @@ As a reference, the following charts show the P99 disk IOPS across the Gitaly pr
GitLab.com at a one-minute granularity. The data were queried from a seven-day representative
period, starting and ending on a Monday morning. Note the regular spikes in IOPS as traffic
becomes more intense during the work week. The raw data shows even larger spikes, with writes
peaking at 8000 IOPS. The available disk throughput must be able to handle these spikes to avoid
peaking at 8000 IOPS. The available disk throughput must handle these spikes to avoid
disruptions to Gitaly requests.
- P99 disk IOPS (reads):
@ -97,7 +97,7 @@ instances typically increases the available disk IOPS. You may also choose to se
disk type with guaranteed throughput. Refer to the documentation of your cloud provider about how to
configure IOPS correctly.
For repository data, only local storage is supported for Gitaly and Gitaly Cluster for performance and consistency reasons.
For repository data, only local storage is supported for Gitaly and Gitaly Cluster (Praefect) for performance and consistency reasons.
Alternatives such as [NFS](../nfs.md) or [cloud-based file systems](../nfs.md#avoid-using-cloud-based-file-systems) are not supported.
## Gitaly architecture
@ -155,7 +155,7 @@ Gitaly comes pre-configured with a Linux package installation, which is a config
- Self-compiled installations or custom Gitaly installations, see [Configure Gitaly](configure_gitaly.md).
GitLab installations for more than 2000 active users performing daily Git write operation may be
best suited by using Gitaly Cluster.
best suited by using Gitaly Cluster (Praefect).
## Gitaly CLI
@ -194,7 +194,7 @@ your assumptions, resulting in performance degradation, instability, and even da
- Gitaly has optimizations such as the [`info/refs` advertisement cache](https://gitlab.com/gitlab-org/gitaly/blob/master/doc/design_diskcache.md),
that rely on Gitaly controlling and monitoring access to repositories by using the official gRPC
interface.
- [Gitaly Cluster](praefect/_index.md) has optimizations, such as fault tolerance and
- [Gitaly Cluster (Praefect)](praefect/_index.md) has optimizations, such as fault tolerance and
[distributed reads](praefect/_index.md#distributed-reads), that depend on the gRPC interface and database
to determine repository state.

View File

@ -12,29 +12,29 @@ The following tables are intended to guide you to choose the right combination o
## Gitaly capabilities
| Capability | Availability | Recoverability | Data Resiliency | Performance | Risks/Trade-offs|
|------------|--------------|----------------|-----------------|-------------|-----------------|
| Gitaly Cluster | Very high - tolerant of node failures | RTO for a single node of 10 s with no manual intervention | Data is stored on multiple nodes | Good - While writes may take slightly longer due to voting, read distribution improves read speeds | Trade-off - Slight decrease in write speed for redundant, strongly-consistent storage solution. Risks - [Does not support snapshot backups](praefect/_index.md#snapshot-backup-and-recovery), GitLab backup task can be slow for large data sets |
| Gitaly Shards | Single storage location is a single point of failure | Would need to restore only shards which failed | Single point of failure | Good - can allocate repositories to shards to spread load | Trade-off - Need to manually configure repositories into different shards to balance loads / storage space. Risks - Single point of failure relies on recovery process when single-node failure occurs |
| Capability | Availability | Recoverability | Data Resiliency | Performance | Risks/Trade-offs |
|:--------------------------|:-----------------------------------------------------|:----------------------------------------------------------|:---------------------------------|:---------------------------------------------------------------------------------------------------|:-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| Gitaly Cluster (Praefect) | Very high - tolerant of node failures | RTO for a single node of 10 s with no manual intervention | Data is stored on multiple nodes | Good - While writes may take slightly longer due to voting, read distribution improves read speeds | Trade-off - Slight decrease in write speed for redundant, strongly-consistent storage solution. Risks - [Does not support snapshot backups](praefect/_index.md#snapshot-backup-and-recovery), GitLab backup task can be slow for large data sets |
| Gitaly Shards | Single storage location is a single point of failure | Would need to restore only shards which failed | Single point of failure | Good - can allocate repositories to shards to spread load | Trade-off - Need to manually configure repositories into different shards to balance loads / storage space. Risks - Single point of failure relies on recovery process when single-node failure occurs |
## Geo capabilities
If your availability needs to span multiple zones or multiple locations, read about [Geo](../geo/_index.md).
| Capability | Availability | Recoverability | Data Resiliency | Performance | Risks/Trade-offs|
|------------|--------------|----------------|-----------------|-------------|-----------------|
| Geo | Depends on the architecture of the Geo site. It is possible to deploy secondaries in single and multiple node configurations. | Eventually consistent. Recovery point depends on replication lag, which depends on a number of factors such as network speeds. Geo supports failover from a primary to secondary site using manual commands that are scriptable. | Geo replicates and verifies 100% of planned data types. See the [replicated data types table](../geo/replication/datatypes.md#replicated-data-types) for more detail. | Improves read/clone times for users of a secondary. | Geo is not intended to replace other backup/restore solutions. Because of replication lag and the possibility of replicating bad data from a primary, customers should also take regular backups of their primary site and test the restore process. |
| Capability | Availability | Recoverability | Data Resiliency | Performance | Risks/Trade-offs |
|:-----------|:------------------------------------------------------------------------------------------------------------------------------|:--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|:----------------------------------------------------------------------------------------------------------------------------------------------------------------------|:----------------------------------------------------|:-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| Geo | Depends on the architecture of the Geo site. It is possible to deploy secondaries in single and multiple node configurations. | Eventually consistent. Recovery point depends on replication lag, which depends on many factors such as network speeds. Geo supports failover from a primary to secondary site using manual commands that are scriptable. | Geo replicates and verifies 100% of planned data types. See the [replicated data types table](../geo/replication/datatypes.md#replicated-data-types) for more detail. | Improves read/clone times for users of a secondary. | Geo is not intended to replace other backup/restore solutions. Because of replication lag and the possibility of replicating bad data from a primary, customers should also take regular backups of their primary site and test the restore process. |
## Scenarios for failure modes and available mitigation paths
The following table outlines failure modes and mitigation paths for the product offerings detailed in the previous tables.
Gitaly Cluster install assumes an odd number replication factor of 3 or greater.
Gitaly Cluster (Praefect) install assumes an odd number replication factor of 3 or greater.
| Gitaly Mode | Loss of Single Gitaly Node | Application / Data Corruption | Regional Outage (Loss of Instance) | Notes |
| ----------- | -------------------------- | ----------------------------- | ---------------------------------- | ----- |
| Single Gitaly Node | Downtime - Must restore from backup | Downtime - Must restore from Backup | Downtime - Must wait for outage to end | |
| Single Gitaly Node + Geo Secondary | Downtime - Must restore from backup, can perform a manual failover to secondary | Downtime - Must restore from Backup, errors could have propagated to secondary | Manual intervention - failover to Geo secondary | |
| Sharded Gitaly Install | Partial Downtime - Only repositories on impacted node affected, must restore from backup | Partial Downtime - Only repositories on impacted node affected, must restore from backup | Downtime - Must wait for outage to end | |
| Sharded Gitaly Install + Geo Secondary | Partial Downtime - Only repositories on impacted node affected, must restore from backup, could perform manual failover to secondary for impacted repositories | Partial Downtime - Only repositories on impacted node affected, must restore from backup, errors could have propagated to secondary | Manual intervention - failover to Geo secondary | |
| Gitaly Cluster Install* | No Downtime - swaps repository primary to another node after 10 seconds | Not applicable; All writes are voted on by multiple Gitaly Cluster nodes | Downtime - Must wait for outage to end | Snapshot backups for Gitaly Cluster nodes not supported at this time |
| Gitaly Cluster Install* + Geo Secondary | No Downtime - swaps repository primary to another node after 10 seconds | Not applicable; All writes are voted on by multiple Gitaly Cluster nodes | Manual intervention - failover to Geo secondary | Snapshot backups for Gitaly Cluster nodes not supported at this time |
| Gitaly Mode | Loss of Single Gitaly Node | Application / Data Corruption | Regional Outage (Loss of Instance) | Notes |
|:---------------------------------------------------|:---------------------------------------------------------------------------------------------------------------------------------------------------------------|:------------------------------------------------------------------------------------------------------------------------------------|:------------------------------------------------|:--------------------------------------------------------------------------------|
| Single Gitaly Node | Downtime - Must restore from backup | Downtime - Must restore from Backup | Downtime - Must wait for outage to end | |
| Single Gitaly Node + Geo Secondary | Downtime - Must restore from backup, can perform a manual failover to secondary | Downtime - Must restore from Backup, errors could have propagated to secondary | Manual intervention - failover to Geo secondary | |
| Sharded Gitaly Install | Partial Downtime - Only repositories on impacted node affected, must restore from backup | Partial Downtime - Only repositories on impacted node affected, must restore from backup | Downtime - Must wait for outage to end | |
| Sharded Gitaly Install + Geo Secondary | Partial Downtime - Only repositories on impacted node affected, must restore from backup, could perform manual failover to secondary for impacted repositories | Partial Downtime - Only repositories on impacted node affected, must restore from backup, errors could have propagated to secondary | Manual intervention - failover to Geo secondary | |
| Gitaly Cluster (Praefect) Install* | No Downtime - swaps repository primary to another node after 10 seconds | Not applicable; All writes are voted on by multiple Gitaly Cluster (Praefect) nodes | Downtime - Must wait for outage to end | Snapshot backups for Gitaly Cluster (Praefect) nodes not supported at this time |
| Gitaly Cluster (Praefect) Install* + Geo Secondary | No Downtime - swaps repository primary to another node after 10 seconds | Not applicable; All writes are voted on by multiple Gitaly Cluster (Praefect) nodes | Manual intervention - failover to Geo secondary | Snapshot backups for Gitaly Cluster (Praefect) nodes not supported at this time |

View File

@ -39,7 +39,7 @@ masks the problem by:
The same approach doesn't fit a container-based lifecycle where a container or pod needs to fully shutdown and start as a new container or pod.
Gitaly Cluster (Praefect) solves the data and service high-availability aspect by replicating data across instances. However, Gitaly Cluster is unsuited to run in Kubernetes
Gitaly Cluster (Praefect) solves the data and service high-availability aspect by replicating data across instances. However, Gitaly Cluster (Praefect) is unsuited to run in Kubernetes
because of [existing issues and design constraints](praefect/_index.md#known-issues) that are augmented by a container-based platform.
To support a Cloud Native deployment, Gitaly (non-Cluster) is the only option.

View File

@ -2,7 +2,7 @@
stage: Data Access
group: Gitaly
info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments
title: Gitaly Cluster
title: Gitaly Cluster (Praefect)
---
{{< details >}}
@ -31,7 +31,7 @@ Gitaly can be run in a clustered configuration to:
In this configuration, every Git repository can be stored on multiple Gitaly nodes in the cluster.
Using a Gitaly Cluster increases fault tolerance by:
Using Gitaly Cluster (Praefect) increases fault tolerance by:
- Replicating write operations to warm standby Gitaly nodes.
- Detecting Gitaly node failures.
@ -39,13 +39,12 @@ Using a Gitaly Cluster increases fault tolerance by:
{{< alert type="note" >}}
Technical support for Gitaly clusters is limited to GitLab Premium and Ultimate
Technical support for Gitaly Cluster (Praefect) is limited to GitLab Premium and Ultimate
customers.
{{< /alert >}}
The following shows GitLab set up to access `storage-1`, a virtual storage provided by Gitaly
Cluster:
The following shows GitLab set up to access `storage-1`, a virtual storage provided by Gitaly Cluster (Praefect):
![GitLab application interacting with virtual Gitaly storage, which interacts with Gitaly physical storage](img/cluster_example_v13_3.png)
@ -57,7 +56,7 @@ In this example:
- The [replication factor](#replication-factor) is `3`. Three copies are maintained
of each repository.
The availability objectives for Gitaly clusters assuming a single node failure are:
The availability objectives for Gitaly Cluster (Praefect) assuming a single node failure are:
- Recovery Point Objective (RPO): Less than 1 minute.
@ -82,21 +81,21 @@ RPO and RTO discussed previously.
## Comparison to Geo
Gitaly Cluster and [Geo](../../geo/_index.md) both provide redundancy. However the redundancy of:
Gitaly Cluster (Praefect) and [Geo](../../geo/_index.md) both provide redundancy. However the redundancy of:
- Gitaly Cluster provides fault tolerance for data storage and is invisible to the user. Users are
not aware when Gitaly Cluster is used.
- Gitaly Cluster (Praefect) provides fault tolerance for data storage and is invisible to the user. Users are
not aware when Gitaly Cluster (Praefect) is used.
- Geo provides [replication](../../geo/_index.md) and [disaster recovery](../../geo/disaster_recovery/_index.md) for
an entire instance of GitLab. Users know when they are using Geo for
[replication](../../geo/_index.md). Geo [replicates multiple data types](../../geo/replication/datatypes.md#replicated-data-types),
including Git data.
The following table outlines the major differences between Gitaly Cluster and Geo:
The following table outlines the major differences between Gitaly Cluster (Praefect) and Geo:
| Tool | Nodes | Locations | Latency tolerance | Failover | Consistency | Provides redundancy for |
|:---------------|:---------|:----------|:------------------------------------------------------------------------------------------------------|:----------------------------------------------------------------------------|:--------------------------------------|:------------------------|
| Gitaly Cluster | Multiple | Single | [Less than 1 second, ideally single-digit milliseconds](configure.md#network-latency-and-connectivity) | [Automatic](configure.md#automatic-failover-and-primary-election-strategies) | [Strong](#strong-consistency) | Data storage in Git |
| Geo | Multiple | Multiple | Up to one minute | [Manual](../../geo/disaster_recovery/_index.md) | Eventual | Entire GitLab instance |
| Tool | Nodes | Locations | Latency tolerance | Failover | Consistency | Provides redundancy for |
|:--------------------------|:---------|:----------|:-------------------------------------------------------------------------------------------------------|:-----------------------------------------------------------------------------|:------------------------------|:------------------------|
| Gitaly Cluster (Praefect) | Multiple | Single | [Less than 1 second, ideally single-digit milliseconds](configure.md#network-latency-and-connectivity) | [Automatic](configure.md#automatic-failover-and-primary-election-strategies) | [Strong](#strong-consistency) | Data storage in Git |
| Geo | Multiple | Multiple | Up to one minute | [Manual](../../geo/disaster_recovery/_index.md) | Eventual | Entire GitLab instance |
For more information, see:
@ -108,9 +107,9 @@ For more information, see:
Virtual storage makes it viable to have a single repository storage in GitLab to simplify repository
management.
Virtual storage with Gitaly Cluster can usually replace direct Gitaly storage configurations.
Virtual storage with Gitaly Cluster (Praefect) can usually replace direct Gitaly storage configurations.
However, this is at the expense of additional storage space needed to store each repository on multiple
Gitaly nodes. The benefit of using Gitaly Cluster virtual storage over direct Gitaly storage is:
Gitaly nodes. The benefit of using Gitaly Cluster (Praefect) virtual storage over direct Gitaly storage is:
- Improved fault tolerance, because each Gitaly node has a copy of every repository.
- Improved resource utilization, reducing the need for over-provisioning for shard-specific peak
@ -133,14 +132,14 @@ As with standard Gitaly storages, virtual storages can be sharded.
{{< alert type="warning" >}}
The storage layout is an internal detail of Gitaly Cluster and is not guaranteed to remain stable between releases.
The storage layout is an internal detail of Gitaly Cluster (Praefect) and is not guaranteed to remain stable between releases.
The information here is only for informational purposes and to help with debugging. Performing changes in the
repositories directly on the disk is not supported and may lead to breakage or the changes being overwritten.
{{< /alert >}}
Gitaly Cluster's virtual storages provide an abstraction that looks like a single storage but actually consists of
multiple physical storages. Gitaly Cluster has to replicate each operation to each physical storage. Operations
Gitaly Cluster (Praefect) virtual storages provide an abstraction that looks like a single storage but actually consists of
multiple physical storages. Gitaly Cluster (Praefect) has to replicate each operation to each physical storage. Operations
may succeed on some of the physical storages but fail on others.
Partially applied operations can cause problems with other operations and leave the system in a state it can't recover from.
@ -159,7 +158,7 @@ recreation of the repository.
These atomicity issues have caused multiple problems in the past with:
- Geo syncing to a secondary site with Gitaly Cluster.
- Geo syncing to a secondary site with Gitaly Cluster (Praefect).
- Backup restoration.
- Repository moves between repository storages.
@ -183,9 +182,9 @@ follow the [hashed storage](../../repository_storage_paths.md#hashed-storage) sc
{{< /history >}}
When Gitaly Cluster creates a repository, it assigns the repository a unique and permanent ID called the _repository ID_. The repository ID is
internal to Gitaly Cluster and doesn't relate to any IDs elsewhere in GitLab. If a repository is removed from Gitaly Cluster and later moved
back, the repository is assigned a new repository ID and is a different repository from Gitaly Cluster's perspective. The sequence of repository IDs
When Gitaly Cluster (Praefect) creates a repository, it assigns the repository a unique and permanent ID called the _repository ID_. The repository ID is
internal to Gitaly Cluster (Praefect) and doesn't relate to any IDs elsewhere in GitLab. If a repository is removed from Gitaly Cluster (Praefect) and later moved
back, the repository is assigned a new repository ID and is a different repository from the perspective of Gitaly Cluster (Praefect). The sequence of repository IDs
always increases, but there may be gaps in the sequence.
The repository ID is used to derive a unique storage path called _replica path_ for each repository on the cluster. The replicas of
@ -229,9 +228,9 @@ Follow the [instructions in hashed storage's documentation](../../repository_sto
### Atomicity of operations
Gitaly Cluster uses the PostgreSQL metadata store with the storage layout to ensure atomicity of repository creation,
Gitaly Cluster (Praefect) uses the PostgreSQL metadata store with the storage layout to ensure atomicity of repository creation,
deletion, and move operations. The disk operations can't be atomically applied across multiple storages. However, PostgreSQL guarantees
the atomicity of the metadata operations. Gitaly Cluster models the operations in a manner that the failing operations always leave
the atomicity of the metadata operations. Gitaly Cluster (Praefect) models the operations in a manner that the failing operations always leave
the metadata consistent. The disks may contain stale state even after successful operations. This situation is expected and
the leftover state does not interfere with future operations but may use up disk space unnecessarily until a clean up is
performed.
@ -264,12 +263,12 @@ The leftover state is eventually cleaned up.
#### Repository moves
Unlike Gitaly, Gitaly Cluster doesn't move the repositories in the storages but only virtually moves the repository by updating the
Unlike Gitaly, Gitaly Cluster (Praefect) doesn't move the repositories in the storages but only virtually moves the repository by updating the
relative path of the repository in the metadata store.
## Components
Gitaly Cluster consists of multiple components:
Gitaly Cluster (Praefect) consists of multiple components:
- [Load balancer](configure.md#load-balancer) for distributing requests and providing fault-tolerant access to
Praefect nodes.
@ -281,7 +280,7 @@ Gitaly Cluster consists of multiple components:
## Architecture
Praefect is a router and transaction manager for Gitaly, and a required
component for running a Gitaly Cluster.
component for running Gitaly Cluster (Praefect).
![Praefect distributing incoming connections to Gitaly cluster nodes](img/praefect_architecture_v12_10.png)
@ -289,7 +288,7 @@ For more information, see [Gitaly High Availability (HA) Design](https://gitlab.
## Features
Gitaly Cluster provides the following features:
Gitaly Cluster (Praefect) provides the following features:
- [Distributed reads](#distributed-reads) among Gitaly nodes.
- [Strong consistency](#strong-consistency) of the secondary replicas.
@ -298,12 +297,12 @@ Gitaly Cluster provides the following features:
primary Gitaly node to secondary Gitaly nodes.
- Reporting of possible [data loss](recovery.md#check-for-data-loss) if replication queue isn't empty.
Follow the [Gitaly Cluster epic](https://gitlab.com/groups/gitlab-org/-/epics/1489) for improvements
Follow the [epic 1489](https://gitlab.com/groups/gitlab-org/-/epics/1489) for proposed improvements
including [horizontally distributing reads](https://gitlab.com/groups/gitlab-org/-/epics/2013).
### Distributed reads
Gitaly Cluster supports distribution of read operations across Gitaly nodes that are configured for
Gitaly Cluster (Praefect) supports distribution of read operations across Gitaly nodes that are configured for
the [virtual storage](#virtual-storage).
All RPCs marked with the `ACCESSOR` option are redirected to an up to date and healthy Gitaly node.
@ -326,42 +325,42 @@ You can [monitor distribution of reads](monitoring.md) by using Prometheus.
### Strong consistency
Gitaly Cluster provides strong consistency by writing changes synchronously to all healthy, up-to-date replicas. If a
Gitaly Cluster (Praefect) provides strong consistency by writing changes synchronously to all healthy, up-to-date replicas. If a
replica is outdated or unhealthy at the time of the transaction, the write is asynchronously replicated to it.
Strong consistency is the primary replication method. A subset of operations still use replication jobs
(eventual consistency) instead of strong consistency. Refer to the
[strong consistency epic](https://gitlab.com/groups/gitlab-org/-/epics/1189) for more information.
If strong consistency is unavailable, Gitaly Cluster guarantees eventual consistency. In this case. Gitaly Cluster
replicates all writes to secondary Gitaly nodes after the write to the primary Gitaly node has occurred.
If strong consistency is unavailable, Gitaly Cluster (Praefect) guarantees eventual consistency. In this case, Gitaly Cluster
(Praefect) replicates all writes to secondary Gitaly nodes after the write to the primary Gitaly node has occurred.
For more information on monitoring strong consistency, see
[Monitoring Gitaly Cluster (Praefect)](monitoring.md).
### Replication factor
Replication factor is the number of copies Gitaly Cluster maintains of a given repository. A higher
Replication factor is the number of copies Gitaly Cluster (Praefect) maintains of a given repository. A higher
replication factor:
- Offers better redundancy and distribution of read workload.
- Results in higher storage cost.
By default, Gitaly Cluster replicates repositories to every storage in a
By default, Gitaly Cluster (Praefect) replicates repositories to every storage in a
[virtual storage](#virtual-storage).
For configuration information, see [Configure replication factor](configure.md#configure-replication-factor).
## Upgrade Gitaly Cluster
## Upgrade Gitaly Cluster (Praefect)
To upgrade a Gitaly Cluster, follow the documentation for
[zero-downtime upgrades](../../../update/zero_downtime.md).
## Downgrade Gitaly Cluster to a previous version
## Downgrade Gitaly Cluster (Praefect) to a previous version
If you need to roll back a Gitaly Cluster to an earlier version, some Praefect database migrations may need to be reverted.
If you need to roll back a Gitaly Cluster (Praefect) to an earlier version, some Praefect database migrations may need to be reverted.
To downgrade a Gitaly Cluster (assuming multiple Praefect nodes):
To downgrade a Gitaly Cluster (Praefect), assuming multiple Praefect nodes:
1. Stop the Praefect service on all Praefect nodes:
@ -396,55 +395,55 @@ To downgrade a Gitaly Cluster (assuming multiple Praefect nodes):
gitlab-ctl start praefect
```
## Migrate to Gitaly Cluster
## Migrate to Gitaly Cluster (Praefect)
{{< alert type="warning" >}}
Some [known issues](#known-issues) exist in Gitaly Cluster. Review the following information before you continue.
Some [known issues](#known-issues) exist in Gitaly Cluster (Praefect). Review the following information before you continue.
{{< /alert >}}
Before migrating to Gitaly Cluster:
Before migrating to Gitaly Cluster (Praefect):
- Review [Before deploying Gitaly Cluster](#before-deploying-gitaly-cluster).
- Review [Before deploying Gitaly Cluster (Praefect)](#before-deploying-gitaly-cluster-praefect).
- Upgrade to the latest possible version of GitLab, to take advantage of improvements and bug fixes.
To migrate to Gitaly Cluster:
To migrate to Gitaly Cluster (Praefect):
1. Create the required storage. Refer to
[repository storage recommendations](configure.md#repository-storage-recommendations).
1. Create and configure [Gitaly Cluster](configure.md).
1. Create and configure [Gitaly Cluster (Praefect)](configure.md).
1. Configure the existing Gitaly instance [to use TCP](configure.md#use-tcp-for-existing-gitlab-instances), if not already configured that way.
1. [Move the repositories](../../operations/moving_repositories.md#moving-repositories). To migrate to
Gitaly Cluster, existing repositories stored outside Gitaly Cluster must be moved. There is no
Gitaly Cluster (Praefect), existing repositories stored outside Gitaly Cluster (Praefect) must be moved. There is no
automatic migration, but the moves can be scheduled with the GitLab API.
Even if you don't use the `default` repository storage, you must ensure it is configured.
[Read more about this limitation](../configure_gitaly.md#gitlab-requires-a-default-repository-storage).
## Migrate off Gitaly Cluster
## Migrate off Gitaly Cluster (Praefect)
If the limitations and tradeoffs of Gitaly Cluster are found to be not suitable for your environment, you can Migrate
off Gitaly Cluster to a sharded Gitaly instance:
If the limitations and tradeoffs of Gitaly Cluster (Praefect) are found to be not suitable for your environment, you can
migrate off Gitaly Cluster (Praefect) to a sharded Gitaly instance:
1. Create and configure a new [Gitaly server](../configure_gitaly.md#run-gitaly-on-its-own-server).
1. [Move the repositories](../../operations/moving_repositories.md#moving-repositories) to the newly created storage. You can
move them by shard or by group, which gives you the opportunity to spread them over multiple Gitaly servers.
## Before deploying Gitaly Cluster
## Before deploying Gitaly Cluster (Praefect)
Gitaly Cluster provides the benefits of fault tolerance, but comes with additional complexity of setup and management.
Before deploying Gitaly Cluster, see:
Gitaly Cluster (Praefect) provides the benefits of fault tolerance, but comes with additional complexity of setup and management.
Before deploying Gitaly Cluster (Praefect), see:
- Existing [known issues](#known-issues).
- [Snapshot backup and recovery](#snapshot-backup-and-recovery).
- [Configuration guidance](../configure_gitaly.md) and [Repository storage options](../../repository_storage_paths.md) to make
sure that Gitaly Cluster is the best setup for you.
sure that Gitaly Cluster (Praefect) is the best setup for you.
If you have not yet migrated to Gitaly Cluster, you have two options:
If you have not yet migrated to Gitaly Cluster (Praefect), you have two options:
- A sharded Gitaly instance.
- Gitaly Cluster.
- Gitaly Cluster (Praefect).
Contact your Customer Success Manager or customer support if you have any questions.
@ -453,24 +452,24 @@ Contact your Customer Success Manager or customer support if you have any questi
The following table outlines current known issues impacting the use of Gitaly Cluster. For
the current status of these issues, refer to the referenced issues and epics.
| Issue | Summary | How to avoid |
|:--------------------------------------------------------------------------------------|:------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|:--------------------------------|
| Gitaly Cluster + Geo - Issues retrying failed syncs | If Gitaly Cluster is used on a Geo secondary site, repositories that have failed to sync could continue to fail when Geo tries to resync them. Recovering from this state requires assistance from support to run manual steps. | In GitLab 15.0 to 15.2, enable the [`gitaly_praefect_generated_replica_paths` feature flag](#praefect-generated-replica-paths) on your Geo primary site. In GitLab 15.3, the feature flag is enabled by default. |
| Praefect unable to insert data into the database due to migrations not being applied after an upgrade | If the database is not kept up to date with completed migrations, then the Praefect node is unable to perform standard operation. | Make sure the Praefect database is up and running with all migrations completed (For example: `sudo -u git -- /opt/gitlab/embedded/bin/praefect -config /var/opt/gitlab/praefect/config.toml sql-migrate-status` should show a list of all applied migrations). Consider [requesting upgrade assistance](https://about.gitlab.com/support/scheduling-upgrade-assistance/) so your upgrade plan can be reviewed by support. |
| Restoring a Gitaly Cluster node from a snapshot in a running cluster | Because the Gitaly Cluster runs with consistent state, introducing a single node that is behind results in the cluster not being able to reconcile the nodes data and other nodes data | Don't restore a single Gitaly Cluster node from a backup snapshot. If you must restore from backup:<br/><br/>1. [Shut down GitLab](../../read_only_gitlab.md#shut-down-the-gitlab-ui).<br/>2. Snapshot all Gitaly Cluster nodes at the same time.<br/>3. Take a database dump of the Praefect database. |
| Limitations when running in Kubernetes, Amazon ECS, or similar | Praefect (Gitaly Cluster) is not supported and Gitaly has known limitations. For more information, see [epic 6127](https://gitlab.com/groups/gitlab-org/-/epics/6127). | Use our [reference architectures](../../reference_architectures/_index.md). |
| Issue | Summary | How to avoid |
|:------------------------------------------------------------------------------------------------------|:-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|:---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| Gitaly Cluster (Praefect) + Geo - Issues retrying failed syncs | If Gitaly Cluster (Praefect) is used on a Geo secondary site, repositories that have failed to sync could continue to fail when Geo tries to resync them. Recovering from this state requires assistance from support to run manual steps. | In GitLab 15.0 to 15.2, enable the [`gitaly_praefect_generated_replica_paths` feature flag](#praefect-generated-replica-paths) on your Geo primary site. In GitLab 15.3, the feature flag is enabled by default. |
| Praefect unable to insert data into the database due to migrations not being applied after an upgrade | If the database is not kept up to date with completed migrations, then the Praefect node is unable to perform standard operation. | Make sure the Praefect database is up and running with all migrations completed (For example: `sudo -u git -- /opt/gitlab/embedded/bin/praefect -config /var/opt/gitlab/praefect/config.toml sql-migrate-status` should show a list of all applied migrations). Consider [requesting upgrade assistance](https://about.gitlab.com/support/scheduling-upgrade-assistance/) so your upgrade plan can be reviewed by support. |
| Restoring a Gitaly Cluster (Praefect) node from a snapshot in a running cluster                        | Because the Gitaly Cluster (Praefect) runs with consistent state, introducing a single node that is behind results in the cluster not being able to reconcile the node's data and other nodes' data | Don't restore a single Gitaly Cluster (Praefect) node from a backup snapshot. If you must restore from backup:<br/><br/>1. [Shut down GitLab](../../read_only_gitlab.md#shut-down-the-gitlab-ui).<br/>2. Snapshot all Gitaly Cluster (Praefect) nodes at the same time.<br/>3. Take a database dump of the Praefect database. |
| Limitations when running in Kubernetes, Amazon ECS, or similar | Gitaly Cluster (Praefect) is not supported and Gitaly has known limitations. For more information, see [epic 6127](https://gitlab.com/groups/gitlab-org/-/epics/6127). | Use our [reference architectures](../../reference_architectures/_index.md). |
### Snapshot backup and recovery
Gitaly Cluster does not support snapshot backups. Snapshot backups can cause issues where the Praefect database becomes
Gitaly Cluster (Praefect) does not support snapshot backups. Snapshot backups can cause issues where the Praefect database becomes
out of sync with the disk storage. Because of how Praefect rebuilds the replication metadata of Gitaly disk information
during a restore, you should use the [official backup and restore Rake tasks](../backup_restore/../_index.md).
The [incremental backup method](../../backup_restore/backup_gitlab.md#incremental-repository-backups)
can be used to speed up Gitaly Cluster backups.
can be used to speed up Gitaly Cluster (Praefect) backups.
If you are unable to use either method, contact customer support for restoration help.
### What to do if you are on Gitaly Cluster experiencing an issue or limitation
### What to do if you are on Gitaly Cluster (Praefect) experiencing an issue or limitation
Contact customer support for immediate help in restoration or recovery.

View File

@ -5,9 +5,9 @@ info: To determine the technical writer assigned to the Stage/Group associated w
title: Configure Gitaly Cluster (Praefect)
---
Configure Gitaly Cluster using either:
Configure Gitaly Cluster (Praefect) using either:
- Gitaly Cluster configuration instructions available as part of
- Gitaly Cluster (Praefect) configuration instructions available as part of
[reference architectures](../../reference_architectures/_index.md) for installations of up to:
- [60 RPS or 3,000 users](../../reference_architectures/3k_users.md#configure-gitaly-cluster).
- [100 RPS or 5,000 users](../../reference_architectures/5k_users.md#configure-gitaly-cluster).
@ -20,14 +20,14 @@ Smaller GitLab installations may need only [Gitaly itself](../_index.md).
{{< alert type="note" >}}
Gitaly Cluster is not yet supported in Kubernetes, Amazon ECS, or similar container environments. For more information, see
Gitaly Cluster (Praefect) is not yet supported in Kubernetes, Amazon ECS, or similar container environments. For more information, see
[epic 6127](https://gitlab.com/groups/gitlab-org/-/epics/6127).
{{< /alert >}}
## Requirements
The minimum recommended configuration for a Gitaly Cluster requires:
The minimum recommended configuration for a Gitaly Cluster (Praefect) requires:
- 1 load balancer
- 1 PostgreSQL server (a [supported version](../../../install/requirements.md#postgresql))
@ -55,7 +55,7 @@ default value. The default value depends on the GitLab version.
### Network latency and connectivity
Network latency for Gitaly Cluster should ideally be measurable in single-digit milliseconds. Latency is particularly
Network latency for Gitaly Cluster (Praefect) should ideally be measurable in single-digit milliseconds. Latency is particularly
important for:
- Gitaly node health checks. Nodes must be able to respond within 1 second.
@ -66,13 +66,13 @@ Achieving acceptable latency between Gitaly nodes:
- On physical networks generally means high bandwidth, single location connections.
- On the cloud generally means in the same region, including allowing cross availability zone replication. These links
are designed for this type of synchronization. Latency of less than 2 ms should be sufficient for Gitaly Cluster.
are designed for this type of synchronization. Latency of less than 2 ms should be sufficient for Gitaly Cluster (Praefect).
If you can't provide low network latencies for replication (for example, between distant locations), consider Geo. For
more information, see [Comparison to Geo](_index.md#comparison-to-geo).
Gitaly Cluster [components](_index.md#components) communicate with each other over many routes. Your firewall rules must
allow the following for Gitaly Cluster to function properly:
Gitaly Cluster (Praefect) [components](_index.md#components) communicate with each other over many routes. Your firewall rules must
allow the following for Gitaly Cluster (Praefect) to function properly:
| From | To | Default port | TLS port |
|:-----------------------|:-----------------------|:-------------|:---------|
@ -142,7 +142,8 @@ You also need the IP/host address for each node:
1. `GITALY_HOST_*`: the IP or host address of each Gitaly server
1. `GITLAB_HOST`: the IP/host address of the GitLab server
If you are using Google Cloud Platform, SoftLayer, or any other vendor that provides a virtual private cloud (VPC) you can use the private addresses for each cloud instance (corresponds to "internal address" for Google Cloud Platform) for `PRAEFECT_HOST`, `GITALY_HOST_*`, and `GITLAB_HOST`.
If you are using Google Cloud Platform, SoftLayer, or any other vendor that provides a virtual private cloud (VPC), you
can use the private addresses for each cloud instance (corresponds to "internal address" for Google Cloud Platform) for `PRAEFECT_HOST`, `GITALY_HOST_*`, and `GITLAB_HOST`.
#### Secrets
@ -662,7 +663,7 @@ Updates to example must be made at:
If you have data on an already existing storage called
`default`, you should configure the virtual storage with another name and
[migrate the data to the Gitaly Cluster storage](_index.md#migrate-to-gitaly-cluster)
[migrate the data to the Gitaly Cluster (Praefect) storage](_index.md#migrate-to-gitaly-cluster-praefect)
afterwards.
{{< /alert >}}
@ -1371,7 +1372,7 @@ Particular attention should be shown to:
{{< alert type="warning" >}}
If you have existing data stored on the default Gitaly storage,
you should [migrate the data to your Gitaly Cluster storage](_index.md#migrate-to-gitaly-cluster)
you should [migrate the data to your Gitaly Cluster (Praefect) storage](_index.md#migrate-to-gitaly-cluster-praefect)
first.
{{< /alert >}}
@ -1485,7 +1486,7 @@ Particular attention should be shown to:
#### Use TCP for existing GitLab instances
When adding Gitaly Cluster to an existing Gitaly instance, the existing Gitaly storage
When adding Gitaly Cluster (Praefect) to an existing Gitaly instance, the existing Gitaly storage
must be listening on TCP/TLS. If `gitaly_address` is not specified, then a Unix socket is used,
which prevents the communication with the cluster.
@ -1575,7 +1576,7 @@ If `default_replication_factor` is unset, the repositories are always replicated
`virtual_storages`. If a new storage node is introduced to the virtual storage, both new and existing repositories are
replicated to the node automatically.
For large Gitaly Cluster deployments with many storage nodes, replicating a repository to every storage node is often not
For large Gitaly Cluster (Praefect) deployments with many storage nodes, replicating a repository to every storage node is often not
sensible and can cause problems. A replication factor of 3 is usually sufficient, which means replicate repositories to
three storages even if more are available. Higher replication factors increase the pressure on the primary storage.
@ -1626,7 +1627,7 @@ repository storage redundancy.
For a replication factor:
- Of `1`: Gitaly and Gitaly Cluster have roughly the same storage requirements.
- Of `1`: Gitaly and Gitaly Cluster (Praefect) have roughly the same storage requirements.
- More than `1`: The amount of required storage is `used space * replication factor`. `used space`
should include any planned future growth.
@ -1778,7 +1779,7 @@ to a newly-elected primary Gitaly node if the current primary node is found to b
### Repository-specific primary nodes
Gitaly Cluster elects a primary Gitaly node separately for each repository. Combined with
Gitaly Cluster (Praefect) elects a primary Gitaly node separately for each repository. Combined with
[configurable replication factors](#configure-replication-factor), you can horizontally scale storage capacity and distribute write load across Gitaly nodes.
Primary elections are run lazily. Praefect doesn't immediately elect a new primary node if the current

View File

@ -2,15 +2,15 @@
stage: Data Access
group: Gitaly
info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments
title: Gitaly Cluster recovery options and tools
title: Gitaly Cluster (Praefect) recovery options and tools
---
Gitaly Cluster can recover from primary-node failure and unavailable repositories. Gitaly Cluster can perform data
recovery and has Praefect tracking database tools.
Gitaly Cluster (Praefect) can recover from primary-node failure and unavailable repositories. Gitaly Cluster (Praefect)
can perform data recovery and has Praefect tracking database tools.
## Manage Gitaly nodes on a Gitaly Cluster
## Manage Gitaly nodes on a Gitaly Cluster (Praefect)
You can add and replace Gitaly nodes on a Gitaly Cluster.
You can add and replace Gitaly nodes on a Gitaly Cluster (Praefect).
### Add new Gitaly nodes
@ -330,7 +330,7 @@ praefect['configuration'] = {
{{< /history >}}
The `remove-repository` Praefect sub-command removes a repository from a Gitaly Cluster, and all state associated with a given repository including:
The `remove-repository` Praefect sub-command removes a repository from a Gitaly Cluster (Praefect), and all state associated with a given repository including:
- On-disk repositories on all relevant Gitaly nodes.
- Any database state tracked by Praefect.
@ -398,7 +398,7 @@ Common maintenance tasks on the Praefect tracking database are documented in thi
{{< /history >}}
The `list-untracked-repositories` Praefect sub-command lists repositories of the Gitaly Cluster that both:
The `list-untracked-repositories` Praefect sub-command lists repositories of the Gitaly Cluster (Praefect) that both:
- Exist for at least one Gitaly storage.
- Aren't tracked in the Praefect tracking database.

View File

@ -2,7 +2,7 @@
stage: Data Access
group: Gitaly
info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments
title: Troubleshooting Gitaly Cluster
title: Troubleshooting Gitaly Cluster (Praefect)
---
{{< details >}}
@ -17,7 +17,7 @@ see [Troubleshooting Gitaly](../troubleshooting.md).
## Check cluster health
The `check` Praefect sub-command runs a series of checks to determine the health of the Gitaly Cluster.
The `check` Praefect sub-command runs a series of checks to determine the health of the Gitaly Cluster (Praefect).
```shell
gitlab-ctl praefect check
@ -117,7 +117,7 @@ To determine the primary node of a repository, use the [`praefect metadata`](#vi
## View repository metadata
Gitaly Cluster maintains a [metadata database](_index.md#components) about the repositories stored on the cluster. Use the `praefect metadata` subcommand
Gitaly Cluster (Praefect) maintains a [metadata database](_index.md#components) about the repositories stored on the cluster. Use the `praefect metadata` subcommand
to inspect the metadata for troubleshooting.
You can retrieve a repository's metadata by its Praefect-assigned repository ID:
@ -266,7 +266,7 @@ This indicates that the virtual storage name used in the
Resolve this by matching the virtual storage names used in Praefect and GitLab configuration.
## Gitaly Cluster performance issues on cloud platforms
## Gitaly Cluster (Praefect) performance issues on cloud platforms
Praefect does not require a lot of CPU or memory, and can run on small virtual machines.
Cloud services may place other limits on the resources that small VMs can use, such as

View File

@ -12,9 +12,6 @@ title: Troubleshooting Gitaly
{{< /details >}}
Refer to the information below when troubleshooting Gitaly. For information on troubleshooting Gitaly Cluster (Praefect),
see [Troubleshooting Gitaly Cluster](praefect/troubleshooting.md).
The following sections provide possible solutions to Gitaly errors.
See also [Gitaly timeout](../settings/gitaly_timeouts.md) settings,
@ -36,7 +33,7 @@ to determine the available and used space on a Gitaly storage:
```ruby
Gitlab::GitalyClient::ServerService.new("default").storage_disk_statistics
# For Gitaly Cluster
# For Gitaly Cluster (Praefect)
Gitlab::GitalyClient::ServerService.new("<storage name>").disk_statistics
```

View File

@ -32,7 +32,7 @@ For more information, see:
querying and scheduling snippet repository moves.
- [The API documentation](../../api/group_repository_storage_moves.md) details the endpoints for
querying and scheduling group repository moves.
- [Migrate to Gitaly Cluster](../gitaly/praefect/_index.md#migrate-to-gitaly-cluster).
- [Migrate to Gitaly Cluster](../gitaly/praefect/_index.md#migrate-to-gitaly-cluster-praefect).
### Moving Repositories

View File

@ -764,6 +764,68 @@ ALTER COLLATION "es_ES.utf8" REFRESH VERSION;
For more information about PostgreSQL collation issues and how they affect database indexes, see the [PostgreSQL upgrading OS documentation](../postgresql/upgrading_os.md).
## Repair corrupted database indexes
{{< history >}}
- [Introduced](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/196677) in GitLab 18.2.
{{< /history >}}
The index repair tool fixes corrupted or missing database indexes that can cause data integrity issues.
It addresses specific problematic indexes that are affected by
collation mismatches or other corruption issues. The tool:
- Deduplicates data when unique indexes are corrupted.
- Updates references to maintain data integrity.
- Rebuilds or creates indexes with correct configuration.
Before repairing indexes, run the tool in dry-run mode to analyze potential changes:
```shell
sudo DRY_RUN=true gitlab-rake gitlab:db:repair_index
```
The following example output shows the changes:
```shell
INFO -- : DRY RUN: Analysis only, no changes will be made.
INFO -- : Running Index repair on database main...
INFO -- : Processing index 'index_merge_request_diff_commit_users_on_name_and_email'...
INFO -- : Index is unique. Checking for duplicate data...
INFO -- : No duplicates found in 'merge_request_diff_commit_users' for columns: name,email.
INFO -- : Index exists. Reindexing...
INFO -- : Index reindexed successfully.
```
To repair all known problematic indexes in all databases:
```shell
sudo gitlab-rake gitlab:db:repair_index
```
The command processes each database and repairs the indexes. For example:
```shell
INFO -- : Running Index repair on database main...
INFO -- : Processing index 'index_merge_request_diff_commit_users_on_name_and_email'...
INFO -- : Index is unique. Checking for duplicate data...
INFO -- : No duplicates found in 'merge_request_diff_commit_users' for columns: name,email.
INFO -- : Index does not exist. Creating new index...
INFO -- : Index created successfully.
INFO -- : Index repair completed for database main.
```
To repair indexes in a specific database:
```shell
# Repair indexes in main database
sudo gitlab-rake gitlab:db:repair_index:main
# Repair indexes in CI database
sudo gitlab-rake gitlab:db:repair_index:ci
```
## Troubleshooting
### Advisory lock connection information

View File

@ -58,7 +58,7 @@ specifically the [Before you start](_index.md#before-you-start) and [Deciding wh
The sizing depends on selected Load Balancer and additional factors such as Network Bandwidth. Refer to [Load Balancers](_index.md#load-balancers) for more information.
5. Should be run on reputable Cloud Provider or Self Managed solutions. See [Configure the object storage](#configure-the-object-storage) for more information.
6. Gitaly Cluster provides the benefits of fault tolerance, but comes with additional complexity of setup and management.
Review the existing [technical limitations and considerations before deploying Gitaly Cluster](../gitaly/praefect/_index.md#before-deploying-gitaly-cluster). If you want sharded Gitaly, use the same specs listed in the previous table for `Gitaly`.
Review the existing [technical limitations and considerations before deploying Gitaly Cluster](../gitaly/praefect/_index.md#before-deploying-gitaly-cluster-praefect). If you want sharded Gitaly, use the same specs listed in the previous table for `Gitaly`.
7. Gitaly specifications are based on high percentiles of both usage patterns and repository sizes in good health.
However, if you have [large monorepos](_index.md#large-monorepos) (larger than several gigabytes) or [additional workloads](_index.md#additional-workloads) these can significantly impact Git and Gitaly performance and further adjustments will likely be required.
8. Can be placed in Auto Scaling Groups (ASGs) as the component doesn't store any [stateful data](_index.md#autoscaling-of-stateful-nodes).
@ -1213,14 +1213,14 @@ If you believe this applies to you, contact us for additional guidance as requir
{{< /alert >}}
Gitaly Cluster provides the benefits of fault tolerance, but comes with additional complexity of setup and management.
Review the existing [technical limitations and considerations before deploying Gitaly Cluster](../gitaly/praefect/_index.md#before-deploying-gitaly-cluster).
Review the existing [technical limitations and considerations before deploying Gitaly Cluster](../gitaly/praefect/_index.md#before-deploying-gitaly-cluster-praefect).
For guidance on:
- Implementing sharded Gitaly instead, follow the [separate Gitaly documentation](../gitaly/configure_gitaly.md)
instead of this section. Use the same Gitaly specs.
- Migrating existing repositories that aren't managed by Gitaly Cluster, see
[migrate to Gitaly Cluster](../gitaly/praefect/_index.md#migrate-to-gitaly-cluster).
[migrate to Gitaly Cluster](../gitaly/praefect/_index.md#migrate-to-gitaly-cluster-praefect).
The recommended cluster setup includes the following components:
@ -2366,7 +2366,7 @@ services where applicable):
Also, the sizing depends on selected Load Balancer and additional factors such as Network Bandwidth. Refer to [Load Balancers](_index.md#load-balancers) for more information.
5. Should be run on reputable Cloud Provider or Self Managed solutions. See [Configure the object storage](#configure-the-object-storage) for more information.
6. Gitaly Cluster provides the benefits of fault tolerance, but comes with additional complexity of setup and management.
Review the existing [technical limitations and considerations before deploying Gitaly Cluster](../gitaly/praefect/_index.md#before-deploying-gitaly-cluster). If you want sharded Gitaly, use the same specs listed in the previous table for `Gitaly`.
Review the existing [technical limitations and considerations before deploying Gitaly Cluster](../gitaly/praefect/_index.md#before-deploying-gitaly-cluster-praefect). If you want sharded Gitaly, use the same specs listed in the previous table for `Gitaly`.
7. Gitaly specifications are based on high percentiles of both usage patterns and repository sizes in good health.
However, if you have [large monorepos](_index.md#large-monorepos) (larger than several gigabytes) or [additional workloads](_index.md#additional-workloads) these can significantly impact Git and Gitaly performance and further adjustments will likely be required.
<!-- markdownlint-enable MD029 -->

View File

@ -58,7 +58,7 @@ specifically the [Before you start](_index.md#before-you-start) and [Deciding wh
Also, the sizing depends on selected Load Balancer and additional factors such as Network Bandwidth. Refer to [Load Balancers](_index.md#load-balancers) for more information.
5. Should be run on reputable Cloud Provider or Self Managed solutions. See [Configure the object storage](#configure-the-object-storage) for more information.
6. Gitaly Cluster provides the benefits of fault tolerance, but comes with additional complexity of setup and management.
Review the existing [technical limitations and considerations before deploying Gitaly Cluster](../gitaly/praefect/_index.md#before-deploying-gitaly-cluster). If you want sharded Gitaly, use the same specs listed in the previous table for `Gitaly`.
Review the existing [technical limitations and considerations before deploying Gitaly Cluster](../gitaly/praefect/_index.md#before-deploying-gitaly-cluster-praefect). If you want sharded Gitaly, use the same specs listed in the previous table for `Gitaly`.
7. Gitaly specifications are based on high percentiles of both usage patterns and repository sizes in good health.
However, if you have [large monorepos](_index.md#large-monorepos) (larger than several gigabytes) or [additional workloads](_index.md#additional-workloads) these can significantly impact Git and Gitaly performance and further adjustments will likely be required.
8. Can be placed in Auto Scaling Groups (ASGs) as the component doesn't store any [stateful data](_index.md#autoscaling-of-stateful-nodes).
@ -1221,14 +1221,14 @@ If you believe this applies to you, contact us for additional guidance as requir
{{< /alert >}}
Gitaly Cluster provides the benefits of fault tolerance, but comes with additional complexity of setup and management.
Review the existing [technical limitations and considerations before deploying Gitaly Cluster](../gitaly/praefect/_index.md#before-deploying-gitaly-cluster).
Review the existing [technical limitations and considerations before deploying Gitaly Cluster](../gitaly/praefect/_index.md#before-deploying-gitaly-cluster-praefect).
For guidance on:
- Implementing sharded Gitaly instead, follow the [separate Gitaly documentation](../gitaly/configure_gitaly.md)
instead of this section. Use the same Gitaly specs.
- Migrating existing repositories that aren't managed by Gitaly Cluster, see
[migrate to Gitaly Cluster](../gitaly/praefect/_index.md#migrate-to-gitaly-cluster).
[migrate to Gitaly Cluster](../gitaly/praefect/_index.md#migrate-to-gitaly-cluster-praefect).
The recommended cluster setup includes the following components:
@ -2376,7 +2376,7 @@ services where applicable):
4. Can be optionally run on reputable third-party load balancing services (LB PaaS). See [Recommended cloud providers and services](_index.md#recommended-cloud-providers-and-services) for more information.
5. Should be run on reputable Cloud Provider or Self Managed solutions. See [Configure the object storage](#configure-the-object-storage) for more information.
6. Gitaly Cluster provides the benefits of fault tolerance, but comes with additional complexity of setup and management.
Review the existing [technical limitations and considerations before deploying Gitaly Cluster](../gitaly/praefect/_index.md#before-deploying-gitaly-cluster). If you want sharded Gitaly, use the same specs listed in the previous table for `Gitaly`.
Review the existing [technical limitations and considerations before deploying Gitaly Cluster](../gitaly/praefect/_index.md#before-deploying-gitaly-cluster-praefect). If you want sharded Gitaly, use the same specs listed in the previous table for `Gitaly`.
7. Gitaly specifications are based on high percentiles of both usage patterns and repository sizes in good health.
However, if you have [large monorepos](_index.md#large-monorepos) (larger than several gigabytes) or [additional workloads](_index.md#additional-workloads) these can significantly impact Git and Gitaly performance and further adjustments will likely be required.
<!-- markdownlint-enable MD029 -->

View File

@ -53,7 +53,7 @@ For a full list of reference architectures, see
Sizing depends on selected Load Balancer and additional factors such as Network Bandwidth. Refer to [Load Balancers](_index.md#load-balancers) for more information.
5. Should be run on reputable Cloud Provider or Self Managed solutions. See [Configure the object storage](#configure-the-object-storage) for more information.
6. Gitaly Cluster provides the benefits of fault tolerance, but comes with additional complexity of setup and management.
Review the existing [technical limitations and considerations before deploying Gitaly Cluster](../gitaly/praefect/_index.md#before-deploying-gitaly-cluster). If you want sharded Gitaly, use the same specs listed in the previous table for `Gitaly`.
Review the existing [technical limitations and considerations before deploying Gitaly Cluster](../gitaly/praefect/_index.md#before-deploying-gitaly-cluster-praefect). If you want sharded Gitaly, use the same specs listed in the previous table for `Gitaly`.
7. Gitaly specifications are based on high percentiles of both usage patterns and repository sizes in good health.
However, if you have [large monorepos](_index.md#large-monorepos) (larger than several gigabytes) or [additional workloads](_index.md#additional-workloads) these can significantly impact Git and Gitaly performance and further adjustments will likely be required.
8. Can be placed in Auto Scaling Groups (ASGs) as the component doesn't store any [stateful data](_index.md#autoscaling-of-stateful-nodes).
@ -1049,14 +1049,14 @@ If you believe this applies to you, contact us for additional guidance as requir
{{< /alert >}}
Gitaly Cluster provides the benefits of fault tolerance, but comes with additional complexity of setup and management.
Review the existing [technical limitations and considerations before deploying Gitaly Cluster](../gitaly/praefect/_index.md#before-deploying-gitaly-cluster).
Review the existing [technical limitations and considerations before deploying Gitaly Cluster](../gitaly/praefect/_index.md#before-deploying-gitaly-cluster-praefect).
For guidance on:
- Implementing sharded Gitaly instead, follow the [separate Gitaly documentation](../gitaly/configure_gitaly.md)
instead of this section. Use the same Gitaly specs.
- Migrating existing repositories that aren't managed by Gitaly Cluster, see
[migrate to Gitaly Cluster](../gitaly/praefect/_index.md#migrate-to-gitaly-cluster).
[migrate to Gitaly Cluster](../gitaly/praefect/_index.md#migrate-to-gitaly-cluster-praefect).
The recommended cluster setup includes the following components:
@ -2266,7 +2266,7 @@ services where applicable):
Sizing depends on selected Load Balancer and additional factors such as Network Bandwidth. Refer to [Load Balancers](_index.md#load-balancers) for more information.
5. Should be run on reputable Cloud Provider or Self Managed solutions. See [Configure the object storage](#configure-the-object-storage) for more information.
6. Gitaly Cluster provides the benefits of fault tolerance, but comes with additional complexity of setup and management.
Review the existing [technical limitations and considerations before deploying Gitaly Cluster](../gitaly/praefect/_index.md#before-deploying-gitaly-cluster). If you want sharded Gitaly, use the same specs listed in the previous table for `Gitaly`.
Review the existing [technical limitations and considerations before deploying Gitaly Cluster](../gitaly/praefect/_index.md#before-deploying-gitaly-cluster-praefect). If you want sharded Gitaly, use the same specs listed in the previous table for `Gitaly`.
7. Gitaly specifications are based on high percentiles of both usage patterns and repository sizes in good health.
However, if you have [large monorepos](_index.md#large-monorepos) (larger than several gigabytes) or [additional workloads](_index.md#additional-workloads) these can significantly impact Git and Gitaly performance and further adjustments will likely be required.
<!-- markdownlint-enable MD029 -->

View File

@ -57,7 +57,7 @@ specifically the [Before you start](_index.md#before-you-start) and [Deciding wh
4. Can be optionally run on reputable third-party load balancing services (LB PaaS). See [Recommended cloud providers and services](_index.md#recommended-cloud-providers-and-services) for more information.
5. Should be run on reputable Cloud Provider or Self Managed solutions. See [Configure the object storage](#configure-the-object-storage) for more information.
6. Gitaly Cluster provides the benefits of fault tolerance, but comes with additional complexity of setup and management.
Review the existing [technical limitations and considerations before deploying Gitaly Cluster](../gitaly/praefect/_index.md#before-deploying-gitaly-cluster). If you want sharded Gitaly, use the same specs listed in the previous table for `Gitaly`.
Review the existing [technical limitations and considerations before deploying Gitaly Cluster](../gitaly/praefect/_index.md#before-deploying-gitaly-cluster-praefect). If you want sharded Gitaly, use the same specs listed in the previous table for `Gitaly`.
7. Gitaly specifications are based on high percentiles of both usage patterns and repository sizes in good health.
However, if you have [large monorepos](_index.md#large-monorepos) (larger than several gigabytes) or [additional workloads](_index.md#additional-workloads) these can significantly impact Git and Gitaly performance and further adjustments will likely be required.
8. Can be placed in Auto Scaling Groups (ASGs) as the component doesn't store any [stateful data](_index.md#autoscaling-of-stateful-nodes).
@ -1228,14 +1228,14 @@ If you believe this applies to you, contact us for additional guidance as requir
{{< /alert >}}
Gitaly Cluster provides the benefits of fault tolerance, but comes with additional complexity of setup and management.
Review the existing [technical limitations and considerations before deploying Gitaly Cluster](../gitaly/praefect/_index.md#before-deploying-gitaly-cluster).
Review the existing [technical limitations and considerations before deploying Gitaly Cluster](../gitaly/praefect/_index.md#before-deploying-gitaly-cluster-praefect).
For guidance on:
- Implementing sharded Gitaly instead, follow the [separate Gitaly documentation](../gitaly/configure_gitaly.md)
instead of this section. Use the same Gitaly specs.
- Migrating existing repositories that aren't managed by Gitaly Cluster, see
[migrate to Gitaly Cluster](../gitaly/praefect/_index.md#migrate-to-gitaly-cluster).
[migrate to Gitaly Cluster](../gitaly/praefect/_index.md#migrate-to-gitaly-cluster-praefect).
The recommended cluster setup includes the following components:
@ -2389,7 +2389,7 @@ services where applicable):
4. Can be optionally run on reputable third-party load balancing services (LB PaaS). See [Recommended cloud providers and services](_index.md#recommended-cloud-providers-and-services) for more information.
5. Should be run on reputable Cloud Provider or Self Managed solutions. See [Configure the object storage](#configure-the-object-storage) for more information.
6. Gitaly Cluster provides the benefits of fault tolerance, but comes with additional complexity of setup and management.
Review the existing [technical limitations and considerations before deploying Gitaly Cluster](../gitaly/praefect/_index.md#before-deploying-gitaly-cluster). If you want sharded Gitaly, use the same specs listed in the previous table for `Gitaly`.
Review the existing [technical limitations and considerations before deploying Gitaly Cluster](../gitaly/praefect/_index.md#before-deploying-gitaly-cluster-praefect). If you want sharded Gitaly, use the same specs listed in the previous table for `Gitaly`.
7. Gitaly specifications are based on high percentiles of both usage patterns and repository sizes in good health.
However, if you have [large monorepos](_index.md#large-monorepos) (larger than several gigabytes) or [additional workloads](_index.md#additional-workloads) these can significantly impact Git and Gitaly performance and further adjustments will likely be required.
<!-- markdownlint-enable MD029 -->

View File

@ -56,7 +56,7 @@ specifically the [Before you start](_index.md#before-you-start) and [Deciding wh
Also, the sizing depends on selected Load Balancer and additional factors such as Network Bandwidth. Refer to [Load Balancers](_index.md#load-balancers) for more information.
5. Should be run on reputable Cloud Provider or Self Managed solutions. See [Configure the object storage](#configure-the-object-storage) for more information.
6. Gitaly Cluster provides the benefits of fault tolerance, but comes with additional complexity of setup and management.
Review the existing [technical limitations and considerations before deploying Gitaly Cluster](../gitaly/praefect/_index.md#before-deploying-gitaly-cluster). If you want sharded Gitaly, use the same specs listed in the previous table for `Gitaly`.
Review the existing [technical limitations and considerations before deploying Gitaly Cluster](../gitaly/praefect/_index.md#before-deploying-gitaly-cluster-praefect). If you want sharded Gitaly, use the same specs listed in the previous table for `Gitaly`.
7. Gitaly specifications are based on high percentiles of both usage patterns and repository sizes in good health.
However, if you have [large monorepos](_index.md#large-monorepos) (larger than several gigabytes) or [additional workloads](_index.md#additional-workloads) these can significantly impact Git and Gitaly performance and further adjustments will likely be required.
8. Can be placed in Auto Scaling Groups (ASGs) as the component doesn't store any [stateful data](_index.md#autoscaling-of-stateful-nodes).
@ -1052,14 +1052,14 @@ If you believe this applies to you, contact us for additional guidance as requir
{{< /alert >}}
Gitaly Cluster provides the benefits of fault tolerance, but comes with additional complexity of setup and management.
Review the existing [technical limitations and considerations before deploying Gitaly Cluster](../gitaly/praefect/_index.md#before-deploying-gitaly-cluster).
Review the existing [technical limitations and considerations before deploying Gitaly Cluster](../gitaly/praefect/_index.md#before-deploying-gitaly-cluster-praefect).
For guidance on:
- Implementing sharded Gitaly instead, follow the [separate Gitaly documentation](../gitaly/configure_gitaly.md)
instead of this section. Use the same Gitaly specs.
- Migrating existing repositories that aren't managed by Gitaly Cluster, see
[migrate to Gitaly Cluster](../gitaly/praefect/_index.md#migrate-to-gitaly-cluster).
[migrate to Gitaly Cluster](../gitaly/praefect/_index.md#migrate-to-gitaly-cluster-praefect).
The recommended cluster setup includes the following components:
@ -2240,7 +2240,7 @@ services where applicable):
Also, the sizing depends on selected Load Balancer and additional factors such as Network Bandwidth. Refer to [Load Balancers](_index.md#load-balancers) for more information.
5. Should be run on reputable Cloud Provider or Self Managed solutions. See [Configure the object storage](#configure-the-object-storage) for more information.
6. Gitaly Cluster provides the benefits of fault tolerance, but comes with additional complexity of setup and management.
Review the existing [technical limitations and considerations before deploying Gitaly Cluster](../gitaly/praefect/_index.md#before-deploying-gitaly-cluster). If you want sharded Gitaly, use the same specs listed in the previous table for `Gitaly`.
Review the existing [technical limitations and considerations before deploying Gitaly Cluster](../gitaly/praefect/_index.md#before-deploying-gitaly-cluster-praefect). If you want sharded Gitaly, use the same specs listed in the previous table for `Gitaly`.
7. Gitaly specifications are based on high percentiles of both usage patterns and repository sizes in good health.
However, if you have [large monorepos](_index.md#large-monorepos) (larger than several gigabytes) or [additional workloads](_index.md#additional-workloads) these can significantly impact Git and Gitaly performance and further adjustments will likely be required.
<!-- markdownlint-enable MD029 -->

View File

@ -739,7 +739,7 @@ For more information, see the following documentation:
- [Redis to multi-node Redis w/ Redis Sentinel](../redis/replication_and_failover.md#switching-from-an-existing-single-machine-installation)
- [Postgres to multi-node Postgres w/ Consul + PgBouncer](../postgresql/moving.md)
- [Gitaly to Gitaly Cluster w/ Praefect](../gitaly/praefect/_index.md#migrate-to-gitaly-cluster)
- [Gitaly to Gitaly Cluster w/ Praefect](../gitaly/praefect/_index.md#migrate-to-gitaly-cluster-praefect)
### Upgrades

View File

@ -325,4 +325,4 @@ See [the tracking issue](https://gitlab.com/gitlab-org/gitlab/-/issues/36175) fo
## Move repositories
To move a repository to a different repository storage (for example, from `default` to `storage2`), use the
same process as [migrating to Gitaly Cluster](gitaly/praefect/_index.md#migrate-to-gitaly-cluster).
same process as [migrating to Gitaly Cluster](gitaly/praefect/_index.md#migrate-to-gitaly-cluster-praefect).

View File

@ -2379,6 +2379,7 @@ Input type: `AiDuoWorkflowCreateInput`
| <a id="mutationaiduoworkflowcreateclientmutationid"></a>`clientMutationId` | [`String`](#string) | A unique identifier for the client performing the mutation. |
| <a id="mutationaiduoworkflowcreateenvironment"></a>`environment` | [`WorkflowEnvironment`](#workflowenvironment) | Environment for the workflow. |
| <a id="mutationaiduoworkflowcreategoal"></a>`goal` | [`String`](#string) | Goal of the workflow. |
| <a id="mutationaiduoworkflowcreatenamespaceid"></a>`namespaceId` | [`NamespaceID`](#namespaceid) | Global ID of the namespace the user is acting on. |
| <a id="mutationaiduoworkflowcreatepreapprovedagentprivileges"></a>`preApprovedAgentPrivileges` | [`[Int!]`](#int) | Actions the agent can perform without asking for approval. |
| <a id="mutationaiduoworkflowcreateprojectid"></a>`projectId` | [`ProjectID`](#projectid) | Global ID of the project the user is acting on. |
| <a id="mutationaiduoworkflowcreateworkflowdefinition"></a>`workflowDefinition` | [`String`](#string) | Workflow type based on its capability. |
@ -8119,6 +8120,32 @@ Input type: `LifecycleUpdateInput`
| <a id="mutationlifecycleupdateerrors"></a>`errors` | [`[String!]!`](#string) | Errors encountered during the mutation. |
| <a id="mutationlifecycleupdatelifecycle"></a>`lifecycle` | [`WorkItemLifecycle`](#workitemlifecycle) | Lifecycle updated. |
### `Mutation.linkProjectComplianceViolationIssue`
{{< details >}}
**Introduced** in GitLab 18.3.
**Status**: Experiment.
{{< /details >}}
Input type: `LinkProjectComplianceViolationIssueInput`
#### Arguments
| Name | Type | Description |
| ---- | ---- | ----------- |
| <a id="mutationlinkprojectcomplianceviolationissueclientmutationid"></a>`clientMutationId` | [`String`](#string) | A unique identifier for the client performing the mutation. |
| <a id="mutationlinkprojectcomplianceviolationissueissueiid"></a>`issueIid` | [`String!`](#string) | IID of the issue to be linked. |
| <a id="mutationlinkprojectcomplianceviolationissueprojectpath"></a>`projectPath` | [`ID!`](#id) | Full path of the project the issue belongs to. |
| <a id="mutationlinkprojectcomplianceviolationissueviolationid"></a>`violationId` | [`ComplianceManagementProjectsComplianceViolationID!`](#compliancemanagementprojectscomplianceviolationid) | Global ID of the project compliance violation. |
#### Fields
| Name | Type | Description |
| ---- | ---- | ----------- |
| <a id="mutationlinkprojectcomplianceviolationissueclientmutationid"></a>`clientMutationId` | [`String`](#string) | A unique identifier for the client performing the mutation. |
| <a id="mutationlinkprojectcomplianceviolationissueerrors"></a>`errors` | [`[String!]!`](#string) | Errors encountered during the mutation. |
| <a id="mutationlinkprojectcomplianceviolationissueviolation"></a>`violation` | [`ProjectComplianceViolation`](#projectcomplianceviolation) | Updated project compliance violation. |
### `Mutation.markAsSpamSnippet`
Input type: `MarkAsSpamSnippetInput`
@ -27716,9 +27743,11 @@ GitLab Duo Agent Platform session.
| <a id="duoworkflowid"></a>`id` | [`ID!`](#id) | ID of the session. |
| <a id="duoworkflowlastexecutorlogsurl"></a>`lastExecutorLogsUrl` | [`String`](#string) | URL to the latest executor logs of the workflow. |
| <a id="duoworkflowmcpenabled"></a>`mcpEnabled` | [`Boolean`](#boolean) | Has MCP been enabled for the namespace. |
| <a id="duoworkflownamespace"></a>`namespace` | [`Namespace`](#namespace) | Namespace that the session is in. |
| <a id="duoworkflownamespaceid"></a>`namespaceId` | [`TypesNamespaceID`](#typesnamespaceid) | ID of the namespace. |
| <a id="duoworkflowpreapprovedagentprivilegesnames"></a>`preApprovedAgentPrivilegesNames` | [`[String!]`](#string) | Privileges pre-approved for the agent during execution. |
| <a id="duoworkflowproject"></a>`project` | [`Project!`](#project) | Project that the session is in. |
| <a id="duoworkflowprojectid"></a>`projectId` | [`ProjectID!`](#projectid) | ID of the project. |
| <a id="duoworkflowproject"></a>`project` | [`Project`](#project) | Project that the session is in. |
| <a id="duoworkflowprojectid"></a>`projectId` | [`ProjectID`](#projectid) | ID of the project. |
| <a id="duoworkflowstalled"></a>`stalled` | [`Boolean`](#boolean) | Workflow got created but has no checkpoints. |
| <a id="duoworkflowstatus"></a>`status` | [`DuoWorkflowStatus`](#duoworkflowstatus) | Status of the session. |
| <a id="duoworkflowstatusname"></a>`statusName` | [`String`](#string) | Status name of the session. |
@ -49953,6 +49982,12 @@ A `TodoableID` is a global ID. It is encoded as a string.
An example `TodoableID` is: `"gid://gitlab/Todoable/1"`.
### `TypesNamespaceID`
A `TypesNamespaceID` is a global ID. It is encoded as a string.
An example `TypesNamespaceID` is: `"gid://gitlab/Types::Namespace/1"`.
### `UntrustedRegexp`
A regexp containing patterns sourced from user input.

View File

@ -14,7 +14,7 @@ title: Group repository storage moves API
{{< /details >}}
Group wiki repositories can be moved between storages. This API can help you, for example,
[migrate to Gitaly Cluster](../administration/gitaly/praefect/_index.md#migrate-to-gitaly-cluster)
[migrate to Gitaly Cluster](../administration/gitaly/praefect/_index.md#migrate-to-gitaly-cluster-praefect)
or migrate a [group wiki](../user/project/wiki/group.md). This API does not manage
project repositories in a group. To schedule project moves, use the
[project repository storage moves API](project_repository_storage_moves.md).

View File

@ -13,7 +13,7 @@ title: Project repository storage moves API
{{< /details >}}
Project repositories including wiki and design repositories can be moved between storages. This API can help you when
[migrating to Gitaly Cluster](../administration/gitaly/praefect/_index.md#migrate-to-gitaly-cluster), for example.
[migrating to Gitaly Cluster](../administration/gitaly/praefect/_index.md#migrate-to-gitaly-cluster-praefect), for example.
As project repository storage moves are processed, they transition through different states. Values
of `state` are:

View File

@ -13,7 +13,7 @@ title: Snippet repository storage moves API
{{< /details >}}
Snippet repositories can be moved between storages. This API can help you when
[migrating to Gitaly Cluster](../administration/gitaly/praefect/_index.md#migrate-to-gitaly-cluster), for
[migrating to Gitaly Cluster](../administration/gitaly/praefect/_index.md#migrate-to-gitaly-cluster-praefect), for
example.
As snippet repository storage moves are processed, they transition through different states. Values

View File

@ -1209,6 +1209,24 @@ configure the adherence that you require.
In GitLab 18.6, we'll replace the compliance standards adherence dashboard with the compliance status dashboard for more accurate reporting on requirements and controls.
</div>
<div class="deprecation " data-milestone="18.6">
### User setting to disable exact code search
<div class="deprecation-notes">
- Announced in GitLab <span class="milestone">18.3</span>
- Removal in GitLab <span class="milestone">18.6</span>
- To discuss this change or learn more, see the [deprecation issue](https://gitlab.com/gitlab-org/gitlab/-/issues/554933).
</div>
The user setting to disable exact code search is now deprecated. On GitLab.com, you can no longer disable exact code search in profile preferences.
Exact code search provides a better user experience and is compatible with existing search APIs. This user setting is planned for removal in GitLab 18.6 to ensure all users benefit from improved search functionality.
</div>
</div>

View File

@ -15,6 +15,7 @@ If you're new to GitLab, get started learning about how GitLab works.
- [Get started planning work](get_started_planning_work.md)
- [Get started managing code](get_started_managing_code.md)
- [Get started with GitLab CI/CD](../../ci/_index.md)
- [Get started with GitLab Runner](get_started_runner.md)
- [Get started securing your application](../application_security/get-started-security.md)
- [Get started deploying and releasing your application](get_started_deploy_release.md)
- [Get started managing your infrastructure](get_started_managing_infrastructure.md)

View File

@ -2,6 +2,7 @@
stage: Verify
group: Runner
info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments
description: Learn how to set up and manage GitLab Runner.
title: Get started with GitLab Runner
---

View File

@ -354,7 +354,9 @@ To customize the time format:
1. Under **Time format**, select either the **System**, **12-hour**, or **24-hour** option.
1. Select **Save changes**.
## Disable exact code search
<!--- start_remove The following content will be removed on remove_date: '2026-02-20' -->
## Disable exact code search (deprecated)
{{< details >}}
@ -364,6 +366,13 @@ To customize the time format:
{{< /details >}}
{{< alert type="warning" >}}
This feature was [deprecated](https://gitlab.com/gitlab-org/gitlab/-/issues/554933) in GitLab 18.3
and is planned for removal in 18.6.
{{< /alert >}}
{{< history >}}
- [Introduced](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/105049) as a [beta](../../policy/development_stages_support.md#beta) in GitLab 15.9 [with flags](../../administration/feature_flags/_index.md) named `index_code_with_zoekt` and `search_code_with_zoekt`. Disabled by default.
@ -392,6 +401,8 @@ To disable [exact code search](../search/exact_code_search.md) in user preferenc
1. Clear the **Enable exact code search** checkbox.
1. Select **Save changes**.
<!--- end_remove -->
## User identities in CI job JSON web tokens
{{< history >}}

View File

@ -55,9 +55,6 @@ To use exact code search:
You can also use exact code search in a project or group.
In user preferences, you can [disable exact code search](../profile/preferences.md#disable-exact-code-search)
to use [advanced search](advanced_search.md) instead.
## Available scopes
Scopes describe the type of data you're searching.

View File

@ -4,11 +4,11 @@ module Gitlab
module SidekiqMiddleware
module ConcurrencyLimit
class WorkersMap
@data = {}
class << self
def set_limit_for(worker:, max_jobs:)
raise ArgumentError, 'max_jobs must be a Proc instance' if max_jobs && !max_jobs.is_a?(Proc)
@data ||= {}
@data[worker] = max_jobs
end
@ -17,11 +17,17 @@ module Gitlab
# - 0 value is returned for workers without concurrency limits
# - negative value is returned for paused workers
def limit_for(worker:)
return 0 unless data
return 0 if Feature.disabled?(:sidekiq_concurrency_limit_middleware, Feature.current_request, type: :ops)
worker_class = worker.is_a?(Class) ? worker : worker.class
data[worker_class]&.call.to_i
limit = data[worker_class]&.call.to_i
return limit unless limit == 0
if Feature.enabled?(:use_max_concurrency_limit_percentage_as_default_limit, Feature.current_request)
default_limit_from_max_percentage(worker_class)
else
limit
end
end
def over_the_limit?(worker:)
@ -48,6 +54,19 @@ module Gitlab
::Gitlab::SidekiqMiddleware::ConcurrencyLimit::ConcurrencyLimitService.concurrent_worker_count(worker_name)
end
# Default limit based on urgency and number of total Sidekiq threads in the fleet.
# e.g. for low urgency worker class, maximum 20% of all Sidekiq workers can run concurrently.
def default_limit_from_max_percentage(worker)
return 0 unless worker.ancestors.include?(WorkerAttributes)
max_replicas = ENV.fetch('SIDEKIQ_MAX_REPLICAS', 0).to_i
concurrency = ENV.fetch('SIDEKIQ_CONCURRENCY', 0).to_i
max_total_threads = max_replicas * concurrency
percentage = worker.get_max_concurrency_limit_percentage
(percentage * max_total_threads).ceil
end
attr_reader :data
end
end

View File

@ -58077,6 +58077,12 @@ msgstr ""
msgid "SecurityReports|No vulnerabilities to report"
msgstr ""
msgid "SecurityReports|Not available"
msgstr ""
msgid "SecurityReports|Not found"
msgstr ""
msgid "SecurityReports|Oops, something doesn't seem right."
msgstr ""
@ -58101,6 +58107,9 @@ msgstr ""
msgid "SecurityReports|Projects added"
msgstr ""
msgid "SecurityReports|Reachability"
msgstr ""
msgid "SecurityReports|Reachable:"
msgstr ""
@ -58268,6 +58277,9 @@ msgstr ""
msgid "SecurityReports|Warning parsing security reports"
msgstr ""
msgid "SecurityReports|Yes"
msgstr ""
msgid "SecurityReports|scanned resources"
msgstr ""

View File

@ -77,7 +77,6 @@ RSpec.describe Projects::CompareController, feature_category: :source_code_manag
let(:page) { nil }
before do
stub_feature_flags(rapid_diffs: false)
stub_feature_flags(rapid_diffs_on_compare_show: false)
end

View File

@ -141,7 +141,7 @@ RSpec.describe Projects::MergeRequests::CreationsController, feature_category: :
let(:params) { get_diff_params }
before do
stub_feature_flags(rapid_diffs: true, rapid_diffs_on_mr_creation: true, rapid_diffs_debug: true)
stub_feature_flags(rapid_diffs_on_mr_creation: true, rapid_diffs_debug: true)
end
include_examples 'renders rapid diffs'
@ -165,7 +165,7 @@ RSpec.describe Projects::MergeRequests::CreationsController, feature_category: :
let(:params) { get_diff_params }
before do
stub_feature_flags(rapid_diffs: false, rapid_diffs_on_mr_creation: false)
stub_feature_flags(rapid_diffs_on_mr_creation: false)
end
include_examples 'renders default new template'

View File

@ -11,7 +11,6 @@ RSpec.describe 'User views rapid diffs', :js, feature_category: :code_review_wor
let(:diffs) { merge_request.diffs }
before do
stub_feature_flags(rapid_diffs: true)
visit(diffs_project_merge_request_path(project, merge_request, rapid_diffs: true))
wait_for_requests

View File

@ -127,28 +127,6 @@ RSpec.describe 'Merge request > User selects branches for new MR', :js, feature_
it_behaves_like 'Rapid Diffs application'
end
context 'without rapid diffs' do
before do
stub_feature_flags(rapid_diffs: false)
end
it 'allows to change the diff view' do
visit project_new_merge_request_path(project, merge_request: { target_branch: 'master', source_branch: 'fix' })
click_link 'Changes'
expect(page).to have_css('a.btn.selected', text: 'Inline')
expect(page).not_to have_css('a.btn.selected', text: 'Side-by-side')
click_link 'Side-by-side'
within '.merge-request' do
expect(page).not_to have_css('a.btn.selected', text: 'Inline')
expect(page).to have_css('a.btn.selected', text: 'Side-by-side')
end
end
end
it 'does not allow non-existing branches' do
visit project_new_merge_request_path(project, merge_request: { target_branch: 'non-exist-target', source_branch: 'non-exist-source' })

View File

@ -95,7 +95,7 @@ RSpec.describe "Compare", :js, feature_category: :source_code_management do
context 'with legacy diffs' do
before do
stub_feature_flags(rapid_diffs: false, rapid_diffs_on_compare_show: false)
stub_feature_flags(rapid_diffs_on_compare_show: false)
end
it 'renders additions info when click unfold diff' do

View File

@ -50,7 +50,7 @@ RSpec.describe 'View on environment', :js, feature_category: :groups_and_project
context 'with legacy diffs' do
before do
stub_feature_flags(rapid_diffs: false, rapid_diffs_on_compare_show: false)
stub_feature_flags(rapid_diffs_on_compare_show: false)
end
context 'when visiting a comparison for the branch' do

View File

@ -176,10 +176,6 @@ RSpec.describe Gitlab::SidekiqMiddleware::ConcurrencyLimit::Server, feature_cate
end
context 'when limit is not defined' do
before do
::Gitlab::SidekiqMiddleware::ConcurrencyLimit::WorkersMap.remove_instance_variable(:@data)
end
it_behaves_like 'track execution'
end
end
@ -225,10 +221,6 @@ RSpec.describe Gitlab::SidekiqMiddleware::ConcurrencyLimit::Server, feature_cate
end
context 'when limit is not defined' do
before do
::Gitlab::SidekiqMiddleware::ConcurrencyLimit::WorkersMap.remove_instance_variable(:@data)
end
it_behaves_like 'track execution'
end
end

View File

@ -24,24 +24,133 @@ RSpec.describe Gitlab::SidekiqMiddleware::ConcurrencyLimit::WorkersMap, feature_
end
describe '.limit_for' do
let(:expected_limit) { 60 }
context 'with concurrency_limit attribute defined' do
let(:expected_limit) { 60 }
it 'accepts worker instance' do
expect(described_class.limit_for(worker: worker_class.new)).to eq(expected_limit)
it 'accepts worker instance and return defined limit' do
expect(described_class.limit_for(worker: worker_class.new)).to eq(expected_limit)
end
it 'accepts worker class and return defined limit' do
expect(described_class.limit_for(worker: worker_class)).to eq(expected_limit)
end
it 'returns 0 for unknown worker' do
expect(described_class.limit_for(worker: described_class)).to eq(0)
end
it 'returns 0 if the feature flag is disabled' do
stub_feature_flags(sidekiq_concurrency_limit_middleware: false)
expect(described_class.limit_for(worker: worker_class)).to eq(0)
end
end
it 'accepts worker class' do
expect(described_class.limit_for(worker: worker_class)).to eq(expected_limit)
context 'with concurrency_limit and max_concurrency_limit_percentage attributes defined' do
let(:expected_limit) { 60 }
before do
worker_class.class_eval do
max_concurrency_limit_percentage 0.5
end
end
it 'returns the concurrency_limit value' do
expect(described_class.limit_for(worker: worker_class)).to eq(expected_limit)
end
end
it 'returns 0 for unknown worker' do
expect(described_class.limit_for(worker: described_class)).to eq(0)
end
context 'for worker class without concurrency_limit attribute' do
using RSpec::Parameterized::TableSyntax
it 'returns 0 if the feature flag is disabled' do
stub_feature_flags(sidekiq_concurrency_limit_middleware: false)
let(:worker_class) do
Class.new do
def self.name
'Gitlab::Foo::Bar::DummyWorker'
end
expect(described_class.limit_for(worker: worker_class)).to eq(0)
include ApplicationWorker
end
end
where(:urgency, :sidekiq_max_replicas, :sidekiq_concurrency, :expected_concurrency_limit) do
:high | 10 | 10 | 35
:high | 0 | 10 | 0
:high | 10 | 0 | 0
:high | 0 | 0 | 0
:low | 10 | 10 | 25
:low | 0 | 10 | 0
:low | 10 | 0 | 0
:low | 0 | 0 | 0
:throttled | 10 | 10 | 15
:throttled | 0 | 10 | 0
:throttled | 10 | 0 | 0
:throttled | 0 | 0 | 0
end
with_them do
before do
worker_class.urgency urgency
stub_env("SIDEKIQ_MAX_REPLICAS", sidekiq_max_replicas)
stub_env("SIDEKIQ_CONCURRENCY", sidekiq_concurrency)
end
it 'returns expected limit' do
expect(described_class.limit_for(worker: worker_class)).to eq(expected_concurrency_limit)
end
end
context 'with max_concurrency_limit_percentage attribute' do
let(:worker_class) do
Class.new do
def self.name
'Gitlab::Foo::Bar::DummyWorker'
end
include ApplicationWorker
max_concurrency_limit_percentage 0.4
end
end
before do
stub_env("SIDEKIQ_MAX_REPLICAS", 10)
stub_env("SIDEKIQ_CONCURRENCY", 10)
end
it 'returns expected limit' do
expect(described_class.limit_for(worker: worker_class)).to eq(40)
end
end
context 'with only SIDEKIQ_CONCURRENCY environment variable defined' do
before do
stub_env("SIDEKIQ_CONCURRENCY", 10)
end
it 'returns 0' do
expect(described_class.limit_for(worker: worker_class)).to eq(0)
end
end
context 'with only SIDEKIQ_MAX_REPLICAS environment variable defined' do
before do
stub_env("SIDEKIQ_MAX_REPLICAS", 10)
end
it 'returns 0' do
expect(described_class.limit_for(worker: worker_class)).to eq(0)
end
end
context 'when use_max_concurrency_limit_percentage_as_default_limit FF is disabled' do
before do
stub_feature_flags(use_max_concurrency_limit_percentage_as_default_limit: false)
end
it 'returns 0' do
expect(described_class.limit_for(worker: worker_class)).to eq(0)
end
end
end
end

View File

@ -93,18 +93,6 @@ RSpec.describe 'Projects blob controller', feature_category: :code_review_workfl
expect(response).to have_gitlab_http_status(:not_found)
end
end
context 'when rapid_diffs FF is disabled' do
before do
stub_feature_flags(rapid_diffs: false)
end
it 'returns 404' do
do_get(since: 2, to: 6, offset: 10, closest_line_number: 1)
expect(response).to have_gitlab_http_status(:not_found)
end
end
end
describe 'POST preview' do

View File

@ -24,25 +24,6 @@ RSpec.describe Projects::CommitController, feature_category: :source_code_manage
sign_in(user)
end
context 'when the feature flag rapid_diffs is disabled' do
before do
stub_feature_flags(rapid_diffs: false)
end
it 'returns 404' do
send_request
expect(response).to have_gitlab_http_status(:not_found)
end
it 'uses show action when rapid_diffs query parameter doesnt exist' do
get project_commit_path(project, commit)
expect(response).to have_gitlab_http_status(:ok)
expect(response.body).to include('data-page="projects:commit:show"')
end
end
it 'returns 200' do
send_request

View File

@ -26,19 +26,6 @@ RSpec.describe 'Merge Request Creation', feature_category: :code_review_workflow
get namespace_project_new_merge_request_diffs_path(params.merge(extra_params))
end
context 'when the feature flag rapid_diffs is disabled' do
before do
stub_feature_flags(rapid_diffs: false)
end
it 'uses default action' do
get_diffs
expect(response).to have_gitlab_http_status(:ok)
expect(response.body).to include('data-page="projects:merge_requests:creations:new"')
end
end
context 'when rapid_diffs_disabled param is present' do
it 'uses default action' do
get_diffs(rapid_diffs_disabled: true)

View File

@ -113,18 +113,6 @@ RSpec.describe 'Merge Requests Diffs stream', feature_category: :code_review_wor
end
end
context 'when rapid_diffs FF is disabled' do
before do
stub_feature_flags(rapid_diffs: false)
end
it 'returns 404' do
go
expect(response).to have_gitlab_http_status(:not_found)
end
end
include_examples 'with diffs_blobs param'
end
end

View File

@ -255,25 +255,6 @@ RSpec.describe Projects::MergeRequestsController, feature_category: :source_code
end
describe '#rapid_diffs' do
context 'when the feature flag rapid_diffs is disabled' do
before do
stub_feature_flags(rapid_diffs: false)
end
it 'returns 404' do
get diffs_project_merge_request_path(project, merge_request, rapid_diffs: 'true')
expect(response).to have_gitlab_http_status(:not_found)
end
it 'uses diffs action when rapid_diffs query parameter doesnt exist' do
get diffs_project_merge_request_path(project, merge_request)
expect(response).to have_gitlab_http_status(:ok)
expect(response.body).to include('data-page="projects:merge_requests:diffs"')
end
end
it 'returns 200' do
get diffs_project_merge_request_path(project, merge_request, rapid_diffs: 'true')

View File

@ -7,8 +7,9 @@ RSpec.describe Projects::ParticipantsService, feature_category: :groups_and_proj
let_it_be(:user) { create(:user) }
let_it_be(:project) { create(:project, :public) }
let_it_be(:noteable) { create(:issue, project: project) }
let_it_be(:organization) { project.organization }
let_it_be(:org_user_detail) do
create(:organization_user_detail, organization: project.organization, username: 'spec_bot')
create(:organization_user_detail, organization: organization, username: 'spec_bot')
end
let_it_be(:other_org_user_detail) do
@ -254,6 +255,28 @@ RSpec.describe Projects::ParticipantsService, feature_category: :groups_and_proj
end
end
end
context 'when groups are in other organizations' do
let!(:other_organization) { create(:organization) }
let(:group_1) { create(:group, organization: organization) }
let(:group_2) { create(:group, organization: other_organization) }
before do
group_1.add_owner(user)
group_1.add_owner(create(:user))
group_2.add_owner(user)
end
it 'only includes groups in the projects organization' do
expect(group_items).to contain_exactly(
a_hash_including(name: group_1.full_name, count: 2)
)
expect(group_items).not_to include(
a_hash_including(name: group_2.full_name)
)
end
end
end
context 'when `disable_all_mention` FF is enabled' do

View File

@ -246,7 +246,6 @@
- ee/spec/services/ee/users/migrate_records_to_ghost_user_service_spec.rb
- ee/spec/services/epics/issue_promote_service_spec.rb
- ee/spec/services/epics/transfer_service_spec.rb
- ee/spec/services/epics/update_service_spec.rb
- ee/spec/services/namespaces/service_accounts/create_service_spec.rb
- ee/spec/services/projects/create_from_template_service_spec.rb
- ee/spec/services/projects/create_service_spec.rb

View File

@ -2027,7 +2027,6 @@
- './ee/spec/services/epics/reopen_service_spec.rb'
- './ee/spec/services/epics/transfer_service_spec.rb'
- './ee/spec/services/epics/update_dates_service_spec.rb'
- './ee/spec/services/epics/update_service_spec.rb'
- './ee/spec/services/external_status_checks/create_service_spec.rb'
- './ee/spec/services/external_status_checks/destroy_service_spec.rb'
- './ee/spec/services/external_status_checks/dispatch_service_spec.rb'

View File

@ -1,18 +1,6 @@
# frozen_string_literal: true
RSpec.shared_examples 'diff file endpoint' do
context 'when the rapid_diffs feature flag is disabled' do
before do
stub_feature_flags(rapid_diffs: false)
end
it 'returns a 404 status' do
send_request
expect(response).to have_gitlab_http_status(:not_found)
end
end
context 'when diff_file is not found' do
let(:old_path) { 'bad/path' }
let(:new_path) { 'bad/path' }

View File

@ -7,18 +7,6 @@ RSpec.shared_examples 'diff files metadata' do
expect(response).to have_gitlab_http_status(:success)
expect(json_response['diff_files']).to be_an Array
end
context 'when the rapid_diffs feature flag is disabled' do
before do
stub_feature_flags(rapid_diffs: false)
end
it 'returns a 404 status' do
send_request
expect(response).to have_gitlab_http_status(:not_found)
end
end
end
RSpec.shared_examples 'missing diff files metadata' do

View File

@ -15,18 +15,6 @@ RSpec.shared_examples 'diffs stats' do
expect(response).to have_gitlab_http_status(:success)
expect(json_response['diffs_stats']).to be_an Hash
end
context 'when the rapid_diffs feature flag is disabled' do
before do
stub_feature_flags(rapid_diffs: false)
end
it 'returns a 404 status' do
send_request
expect(response).to have_gitlab_http_status(:not_found)
end
end
end
RSpec.shared_examples 'overflow' do

View File

@ -44,16 +44,4 @@ RSpec.shared_examples 'diffs stream tests' do
expect(response.body).to include('something went wrong')
end
end
context 'when the rapid_diffs feature flag is disabled' do
before do
stub_feature_flags(rapid_diffs: false)
end
it 'returns a 404 status' do
go
expect(response).to have_gitlab_http_status(:not_found)
end
end
end

View File

@ -1,16 +1,12 @@
# frozen_string_literal: true
require 'tempfile'
require_relative '../../../../tooling/lib/tooling/find_tests'
require 'fast_spec_helper'
RSpec.describe Tooling::FindTests, feature_category: :tooling do
attr_accessor :predictive_tests_file
let(:instance) do
described_class.new(
changed_files,
predictive_tests_pathname,
mappings_file: mappings_file,
mappings_limit_percentage: 50
)
@ -18,40 +14,20 @@ RSpec.describe Tooling::FindTests, feature_category: :tooling do
let(:mock_test_file_finder) { instance_double(TestFileFinder::FileFinder) }
let(:new_matching_tests) { ["new_matching_spec.rb"] }
let(:predictive_tests_pathname) { predictive_tests_file.path }
let(:changed_files) { %w[changed_file1 changed_file2] }
let(:predictive_tests_content) { "previously_matching_spec.rb" }
let(:mappings_file) { nil }
around do |example|
self.predictive_tests_file = Tempfile.new('predictive_tests_file')
# See https://ruby-doc.org/stdlib-1.9.3/libdoc/tempfile/rdoc/
# Tempfile.html#class-Tempfile-label-Explicit+close
begin
example.run
ensure
predictive_tests_file.close
predictive_tests_file.unlink
end
end
before do
allow(mock_test_file_finder).to receive(:use)
allow(mock_test_file_finder).to receive(:test_files).and_return(new_matching_tests)
allow(TestFileFinder::FileFinder).to receive(:new).and_return(mock_test_file_finder)
# We write into the temp files initially, to later check how the code modified those files
File.write(predictive_tests_pathname, predictive_tests_content)
end
describe '#execute' do
subject { instance.execute }
it 'does not overwrite the output file' do
expect { subject }.to change { File.read(predictive_tests_pathname) }
.from(predictive_tests_content)
.to("#{predictive_tests_content} #{new_matching_tests.uniq.join(' ')}")
it 'returns matched files list' do
expect(subject).to match_array(new_matching_tests.uniq)
end
it 'loads the tests.yml file with a pattern matching mapping' do
@ -81,12 +57,8 @@ RSpec.describe Tooling::FindTests, feature_category: :tooling do
]
end
it 'writes uniquely matching specs to the output' do
subject
expect(File.read(predictive_tests_pathname).split(' ')).to match_array(
predictive_tests_content.split(' ') + new_matching_tests.uniq
)
it 'return only unique specs' do
expect(subject).to match_array(new_matching_tests.uniq)
end
end
end

View File

@ -6,16 +6,11 @@ require_relative '../../../../../tooling/lib/tooling/mappings/graphql_base_type_
RSpec.describe Tooling::Mappings::GraphqlBaseTypeMappings, feature_category: :tooling do
# We set temporary folders, and those readers give access to those folder paths
attr_accessor :foss_folder, :ee_folder, :jh_folder
attr_accessor :predictive_tests_file
let(:predictive_tests_pathname) { predictive_tests_file.path }
let(:instance) { described_class.new(changed_files, predictive_tests_pathname) }
let(:instance) { described_class.new(changed_files) }
let(:changed_files) { %w[changed_file1 changed_file2] }
let(:predictive_tests_initial_content) { "previously_matching_spec.rb" }
around do |example|
self.predictive_tests_file = Tempfile.new('predictive_tests_file')
Dir.mktmpdir('FOSS') do |foss_folder|
Dir.mktmpdir('EE') do |ee_folder|
Dir.mktmpdir('JH') do |jh_folder|
@ -23,14 +18,7 @@ RSpec.describe Tooling::Mappings::GraphqlBaseTypeMappings, feature_category: :to
self.ee_folder = ee_folder
self.jh_folder = jh_folder
# See https://ruby-doc.org/stdlib-1.9.3/libdoc/tempfile/rdoc/
# Tempfile.html#class-Tempfile-label-Explicit+close
begin
example.run
ensure
predictive_tests_file.close
predictive_tests_file.unlink
end
example.run
end
end
end
@ -42,9 +30,6 @@ RSpec.describe Tooling::Mappings::GraphqlBaseTypeMappings, feature_category: :to
'ee' => [foss_folder, ee_folder],
'jh' => [foss_folder, ee_folder, jh_folder]
})
# We write into the temp files initially, to later check how the code modified those files
File.write(predictive_tests_pathname, predictive_tests_initial_content)
end
describe '#execute' do
@ -53,8 +38,8 @@ RSpec.describe Tooling::Mappings::GraphqlBaseTypeMappings, feature_category: :to
context 'when no GraphQL files were changed' do
let(:changed_files) { [] }
it 'does not change the output file' do
expect { subject }.not_to change { File.read(predictive_tests_pathname) }
it 'returns empty file list' do
expect(subject).to be_empty
end
end
@ -73,7 +58,7 @@ RSpec.describe Tooling::Mappings::GraphqlBaseTypeMappings, feature_category: :to
end
it 'does not change the output file' do
expect { subject }.not_to change { File.read(predictive_tests_pathname) }
expect(subject).to be_empty
end
end
@ -89,9 +74,7 @@ RSpec.describe Tooling::Mappings::GraphqlBaseTypeMappings, feature_category: :to
end
it 'writes the correct specs in the output' do
expect { subject }.to change { File.read(predictive_tests_pathname) }
.from(predictive_tests_initial_content)
.to("#{predictive_tests_initial_content} spec/my_graphql_file_spec.rb")
expect(subject).to match_array(['spec/my_graphql_file_spec.rb'])
end
end
end

View File

@ -6,54 +6,36 @@ require_relative '../../../../../tooling/lib/tooling/mappings/js_to_system_specs
RSpec.describe Tooling::Mappings::JsToSystemSpecsMappings, feature_category: :tooling do
# We set temporary folders, and those readers give access to those folder paths
attr_accessor :js_base_folder, :system_specs_base_folder
attr_accessor :predictive_tests_file
let(:changed_files) { %w[changed_file1 changed_file2] }
let(:predictive_tests_pathname) { predictive_tests_file.path }
let(:predictive_tests_content) { "previously_matching_spec.rb" }
let(:instance) do
described_class.new(
changed_files,
predictive_tests_pathname,
system_specs_base_folder: system_specs_base_folder,
js_base_folder: js_base_folder
)
end
around do |example|
self.predictive_tests_file = Tempfile.new('predictive_tests_file')
Dir.mktmpdir do |tmp_js_base_folder|
Dir.mktmpdir do |tmp_system_specs_base_folder|
self.system_specs_base_folder = tmp_system_specs_base_folder
self.js_base_folder = tmp_js_base_folder
# See https://ruby-doc.org/stdlib-1.9.3/libdoc/tempfile/rdoc/
# Tempfile.html#class-Tempfile-label-Explicit+close
begin
example.run
ensure
predictive_tests_file.close
predictive_tests_file.unlink
end
example.run
end
end
end
before do
# We write into the temp files initially, to later check how the code modified those files
File.write(predictive_tests_pathname, predictive_tests_content)
end
describe '#execute' do
subject { instance.execute }
context 'when no JS files were changed' do
let(:changed_files) { [] }
it 'does not change the output file' do
expect { subject }.not_to change { File.read(predictive_tests_pathname) }
it 'returns empty array' do
expect(subject).to be_empty
end
end
@ -61,8 +43,8 @@ RSpec.describe Tooling::Mappings::JsToSystemSpecsMappings, feature_category: :to
let(:changed_files) { ["#{js_base_folder}/issues/secret_values.js"] }
context 'when the JS files are not present on disk' do
it 'does not change the output file' do
expect { subject }.not_to change { File.read(predictive_tests_pathname) }
it 'return empty array' do
expect(subject).to be_empty
end
end
@ -73,8 +55,8 @@ RSpec.describe Tooling::Mappings::JsToSystemSpecsMappings, feature_category: :to
end
context 'when no system specs match the JS keyword' do
it 'does not change the output file' do
expect { subject }.not_to change { File.read(predictive_tests_pathname) }
it 'returns empty array' do
expect(subject).to be_empty
end
end
@ -85,9 +67,7 @@ RSpec.describe Tooling::Mappings::JsToSystemSpecsMappings, feature_category: :to
end
it 'adds the new specs to the output file' do
expect { subject }.to change { File.read(predictive_tests_pathname) }
.from(predictive_tests_content)
.to("#{predictive_tests_content} #{system_specs_base_folder}/confidential_issues/issues_spec.rb")
expect(subject).to match_array(["#{system_specs_base_folder}/confidential_issues/issues_spec.rb"])
end
end
end
@ -140,7 +120,7 @@ RSpec.describe Tooling::Mappings::JsToSystemSpecsMappings, feature_category: :to
end
describe '#construct_js_keywords' do
subject { described_class.new(changed_files, predictive_tests_file).construct_js_keywords(js_files) }
subject { described_class.new(changed_files).construct_js_keywords(js_files) }
let(:js_files) do
%w[

View File

@ -6,46 +6,28 @@ require_relative '../../../../../tooling/lib/tooling/mappings/view_to_js_mapping
RSpec.describe Tooling::Mappings::ViewToJsMappings, feature_category: :tooling do
# We set temporary folders, and those readers give access to those folder paths
attr_accessor :view_base_folder, :js_base_folder
attr_accessor :predictive_tests_file
let(:changed_files) { %w[changed_file1 changed_file2] }
let(:predictive_tests_pathname) { predictive_tests_file.path }
let(:predictive_tests_content) { "previously_matching_spec.rb" }
let(:instance) do
described_class.new(
changed_files,
predictive_tests_pathname,
view_base_folder: view_base_folder,
js_base_folder: js_base_folder
)
end
around do |example|
self.predictive_tests_file = Tempfile.new('matching_tests')
Dir.mktmpdir do |tmp_js_base_folder|
Dir.mktmpdir do |tmp_views_base_folder|
self.js_base_folder = tmp_js_base_folder
self.view_base_folder = tmp_views_base_folder
# See https://ruby-doc.org/stdlib-1.9.3/libdoc/tempfile/rdoc/
# Tempfile.html#class-Tempfile-label-Explicit+close
begin
example.run
ensure
predictive_tests_file.close
predictive_tests_file.unlink
end
example.run
end
end
end
before do
# We write into the temp files initially, to later check how the code modified those files
File.write(predictive_tests_pathname, predictive_tests_content)
end
describe '#execute' do
let(:changed_files) { %W[#{view_base_folder}/index.html] }
@ -56,8 +38,8 @@ RSpec.describe Tooling::Mappings::ViewToJsMappings, feature_category: :tooling d
allow(instance).to receive(:filter_files).and_return([])
end
it 'does not change the output file' do
expect { subject }.not_to change { File.read(predictive_tests_pathname) }
it 'returns empty array' do
expect(subject).to be_empty
end
end
@ -74,8 +56,8 @@ RSpec.describe Tooling::Mappings::ViewToJsMappings, feature_category: :tooling d
FILE
end
it 'does not change the output file' do
expect { subject }.not_to change { File.read(predictive_tests_pathname) }
it 'returns empty array' do
expect(subject).to be_empty
end
end
@ -91,8 +73,8 @@ RSpec.describe Tooling::Mappings::ViewToJsMappings, feature_category: :tooling d
end
context 'when no matching JS files are found' do
it 'does not change the output file' do
expect { subject }.not_to change { File.read(predictive_tests_pathname) }
it 'returns empty array' do
expect(subject).to be_empty
end
end
@ -112,9 +94,7 @@ RSpec.describe Tooling::Mappings::ViewToJsMappings, feature_category: :tooling d
end
it 'adds the matching JS files to the output' do
expect { subject }.to change { File.read(predictive_tests_pathname) }
.from(predictive_tests_content)
.to("#{predictive_tests_content} #{js_base_folder}/index.js")
expect(subject).to match_array(["#{js_base_folder}/index.js"])
end
end
end
@ -158,9 +138,7 @@ RSpec.describe Tooling::Mappings::ViewToJsMappings, feature_category: :tooling d
end
it 'scans those partials for the HTML attribute value' do
expect { subject }.to change { File.read(predictive_tests_pathname) }
.from(predictive_tests_content)
.to("#{predictive_tests_content} #{js_base_folder}/index.js")
expect(subject).to match_array(["#{js_base_folder}/index.js"])
end
end
end

View File

@ -5,43 +5,29 @@ require 'fileutils'
require_relative '../../../../../tooling/lib/tooling/mappings/view_to_system_specs_mappings'
RSpec.describe Tooling::Mappings::ViewToSystemSpecsMappings, feature_category: :tooling do
attr_accessor :view_base_folder, :predictive_tests_file
attr_accessor :view_base_folder
let(:instance) do
described_class.new(changed_files, predictive_tests_pathname, view_base_folder: view_base_folder)
described_class.new(changed_files, view_base_folder: view_base_folder)
end
let(:changed_files_content) { %w[changed_file1 changed_file2] }
let(:predictive_tests_pathname) { predictive_tests_file.path }
let(:predictive_tests_initial_content) { "previously_added_spec.rb" }
around do |example|
self.predictive_tests_file = Tempfile.new('predictive_tests_file')
# See https://ruby-doc.org/stdlib-1.9.3/libdoc/tempfile/rdoc/
# Tempfile.html#class-Tempfile-label-Explicit+close
begin
Dir.mktmpdir do |tmp_views_base_folder|
self.view_base_folder = tmp_views_base_folder
example.run
end
ensure
predictive_tests_file.close
predictive_tests_file.unlink
Dir.mktmpdir do |tmp_views_base_folder|
self.view_base_folder = tmp_views_base_folder
example.run
end
end
before do
FileUtils.mkdir_p("#{view_base_folder}/app/views/dashboard")
# We write into the temp files initially, to check how the code modified those files
File.write(predictive_tests_pathname, predictive_tests_initial_content)
end
describe '#execute' do
subject { instance.execute }
let(:changed_files) { ["#{view_base_folder}/app/views/dashboard/my_view.html.haml"] }
let(:changed_files) { ["#{view_base_folder}/app/views/dashboard/my_view.html.haml"] }
before do
# We create all of the changed_files, so that they are part of the filtered files
@ -60,10 +46,8 @@ RSpec.describe Tooling::Mappings::ViewToSystemSpecsMappings, feature_category: :
allow(File).to receive(:exist?).with(expected_feature_spec).and_return(true)
end
it 'writes that feature spec to the output file' do
expect { subject }.to change { File.read(predictive_tests_pathname) }
.from(predictive_tests_initial_content)
.to("#{predictive_tests_initial_content} #{expected_feature_spec}")
it 'returns feature spec' do
expect(subject).to match_array([expected_feature_spec])
end
end
@ -83,10 +67,8 @@ RSpec.describe Tooling::Mappings::ViewToSystemSpecsMappings, feature_category: :
end
end
it 'writes all of the feature specs for the parent folder to the output file' do
expect { subject }.to change { File.read(predictive_tests_pathname) }
.from(predictive_tests_initial_content)
.to("#{predictive_tests_initial_content} #{expected_feature_specs.join(' ')}")
it 'returns all of the feature specs for the parent folder' do
expect(subject).to match_array(expected_feature_specs)
end
end
end

View File

@ -21,8 +21,16 @@ RSpec.describe Tooling::PredictiveTests::MetricsExporter, feature_category: :too
end
let(:event_tracker) { instance_double(Tooling::Events::TrackPipelineEvents, send_event: nil) }
let(:test_selector) { instance_double(Tooling::PredictiveTests::TestSelector, execute: nil) }
let(:logger) { instance_double(Logger, info: nil, error: nil) }
let(:logger) { Logger.new(log_output) }
let(:log_output) { StringIO.new } # useful for debugging to print out all log output
let(:test_selector_described) do
instance_double(Tooling::PredictiveTests::TestSelector, rspec_spec_list: matching_tests_described_class_specs)
end
let(:test_selector_coverage) do
instance_double(Tooling::PredictiveTests::TestSelector, rspec_spec_list: matching_tests_coverage_specs)
end
let(:event_name) { "glci_predictive_tests_metrics" }
let(:extra_properties) { { ci_job_id: "123", test_type: "backend" } }
@ -50,8 +58,8 @@ RSpec.describe Tooling::PredictiveTests::MetricsExporter, feature_category: :too
end
let(:changed_files) { mappings.values.pluck(:model) }
let(:matching_tests_described_class_content) { mappings.dig(:user, :spec) }
let(:matching_tests_coverage_content) { mappings.values.pluck(:spec).join(" ") }
let(:matching_tests_described_class_specs) { [mappings.dig(:user, :spec)] }
let(:matching_tests_coverage_specs) { mappings.values.pluck(:spec) }
let(:failed_tests_content) { "#{mappings.dig(:user, :spec)}\n#{mappings.dig(:todo, :spec)}" }
let(:described_class_mapping_content) do
@ -101,40 +109,30 @@ RSpec.describe Tooling::PredictiveTests::MetricsExporter, feature_category: :too
# create files used as input for exporting selected test metrics
File.write(failed_tests_file, failed_tests_content)
File.write(matching_tests_described_class_file, matching_tests_described_class_content)
File.write(matching_tests_coverage_file, matching_tests_coverage_content)
File.write(coverage_mapping_file, coverage_mapping_content)
File.write(described_class_mapping_file, described_class_mapping_content)
allow(Tooling::PredictiveTests::ChangedFiles).to receive(:fetch)
.with(frontend_fixtures_file: frontend_fixtures_file)
.and_return(changed_files)
allow(Tooling::PredictiveTests::TestSelector).to receive(:new).and_return(test_selector)
allow(Tooling::Events::TrackPipelineEvents).to receive(:new).and_return(event_tracker)
allow(Logger).to receive(:new).with($stdout, progname: "rspec predictive testing").and_return(logger)
allow(Tooling::Events::TrackPipelineEvents).to receive(:new).and_return(event_tracker)
allow(Tooling::PredictiveTests::ChangedFiles).to receive(:fetch).with(
frontend_fixtures_file: frontend_fixtures_file
).and_return(changed_files)
allow(Tooling::PredictiveTests::TestSelector).to receive(:new).with(
changed_files: changed_files,
rspec_test_mapping_path: coverage_mapping_file,
rspec_mappings_limit_percentage: nil
).and_return(test_selector_coverage)
allow(Tooling::PredictiveTests::TestSelector).to receive(:new).with(
changed_files: changed_files,
rspec_test_mapping_path: described_class_mapping_file,
rspec_mappings_limit_percentage: nil
).and_return(test_selector_described)
end
describe "#execute" do
it "creates selected test list for each strategy" do
exporter.execute
expect(Tooling::PredictiveTests::TestSelector).to have_received(:new).with(
changed_files: changed_files,
rspec_test_mapping_path: coverage_mapping_file,
rspec_matching_test_files_path: matching_tests_coverage_file,
rspec_matching_js_files_path: File.join(output_dir, "coverage", "js_matching_files.txt"),
rspec_mappings_limit_percentage: nil
)
expect(Tooling::PredictiveTests::TestSelector).to have_received(:new).with(
changed_files: changed_files,
rspec_test_mapping_path: described_class_mapping_file,
rspec_matching_test_files_path: matching_tests_described_class_file,
rspec_matching_js_files_path: File.join(output_dir, "described_class", "js_matching_files.txt"),
rspec_mappings_limit_percentage: nil
)
expect(test_selector).to have_received(:execute).twice
end
it "exports metrics for described_class strategy", :aggregate_failures do
exporter.execute

View File

@ -1,68 +1,61 @@
# frozen_string_literal: true
require 'tempfile'
require 'fileutils'
require_relative '../../../../../tooling/lib/tooling/predictive_tests/test_selector'
require_relative "../../../../../tooling/lib/tooling/predictive_tests/test_selector"
RSpec.describe Tooling::PredictiveTests::TestSelector, :aggregate_failures, feature_category: :tooling do
subject(:test_selector) do
described_class.new(
changed_files: changed_files,
rspec_matching_test_files_path: test_files_path,
rspec_matching_js_files_path: matching_js_files_path,
rspec_test_mapping_path: crystalball_mapping_path
)
described_class.new(changed_files: changed_files, rspec_test_mapping_path: crystalball_mapping_path)
end
let(:test_files_path) { 'matching_test_files.txt' }
let(:matching_js_files_path) { 'matching_js_files.txt' }
let(:views_with_partials_path) { 'views_with_partials.txt' }
let(:crystalball_mapping_path) { 'crystalball_mapping.txt' }
let(:crystalball_mapping_path) { "crystalball_mapping.txt" }
let(:changed_files) { ["app/models/user.rb", "app/models/todo.rb"] }
let(:rspec_mappings_limit_percentage) { 50 }
let(:find_tests) { instance_double(Tooling::FindTests, execute: nil) }
let(:graphql_mappings) { instance_double(Tooling::Mappings::GraphqlBaseTypeMappings, execute: nil) }
let(:view_to_system_mappings) { instance_double(Tooling::Mappings::ViewToSystemSpecsMappings, execute: nil) }
let(:view_to_js_mappings) { instance_double(Tooling::Mappings::ViewToJsMappings, execute: nil) }
let(:js_to_system_mappings) { instance_double(Tooling::Mappings::JsToSystemSpecsMappings, execute: nil) }
let(:find_tests) { instance_double(Tooling::FindTests, execute: ["specs_from_mapping"]) }
let(:view_to_js_mappings) { instance_double(Tooling::Mappings::ViewToJsMappings, execute: ["jest_spec_list"]) }
let(:changed_files) { ['app/models/user.rb', 'app/models/todo.rb'] }
let(:graphql_mappings) do
instance_double(Tooling::Mappings::GraphqlBaseTypeMappings, execute: ["specs_from_graphql"])
end
let(:view_to_system_mappings) do
instance_double(Tooling::Mappings::ViewToSystemSpecsMappings, execute: ["specs_from_views"])
end
let(:js_to_system_mappings) do
instance_double(Tooling::Mappings::JsToSystemSpecsMappings, execute: ["specs_from_js"])
end
before do
allow(Tooling::FindTests).to receive(:new).and_return(find_tests)
allow(Tooling::Mappings::GraphqlBaseTypeMappings).to receive(:new).and_return(graphql_mappings)
allow(Tooling::Mappings::ViewToSystemSpecsMappings).to receive(:new).and_return(view_to_system_mappings)
allow(Tooling::Mappings::ViewToJsMappings).to receive(:new).and_return(view_to_js_mappings)
allow(Tooling::Mappings::JsToSystemSpecsMappings).to receive(:new).and_return(js_to_system_mappings)
allow(Tooling::Mappings::ViewToJsMappings).to receive(:new).and_return(view_to_js_mappings)
allow(Logger).to receive(:new).and_return(Logger.new(StringIO.new))
end
it 'generates predictive rspec test list by calling correct helpers' do
test_selector.execute
it "generates predictive rspec test list" do
expect(test_selector.rspec_spec_list).to match_array(%w[
specs_from_graphql
specs_from_views
specs_from_js
specs_from_mapping
])
expect(Tooling::Mappings::GraphqlBaseTypeMappings).to have_received(:new).with(changed_files)
expect(Tooling::Mappings::ViewToSystemSpecsMappings).to have_received(:new).with(changed_files)
expect(Tooling::Mappings::JsToSystemSpecsMappings).to have_received(:new).with(changed_files)
expect(Tooling::FindTests).to have_received(:new).with(
changed_files,
test_files_path,
mappings_file: crystalball_mapping_path,
mappings_limit_percentage: rspec_mappings_limit_percentage
)
expect(find_tests).to have_received(:execute)
expect(Tooling::Mappings::GraphqlBaseTypeMappings).to have_received(:new).with(changed_files, test_files_path)
expect(graphql_mappings).to have_received(:execute)
expect(Tooling::Mappings::ViewToSystemSpecsMappings).to have_received(:new).with(changed_files, test_files_path)
expect(view_to_system_mappings).to have_received(:execute)
expect(Tooling::Mappings::JsToSystemSpecsMappings).to have_received(:new).with(changed_files, test_files_path)
expect(js_to_system_mappings).to have_received(:execute)
end
it 'generates predictive js test list by calling correct helpers' do
test_selector.execute
expect(Tooling::Mappings::ViewToJsMappings).to have_received(:new).with(changed_files, matching_js_files_path)
it "generates predictive js test" do
expect(test_selector.js_spec_list).to match_array(["jest_spec_list"])
expect(Tooling::Mappings::ViewToJsMappings).to have_received(:new).with(changed_files)
end
end

View File

@ -540,18 +540,6 @@ RSpec.describe ApplicationWorker, feature_category: :shared do
end
end
describe 'concurrency_limit' do
before do
stub_const(worker.name, worker)
end
it 'sets concurrency_limit by default' do
expect(::Gitlab::SidekiqMiddleware::ConcurrencyLimit::WorkersMap.workers).to include(Gitlab::Foo::Bar::DummyWorker)
expect(::Gitlab::SidekiqMiddleware::ConcurrencyLimit::WorkersMap.limit_for(worker: Gitlab::Foo::Bar::DummyWorker))
.to eq 0
end
end
describe '.concurrency_limit_resume' do
around do |example|
Sidekiq::Testing.fake!(&example)

View File

@ -67,6 +67,7 @@ RSpec.describe WorkerAttributes, feature_category: :shared do
:get_weight | :weight | 1 | [3] | {} | 3
:get_tags | :tags | [] | [:foo, :bar] | {} | [:foo, :bar]
:get_deduplicate_strategy | :deduplicate | :until_executing | [:none] | {} | :none
:get_max_concurrency_limit_percentage | :max_concurrency_limit_percentage | 0.25 | 0.5 | {} | 0.5
:get_deduplication_options | :deduplicate | {} | [:none, { including_scheduled: true }] | {} | { including_scheduled: true }
:database_health_check_attrs | :defer_on_database_health_signal | nil | [:gitlab_main, [:users], 1.minute] | {} | { gitlab_schema: :gitlab_main, tables: [:users], delay_by: 1.minute, block: nil }
@ -349,4 +350,34 @@ RSpec.describe WorkerAttributes, feature_category: :shared do
it { is_expected.to be(false) }
end
end
describe '.max_concurrency_limit_percentage' do
subject(:max_concurrency_limit_percentage) { worker.max_concurrency_limit_percentage(percentage) }
context 'when value is invalid' do
shared_examples 'invalid argument' do
it 'raises ArgumentError' do
expect { max_concurrency_limit_percentage }.to raise_error(ArgumentError)
end
end
context 'with negative value' do
let(:percentage) { -1 }
it_behaves_like 'invalid argument'
end
context 'with value > 1' do
let(:percentage) { 1.1 }
it_behaves_like 'invalid argument'
end
context 'with non Numeric type' do
let(:percentage) { "asd" }
it_behaves_like 'invalid argument'
end
end
end
end

View File

@ -73,15 +73,15 @@ if options[:select_tests]
changed_files = Tooling::PredictiveTests::ChangedFiles.fetch(
frontend_fixtures_file: ENV['FRONTEND_FIXTURES_MAPPING_PATH']
)
Tooling::PredictiveTests::TestSelector.new(
test_selector = Tooling::PredictiveTests::TestSelector.new(
changed_files: changed_files,
rspec_matching_test_files_path: ENV['RSPEC_MATCHING_TEST_FILES_PATH'],
rspec_matching_js_files_path: ENV['RSPEC_MATCHING_JS_FILES_PATH'],
rspec_test_mapping_path: test_mapping_file
).execute
)
# File with a list of mr changes is also used by frontend related pipelines/jobs
# Used to generate predictive rspec test pipelines
File.write(ENV['RSPEC_MATCHING_TEST_FILES_PATH'], test_selector.rspec_spec_list.join(" "))
# Used by frontend related pipelines/jobs
File.write(ENV['RSPEC_MATCHING_JS_FILES_PATH'], test_selector.js_spec_list.join(" "))
File.write(ENV['RSPEC_CHANGED_FILES_PATH'], changed_files.join("\n"))
elsif options[:export_rspec_metrics]
require_relative '../lib/tooling/predictive_tests/metrics_exporter'

View File

@ -9,11 +9,9 @@ module Tooling
def initialize(
changed_files,
predictive_tests_pathname,
mappings_file: nil,
mappings_limit_percentage: nil
)
@predictive_tests_pathname = predictive_tests_pathname
@changed_files = changed_files
@mappings_file = mappings_file
@mappings_limit_percentage = mappings_limit_percentage
@ -32,11 +30,11 @@ module Tooling
file_finder.use TestFileFinder::MappingStrategies::PatternMatching.load('tests.yml')
end
write_array_to_file(predictive_tests_pathname, tff.test_files.uniq)
tff.test_files.uniq
end
private
attr_reader :changed_files, :predictive_tests_pathname, :mappings_file, :mappings_limit_percentage
attr_reader :changed_files, :mappings_file, :mappings_limit_percentage
end
end

View File

@ -25,16 +25,15 @@ module Tooling
'jh' => GRAPHQL_TYPES_FOLDERS_JH
}.freeze
def initialize(changed_files, predictive_tests_pathname)
@changed_files = changed_files
@predictive_tests_pathname = predictive_tests_pathname
def initialize(changed_files)
@changed_files = changed_files
end
def execute
# We go through the available editions when searching for base types
#
# `nil` is the FOSS edition
matching_graphql_tests = ([nil] + ::GitlabEdition.extensions).flat_map do |edition|
([nil] + ::GitlabEdition.extensions).flat_map do |edition|
hierarchy = types_hierarchies[edition]
filter_files.flat_map do |graphql_file|
@ -45,8 +44,6 @@ module Tooling
children_types.map { |filename| filename_to_spec_filename(filename) }
end
end.compact.uniq
write_array_to_file(predictive_tests_pathname, matching_graphql_tests)
end
def filter_files
@ -113,12 +110,12 @@ module Tooling
def filename_to_spec_filename(filename)
spec_file = filename.sub('app', 'spec').sub('.rb', '_spec.rb')
return spec_file if File.exist?(spec_file)
spec_file if File.exist?(spec_file)
end
private
attr_reader :changed_files, :predictive_tests_pathname
attr_reader :changed_files
end
end
end

View File

@ -11,12 +11,10 @@ module Tooling
def initialize(
changed_files,
predictive_tests_pathname,
js_base_folder: 'app/assets/javascripts',
system_specs_base_folder: 'spec/features'
)
@changed_files = changed_files
@predictive_tests_pathname = predictive_tests_pathname
@js_base_folder = js_base_folder
@js_base_folders = folders_for_available_editions(js_base_folder)
@system_specs_base_folder = system_specs_base_folder
@ -31,15 +29,13 @@ module Tooling
end
def execute
matching_system_tests = filter_files.flat_map do |edition, js_files|
filter_files.flat_map do |edition, js_files|
js_keywords_regexp = Regexp.union(construct_js_keywords(js_files))
system_specs_for_edition(edition).select do |system_spec_file|
system_spec_file if js_keywords_regexp.match?(system_spec_file)
end
end
write_array_to_file(predictive_tests_pathname, matching_system_tests)
end
# Keep the files that are in the @js_base_folders folders
@ -84,7 +80,7 @@ module Tooling
private
attr_reader :changed_files, :predictive_tests_pathname
attr_reader :changed_files
end
end
end

View File

@ -15,16 +15,10 @@ module Tooling
# Search for Rails partials included in an HTML file
RAILS_PARTIAL_INVOCATION_REGEXP = %r{(?:render|render_if_exists)(?: |\()(?:partial: ?)?['"]([\w/-]+)['"]}
def initialize(
changed_files,
predictive_tests_pathname,
view_base_folder: 'app/views',
js_base_folder: 'app/assets/javascripts'
)
@changed_files = changed_files
@predictive_tests_pathname = predictive_tests_pathname
@view_base_folders = folders_for_available_editions(view_base_folder)
@js_base_folders = folders_for_available_editions(js_base_folder)
def initialize(changed_files, view_base_folder: 'app/views', js_base_folder: 'app/assets/javascripts')
@changed_files = changed_files
@view_base_folders = folders_for_available_editions(view_base_folder)
@js_base_folders = folders_for_available_editions(js_base_folder)
end
def execute
@ -38,14 +32,12 @@ module Tooling
end
js_tags_regexp = Regexp.union(js_tags)
matching_js_files = @js_base_folders.flat_map do |js_base_folder|
@js_base_folders.flat_map do |js_base_folder|
Dir["#{js_base_folder}/**/*.{js,vue}"].select do |js_file|
file_content = File.read(js_file)
js_tags_regexp.match?(file_content)
end
end
write_array_to_file(predictive_tests_pathname, matching_js_files)
end
# Keep the files that are in the @view_base_folders folder
@ -81,7 +73,7 @@ module Tooling
private
attr_reader :changed_files, :predictive_tests_pathname
attr_reader :changed_files
end
end
end

View File

@ -9,10 +9,9 @@ module Tooling
class ViewToSystemSpecsMappings
include Helpers::PredictiveTestsHelper
def initialize(changed_files, predictive_tests_pathname, view_base_folder: 'app/views')
@changed_files = changed_files
@predictive_tests_pathname = predictive_tests_pathname
@view_base_folders = folders_for_available_editions(view_base_folder)
def initialize(changed_files, view_base_folder: 'app/views')
@changed_files = changed_files
@view_base_folders = folders_for_available_editions(view_base_folder)
end
def execute
@ -29,12 +28,12 @@ module Tooling
end
end
write_array_to_file(predictive_tests_pathname, found_system_specs.compact.uniq.sort)
found_system_specs.compact.uniq.sort
end
private
attr_reader :changed_files, :predictive_tests_pathname, :view_base_folders
attr_reader :changed_files, :view_base_folders
# Keep the views files that are in the @view_base_folders folder
def filter_files

View File

@ -40,7 +40,6 @@ module Tooling
def execute
STRATEGIES.each do |strategy|
logger.info("Running metrics export for '#{strategy}' strategy ...")
create_test_list!(strategy)
generate_and_record_metrics(strategy)
rescue StandardError => e
logger.error("Failed to export test metrics for strategy '#{strategy}': #{e.message}")
@ -110,18 +109,16 @@ module Tooling
File.join(output_path, strategy.to_s, *args)
end
# Create selected test list using specific strategy mapping
# Predictive spec list selector
#
# @param strategy [Symbol]
# @return [void]
def create_test_list!(strategy)
# @return [TestSelector]
def test_selector(strategy)
Tooling::PredictiveTests::TestSelector.new(
changed_files: changed_files,
rspec_test_mapping_path: mapping_file_path(strategy),
rspec_matching_test_files_path: matching_rspec_test_files_path(strategy),
rspec_matching_js_files_path: path_for_strategy(strategy, "js_matching_files.txt"),
rspec_mappings_limit_percentage: nil # always return all tests in the mapping
).execute
)
end
# Create, save and export metrics for selected RSpec tests for specific strategy
@ -132,7 +129,7 @@ module Tooling
logger.info("Generating metrics for mapping strategy '#{strategy}' ...")
# based on the predictive test selection strategy
predicted_test_files = read_array_from_file(matching_rspec_test_files_path(strategy))
predicted_test_files = test_selector(strategy).rspec_spec_list
# actual failed tests from tier-3 run
failed_test_files = read_array_from_file(rspec_all_failed_tests_file)
# crystalball mapping file

View File

@ -10,78 +10,61 @@ require_relative '../mappings/js_to_system_specs_mappings'
require_relative '../mappings/view_to_js_mappings'
require_relative '../mappings/view_to_system_specs_mappings'
# rubocop:disable Gitlab/Json -- not rails
module Tooling
module PredictiveTests
class TestSelector
def initialize(
changed_files:,
rspec_matching_test_files_path:,
rspec_matching_js_files_path:,
rspec_test_mapping_path: nil,
# See https://gitlab.com/gitlab-org/gitlab/-/issues/450374#note_1836131381 on why limit might be used
rspec_mappings_limit_percentage: 50
)
@changed_files = changed_files
@rspec_matching_test_files_path = rspec_matching_test_files_path
@rspec_matching_js_files_path = rspec_matching_js_files_path
@rspec_test_mapping_path = rspec_test_mapping_path
@rspec_mappings_limit_percentage = rspec_mappings_limit_percentage
@logger = Logger.new($stdout, progname: "predictive testing")
end
def execute
logger.info(
"Creating predictive test list based on following changed files: #{JSON.pretty_generate(changed_files)}" # rubocop:disable Gitlab/Json -- not rails
)
# Predictive rspec test files specs list
#
# @return [Array]
def rspec_spec_list
logger.info "Creating predictive rspec test files specs list ..."
specs = {
crystalball_mapping_specs: specs_from_mapping,
graphql_type_mapping_specs: specs_from_graphql_base_types,
js_changes_specs: system_specs_from_js_changes,
view_changes_specs: system_specs_from_view_changes
}
create_rspec_spec_list!
create_js_spec_list!
logger.info("Generated following rspec specs list: #{JSON.pretty_generate(specs)}")
specs.values.flatten
end
# Predictive js test files specs list
#
# @return [Array]
def js_spec_list
logger.info "Creating predictive js test files specs list ..."
Tooling::Mappings::ViewToJsMappings.new(changed_files).execute.tap do |specs|
logger.info "Generated following jest spec list: #{JSON.pretty_generate(specs)}"
end
end
private
attr_reader :changed_files,
:rspec_matching_test_files_path,
:rspec_matching_js_files_path,
:rspec_test_mapping_path,
:rspec_mappings_limit_percentage,
:logger
# Create predictive rspec test files specs list
#
# @return [void]
def create_rspec_spec_list!
logger.info "Creating predictive rspec test files specs list ..."
# TODO: Remove appending to file and work with arrays directly
append_specs_from_mapping!
append_specs_from_graphql_base_types!
append_system_specs_from_js_changes!
append_system_specs_from_view_changes!
end
# Create predictive js test files specs list
#
# @return [void]
def create_js_spec_list!
logger.info "Creating predictive js test files specs list ..."
Tooling::Mappings::ViewToJsMappings.new(changed_files, rspec_matching_js_files_path).execute
end
# Create list of view files that include the potential rails partials
#
# @return [void]
def create_view_partials_mapping_file!
logger.info "Creating list of view files that include the potential rails partials ..."
Tooling::Mappings::PartialToViewsMappings.new(changed_files, rspec_views_including_partials_path).execute
end
# Add specs based on crystalball mapping or static tests.yml file
#
# @return [void]
def append_specs_from_mapping!
Tooling::FindTests.new(
def specs_from_mapping
@specs_from_mapping ||= Tooling::FindTests.new(
changed_files,
rspec_matching_test_files_path,
mappings_file: rspec_test_mapping_path,
mappings_limit_percentage: rspec_mappings_limit_percentage
).execute
@ -90,23 +73,24 @@ module Tooling
# Add system specs based on changes to JS files.
#
# @return [void]
def append_system_specs_from_js_changes!
Tooling::Mappings::JsToSystemSpecsMappings.new(changed_files, rspec_matching_test_files_path).execute
def system_specs_from_js_changes
@system_specs_from_js_changes ||= Tooling::Mappings::JsToSystemSpecsMappings.new(changed_files).execute
end
# Add specs based on potential changes to the GraphQL base types
#
# @return [void]
def append_specs_from_graphql_base_types!
Tooling::Mappings::GraphqlBaseTypeMappings.new(changed_files, rspec_matching_test_files_path).execute
def specs_from_graphql_base_types
@specs_from_graphql_base_types ||= Tooling::Mappings::GraphqlBaseTypeMappings.new(changed_files).execute
end
# Add system specs based on changes to views.
#
# @return [void]
def append_system_specs_from_view_changes!
Tooling::Mappings::ViewToSystemSpecsMappings.new(changed_files, rspec_matching_test_files_path).execute
def system_specs_from_view_changes
@system_specs_from_view_changes ||= Tooling::Mappings::ViewToSystemSpecsMappings.new(changed_files).execute
end
end
end
end
# rubocop:enable Gitlab/Json -- not rails

Some files were not shown because too many files have changed in this diff Show More