Add latest changes from gitlab-org/gitlab@master

GitLab Bot 2025-06-03 03:07:36 +00:00
parent 8ee978407a
commit cb6508e11f
28 changed files with 80 additions and 76 deletions

View File

@@ -37,8 +37,6 @@ RSpec/ChangeByZero:
- 'ee/spec/services/vulnerabilities/manually_create_service_spec.rb'
- 'ee/spec/services/vulnerabilities/security_finding/create_merge_request_service_spec.rb'
- 'ee/spec/services/work_items/legacy_epics/related_epic_links/create_service_spec.rb'
- 'ee/spec/workers/observability/alert_query_worker_spec.rb'
- 'ee/spec/workers/security/store_security_reports_by_project_worker_spec.rb'
- 'spec/controllers/admin/clusters_controller_spec.rb'
- 'spec/controllers/groups/boards_controller_spec.rb'
- 'spec/controllers/groups/clusters_controller_spec.rb'
@@ -88,16 +86,3 @@ RSpec/ChangeByZero:
- 'spec/support/shared_examples/services/clusters/create_service_shared_examples.rb'
- 'spec/support/shared_examples/services/schedule_bulk_repository_shard_moves_shared_examples.rb'
- 'spec/support/shared_examples/services/wiki_pages/update_service_shared_examples.rb'
- 'spec/workers/gitlab/bitbucket_import/stage/import_issues_notes_worker_spec.rb'
- 'spec/workers/gitlab/bitbucket_import/stage/import_issues_worker_spec.rb'
- 'spec/workers/gitlab/bitbucket_import/stage/import_pull_requests_notes_worker_spec.rb'
- 'spec/workers/gitlab/bitbucket_import/stage/import_pull_requests_worker_spec.rb'
- 'spec/workers/gitlab/bitbucket_server_import/stage/import_pull_requests_worker_spec.rb'
- 'spec/workers/gitlab/bitbucket_server_import/stage/import_repository_worker_spec.rb'
- 'spec/workers/packages/cleanup_package_file_worker_spec.rb'
- 'spec/workers/packages/maven/metadata/sync_worker_spec.rb'
- 'spec/workers/packages/rubygems/extraction_worker_spec.rb'
- 'spec/workers/process_commit_worker_spec.rb'
- 'spec/workers/projects/import_export/wait_relation_exports_worker_spec.rb'
- 'spec/workers/releases/manage_evidence_worker_spec.rb'
- 'spec/workers/remove_expired_members_worker_spec.rb'

View File

@@ -497,7 +497,7 @@ p_ci_pipelines:
on_delete: async_delete
- table: ci_triggers
column: trigger_id
on_delete: async_delete
on_delete: async_nullify
delete_limit: 50
p_ci_runner_machine_builds:
- table: ci_runner_machines

View File

@@ -282,7 +282,7 @@ Improvements to RPO and RTO are proposed in epic [8903](https://gitlab.com/group
{{< alert type="warning" >}}
If complete cluster failure occurs, disaster recovery plans should be executed. These can affect the
RPO and RTO discussed above.
RPO and RTO discussed previously.
{{< /alert >}}
@@ -458,7 +458,7 @@ conflict. The first to complete creates the metadata record and the other operat
The failing creation leaves leftover repositories on the storages. There is ongoing work on a
[background crawler](https://gitlab.com/gitlab-org/gitaly/-/issues/3719) that cleans up the leftover repositories from the storages.
The repository IDs are generated from the `repositories_repository_id_seq` in PostgreSQL. In the above example, the failing operation took
The repository IDs are generated from the `repositories_repository_id_seq` in PostgreSQL. In the previous example, the failing operation took
one repository ID without successfully creating a repository with it. Failed repository creations are expected to lead to gaps in the repository IDs.
##### Repository deletions

View File

@@ -376,14 +376,14 @@ Checking connectivity: 1381139, done.
Updating files: 100% (71304/71304), done.
```
In the above example:
In the previous example:
- When not using a Bundle URI, there were 5,271,177 objects received from the
GitLab server.
- When using a Bundle URI, there were 1,322,255 objects received from the GitLab
server.
This reduction means GitLab needs to pack together fewer objects (in the above
This reduction means GitLab needs to pack together fewer objects (in the previous
example, roughly a quarter of the number of objects) because the client first
downloaded the bundle from the storage server.
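The ratio can be checked directly from the two object counts quoted in the clone output:

```ruby
# Object counts taken from the example clone output above.
with_bundle_uri    = 1_322_255
without_bundle_uri = 5_271_177

with_bundle_uri.to_f / without_bundle_uri # => ~0.25, roughly a quarter
```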

View File

@@ -109,7 +109,7 @@ You can use oversubscription to maintain a baseline level of performance during
small number of high-workload repositories to "burst" when necessary, without impacting unrelated requests.
Oversubscription refers to assigning more resources than what is technically available on the system.
Using the example above, we can oversubscribe our repository cgroups by allocating 10GiB of memory each, despite the system
Using the previous example, we can oversubscribe our repository cgroups by allocating 10GiB of memory each, despite the system
not having 10GiB * 100 of system memory. These values assume that 10GiB is sufficient for normal operations against any
one repository, but also allow two repositories to burst to 10GiB each while leaving a third bucket of resources to
maintain baseline performance.
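For illustration only, an oversubscribed layout along those lines could be sketched in `gitlab.rb` roughly as follows. The key names assume the Linux package's Gitaly cgroups settings and the 32 GiB parent limit is an arbitrary illustration; verify both against your installed version:

```ruby
# Rough sketch, not a tested configuration: 100 repository cgroups that may
# each burst to 10 GiB, under a parent limit far smaller than 100 * 10 GiB.
gitaly['configuration'] = {
  cgroups: {
    mountpoint: '/sys/fs/cgroup',
    hierarchy_root: 'gitaly',
    memory_bytes: 32 * 1024**3,     # overall Gitaly limit: 32 GiB
    repositories: {
      count: 100,                   # number of repository cgroups
      memory_bytes: 10 * 1024**3    # per-cgroup limit: 10 GiB (oversubscribed)
    }
  }
}
```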

View File

@@ -251,7 +251,7 @@ The following are some queries for monitoring Gitaly:
The only non-zero number should have `enforced="true",status="ok"`. If you have other non-zero
numbers, something is wrong in your configuration.
The `status="ok"` number reflects your current request rate. In the example above, Gitaly is
The `status="ok"` number reflects your current request rate. In the previous example, Gitaly is
handling about 4000 requests per second.
- Use the following Prometheus query to observe the [Git protocol versions](../git_protocol.md)

View File

@@ -268,7 +268,7 @@ your [Gitaly log](logs/_index.md#gitaly-logs):
```
The `actual_duration` (in nanoseconds) indicates how long the scheduled maintenance
took to execute. In the example above, the scheduled housekeeping completed
took to execute. In the previous example, the scheduled housekeeping completed
in just over 5 minutes.
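Because the value is in nanoseconds, converting it is a one-liner (the figure below is a made-up example, not taken from a real log entry):

```ruby
actual_duration = 303_000_000_000      # hypothetical value in nanoseconds
actual_duration / 1_000_000_000.0 / 60 # => 5.05 minutes
```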
## Object pool repositories

View File

@@ -100,7 +100,7 @@ the Linux package in `5` **independent** machines, both with
### Redis setup overview
You must have at least `3` Redis servers: `1` primary, `2` Replicas, and they
need to each be on independent machines (see explanation above).
need to each be on independent machines.
You can have additional Redis nodes, which helps to survive a situation
where more nodes go down. Whenever there are only `2` nodes online, a failover
@@ -161,7 +161,7 @@ Here are some examples:
- With `7` sentinels, a maximum of `3` nodes can go down.
The **Leader** election can sometimes fail the voting round when **consensus**
is not achieved (see the odd number of nodes requirement above). In that case,
is not achieved. In that case,
a new attempt is made after the amount of time defined in
`sentinel['failover_timeout']` (in milliseconds).
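As a minimal sketch (assuming the Linux package's Sentinel settings), the relevant `gitlab.rb` values for a 3-node setup could look like this:

```ruby
# Illustrative only: 1 primary + 2 replicas, so 3 Sentinels and a quorum of 2.
sentinel['quorum'] = 2
# Retry a failed leader election after 60 seconds; the value is in milliseconds.
sentinel['failover_timeout'] = 60000
```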

View File

@@ -150,7 +150,7 @@ To serve metrics via HTTPS instead of HTTP, enable TLS in the exporter settings:
1. Save the file and [reconfigure GitLab](../restart_gitlab.md#reconfigure-a-linux-package-installation)
for the changes to take effect.
When TLS is enabled, the same `port` and `address` are used as described above.
When TLS is enabled, the same `port` and `address` are used as described previously.
The metrics server cannot serve both HTTP and HTTPS at the same time.
## Configure health checks

View File

@@ -176,7 +176,7 @@ precedence:
The operator precedence for this syntax is fixed: it's not possible to make `AND`
have higher precedence than `OR`.
As with the standard queue group syntax above, a single `*` as the
As with the standard queue group syntax documented previously, a single `*` as the
entire queue group selects all queues.
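For example, assuming the Linux package's `sidekiq['queue_groups']` and `sidekiq['queue_selector']` settings are what route these selectors in your version, one queue group could take a narrow `AND` selector while a second takes the catch-all `*`:

```ruby
# Sketch only — adjust to your version's Sidekiq routing settings.
sidekiq['queue_selector'] = true
sidekiq['queue_groups'] = [
  'feature_category=continuous_integration&urgency=high', # AND of two attributes
  '*'                                                      # catch-all: all queues
]
```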
### List of available job classes

View File

@@ -305,7 +305,7 @@ Samples: 348K of event 'cycles', Event count (approx.): 280908431073
0.10% ruby libc-2.12.so [.] _int_free
```
Above you see sample output from a `perf` report. It shows that 97% of the CPU is
The sample output from the `perf` report shows that 97% of the CPU is
being spent inside Nokogiri and `xmlXPathNodeSetMergeAndClear`. For something
this obvious you should then go investigate what job in GitLab would use
Nokogiri and XPath. Combine with `TTIN` or `gdb` output to show the
@@ -345,7 +345,7 @@ from /opt/gitlab/embedded/service/gem/ruby/2.1.0/gems/nokogiri-1.6.7.2/lib/nokog
...
```
If you see a suspicious thread, like the Nokogiri one above, you may want
If you see a suspicious thread, like the Nokogiri one in the example, you may want
to get more information:
```plaintext
@@ -381,7 +381,7 @@ exit
## Sidekiq kill signals
TTIN was described above as the signal to print backtraces for logging, however
TTIN was described previously as the signal to print backtraces for logging, however
Sidekiq responds to other signals as well. For example, TSTP and TERM can be used
to gracefully shut Sidekiq down, see
[the Sidekiq Signals docs](https://github.com/mperham/sidekiq/wiki/Signals#ttin).
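For reference, the same signals can be sent from Ruby; the PID file path below is only a hypothetical example of how you might locate the process:

```ruby
# Sketch: send diagnostic and shutdown signals to a running Sidekiq process.
sidekiq_pid = Integer(File.read('/var/opt/gitlab/sidekiq/sidekiq.pid').strip) # hypothetical path
Process.kill('TTIN', sidekiq_pid) # print thread backtraces to the log
Process.kill('TSTP', sidekiq_pid) # stop picking up new jobs ("quiet" mode)
Process.kill('TERM', sidekiq_pid) # graceful shutdown
```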
@@ -389,7 +389,7 @@ to gracefully shut Sidekiq down, see
## Check for blocking queries
Sometimes the speed at which Sidekiq processes jobs can be so fast that it can
cause database contention. Check for blocking queries when backtraces above
cause database contention. Check for blocking queries when backtraces documented previously
show that many threads are stuck in the database adapter.
The PostgreSQL wiki has details on the query you can run to see blocking
@@ -476,7 +476,7 @@ queue.each { |job| job.delete if <condition>}
Have a look at the section below for cancelling running jobs.
In the method above, `<queue-name>` is the name of the queue that contains the jobs you want to delete and `<condition>` decides which jobs get deleted.
In the method documented previously, `<queue-name>` is the name of the queue that contains the jobs you want to delete and `<condition>` decides which jobs get deleted.
Commonly, `<condition>` references the job arguments, which depend on the type of job in question. To find the arguments for a specific queue, you can have a look at the `perform` function of the related worker file, commonly found at `/app/workers/<queue-name>_worker.rb`.
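Putting that together, a minimal Rails console sketch might look like the following; the `post_receive` queue and the project-ID check are hypothetical placeholders for your own `<queue-name>` and `<condition>`:

```ruby
require 'sidekiq/api'

queue = Sidekiq::Queue.new('post_receive') # hypothetical <queue-name>
queue.each do |job|
  # hypothetical <condition>: first perform argument is the project ID
  job.delete if job.args.first == 1234
end
```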

View File

@@ -15463,6 +15463,9 @@ msgstr ""
msgid "ComplianceFrameworksReport|Framework ID copied to clipboard."
msgstr ""
msgid "ComplianceFrameworksReport|No controls"
msgstr ""
msgid "ComplianceFrameworksReport|Policies"
msgstr ""

View File

@@ -2,7 +2,7 @@
require 'spec_helper'
RSpec.describe Gitlab::Ci::Config::Entry::Kubernetes, feature_category: :kubernetes_management do
RSpec.describe Gitlab::Ci::Config::Entry::Kubernetes, feature_category: :deployment_management do
let(:config) { Hash(namespace: 'namespace') }
subject { described_class.new(config) }

View File

@@ -2,7 +2,7 @@
require 'spec_helper'
RSpec.describe Clusters::Platforms::Kubernetes, feature_category: :kubernetes_managedment do
RSpec.describe Clusters::Platforms::Kubernetes, feature_category: :deployment_management do
include KubernetesHelpers
include ReactiveCachingHelpers

View File

@@ -2,35 +2,50 @@
require 'spec_helper'
RSpec.describe 'gitlab:x509 namespace rake task', :silence_stdout do
# rubocop:disable RSpec/AvoidTestProf -- this is not a migration spec
RSpec.describe 'gitlab:x509 namespace rake task', :silence_stdout, feature_category: :source_code_management do
before(:all) do
Rake.application.rake_require 'tasks/gitlab/x509/update'
end
describe 'update_signatures' do
let(:user) { create(:user, email: X509Helpers::User1.certificate_email) }
let(:project) { create(:project, :repository, path: X509Helpers::User1.path, creator: user) }
let(:x509_signed_commit) { project.commit_by(oid: '189a6c924013fc3fe40d6f1ec1dc20214183bc97') }
let(:x509_commit) { Gitlab::X509::Commit.new(x509_signed_commit).signature }
subject { run_rake_task('gitlab:x509:update_signatures') }
it 'changes from unverified to verified if the certificate store contains the root certificate' do
x509_commit
context 'with commit signatures' do
let_it_be(:user) { create(:user, email: X509Helpers::User1.certificate_email) }
let_it_be(:project) { create(:project, :repository, path: X509Helpers::User1.path, creator: user) }
store = OpenSSL::X509::Store.new
certificate = OpenSSL::X509::Certificate.new X509Helpers::User1.trust_cert
store.add_cert(certificate)
allow(OpenSSL::X509::Store).to receive(:new).and_return(store)
let!(:x509_commit_signature) do
create(:x509_commit_signature, project: project, verification_status: :unverified,
commit_sha: x509_signed_commit.sha)
end
expect_any_instance_of(Gitlab::X509::Commit).to receive(:update_signature!).and_call_original
expect { subject }.to change { x509_commit.reload.verification_status }.from('unverified').to('verified')
let(:x509_signed_commit) { project.commit_by(oid: '189a6c924013fc3fe40d6f1ec1dc20214183bc97') }
let(:x509_commit) { x509_commit_signature.x509_commit }
let(:store) { OpenSSL::X509::Store.new.tap { |s| s.add_cert(certificate) } }
let(:certificate) { OpenSSL::X509::Certificate.new(X509Helpers::User1.trust_cert) }
before do
allow(OpenSSL::X509::Store).to receive(:new).and_return(store)
end
it 'changes from unverified to verified if the certificate store contains the root certificate' do
allow(Gitlab::X509::Commit).to receive(:new).and_return(x509_commit)
expect(x509_commit).to receive(:update_signature!).and_call_original
expect { subject }.to change { x509_commit_signature.reload.verification_status }
.from('unverified').to('verified')
end
end
it 'returns if no signature is available' do
expect_any_instance_of(Gitlab::X509::Commit).not_to receive(:update_signature!)
context 'without commit signatures' do
it 'returns if no signature is available' do
expect_any_instance_of(Gitlab::X509::Commit).not_to receive(:update_signature!)
subject
subject
end
end
end
end
# rubocop:enable RSpec/AvoidTestProf

View File

@@ -69,7 +69,7 @@ RSpec.describe Gitlab::BitbucketImport::Stage::ImportIssuesNotesWorker, feature_
).and_call_original
expect { worker.perform(project.id) }
.to change { Gitlab::BitbucketImport::AdvanceStageWorker.jobs.size }.by(0)
.to not_change { Gitlab::BitbucketImport::AdvanceStageWorker.jobs.size }
.and raise_error(exception)
end
end

View File

@@ -69,7 +69,7 @@ RSpec.describe Gitlab::BitbucketImport::Stage::ImportIssuesWorker, feature_categ
).and_call_original
expect { worker.perform(project.id) }
.to change { Gitlab::BitbucketImport::AdvanceStageWorker.jobs.size }.by(0)
.to not_change { Gitlab::BitbucketImport::AdvanceStageWorker.jobs.size }
.and raise_error(exception)
end
end

View File

@@ -69,7 +69,7 @@ RSpec.describe Gitlab::BitbucketImport::Stage::ImportPullRequestsNotesWorker, fe
).and_call_original
expect { worker.perform(project.id) }
.to change { Gitlab::BitbucketImport::AdvanceStageWorker.jobs.size }.by(0)
.to not_change { Gitlab::BitbucketImport::AdvanceStageWorker.jobs.size }
.and raise_error(exception)
end
end

View File

@@ -69,7 +69,7 @@ RSpec.describe Gitlab::BitbucketImport::Stage::ImportPullRequestsWorker, feature
).and_call_original
expect { worker.perform(project.id) }
.to change { Gitlab::BitbucketImport::AdvanceStageWorker.jobs.size }.by(0)
.to not_change { Gitlab::BitbucketImport::AdvanceStageWorker.jobs.size }
.and raise_error(exception)
end
end

View File

@@ -69,7 +69,7 @@ RSpec.describe Gitlab::BitbucketServerImport::Stage::ImportPullRequestsWorker, f
).and_call_original
expect { worker.perform(project.id) }
.to change { Gitlab::BitbucketServerImport::AdvanceStageWorker.jobs.size }.by(0)
.to not_change { Gitlab::BitbucketServerImport::AdvanceStageWorker.jobs.size }
.and raise_error(exception)
end
end

View File

@@ -69,7 +69,7 @@ RSpec.describe Gitlab::BitbucketServerImport::Stage::ImportRepositoryWorker, fea
).and_call_original
expect { worker.perform(project.id) }
.to change { Gitlab::BitbucketServerImport::Stage::ImportPullRequestsWorker.jobs.size }.by(0)
.to not_change { Gitlab::BitbucketServerImport::Stage::ImportPullRequestsWorker.jobs.size }
.and raise_error(exception)
end
end

View File

@@ -103,8 +103,9 @@ RSpec.describe Packages::CleanupPackageFileWorker, type: :worker, feature_catego
it 'deletes the package file but keeps the package' do
expect(worker).to receive(:log_extra_metadata_on_done).twice
expect { subject }.to change { Packages::PackageFile.count }.by(-1)
.and change { Packages::Package.count }.by(0)
expect { subject }
.to change { Packages::PackageFile.count }.by(-1)
.and not_change { Packages::Package.count }
end
end

View File

@@ -65,7 +65,7 @@ RSpec.describe Packages::Maven::Metadata::SyncWorker, type: :worker do
expect(worker).to receive(:log_extra_metadata_on_done).with(:message, 'Non existing versionless package(s). Nothing to do.')
expect { subject }
.to change { ::Packages::PackageFile.count }.by(0)
.to not_change { ::Packages::PackageFile.count }
end
end
@@ -74,7 +74,7 @@ RSpec.describe Packages::Maven::Metadata::SyncWorker, type: :worker do
it 'does not create the updated metadata files' do
expect { subject }
.to change { ::Packages::PackageFile.count }.by(0)
.to not_change { ::Packages::PackageFile.count }
.and raise_error(described_class::SyncError, 'Not allowed')
end
end
@@ -151,7 +151,7 @@ RSpec.describe Packages::Maven::Metadata::SyncWorker, type: :worker do
expect(worker).to receive(:log_extra_metadata_on_done).with(:message, 'Non existing versionless package(s). Nothing to do.')
expect { subject }
.to change { ::Packages::PackageFile.count }.by(0)
.to not_change { ::Packages::PackageFile.count }
end
end
@@ -160,7 +160,7 @@ RSpec.describe Packages::Maven::Metadata::SyncWorker, type: :worker do
it 'does not create the updated metadata files' do
expect { subject }
.to change { ::Packages::PackageFile.count }.by(0)
.to not_change { ::Packages::PackageFile.count }
.and raise_error(described_class::SyncError, 'Not allowed')
end
end

View File

@@ -20,7 +20,7 @@ RSpec.describe Packages::Rubygems::ExtractionWorker, type: :worker, feature_cate
it 'processes the gem', :aggregate_failures do
expect { subject }
.to change { Packages::Package.count }.by(0)
.to not_change { Packages::Package.count }
.and change { Packages::PackageFile.count }.by(1)
expect(Packages::Package.last.id).to be(package_for_processing.id)
@@ -88,8 +88,8 @@ RSpec.describe Packages::Rubygems::ExtractionWorker, type: :worker, feature_cate
expect(::Packages::Rubygems::ProcessGemService).not_to receive(:new)
expect { subject }
.to change { Packages::Package.count }.by(0)
.and change { Packages::PackageFile.count }.by(0)
.to not_change { Packages::Package.count }
.and not_change { Packages::PackageFile.count }
end
end
end

View File

@@ -39,7 +39,7 @@ RSpec.describe ProcessCommitWorker, feature_category: :source_code_management do
let(:project_id) { -1 }
it 'does not close related issues' do
expect { perform }.to change { Issues::CloseWorker.jobs.size }.by(0)
expect { perform }.to not_change { Issues::CloseWorker.jobs.size }
perform
end

View File

@@ -26,8 +26,8 @@ RSpec.describe Projects::ImportExport::WaitRelationExportsWorker, feature_catego
finished_export_job = create(:project_export_job, :finished)
expect { described_class.new.perform(finished_export_job.id, user.id, after_export_strategy) }
.to change { Projects::ImportExport::ParallelProjectExportWorker.jobs.size }.by(0)
.and change { described_class.jobs.size }.by(0)
.to not_change { Projects::ImportExport::ParallelProjectExportWorker.jobs.size }
.and not_change { described_class.jobs.size }
end
end
@@ -40,7 +40,7 @@ RSpec.describe Projects::ImportExport::WaitRelationExportsWorker, feature_catego
it 'does not enqueue ParallelProjectExportWorker and re-enqueue WaitRelationExportsWorker' do
expect { described_class.new.perform(*job_args) }
.to change { Projects::ImportExport::ParallelProjectExportWorker.jobs.size }.by(0)
.to not_change { Projects::ImportExport::ParallelProjectExportWorker.jobs.size }
.and change { described_class.jobs.size }.by(1)
end
end
@@ -85,7 +85,7 @@ RSpec.describe Projects::ImportExport::WaitRelationExportsWorker, feature_catego
it 'enqueues ParallelProjectExportWorker and does not reenqueue WaitRelationExportsWorker' do
expect { described_class.new.perform(*job_args) }
.to change { Projects::ImportExport::ParallelProjectExportWorker.jobs.size }.by(1)
.and change { described_class.jobs.size }.by(0)
.and not_change { described_class.jobs.size }
end
it_behaves_like 'an idempotent worker'
@@ -116,8 +116,8 @@ RSpec.describe Projects::ImportExport::WaitRelationExportsWorker, feature_catego
it 'does not enqueue ParallelProjectExportWorker and re-enqueue WaitRelationExportsWorker' do
expect { described_class.new.perform(*job_args) }
.to change { Projects::ImportExport::ParallelProjectExportWorker.jobs.size }.by(0)
.and change { described_class.jobs.size }.by(0)
.to not_change { Projects::ImportExport::ParallelProjectExportWorker.jobs.size }
.and not_change { described_class.jobs.size }
end
end
end

View File

@@ -9,7 +9,7 @@ RSpec.describe Releases::ManageEvidenceWorker, feature_category: :release_eviden
specify :sidekiq_inline do
aggregate_failures do
expect(::Releases::CreateEvidenceService).not_to receive(:execute)
expect { described_class.new.perform }.to change { Releases::Evidence.count }.by(0)
expect { described_class.new.perform }.to not_change { Releases::Evidence.count }
end
end
end
@@ -45,7 +45,7 @@ RSpec.describe Releases::ManageEvidenceWorker, feature_category: :release_eviden
project_id: project.id
)
expect { described_class.new.perform }.to change { Releases::Evidence.count }.by(0)
expect { described_class.new.perform }.to not_change { Releases::Evidence.count }
end
end

View File

@@ -76,7 +76,7 @@ RSpec.describe RemoveExpiredMembersWorker, feature_category: :system_access do
end
it 'does not remove expired project bot that expires in the future' do
expect { worker.perform }.to change { Member.count }.by(0)
expect { worker.perform }.to not_change { Member.count }
expect(other_project_bot.reload).to be_present
end