Add latest changes from gitlab-org/gitlab@master

GitLab Bot 2024-03-20 09:08:58 +00:00
parent 44b20b052b
commit e25b2fa175
43 changed files with 2192 additions and 2038 deletions

View File

@@ -41,8 +41,8 @@ export default {
 </script>
 <template>
-<div>
-<h1 class="gl-font-size-h2">{{ s__('Runners|New instance runner') }}</h1>
+<div class="gl-mt-5">
+<h1 class="gl-heading-1">{{ s__('Runners|New instance runner') }}</h1>
 <registration-compatibility-alert :alert-key="$options.INSTANCE_TYPE" />
@@ -56,7 +56,7 @@ export default {
 <hr aria-hidden="true" />
-<h2 class="gl-font-size-h2 gl-my-5">
+<h2 class="gl-heading-2">
 {{ s__('Runners|Platform') }}
 </h2>

View File

@@ -151,8 +151,8 @@ export default {
 };
 </script>
 <template>
-<div>
-<h1 class="gl-font-size-h1">{{ heading }}</h1>
+<div class="gl-mt-5">
+<h1 class="gl-heading-1">{{ heading }}</h1>
 <p>
 <gl-sprintf
@@ -168,8 +168,8 @@ export default {
 </gl-sprintf>
 </p>
-<section>
-<h2 class="gl-font-size-h2">{{ s__('Runners|Step 1') }}</h2>
+<section class="gl-mt-6">
+<h2 class="gl-heading-2">{{ s__('Runners|Step 1') }}</h2>
 <p>
 {{
 s__(
@@ -203,8 +203,8 @@ export default {
 </p>
 </template>
 </section>
-<section>
-<h2 class="gl-font-size-h2">{{ s__('Runners|Step 2') }}</h2>
+<section class="gl-mt-6">
+<h2 class="gl-heading-2">{{ s__('Runners|Step 2') }}</h2>
 <p>
 <gl-sprintf
 :message="
@@ -221,8 +221,8 @@ export default {
 </gl-sprintf>
 </p>
 </section>
-<section>
-<h2 class="gl-font-size-h2">{{ s__('Runners|Step 3 (optional)') }}</h2>
+<section class="gl-mt-6">
+<h2 class="gl-heading-2">{{ s__('Runners|Step 3 (optional)') }}</h2>
 <p>{{ s__('Runners|Manually verify that the runner is available to pick up jobs.') }}</p>
 <cli-command :prompt="commandPrompt" :command="runCommand" />
 <p>
@@ -241,8 +241,8 @@ export default {
 </gl-sprintf>
 </p>
 </section>
-<section v-if="isRunnerOnline">
-<h2 class="gl-font-size-h2">🎉 {{ $options.I18N_REGISTRATION_SUCCESS }}</h2>
+<section v-if="isRunnerOnline" class="gl-mt-6">
+<h2 class="gl-heading-2">🎉 {{ $options.I18N_REGISTRATION_SUCCESS }}</h2>
 <p class="gl-pl-6">
 <gl-sprintf :message="s__('Runners|To view the runner, go to %{runnerListName}.')">

View File

@@ -84,7 +84,7 @@ export default {
 </script>
 <template>
 <div>
-<h2 class="gl-font-size-h2 gl-my-5">
+<h2 class="gl-heading-2">
 {{ s__('Runners|Tags') }}
 </h2>
 <gl-skeleton-loader v-if="loading" :lines="16" />
@@ -127,7 +127,7 @@ export default {
 <hr aria-hidden="true" />
-<h2 class="gl-font-size-h2 gl-my-5">
+<h2 class="gl-heading-2">
 {{ s__('Runners|Configuration') }}
 {{ __('(optional)') }}
 </h2>

View File

@@ -66,8 +66,8 @@ export default {
 </script>
 <template>
-<div>
-<h1 class="gl-font-size-h2">{{ s__('Runners|New group runner') }}</h1>
+<div class="gl-mt-5">
+<h1 class="gl-heading-1">{{ s__('Runners|New group runner') }}</h1>
 <registration-compatibility-alert :alert-key="groupId" />
@@ -81,7 +81,7 @@ export default {
 <hr aria-hidden="true" />
-<h2 class="gl-font-size-h2 gl-my-5">
+<h2 class="gl-heading-2">
 {{ s__('Runners|Platform') }}
 </h2>

View File

@@ -71,8 +71,8 @@ export default {
 </script>
 <template>
-<div>
-<h1 class="gl-font-size-h2">{{ s__('Runners|New project runner') }}</h1>
+<div class="gl-mt-5">
+<h1 class="gl-heading-1">{{ s__('Runners|New project runner') }}</h1>
 <registration-compatibility-alert :alert-key="projectId" />
@@ -86,7 +86,7 @@ export default {
 <hr aria-hidden="true" />
-<h2 class="gl-font-size-h2 gl-my-5">
+<h2 class="gl-heading-2">
 {{ s__('Runners|Platform') }}
 </h2>

View File

@@ -0,0 +1,20 @@
+# frozen_string_literal: true
+
+class DropIndexEnvironmentsForNameSearchWithinFolder < Gitlab::Database::Migration[2.2]
+  milestone '16.10'
+  disable_ddl_transaction!
+
+  INDEX_NAME = 'index_environments_for_name_search_within_folder'
+
+  def up
+    remove_concurrent_index_by_name :environments, name: INDEX_NAME
+  end
+
+  def down
+    # from structure.sql:
+    # CREATE INDEX index_environments_for_name_search_within_folder ON environments USING btree
+    # (project_id, lower(ltrim((name)::text, ((environment_type)::text || '/'::text))) varchar_pattern_ops, state);
+    add_concurrent_index :environments,
+      "project_id, lower(ltrim(name, environment_type || '/')) varchar_pattern_ops, state", name: INDEX_NAME
+  end
+end
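Note: `remove_concurrent_index_by_name` and `add_concurrent_index` wrap PostgreSQL's `DROP INDEX CONCURRENTLY` and `CREATE INDEX CONCURRENTLY`, which cannot run inside a transaction; that is why the migration calls `disable_ddl_transaction!`. A minimal sketch (assumption: a Rails console on an instance that has run this migration; `indexes` is standard ActiveRecord) to confirm the index is gone:

```ruby
# Hypothetical post-migration check in a Rails console.
names = ActiveRecord::Base.connection.indexes(:environments).map(&:name)
names.include?('index_environments_for_name_search_within_folder')
# => false once the `up` step has run
```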

View File

@@ -0,0 +1,11 @@
+# frozen_string_literal: true
+
+class DropPromoteUltimateFeaturesAtColumn < Gitlab::Database::Migration[2.2]
+  milestone '16.11'
+
+  enable_lock_retries!
+
+  def change
+    remove_column :onboarding_progresses, :promote_ultimate_features_at, :datetime_with_timezone
+  end
+end
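Note: because the column type (`:datetime_with_timezone`) is passed to `remove_column`, Rails can invert this `change` method on rollback. A sketch of the inferred inverse (illustrative only; the class name is hypothetical and not part of this commit):

```ruby
# Roughly what `db:rollback` infers from the reversible remove_column above.
class ReAddPromoteUltimateFeaturesAt < Gitlab::Database::Migration[2.2]
  def change
    add_column :onboarding_progresses, :promote_ultimate_features_at, :datetime_with_timezone
  end
end
```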

View File

@@ -0,0 +1 @@
+3b81cc44ad7225df2fb73c8fc2f02d24fa3654813147475871a009414decfc0a

View File

@@ -0,0 +1 @@
+e3148a3952b4698853f79ed55758b5fbd1d197e325c29d4262f7639ee4ba9d9b

View File

@@ -12047,7 +12047,6 @@ CREATE TABLE onboarding_progresses (
     secure_cluster_image_scanning_run_at timestamp with time zone,
     secure_api_fuzzing_run_at timestamp with time zone,
     license_scanning_run_at timestamp with time zone,
-    promote_ultimate_features_at timestamp with time zone,
     code_added_at timestamp with time zone
 );
@@ -25030,8 +25029,6 @@ CREATE INDEX index_enabled_clusters_on_id ON clusters USING btree (id) WHERE (en
 CREATE INDEX index_environments_cluster_agent_id ON environments USING btree (cluster_agent_id) WHERE (cluster_agent_id IS NOT NULL);

-CREATE INDEX index_environments_for_name_search_within_folder ON environments USING btree (project_id, lower(ltrim((name)::text, ((environment_type)::text || '/'::text))) varchar_pattern_ops, state);
-
 CREATE INDEX index_environments_name_without_type ON environments USING btree (project_id, lower(ltrim(ltrim((name)::text, (environment_type)::text), '/'::text)) varchar_pattern_ops, state);

 CREATE INDEX index_environments_on_merge_request_id ON environments USING btree (merge_request_id);

View File

@@ -85,7 +85,7 @@ Note the following when promoting a secondary:
   the **secondary** to the **primary**.
 - If you encounter an `ActiveRecord::RecordInvalid: Validation failed: Name has already been taken`
   error message during this process, for more information, see this
-  [troubleshooting advice](../replication/troubleshooting.md#fixing-errors-during-a-failover-or-when-promoting-a-secondary-to-a-primary-site).
+  [troubleshooting advice](../replication/troubleshooting/index.md#fixing-errors-during-a-failover-or-when-promoting-a-secondary-to-a-primary-site).
 - You should [point the primary domain DNS at the newly promoted site](#step-4-optional-updating-the-primary-domain-dns-record). Otherwise, runners must be registered again with the newly promoted site, and all Git remotes, bookmarks, and external integrations must be updated.

 #### Promoting a **secondary** site running on a single node
@@ -531,4 +531,4 @@ Data that was created on the primary while the secondary was paused is lost.
 ## Troubleshooting

-This section was moved to [another location](../replication/troubleshooting.md#fixing-errors-during-a-failover-or-when-promoting-a-secondary-to-a-primary-site).
+This section was moved to [another location](../replication/troubleshooting/index.md#fixing-errors-during-a-failover-or-when-promoting-a-secondary-to-a-primary-site).

View File

@@ -230,7 +230,7 @@ the **secondary** to the **primary**.
 WARNING:
 If you encounter an `ActiveRecord::RecordInvalid: Validation failed: Name has already been taken` error during this process, read
-[the troubleshooting advice](../../replication/troubleshooting.md#fixing-errors-during-a-failover-or-when-promoting-a-secondary-to-a-primary-site).
+[the troubleshooting advice](../../replication/troubleshooting/index.md#fixing-errors-during-a-failover-or-when-promoting-a-secondary-to-a-primary-site).

 The `gitlab-ctl promote-to-primary-node` command cannot be used in
 conjunction with multiple servers, as it can only

View File

@@ -220,7 +220,7 @@ Note the following when promoting a secondary:
   the **secondary** to the **primary**.
 - If you encounter an `ActiveRecord::RecordInvalid: Validation failed: Name has already been taken`
   error during this process, read
-  [the troubleshooting advice](../../replication/troubleshooting.md#fixing-errors-during-a-failover-or-when-promoting-a-secondary-to-a-primary-site).
+  [the troubleshooting advice](../../replication/troubleshooting/index.md#fixing-errors-during-a-failover-or-when-promoting-a-secondary-to-a-primary-site).

 To promote the secondary site running GitLab 14.5 and later:

View File

@@ -129,7 +129,7 @@ NOTE:
 - All sites must run [the same PostgreSQL versions](setup/database.md#postgresql-replication).
 - Where possible, you should also use the same operating system version on all
   Geo sites. If using different operating system versions between Geo sites, you
-  **must** [check OS locale data compatibility](replication/troubleshooting.md#check-os-locale-data-compatibility)
+  **must** [check OS locale data compatibility](replication/troubleshooting/index.md#check-os-locale-data-compatibility)
   across Geo sites to avoid silent corruption of database indexes.
 - Git 2.9 or later
 - Git-lfs 2.4.2 or later on the user side when using LFS
@@ -213,7 +213,7 @@ This list of limitations only reflects the latest version of GitLab. If you are
 - [Disaster recovery](disaster_recovery/index.md) for deployments that have multiple secondary sites causes downtime due to the need to perform complete re-synchronization and re-configuration of all non-promoted secondaries to follow the new primary site.
 - For Git over SSH, to make the project clone URL display correctly regardless of which site you are browsing, secondary sites must use the same port as the primary. [GitLab issue #339262](https://gitlab.com/gitlab-org/gitlab/-/issues/339262) proposes to remove this limitation.
 - Git push over SSH against a secondary site does not work for pushes over 1.86 GB. [GitLab issue #413109](https://gitlab.com/gitlab-org/gitlab/-/issues/413109) tracks this bug.
-- Backups [cannot be run on secondaries](replication/troubleshooting.md#message-error-canceling-statement-due-to-conflict-with-recovery).
+- Backups [cannot be run on secondaries](replication/troubleshooting/index.md#message-error-canceling-statement-due-to-conflict-with-recovery).
 - Git clone and fetch requests with option `--depth` over SSH against a secondary site does not work and hangs indefinitely if the secondary site is not up to date at the time the request is initiated. For more information, see [issue 391980](https://gitlab.com/gitlab-org/gitlab/-/issues/391980).
 - Git push with options over SSH against a secondary site does not work and terminates the connection. For more information, see [issue 417186](https://gitlab.com/gitlab-org/gitlab/-/issues/417186).
 - The Geo secondary site does not accelerate (serve) the clone request for the first stage of the pipeline in most cases. Later stages are not guaranteed to be served by the secondary site either, for example if the Git change is large, bandwidth is small, or pipeline stages are short. In general, it does serve the clone request for subsequent stages. [Issue 446176](https://gitlab.com/gitlab-org/gitlab/-/issues/446176) discusses the reasons for this and proposes an enhancement to increase the chance that Runner clone requests are served from the secondary site.
@@ -347,4 +347,4 @@ For more information on how to access and consume Geo logs, see the [Geo section
 ## Troubleshooting

-For troubleshooting steps, see [Geo Troubleshooting](replication/troubleshooting.md).
+For troubleshooting steps, see [Geo Troubleshooting](replication/troubleshooting/index.md).

Binary file not shown.


File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -964,4 +964,4 @@ Follow [Geo with external PostgreSQL instances](external_database.md#configure-t
 ## Troubleshooting

-Read the [troubleshooting document](../replication/troubleshooting.md).
+Read the [troubleshooting document](../replication/troubleshooting/index.md).

View File

@@ -337,7 +337,7 @@ secondary site is a read-only copy.
    sudo gitlab-rake gitlab:geo:check
    ```

-   If any of the checks fail, see the [troubleshooting documentation](../replication/troubleshooting.md).
+   If any of the checks fail, see the [troubleshooting documentation](../replication/troubleshooting/index.md).

 1. To verify that the secondary site is reachable, SSH into a Rails or Sidekiq server on your primary site and run:
@@ -345,7 +345,7 @@ secondary site is a read-only copy.
    sudo gitlab-rake gitlab:geo:check
    ```

-   If any of the checks fail, check the [troubleshooting documentation](../replication/troubleshooting.md).
+   If any of the checks fail, check the [troubleshooting documentation](../replication/troubleshooting/index.md).

 After the secondary site is added to the Geo administration page and restarted,
 the site automatically starts to replicate missing data from the primary site
@@ -491,4 +491,4 @@ The reconfigure in the [steps above](#configure-gitlab) handles these steps auto
 ## Troubleshooting

-See [troubleshooting Geo](../replication/troubleshooting.md).
+See [troubleshooting Geo](../replication/troubleshooting/index.md).

View File

@@ -583,7 +583,7 @@ You must manually replicate the secret file across all of your secondary sites,
    gitlab-rake gitlab:geo:check
    ```

-   If any of the checks fail, see the [troubleshooting documentation](../replication/troubleshooting.md).
+   If any of the checks fail, see the [troubleshooting documentation](../replication/troubleshooting/index.md).

 1. To verify that the secondary site is reachable, SSH into a Rails or Sidekiq server on your primary site and sign in as root:
@@ -591,7 +591,7 @@ You must manually replicate the secret file across all of your secondary sites,
    gitlab-rake gitlab:geo:check
    ```

-   If any of the checks fail, check the [troubleshooting documentation](../replication/troubleshooting.md).
+   If any of the checks fail, check the [troubleshooting documentation](../replication/troubleshooting/index.md).

 After the secondary site is added to the Geo administration page and restarted,
 the site automatically starts to replicate missing data from the primary site
@@ -639,4 +639,4 @@ site **Geo Sites** dashboard in your browser.
 ## Related topics

-- [Troubleshooting Geo](../replication/troubleshooting.md)
+- [Troubleshooting Geo](../replication/troubleshooting/index.md)

View File

@@ -217,7 +217,7 @@ for example.
 #### Find most common Geo sync errors

-If [the `geo:status` Rake task](../geo/replication/troubleshooting.md#sync-status-rake-task)
+If [the `geo:status` Rake task](../geo/replication/troubleshooting/index.md#sync-status-rake-task)
 repeatedly reports that some items never reach 100%,
 the following command helps to focus on the most common errors.
@@ -225,7 +225,7 @@ the following command helps to focus on the most common errors.
 jq --raw-output 'select(.severity == "ERROR") | [.project_path, .class, .message, .error] | @tsv' geo.log | sort | uniq -c | sort | tail
 ```

-Refer to our [Geo troubleshooting page](../geo/replication/troubleshooting.md)
+Refer to our [Geo troubleshooting page](../geo/replication/troubleshooting/index.md)
 for advice about specific error messages.

 ### Parsing `gitaly/current`

View File

@@ -70,7 +70,7 @@ To move repositories:
    - [All groups](#move-all-groups) or
      [individual groups](../../api/group_repository_storage_moves.md#schedule-a-repository-storage-move-for-a-group).
 1. If [Geo](../geo/index.md) is enabled,
-   [resync all repositories](../geo/replication/troubleshooting.md#queue-up-all-repositories-for-resync).
+   [resync all repositories](../geo/replication/troubleshooting/index.md#queue-up-all-repositories-for-resync).

 #### Move all projects

View File

@@ -277,8 +277,8 @@ Additionally, `current_connections` should be greater than 1.
 ### Message: `LOG: invalid CIDR mask in address`

-See the suggested fix [in Geo documentation](../geo/replication/troubleshooting.md#message-log--invalid-cidr-mask-in-address).
+See the suggested fix [in Geo documentation](../geo/replication/troubleshooting/index.md#message-log--invalid-cidr-mask-in-address).

 ### Message: `LOG: invalid IP mask "md5": Name or service not known`

-See the suggested fix [in Geo documentation](../geo/replication/troubleshooting.md#message-log--invalid-ip-mask-md5-name-or-service-not-known).
+See the suggested fix [in Geo documentation](../geo/replication/troubleshooting/index.md#message-log--invalid-ip-mask-md5-name-or-service-not-known).

View File

@@ -130,7 +130,7 @@ The `gitlab:check` Rake task runs the following Rake tasks:
 It checks that each component was set up according to the installation guide and suggest fixes
 for issues found. This command must be run from your application server and doesn't work correctly on
 component servers like [Gitaly](../gitaly/configure_gitaly.md#run-gitaly-on-its-own-server).
-If you're running Geo, see also the [Geo Health check Rake task](../geo/replication/troubleshooting.md#health-check-rake-task).
+If you're running Geo, see also the [Geo Health check Rake task](../geo/replication/troubleshooting/index.md#health-check-rake-task).

 You may also have a look at our troubleshooting guides for:

View File

@@ -74,25 +74,25 @@ This content has been moved to [Troubleshooting Sidekiq](../sidekiq/sidekiq_trou
 ### Reverify all uploads (or any SSF data type which is verified)

-Moved to [Geo replication troubleshooting](../geo/replication/troubleshooting.md#reverify-all-uploads-or-any-ssf-data-type-which-is-verified).
+Moved to [Geo replication troubleshooting](../geo/replication/troubleshooting/index.md#reverify-all-uploads-or-any-ssf-data-type-which-is-verified).

 ### Artifacts

-Moved to [Geo replication troubleshooting](../geo/replication/troubleshooting.md#resync-and-reverify-individual-components).
+Moved to [Geo replication troubleshooting](../geo/replication/troubleshooting/index.md#resync-and-reverify-individual-components).

 ### Repository verification failures

-Moved to [Geo replication troubleshooting](../geo/replication/troubleshooting.md#find-repository-verification-failures).
+Moved to [Geo replication troubleshooting](../geo/replication/troubleshooting/index.md#find-repository-verification-failures).

 ### Resync repositories

-Moved to [Geo replication troubleshooting - Resync repository types](../geo/replication/troubleshooting.md#resync-and-reverify-individual-components).
+Moved to [Geo replication troubleshooting - Resync repository types](../geo/replication/troubleshooting/index.md#resync-and-reverify-individual-components).

-Moved to [Geo replication troubleshooting - Resync project and project wiki repositories](../geo/replication/troubleshooting.md#resync-project-and-project-wiki-repositories).
+Moved to [Geo replication troubleshooting - Resync project and project wiki repositories](../geo/replication/troubleshooting/index.md#resync-project-and-project-wiki-repositories).

 ### Blob types

-Moved to [Geo replication troubleshooting](../geo/replication/troubleshooting.md#resync-and-reverify-individual-components).
+Moved to [Geo replication troubleshooting](../geo/replication/troubleshooting/index.md#resync-and-reverify-individual-components).

 ## Generate Service Ping

View File

@@ -19,7 +19,7 @@ for in this list, you should search the documentation.
 ## Troubleshooting guides

 - [SSL](https://docs.gitlab.com/omnibus/settings/ssl/ssl_troubleshooting.html)
-- [Geo](../geo/replication/troubleshooting.md)
+- [Geo](../geo/replication/troubleshooting/index.md)
 - [SAML](../../user/group/saml_sso/troubleshooting.md)
 - [Kubernetes cheat sheet](https://docs.gitlab.com/charts/troubleshooting/kubernetes_cheat_sheet.html)
 - [Linux cheat sheet](linux_cheat_sheet.md)

View File

@@ -297,7 +297,7 @@ HINT: Free one or increase max_replication_slots.
 ### Geo replication errors

 If you receive errors like this example, read about how to resolve
-[Geo replication errors](../geo/replication/troubleshooting.md#fixing-postgresql-database-replication-errors):
+[Geo replication errors](../geo/replication/troubleshooting/index.md#fixing-postgresql-database-replication-errors):

 ```plaintext
 ERROR: replication slots can only be used if max_replication_slots > 0
@@ -313,8 +313,8 @@ PANIC: could not write to file 'pg_xlog/xlogtemp.123': No space left on device
 When troubleshooting problems with Geo, you should:

-- Review [common Geo errors](../geo/replication/troubleshooting.md#fixing-common-errors).
-- [Review your Geo configuration](../geo/replication/troubleshooting.md), including:
+- Review [common Geo errors](../geo/replication/troubleshooting/index.md#fixing-common-errors).
+- [Review your Geo configuration](../geo/replication/troubleshooting/index.md), including:
   - Reconfiguring hosts and ports.
   - Reviewing and fixing the user and password mappings.

View File

@@ -132,7 +132,7 @@ It is recommended to review the [full requirements for running Geo](../administr
 Changes to locale data in `glibc` means that PostgreSQL database files are not fully compatible
 between different OS releases.

-To avoid index corruption, [check for locale compatibility](../administration/geo/replication/troubleshooting.md#check-os-locale-data-compatibility)
+To avoid index corruption, [check for locale compatibility](../administration/geo/replication/troubleshooting/index.md#check-os-locale-data-compatibility)
 when:

 - Moving binary PostgreSQL data between servers.

View File

@@ -215,9 +215,9 @@ For more information, see [Enable or disable service ping](../../administration/
 In GitLab 15.4 and 15.5, Gitaly Cluster assumes `pool.ntp.org` is accessible. If `pool.ntp.org` is not accessible, [customize the time server setting](../../administration/gitaly/praefect.md#customize-time-server-setting) on the Gitaly
 and Praefect servers so they can use an accessible NTP server.

-On offline instances, the [GitLab Geo check Rake task](../../administration/geo/replication/troubleshooting.md#can-geo-detect-the-current-site-correctly)
+On offline instances, the [GitLab Geo check Rake task](../../administration/geo/replication/troubleshooting/index.md#can-geo-detect-the-current-site-correctly)
 always fails because it uses `pool.ntp.org`. This error can be ignored but you can
-[read more about how to work around it](../../administration/geo/replication/troubleshooting.md#message-machine-clock-is-synchronized--exception).
+[read more about how to work around it](../../administration/geo/replication/troubleshooting/index.md#message-machine-clock-is-synchronized--exception).

 ## Enabling the Package Metadata Database

View File

@@ -230,7 +230,7 @@ DETAILS:
   results in a loop that consistently fails for all objects stored in object storage.

   For information on how to fix this, see
-  [Troubleshooting - Failed syncs with GitLab-managed object storage replication](../../administration/geo/replication/troubleshooting.md#failed-syncs-with-gitlab-managed-object-storage-replication).
+  [Troubleshooting - Failed syncs with GitLab-managed object storage replication](../../administration/geo/replication/troubleshooting/index.md#failed-syncs-with-gitlab-managed-object-storage-replication).

 ## 14.6.0
@@ -256,7 +256,7 @@ DETAILS:
   results in a loop that consistently fails for all objects stored in object storage.

   For information on how to fix this, see
-  [Troubleshooting - Failed syncs with GitLab-managed object storage replication](../../administration/geo/replication/troubleshooting.md#failed-syncs-with-gitlab-managed-object-storage-replication).
+  [Troubleshooting - Failed syncs with GitLab-managed object storage replication](../../administration/geo/replication/troubleshooting/index.md#failed-syncs-with-gitlab-managed-object-storage-replication).

 ## 14.5.0
@@ -341,7 +341,7 @@ DETAILS:
   results in a loop that consistently fails for all objects stored in object storage.

   For information on how to fix this, see
-  [Troubleshooting - Failed syncs with GitLab-managed object storage replication](../../administration/geo/replication/troubleshooting.md#failed-syncs-with-gitlab-managed-object-storage-replication).
+  [Troubleshooting - Failed syncs with GitLab-managed object storage replication](../../administration/geo/replication/troubleshooting/index.md#failed-syncs-with-gitlab-managed-object-storage-replication).

 ## 14.4.4
@@ -425,7 +425,7 @@ DETAILS:
   results in a loop that consistently fails for all objects stored in object storage.

   For information on how to fix this, see
-  [Troubleshooting - Failed syncs with GitLab-managed object storage replication](../../administration/geo/replication/troubleshooting.md#failed-syncs-with-gitlab-managed-object-storage-replication).
+  [Troubleshooting - Failed syncs with GitLab-managed object storage replication](../../administration/geo/replication/troubleshooting/index.md#failed-syncs-with-gitlab-managed-object-storage-replication).
 - There is [an issue in GitLab 14.4.0 through 14.4.2](#1440) that can affect
   Geo and other features that rely on cronjobs. We recommend upgrading to GitLab 14.4.3 or later.
@@ -596,7 +596,7 @@ DETAILS:
   results in a loop that consistently fails for all objects stored in object storage.

   For information on how to fix this, see
-  [Troubleshooting - Failed syncs with GitLab-managed object storage replication](../../administration/geo/replication/troubleshooting.md#failed-syncs-with-gitlab-managed-object-storage-replication).
+  [Troubleshooting - Failed syncs with GitLab-managed object storage replication](../../administration/geo/replication/troubleshooting/index.md#failed-syncs-with-gitlab-managed-object-storage-replication).
 - We found an [issue](https://gitlab.com/gitlab-org/gitlab/-/issues/336013) where the container registry replication
   wasn't fully working if you used multi-arch images. In case of a multi-arch image, only the primary architecture
@@ -701,7 +701,7 @@ DETAILS:
   results in a loop that consistently fails for all objects stored in object storage.

   For information on how to fix this, see
-  [Troubleshooting - Failed syncs with GitLab-managed object storage replication](../../administration/geo/replication/troubleshooting.md#failed-syncs-with-gitlab-managed-object-storage-replication).
+  [Troubleshooting - Failed syncs with GitLab-managed object storage replication](../../administration/geo/replication/troubleshooting/index.md#failed-syncs-with-gitlab-managed-object-storage-replication).
 - We found an [issue](https://gitlab.com/gitlab-org/gitlab/-/issues/336013) where the container registry replication
   wasn't fully working if you used multi-arch images. In case of a multi-arch image, only the primary architecture

View File

@@ -620,7 +620,7 @@ DETAILS:
 ## 15.4.6

-- Due to a [bug introduced in curl in GitLab 15.4.6](https://github.com/curl/curl/issues/10122), the [`no_proxy` environment variable may not work properly](../../administration/geo/replication/troubleshooting.md#secondary-site-returns-received-http-code-403-from-proxy-after-connect). Either downgrade to GitLab 15.4.5, or upgrade to GitLab 15.5.7 or a later version.
+- Due to a [bug introduced in curl in GitLab 15.4.6](https://github.com/curl/curl/issues/10122), the [`no_proxy` environment variable may not work properly](../../administration/geo/replication/troubleshooting/index.md#secondary-site-returns-received-http-code-403-from-proxy-after-connect). Either downgrade to GitLab 15.4.5, or upgrade to GitLab 15.5.7 or a later version.
 - Due to [a bug introduced in GitLab 15.4](https://gitlab.com/gitlab-org/gitlab/-/issues/390155), if one or more Git repositories in Gitaly Cluster is [unavailable](../../administration/gitaly/recovery.md#unavailable-repositories), then [Repository checks](../../administration/repository_checks.md#repository-checks) and [Geo replication and verification](../../administration/geo/index.md) stop running for all project or project wiki repositories in the affected Gitaly Cluster. The bug was fixed by [reverting the change in GitLab 15.9.0](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/110823). Before upgrading to this version, check if you have any "unavailable" repositories. See [the bug issue](https://gitlab.com/gitlab-org/gitlab/-/issues/390155) for more information.

 ## 15.4.5

View File

@@ -432,7 +432,7 @@ prevent you from creating a branch with this name.
 To find all branches you've authored in a project, run this command in a Git repository:

 ```shell
-git for-each-ref --format='%(refname:short) %(authoremail)' | grep $(git config --get user.email)
+git for-each-ref --format='%(authoremail) %(refname:short)' | grep $(git config --get user.email)
 ```

 To get a total of all branches in a project, sorted by author, run this command

View File

@@ -236,8 +236,7 @@ GitLab can render OpenAPI specification files. The filename must include `openap
 To render an OpenAPI file:

 1. Go to the OpenAPI file in your repository.
-1. Between the **Display source** and **Edit** buttons, select **Display OpenAPI**. When an OpenAPI file is found, it replaces the
-   **Display rendered file** button.
+1. Select **Display rendered file**.
 1. To display the `operationId` in the operations list, add `displayOperationId=true` to the query string.

 NOTE:

View File

@@ -10,38 +10,80 @@ module API
     helpers do
       def queue_metrics
-        ::Gitlab::SidekiqConfig.routing_queues.each_with_object({}) do |queue_name, hash|
-          queue = Sidekiq::Queue.new(queue_name)
-          hash[queue.name] = {
-            backlog: queue.size,
-            latency: queue.latency.to_i
-          }
-        end
-      end
+        hash = {}
+        Gitlab::Redis::Queues.instances.each_value do |v| # rubocop:disable Cop/RedisQueueUsage -- allow iteration over shard instances
+          queue_metrics_from_shard(v.sidekiq_redis).each do |queue_name, queue_details|
+            if hash[queue_name].nil?
+              hash[queue_name] = queue_details
+            else
+              hash[queue_name][:backlog] += queue_details[:backlog]
+              hash[queue_name][:latency] = [queue_details[:latency], hash[queue_name][:latency]].max
+            end
+          end
+        end
+
+        hash
+      end
+
+      def queue_metrics_from_shard(pool)
+        Sidekiq::Client.via(pool) do
+          ::Gitlab::SidekiqConfig.routing_queues.each_with_object({}) do |queue_name, hash|
+            queue = Sidekiq::Queue.new(queue_name)
+            hash[queue.name] = {
+              backlog: queue.size,
+              latency: queue.latency.to_i
+            }
+          end
+        end
+      end

       def process_metrics
-        Sidekiq::ProcessSet.new(false).map do |process|
-          {
-            hostname: process['hostname'],
-            pid: process['pid'],
-            tag: process['tag'],
-            started_at: Time.at(process['started_at']),
-            queues: process['queues'],
-            labels: process['labels'],
-            concurrency: process['concurrency'],
-            busy: process['busy']
-          }
-        end
-      end
+        Gitlab::Redis::Queues.instances.values.flat_map do |v| # rubocop:disable Cop/RedisQueueUsage -- allow iteration over shard instances
+          process_metrics_from_shard(v.sidekiq_redis)
+        end
+      end
+
+      def process_metrics_from_shard(pool)
+        Sidekiq::Client.via(pool) do
+          Sidekiq::ProcessSet.new(false).map do |process|
+            {
+              hostname: process['hostname'],
+              pid: process['pid'],
+              tag: process['tag'],
+              started_at: Time.at(process['started_at']),
+              queues: process['queues'],
+              labels: process['labels'],
+              concurrency: process['concurrency'],
+              busy: process['busy']
+            }
+          end
+        end
+      end

       def job_stats
-        stats = Sidekiq::Stats.new
-        {
-          processed: stats.processed,
-          failed: stats.failed,
-          enqueued: stats.enqueued,
-          dead: stats.dead_size
-        }
-      end
+        stats = {
+          processed: 0,
+          failed: 0,
+          enqueued: 0,
+          dead: 0
+        }
+
+        Gitlab::Redis::Queues.instances.each_value do |shard| # rubocop:disable Cop/RedisQueueUsage -- allow iteration over shard instances
+          job_stats_from_shard(shard.sidekiq_redis).each { |k, v| stats[k] += v }
+        end
+
+        stats
+      end
+
+      def job_stats_from_shard(pool)
+        Sidekiq::Client.via(pool) do
+          stats = Sidekiq::Stats.new
+          {
+            processed: stats.processed,
+            failed: stats.failed,
+            enqueued: stats.enqueued,
+            dead: stats.dead_size
+          }
+        end
+      end
     end
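Note: each helper now fans out to every Sidekiq Redis shard through `Sidekiq::Client.via` (a real Sidekiq API that pins all Sidekiq calls in the block to the given connection pool) and merges the per-shard results: backlogs and job counters are summed, while per-queue latency keeps the worst shard's value. A self-contained sketch of that merge rule (sample numbers are made up):

```ruby
# Merge per-shard queue metrics the way queue_metrics above does:
# sum the backlogs, keep the maximum latency per queue.
shard_a = { 'default' => { backlog: 3, latency: 10 } }
shard_b = { 'default' => { backlog: 2, latency: 25 } }

merged = {}
[shard_a, shard_b].each do |per_shard|
  per_shard.each do |queue_name, details|
    if merged[queue_name].nil?
      merged[queue_name] = details.dup
    else
      merged[queue_name][:backlog] += details[:backlog]
      merged[queue_name][:latency] = [merged[queue_name][:latency], details[:latency]].max
    end
  end
end

merged # => {"default"=>{:backlog=>5, :latency=>25}}
```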

View File

@@ -125,7 +125,7 @@
     "clipboard": "^2.0.8",
     "compression-webpack-plugin": "^5.0.2",
     "copy-webpack-plugin": "^6.4.1",
-    "core-js": "^3.36.0",
+    "core-js": "^3.36.1",
     "cron-validator": "^1.1.1",
     "cronstrue": "^1.122.0",
     "cropperjs": "^1.6.1",

View File

@@ -119,8 +119,6 @@ describe('positive tests', () => {
       SecretsYaml,
       NeedsParallelMatrixYaml,
       ScriptYaml,
-      WorkflowAutoCancelOnJobFailureYaml,
-      WorkflowAutoCancelOnNewCommitYaml,
       WorkflowRulesAutoCancelOnJobFailureYaml,
       WorkflowRulesAutoCancelOnNewCommitYaml,
       StagesYaml,
@@ -128,13 +126,36 @@ describe('positive tests', () => {
     }),
   )('schema validates %s', (_, input) => {
     // We construct a new "JSON" from each main key that is inside a
-    // file which allow us to make sure each blob is valid.
+    // file which allows us to make sure each blob is valid.
+    // Note that this treats each main key as a job or global definition,
+    // which means that more than one global definition (e.g. `workflow`)
+    // is not allowed. To use multiple global keys on a single test file
+    // use the `global positive tests` below.
     Object.keys(input).forEach((key) => {
       expect({ [key]: input[key] }).toValidateJsonSchema(ajvSchema);
     });
   });
 });

+describe('global positive tests', () => {
+  const tests = {
+    WorkflowAutoCancelOnJobFailureYaml,
+    WorkflowAutoCancelOnNewCommitYaml,
+  };
+
+  for (const testName in tests) {
+    if (Object.hasOwn(tests, testName)) {
+      const test = tests[testName];
+      describe(testName, () => {
+        // We construct a new "JSON" from each main key that is inside a
+        // file which allows us to make sure each blob is valid.
+        it.each(Object.entries(test))('schema validates %s', (_, input) => {
+          expect(input).toValidateJsonSchema(ajvSchema);
+        });
+      });
+    }
+  }
+});
+
 describe('negative tests', () => {
   it.each(
     Object.entries({
@@ -170,8 +191,6 @@ describe('negative tests', () => {
       NeedsParallelMatrixWrongParallelValueYaml,
       NeedsParallelMatrixWrongMatrixValueYaml,
       ScriptNegativeYaml,
-      WorkflowAutoCancelOnJobFailureNegativeYaml,
-      WorkflowAutoCancelOnNewCommitNegativeYaml,
       WorkflowRulesAutoCancelOnJobFailureNegativeYaml,
       WorkflowRulesAutoCancelOnNewCommitNegativeYaml,
       StagesNegativeYaml,
@@ -179,9 +198,33 @@ describe('negative tests', () => {
     }),
   )('schema validates %s', (_, input) => {
     // We construct a new "JSON" from each main key that is inside a
-    // file which allow us to make sure each blob is invalid.
+    // file which allows us to make sure each blob is invalid.
+    // Note that this treats each main key as a job or global definition,
+    // which means that using more than one global definition (e.g. `workflow`)
+    // on a single test file could lead to incorrect test results.
+    // To use multiple global keys on a single test file use the
+    // `global negative tests` below.
     Object.keys(input).forEach((key) => {
       expect({ [key]: input[key] }).not.toValidateJsonSchema(ajvSchema);
     });
   });
 });
+
+describe('global negative tests', () => {
+  const tests = {
+    WorkflowAutoCancelOnJobFailureNegativeYaml,
+    WorkflowAutoCancelOnNewCommitNegativeYaml,
+  };
+
+  for (const testName in tests) {
+    if (Object.hasOwn(tests, testName)) {
+      const test = tests[testName];
+      describe(testName, () => {
+        // We construct a new "JSON" from each main key that is inside a
+        // file which allows us to make sure each blob is invalid.
+        it.each(Object.entries(test))('schema validates %s', (_, input) => {
+          expect(input).not.toValidateJsonSchema(ajvSchema);
+        });
+      });
+    }
+  }
+});

View File

@@ -1,3 +1,4 @@
-workflow:
-  auto_cancel:
-    on_job_failure: unexpected_value
+invalid workflow:auto_cancel:on_job_failure value:
+  workflow:
+    auto_cancel:
+      on_job_failure: unexpected_value

View File

@@ -1,3 +1,4 @@
-workflow:
-  auto_cancel:
-    on_new_commit: unexpected_value
+invalid workflow:auto_cancel:on_new_commit value:
+  workflow:
+    auto_cancel:
+      on_new_commit: unexpected_value

View File

@@ -1,3 +1,9 @@
-workflow:
-  auto_cancel:
-    on_job_failure: all
+accepts workflow:auto_cancel:on_job_failure value none:
+  workflow:
+    auto_cancel:
+      on_job_failure: none
+
+accepts workflow:auto_cancel:on_job_failure value all:
+  workflow:
+    auto_cancel:
+      on_job_failure: all

View File

@@ -1,3 +1,14 @@
-workflow:
-  auto_cancel:
-    on_new_commit: conservative
+accepts workflow:auto_cancel:on_new_commit value conservative:
+  workflow:
+    auto_cancel:
+      on_new_commit: conservative
+
+accepts workflow:auto_cancel:on_new_commit value interruptible:
+  workflow:
+    auto_cancel:
+      on_new_commit: interruptible
+
+accepts workflow:auto_cancel:on_new_commit value none:
+  workflow:
+    auto_cancel:
+      on_new_commit: none

View File

@@ -3,6 +3,7 @@
 require 'spec_helper'

 RSpec.describe API::SidekiqMetrics, :aggregate_failures, feature_category: :shared do
+  let(:instance_count) { 1 }
   let(:admin) { create(:user, :admin) }

   describe 'GET sidekiq/*' do
@@ -13,50 +14,71 @@ RSpec.describe API::SidekiqMetrics, :aggregate_failures, feature_category: :shar
       end
     end

-    it 'defines the `queue_metrics` endpoint' do
-      get api('/sidekiq/queue_metrics', admin, admin_mode: true)
-
-      expect(response).to have_gitlab_http_status(:ok)
-      expect(json_response).to match a_hash_including(
-        'queues' => a_hash_including(
-          'default' => {
-            'backlog' => be_a(Integer),
-            'latency' => be_a(Integer)
-          },
-          'mailers' => {
-            'backlog' => be_a(Integer),
-            'latency' => be_a(Integer)
-          }
-        )
-      )
-    end
-
-    it 'defines the `process_metrics` endpoint' do
-      get api('/sidekiq/process_metrics', admin, admin_mode: true)
-
-      expect(response).to have_gitlab_http_status(:ok)
-      expect(json_response['processes']).to be_an Array
-    end
-
-    it 'defines the `job_stats` endpoint' do
-      get api('/sidekiq/job_stats', admin, admin_mode: true)
-
-      expect(response).to have_gitlab_http_status(:ok)
-      expect(json_response).to be_a Hash
-      expect(json_response['jobs']).to be_a Hash
-      expect(json_response['jobs'].keys)
-        .to contain_exactly(*%w[processed failed enqueued dead])
-      expect(json_response['jobs'].values).to all(be_an(Integer))
-    end
-
-    it 'defines the `compound_metrics` endpoint' do
-      get api('/sidekiq/compound_metrics', admin, admin_mode: true)
-
-      expect(response).to have_gitlab_http_status(:ok)
-      expect(json_response).to be_a Hash
-      expect(json_response['queues']).to be_a Hash
-      expect(json_response['processes']).to be_an Array
-      expect(json_response['jobs']).to be_a Hash
-    end
+    shared_examples 'GET sidekiq metrics' do
+      it 'defines the `queue_metrics` endpoint' do
+        expect(Gitlab::SidekiqConfig).to receive(:routing_queues).exactly(instance_count).times.and_call_original
+        get api('/sidekiq/queue_metrics', admin, admin_mode: true)
+
+        expect(response).to have_gitlab_http_status(:ok)
+        expect(json_response).to match a_hash_including(
+          'queues' => a_hash_including(
+            'default' => {
+              'backlog' => be_a(Integer),
+              'latency' => be_a(Integer)
+            },
+            'mailers' => {
+              'backlog' => be_a(Integer),
+              'latency' => be_a(Integer)
+            }
+          )
+        )
+      end
+
+      it 'defines the `process_metrics` endpoint' do
+        expect(Sidekiq::ProcessSet).to receive(:new).exactly(instance_count).times.and_call_original
+        get api('/sidekiq/process_metrics', admin, admin_mode: true)
+
+        expect(response).to have_gitlab_http_status(:ok)
+        expect(json_response['processes']).to be_an Array
+      end
+
+      it 'defines the `job_stats` endpoint' do
+        expect(Sidekiq::Stats).to receive(:new).exactly(instance_count).times.and_call_original
+        get api('/sidekiq/job_stats', admin, admin_mode: true)
+
+        expect(response).to have_gitlab_http_status(:ok)
+        expect(json_response).to be_a Hash
+        expect(json_response['jobs']).to be_a Hash
+        expect(json_response['jobs'].keys)
+          .to contain_exactly(*%w[processed failed enqueued dead])
+        expect(json_response['jobs'].values).to all(be_an(Integer))
+      end
+
+      it 'defines the `compound_metrics` endpoint' do
+        expect(Sidekiq::Stats).to receive(:new).exactly(instance_count).times.and_call_original
+        expect(Sidekiq::ProcessSet).to receive(:new).exactly(instance_count).times.and_call_original
+        expect(Gitlab::SidekiqConfig).to receive(:routing_queues).exactly(instance_count).times.and_call_original
+        get api('/sidekiq/compound_metrics', admin, admin_mode: true)
+
+        expect(response).to have_gitlab_http_status(:ok)
+        expect(json_response).to be_a Hash
+        expect(json_response['queues']).to be_a Hash
+        expect(json_response['processes']).to be_an Array
+        expect(json_response['jobs']).to be_a Hash
+      end
+    end
+
+    context 'with multiple Sidekiq Redis' do
+      let(:instance_count) { 2 }
+
+      before do
+        allow(Gitlab::Redis::Queues)
+          .to receive(:instances).and_return({ 'main' => Gitlab::Redis::Queues, 'shard' => Gitlab::Redis::Queues })
+      end
+
+      it_behaves_like 'GET sidekiq metrics'
+    end
+
+    it_behaves_like 'GET sidekiq metrics'
   end
 end

View File

@@ -137,17 +137,7 @@ export default defineConfig({
     'process.env.GITLAB_WEB_IDE_PUBLIC_PATH': JSON.stringify(GITLAB_WEB_IDE_PUBLIC_PATH),
   },
   server: {
-    hmr:
-      viteGDKConfig.hmr === undefined
-        ? /*
-            This is a legacy behavior for older GDKs. They will fallback to:
-            ws://localhost:3038/vite-dev/
-            TODO: Remove this after 2024-01-18 */
-          {
-            host: 'localhost',
-            protocol: 'ws',
-          }
-        : viteGDKConfig.hmr,
+    hmr: viteGDKConfig.hmr,
     https: false,
     watch:
       viteGDKConfig.hmr === null

View File

@@ -4661,10 +4661,10 @@ core-js-pure@^3.30.2:
   resolved "https://registry.yarnpkg.com/core-js-pure/-/core-js-pure-3.35.0.tgz#4660033304a050215ae82e476bd2513a419fbb34"
   integrity sha512-f+eRYmkou59uh7BPcyJ8MC76DiGhspj1KMxVIcF24tzP8NA9HVa1uC7BTW2tgx7E1QVCzDzsgp7kArrzhlz8Ew==

-core-js@^3.29.1, core-js@^3.36.0, core-js@^3.6.5:
-  version "3.36.0"
-  resolved "https://registry.yarnpkg.com/core-js/-/core-js-3.36.0.tgz#e752fa0b0b462a0787d56e9d73f80b0f7c0dde68"
-  integrity sha512-mt7+TUBbTFg5+GngsAxeKBTl5/VS0guFeJacYge9OmHb+m058UwwIm41SE9T4Den7ClatV57B6TYTuJ0CX1MAw==
+core-js@^3.29.1, core-js@^3.36.1, core-js@^3.6.5:
+  version "3.36.1"
+  resolved "https://registry.yarnpkg.com/core-js/-/core-js-3.36.1.tgz#c97a7160ebd00b2de19e62f4bbd3406ab720e578"
+  integrity sha512-BTvUrwxVBezj5SZ3f10ImnX2oRByMxql3EimVqMysepbC9EeMUOpLwdy6Eoili2x6E4kf+ZUB5k/+Jv55alPfA==
core-util-is@~1.0.0:
version "1.0.3"