Add latest changes from gitlab-org/gitlab@master

This commit is contained in:
GitLab Bot 2024-04-18 00:11:04 +00:00
parent c35b5c40f4
commit 9674ea1649
54 changed files with 101 additions and 131 deletions

View File

@ -5,7 +5,7 @@ workflow:
name: $PIPELINE_NAME
include:
- component: "gitlab.com/gitlab-org/quality/pipeline-common/allure-report@8.6.1"
- component: "gitlab.com/gitlab-org/quality/pipeline-common/allure-report@8.7.0"
inputs:
job_name: "e2e-test-report"
job_stage: "report"
@ -15,7 +15,7 @@ include:
gitlab_auth_token_variable_name: "PROJECT_TOKEN_FOR_CI_SCRIPTS_API_USAGE"
allure_job_name: "${QA_RUN_TYPE}"
- project: gitlab-org/quality/pipeline-common
ref: 8.6.1
ref: 8.7.0
file:
- /ci/base.gitlab-ci.yml
- /ci/knapsack-report.yml

View File

@ -776,7 +776,6 @@ Layout/LineLength:
- 'ee/app/graphql/types/security_report_summary_section_type.rb'
- 'ee/app/graphql/types/vulnerabilities_count_by_day_type.rb'
- 'ee/app/graphql/types/vulnerability/external_issue_link_external_tracker_enum.rb'
- 'ee/app/graphql/types/vulnerability_confidence_enum.rb'
- 'ee/app/graphql/types/vulnerability_detail_type.rb'
- 'ee/app/graphql/types/vulnerability_type.rb'
- 'ee/app/helpers/admin/emails_helper.rb'

View File

@ -357,7 +357,6 @@ RSpec/FeatureCategory:
- 'ee/spec/graphql/types/vulnerability/external_issue_link_type_spec.rb'
- 'ee/spec/graphql/types/vulnerability/issue_link_type_enum_spec.rb'
- 'ee/spec/graphql/types/vulnerability/issue_link_type_spec.rb'
- 'ee/spec/graphql/types/vulnerability_confidence_enum_spec.rb'
- 'ee/spec/graphql/types/vulnerability_detail_type_spec.rb'
- 'ee/spec/graphql/types/vulnerability_details/base_type_spec.rb'
- 'ee/spec/graphql/types/vulnerability_details/boolean_type_spec.rb'

View File

@ -39,7 +39,6 @@ Style/HashEachMethods:
- 'ee/app/graphql/types/vulnerability/external_issue_link_external_tracker_enum.rb'
- 'ee/app/graphql/types/vulnerability/external_issue_link_type_enum.rb'
- 'ee/app/graphql/types/vulnerability/issue_link_type_enum.rb'
- 'ee/app/graphql/types/vulnerability_confidence_enum.rb'
- 'ee/app/graphql/types/vulnerability_grade_enum.rb'
- 'ee/app/graphql/types/vulnerability_report_type_enum.rb'
- 'ee/app/graphql/types/vulnerability_severities_count_type.rb'

View File

@ -207,9 +207,9 @@ gem 'seed-fu', '~> 2.3.7' # rubocop:todo Gemfile/MissingFeatureCategory
gem 'elasticsearch-model', '~> 7.2' # rubocop:todo Gemfile/MissingFeatureCategory
gem 'elasticsearch-rails', '~> 7.2', require: 'elasticsearch/rails/instrumentation' # rubocop:todo Gemfile/MissingFeatureCategory
gem 'elasticsearch-api', '7.13.3' # rubocop:todo Gemfile/MissingFeatureCategory
gem 'aws-sdk-core', '~> 3.191.6' # rubocop:todo Gemfile/MissingFeatureCategory
gem 'aws-sdk-core', '~> 3.192.0' # rubocop:todo Gemfile/MissingFeatureCategory
gem 'aws-sdk-cloudformation', '~> 1' # rubocop:todo Gemfile/MissingFeatureCategory
gem 'aws-sdk-s3', '~> 1.146.1' # rubocop:todo Gemfile/MissingFeatureCategory
gem 'aws-sdk-s3', '~> 1.147.0' # rubocop:todo Gemfile/MissingFeatureCategory
gem 'faraday_middleware-aws-sigv4', '~>0.3.0' # rubocop:todo Gemfile/MissingFeatureCategory
gem 'typhoeus', '~> 1.4.0' # Used with Elasticsearch to support http keep-alive connections # rubocop:todo Gemfile/MissingFeatureCategory

View File

@ -36,9 +36,9 @@
{"name":"aws-eventstream","version":"1.3.0","platform":"ruby","checksum":"f1434cc03ab2248756eb02cfa45e900e59a061d7fbdc4a9fd82a5dd23d796d3f"},
{"name":"aws-partitions","version":"1.877.0","platform":"ruby","checksum":"9552ed7bbd3700ed1eeb0121c160ceaf64fa5dbaff5a1ff5fe6fd8481ecd9cfd"},
{"name":"aws-sdk-cloudformation","version":"1.41.0","platform":"ruby","checksum":"31e47539719734413671edf9b1a31f8673fbf9688549f50c41affabbcb1c6b26"},
{"name":"aws-sdk-core","version":"3.191.6","platform":"ruby","checksum":"d3afd9e59992b84e3fe1a6c8e5ec07e5103d4d89448672dea44e09dbfa550922"},
{"name":"aws-sdk-core","version":"3.192.0","platform":"ruby","checksum":"106b391d232f64475e048ce35d54ab22487744e2e78d6256905689ed83cca3b2"},
{"name":"aws-sdk-kms","version":"1.76.0","platform":"ruby","checksum":"e7f75013cba9ba357144f66bbc600631c192e2cda9dd572794be239654e2cf49"},
{"name":"aws-sdk-s3","version":"1.146.1","platform":"ruby","checksum":"384f95e11fb543fbb7446f319fb37b1d035720a2011f90ade33c75f2e10588aa"},
{"name":"aws-sdk-s3","version":"1.147.0","platform":"ruby","checksum":"367753c25d98b8019932d403d3ec56def51e70be917a2612d762a369ca167c45"},
{"name":"aws-sigv4","version":"1.8.0","platform":"ruby","checksum":"84dd99768b91b93b63d1d8e53ee837cfd06ab402812772a7899a78f9f9117cbc"},
{"name":"axe-core-api","version":"4.8.0","platform":"ruby","checksum":"88cf44fdbd5d501ae429f9ca6b37c4a46ba27ac673d478ab688eea3e353da62f"},
{"name":"axe-core-rspec","version":"4.9.0","platform":"ruby","checksum":"e5f81fa55af0c421254c98476511c4511e193c5659996f184541f74a1359df3a"},

View File

@ -311,7 +311,7 @@ GEM
aws-sdk-cloudformation (1.41.0)
aws-sdk-core (~> 3, >= 3.99.0)
aws-sigv4 (~> 1.1)
aws-sdk-core (3.191.6)
aws-sdk-core (3.192.0)
aws-eventstream (~> 1, >= 1.3.0)
aws-partitions (~> 1, >= 1.651.0)
aws-sigv4 (~> 1.8)
@ -319,8 +319,8 @@ GEM
aws-sdk-kms (1.76.0)
aws-sdk-core (~> 3, >= 3.188.0)
aws-sigv4 (~> 1.1)
aws-sdk-s3 (1.146.1)
aws-sdk-core (~> 3, >= 3.191.0)
aws-sdk-s3 (1.147.0)
aws-sdk-core (~> 3, >= 3.192.0)
aws-sdk-kms (~> 1)
aws-sigv4 (~> 1.8)
aws-sigv4 (1.8.0)
@ -1837,8 +1837,8 @@ DEPENDENCIES
attr_encrypted (~> 3.2.4)!
awesome_print
aws-sdk-cloudformation (~> 1)
aws-sdk-core (~> 3.191.6)
aws-sdk-s3 (~> 1.146.1)
aws-sdk-core (~> 3.192.0)
aws-sdk-s3 (~> 1.147.0)
axe-core-rspec (~> 4.9.0)
babosa (~> 2.0)
base32 (~> 0.3.0)

View File

@ -26,6 +26,7 @@ const PERSISTENT_USER_CALLOUTS = [
'.js-joining-a-project-alert',
'.js-duo-pro-trial-alert',
'.js-duo-chat-ga-alert',
'.js-all-seats-used',
];
const initCallouts = () => {

View File

@ -241,7 +241,7 @@ export default {
<gl-button
v-gl-tooltip
:class="{ 'gl-visibility-visible': showRemove }"
class="gl-visibility-hidden gl-mt-n2"
class="gl-visibility-hidden gl-mt-n2 gl-mr-n2"
category="tertiary"
size="small"
icon="close"

View File

@ -270,7 +270,7 @@ export default {
</gl-disclosure-dropdown>
</template>
<template #body>
<div class="gl-new-card-content">
<div class="gl-new-card-content gl-px-0">
<gl-loading-icon v-if="isLoading" color="dark" class="gl-my-2" />
<template v-else>
<div v-if="isChildrenEmpty && !isShownAddForm && !error" data-testid="links-empty">

View File

@ -182,7 +182,7 @@ export default {
<work-item-actions-split-button v-if="canUpdate" :actions="addItemsActions" />
</template>
<template #body>
<div class="gl-new-card-content">
<div class="gl-new-card-content gl-px-0">
<div v-if="!isShownAddForm && children.length === 0" data-testid="tree-empty">
<p class="gl-new-card-empty">
{{ $options.WORK_ITEMS_TREE_TEXT_MAP[workItemType].empty }}

View File

@ -36,7 +36,7 @@ export default {
<h4
v-if="heading"
data-testid="work-items-list-heading"
class="gl-font-sm gl-font-weight-semibold gl-text-gray-700 gl-mt-3 gl-mb-0!"
class="gl-font-sm gl-font-weight-semibold gl-text-gray-700 gl-mt-3 gl-mb-2 gl-ml-3"
>
{{ heading }}
</h4>

View File

@ -245,7 +245,7 @@ export default {
</gl-button>
</template>
<template #body>
<div class="gl-new-card-content">
<div class="gl-new-card-content gl-px-0">
<work-item-add-relationship-form
v-if="isShownLinkItemForm"
:work-item-id="workItemId"
@ -270,7 +270,7 @@ export default {
<work-item-relationship-list
v-if="linksBlocks.length"
:class="{
'gl-pb-3 gl-border-b-1 gl-border-b-solid gl-border-b-gray-100':
'gl-pb-3 gl-mb-5 gl-border-b-1 gl-border-b-solid gl-border-b-gray-100':
linksIsBlockedBy.length,
}"
:linked-items="linksBlocks"
@ -290,7 +290,7 @@ export default {
<work-item-relationship-list
v-if="linksIsBlockedBy.length"
:class="{
'gl-pb-3 gl-border-b-1 gl-border-b-solid gl-border-b-gray-100':
'gl-pb-3 gl-mb-5 gl-border-b-1 gl-border-b-solid gl-border-b-gray-100':
linksRelatesTo.length,
}"
:linked-items="linksIsBlockedBy"

View File

@ -30,7 +30,8 @@ module Users
project_repository_limit_alert_warning_threshold: 20, # EE-only
project_repository_limit_alert_alert_threshold: 21, # EE-only
project_repository_limit_alert_error_threshold: 22, # EE-only
namespace_over_storage_users_combined_alert: 23 # EE-only
namespace_over_storage_users_combined_alert: 23, # EE-only
all_seats_used_alert: 24 # EE-only
}
validates :group, presence: true

View File

@ -287,7 +287,6 @@ class WebHookService
def request_payload
return data unless hook.custom_webhook_template.present?
return data unless Feature.enabled?(:custom_webhook_template, hook.parent, type: :beta)
start_time = Gitlab::Metrics::System.monotonic_time
rendered_template = render_custom_template(hook.custom_webhook_template, data.deep_stringify_keys)

View File

@ -1,6 +1,7 @@
.layout-page{ class: page_with_sidebar_class }
-# Render the parent group sidebar while creating a new subgroup/project, see GroupsController#new.
- group = @parent_group || @group
- context = group || @project
- sidebar_panel = super_sidebar_nav_panel(nav: nav, user: current_user, group: group, project: @project, current_ref: current_ref, ref_type: @ref_type, viewed_user: @user, organization: @organization)
- sidebar_data = super_sidebar_context(current_user, group: group, project: @project, panel: sidebar_panel, panel_type: nav).to_json
@ -28,6 +29,7 @@
= dispensable_render "shared/service_ping_consent"
= dispensable_render_if_exists "layouts/header/ee_subscribable_banner"
= dispensable_render_if_exists "layouts/header/seat_count_alert"
= dispensable_render_if_exists "layouts/header/all_seats_used_alert", context: context
= dispensable_render_if_exists "shared/namespace_user_cap_reached_alert"
= dispensable_render_if_exists "shared/new_user_signups_cap_reached_alert"
= dispensable_render_if_exists "shared/silent_mode_banner"

View File

@ -83,12 +83,11 @@
help_text: s_('Webhooks|An access token is going to expire in the next 7 days. %{help_link}?').html_safe % { help_link: access_token_help_link }
- if Feature.enabled?(:custom_webhook_template, hook.parent, type: :beta)
.form-group
= form.label :custom_webhook_template, s_('Webhooks|Custom webhook template (optional)'), class: 'label-bold'
= form.text_area :custom_webhook_template, value: hook.custom_webhook_template, class: 'form-control gl-form-input gl-form-input-xl', rows: 8, maxlength: 4096
%p.form-text.text-muted
= link_to s_('Webhooks|How to create a custom webhook template?'), help_page_path('user/project/integrations/webhooks', anchor: 'custom-webhook-template')
.form-group
= form.label :custom_webhook_template, s_('Webhooks|Custom webhook template (optional)'), class: 'label-bold'
= form.text_area :custom_webhook_template, value: hook.custom_webhook_template, class: 'form-control gl-form-input gl-form-input-xl', rows: 8, maxlength: 4096
%p.form-text.text-muted
= link_to s_('Webhooks|How to create a custom webhook template?'), help_page_path('user/project/integrations/webhooks', anchor: 'custom-webhook-template')
.form-group
= form.label :enable_ssl_verification, s_('Webhooks|SSL verification'), class: 'label-bold checkbox'

View File

@ -1,9 +0,0 @@
---
name: custom_webhook_template
feature_issue_url: https://gitlab.com/gitlab-org/gitlab/-/issues/362504
introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/142738
rollout_issue_url: https://gitlab.com/gitlab-org/gitlab/-/issues/439610
milestone: '16.10'
group: group::import and integrate
type: beta
default_enabled: true

View File

@ -247,7 +247,7 @@ In some circumstances, like during [upgrades](replication/upgrading_the_geo_site
[planned failover](disaster_recovery/planned_failover.md), it is desirable to pause replication between the primary and secondary.
If you plan to allow user activity on your secondary sites during the upgrade,
do not pause replication for a [zero downtime upgrade](../../update/zero_downtime.md). While paused, the secondary site gets more and more out-of-date.
do not pause replication for a [zero-downtime upgrade](../../update/zero_downtime.md). While paused, the secondary site gets more and more out-of-date.
One known effect is that more and more Git fetches get redirected or proxied to the primary site. There may be additional unknown effects.
Pausing and resuming replication is done through a command-line tool from a specific node in the secondary site. Depending on your database architecture,

View File

@ -24,7 +24,7 @@ The following are GitLab upgrade validation tests we performed.
[Upgrade Geo multi-node installation](https://gitlab.com/gitlab-org/gitlab/-/issues/225359):
- Description: Tested upgrading from GitLab 12.10.12 to 13.0.10 package in a multi-node
configuration. As part of the issue to [Fix zero-downtime upgrade process/instructions for multi-node Geo deployments](https://gitlab.com/gitlab-org/gitlab/-/issues/22568), we monitored for downtime using the looping pipeline, HAProxy stats dashboards, and a script to log readiness status on both nodes.
configuration. As part of the issue to [Fix zero-downtime upgrade process/instructions for multi-node Geo deployments](https://gitlab.com/gitlab-org/gitlab/-/issues/225684), we monitored for downtime using the looping pipeline, HAProxy stats dashboards, and a script to log readiness status on both nodes.
- Outcome: Partial success because we observed downtime during the upgrade of the primary and secondary sites.
- Follow up issues/actions:
- [Investigate why `reconfigure` and `hup` cause downtime on multi-node Geo deployments](https://gitlab.com/gitlab-org/gitlab/-/issues/228898)

View File

@ -28,7 +28,7 @@ Upgrading Geo sites involves performing:
NOTE:
These general upgrade steps require downtime in a multi-node setup.
If you want to avoid downtime, consider using [zero downtime upgrades](../../../update/zero_downtime.md#multi-node--ha-deployment-with-geo).
If you want to avoid downtime, consider using [zero-downtime upgrades](../../../update/zero_downtime.md#multi-node--ha-deployment-with-geo).
To upgrade the Geo sites when a new GitLab version is released, upgrade **primary**
and all **secondary** sites:

View File

@ -529,7 +529,7 @@ For more information on configuring Gitaly Cluster, see [Configure Gitaly Cluste
### Upgrade Gitaly Cluster
To upgrade a Gitaly Cluster, follow the documentation for
[zero downtime upgrades](../../update/zero_downtime.md).
[zero-downtime upgrades](../../update/zero_downtime.md).
### Downgrade Gitaly Cluster to a previous version

View File

@ -1294,7 +1294,7 @@ itself on the system so that the `gitlab-ctl` command can bring the registry ser
Also, there's no way to save progress or results during the mark phase of the process. Only once
blobs start being deleted is anything permanent done.
### Continuous Zero Downtime Garbage Collection
### Continuous Zero-Downtime Garbage Collection
DETAILS:
**Status:** Beta

View File

@ -1087,7 +1087,7 @@ Reverting the PostgreSQL upgrade with `gitlab-ctl revert-pg-upgrade` has the sam
`gitlab-ctl pg-upgrade`. You should follow the same procedure by first stopping the replicas,
then reverting the leader, and finally reverting the replicas.
### Near zero downtime upgrade of PostgreSQL in a Patroni cluster
### Near-zero-downtime upgrade of PostgreSQL in a Patroni cluster
DETAILS:
**Status:** Experiment
@ -1126,7 +1126,7 @@ cluster.
#### Preflight check
We rely on PostgreSQL [logical replication](https://www.postgresql.org/docs/current/logical-replication.html)
to support near-zero downtime upgrades of Patroni clusters. The of
to support near-zero-downtime upgrades of Patroni clusters. The
[logical replication requirements](https://www.postgresql.org/docs/current/logical-replication-restrictions.html)
must be met. In particular, `wal_level` must be `logical`. To check the `wal_level`,
run the following command with `gitlab-psql` on any node of the existing cluster:
@ -1137,7 +1137,7 @@ SHOW wal_level;
By default, Patroni sets `wal_level` to `replica`. You must increase it to `logical`.
Changing `wal_level` requires restarting PostgreSQL, so this step leads to a short
downtime (hence near-zero downtime). To do this on the Patroni **leader** node:
downtime (hence near-zero-downtime). To do this on the Patroni **leader** node:
1. Edit `gitlab.rb` by setting:

View File

@ -111,9 +111,9 @@ In general then, we'd only recommend you employ HA in the following scenarios:
If you still need to have HA for a lower number of users, this can be achieved with an adjusted [3K architecture](3k_users.md#supported-modifications-for-lower-user-counts-ha).
#### Zero Downtime Upgrades
#### Zero-Downtime Upgrades
[Zero Downtime Upgrades](../../update/zero_downtime.md) are available for standard Reference Architecture environments with HA (Cloud Native Hybrid is [not supported](https://gitlab.com/groups/gitlab-org/cloud-native/-/epics/52)). This allows for an environment to stay up during an upgrade, but the process is more complex as a result and has some limitations as detailed in the documentation.
[Zero-Downtime Upgrades](../../update/zero_downtime.md) are available for standard Reference Architecture environments with HA (Cloud Native Hybrid is [not supported](https://gitlab.com/groups/gitlab-org/cloud-native/-/epics/52)). This allows for an environment to stay up during an upgrade, but the process is more complex as a result and has some limitations as detailed in the documentation.
When going through this process it's worth noting that there may still be brief moments of downtime when the HA mechanisms take effect.

View File

@ -9138,7 +9138,6 @@ Input type: `VulnerabilityCreateInput`
| Name | Type | Description |
| ---- | ---- | ----------- |
| <a id="mutationvulnerabilitycreateclientmutationid"></a>`clientMutationId` | [`String`](#string) | A unique identifier for the client performing the mutation. |
| <a id="mutationvulnerabilitycreateconfidence"></a>`confidence` **{warning-solid}** | [`VulnerabilityConfidence`](#vulnerabilityconfidence) | **Deprecated:** This field will be removed from the Vulnerability domain model. Deprecated in GitLab 15.4. |
| <a id="mutationvulnerabilitycreateconfirmedat"></a>`confirmedAt` | [`Time`](#time) | Timestamp of when the vulnerability state changed to confirmed (defaults to creation time if status is `confirmed`). |
| <a id="mutationvulnerabilitycreatedescription"></a>`description` | [`String!`](#string) | Long text section that describes the vulnerability in more detail. |
| <a id="mutationvulnerabilitycreatedetectedat"></a>`detectedAt` | [`Time`](#time) | Timestamp of when the vulnerability was first detected (defaults to creation time). |
@ -34248,20 +34247,6 @@ Determines whether the pipeline list shows ID or IID.
| <a id="visibilityscopesenumprivate"></a>`private` | Snippet is visible only to the snippet creator. |
| <a id="visibilityscopesenumpublic"></a>`public` | Snippet can be accessed without any authentication. |
### `VulnerabilityConfidence`
Confidence that a given vulnerability is present in the codebase.
| Value | Description |
| ----- | ----------- |
| <a id="vulnerabilityconfidenceconfirmed"></a>`CONFIRMED` | Confirmed confidence. |
| <a id="vulnerabilityconfidenceexperimental"></a>`EXPERIMENTAL` | Experimental confidence. |
| <a id="vulnerabilityconfidencehigh"></a>`HIGH` | High confidence. |
| <a id="vulnerabilityconfidenceignore"></a>`IGNORE` | Ignore confidence. |
| <a id="vulnerabilityconfidencelow"></a>`LOW` | Low confidence. |
| <a id="vulnerabilityconfidencemedium"></a>`MEDIUM` | Medium confidence. |
| <a id="vulnerabilityconfidenceunknown"></a>`UNKNOWN` | Unknown confidence. |
### `VulnerabilityDismissalReason`
The dismissal reason of the Vulnerability.

View File

@ -16,7 +16,7 @@ we can document the reasons for not choosing this approach.
# Cells: Schema changes
When we introduce multiple Cells that own their own databases this will complicate the process of making schema changes to Postgres and Elasticsearch.
Today we already need to be careful to make changes comply with our zero downtime deployments.
Today we already need to be careful to make changes comply with our zero-downtime deployments.
For example, [when removing a column we need to make changes over 3 separate deployments](../../../../development/database/avoiding_downtime_in_migrations.md#dropping-columns).
We have tooling like `post_migrate` that helps with these kinds of changes to reduce the number of merge requests needed, but these will be complicated when we are dealing with deploying multiple Rails applications that will be at different versions at any one time.
This problem will be particularly tricky to solve for shared databases like our plan to share the `users` related tables among all Cells.

View File

@ -166,7 +166,7 @@ Before we can integrate Secondary Cells to our deployment pipeline, we need a fe
- This is required for appropriate testing. As noted below, we'll need a QA cell to direct a deployment to for which QA will execute tests against. A router will need to route QA tests to the appropriate Cell.
1. Assets Deployment
- This already exists today for .com. Today this is handled via HAProxy, but with Cells, the routing layer will become the responsible party to redirect assets in a similar fashion.
- If assets are chosen to be managed differently, this changes both how Delivery need to deploy said assets in order to provide as close to Zero Downtime Upgrades as possible, and configuration to the Cell installation to support routing to assets properly.
- If assets are chosen to be managed differently, this changes both how Delivery need to deploy said assets in order to provide as close to Zero-Downtime Upgrades as possible, and configuration to the Cell installation to support routing to assets properly.
1. Feature Flags
- We are assuming that the current Feature Flags workflows and tooling will just work on the Primary Cell and that Secondary Cells will not be affected.
- The use of feature flags to mitigate incidents is limited to only the Primary Cell.

View File

@ -459,7 +459,6 @@ The table below is a comparison between the existing GitLab.com features, and no
| Organization Switching | A user belongs to a single organization. |
| Shared user accounts across Cells | Users will need to have new user accounts on each Cell for now |
| GitLab Duo Pro license works across all projects on instance | GitLab Duo Pro licenses, once granted, [should allow users to use GitLab Duo Pro on all projects on the instance](https://gitlab.com/gitlab-org/gitlab/-/issues/441244). With Cells 1.0, this will only work within their own cell. |
| Shared user accounts across Cells | Users needs to have new user accounts on each Cell for now |
| User removal | Users can only be part of one Organization. A removal would equal a deletion in this case, so only user deletions will be offered in Organizations on Cells 1.0. Upon removal, there would be no way for a User to discover another Organization to join, as they are private for Cells 1.0. |
| Windows and Mac OS Runners | Mac and Windows runners are still in beta and there are some more complex technical considerations related to cost. See the discussion: [#434982 (comment 1789275416)](https://gitlab.com/gitlab-org/gitlab/-/issues/434982#note_1789275416) on sharing resources. |
| Multiple Sizes for Linux Runners | We will only support [small linux runners on Cells 1.0](https://gitlab.com/gitlab-org/gitlab/-/issues/434982#note_1806447839). |

View File

@ -208,7 +208,7 @@ It should execute the `list` task we defined earlier, which connects to the serv
Envoy is not a dependency of Laravel, therefore you can use it for any PHP application.
### Zero downtime deployment
### Zero-downtime deployment
Every time we deploy to the production server, Envoy downloads the latest release of our app from the GitLab repository and replaces it with the previous release.
Envoy does this without any [downtime](https://en.wikipedia.org/wiki/Downtime),

View File

@ -10,7 +10,8 @@ DETAILS:
**Tier:** Free, Premium, Ultimate
**Offering:** GitLab.com, Self-managed, GitLab Dedicated
Pipeline configuration begins with jobs. Jobs are the most fundamental element of a `.gitlab-ci.yml` file.
Jobs are fundamental elements of a GitLab CI/CD pipeline. Jobs are configured in the `.gitlab-ci.yml` file
with a list of commands to run to accomplish tasks like building, testing, or deploying code.
Jobs are:

View File

@ -116,7 +116,7 @@ Example:
- Searches can have their own analyzers. Remember to check when editing analyzers.
- `Character` filters (as opposed to token filters) always replace the original character. These filters can hinder exact searches.
## Zero downtime reindexing with multiple indices
## Zero-downtime reindexing with multiple indices
NOTE:
This is not applicable yet as multiple indices functionality is not fully implemented.

View File

@ -66,7 +66,7 @@ migrations, or code changes that assume that schema changes introduced in
prior migrations have completed by the time the code loads.
Designing changes and migrations for [backwards compatibility between versions](multi_version_compatibility.md) can mitigate stop concerns with continuous or
"zero-downtime" upgrades. However, the **contract** phase will likely introduce
zero-downtime upgrades. However, the **contract** phase will likely introduce
a required stop when a migration/code change is introduced that requires
that background migrations have completed before running or loading.

View File

@ -175,7 +175,7 @@ guessing query performance is a hard task.
If you are concerned about the performance of a query on self-managed instances
and decide that self-managed instances must have an index, follow these recommendations:
- For self-managed instances following [zero downtime](../../update/zero_downtime.md)
- For self-managed instances following [zero-downtime](../../update/zero_downtime.md)
upgrades, post-deploy migrations execute when performing an upgrade after the application code deploys.
- For self-managed instances that do not follow a zero-downtime upgrade,
the administrator might choose to execute the post-deployment migrations for a release later,

View File

@ -148,7 +148,7 @@ As an example, when adding a new feature with frontend and API changes, it may b
### Expand and contract pattern
One way to guarantee zero downtime updates for on-premise instances is following the
One way to guarantee zero-downtime updates for on-premise instances is following the
[expand and contract pattern](https://martinfowler.com/bliki/ParallelChange.html).
This means that every breaking change is broken down in three phases: expand, migrate, and contract.
@ -157,7 +157,7 @@ This means that every breaking change is broken down in three phases: expand, mi
1. **migrate**: all consumers are updated to make use of the new implementation.
1. **contract**: backward compatibility is removed.
Those three phases **must be part of different milestones**, to allow zero downtime updates.
Those three phases **must be part of different milestones**, to allow zero-downtime updates.
Depending on the support level for the feature, the contract phase could be delayed until the next major release.

View File

@ -420,7 +420,7 @@ To enable languages support:
1. Locate **Custom analyzers: language support**.
1. Enable plugins support for **Indexing**.
1. Select **Save changes** for the changes to take effect.
1. Trigger [Zero downtime reindexing](#zero-downtime-reindexing) or reindex everything from scratch to create a new index with updated mappings.
1. Trigger [zero-downtime reindexing](#zero-downtime-reindexing) or reindex everything from scratch to create a new index with updated mappings.
1. Enable plugins support for **Searching** after the previous step is completed.
For guidance on what to install, see the following Elasticsearch language plugin options:

View File

@ -80,7 +80,7 @@ the tiers are no longer mentioned in GitLab documentation:
- Rake tasks:
- [Displaying GitLab license information](../administration/raketasks/maintenance.md#show-gitlab-license-information)
- Reference Architecture information:
- [Zero downtime upgrades](../administration/reference_architectures/index.md#zero-downtime-upgrades)
- [Zero-downtime upgrades](../administration/reference_architectures/index.md#zero-downtime-upgrades)
- Repositories:
- [Repository size limit](../administration/settings/account_and_limit_settings.md#repository-size-limit)
- Repository mirroring:

View File

@ -57,7 +57,7 @@ Use these variables to customize and deploy your build.
| `KUBE_INGRESS_BASE_DOMAIN` | Can be used to set a domain per cluster. See [cluster domains](../../user/project/clusters/gitlab_managed_clusters.md#base-domain) for more information. |
| `KUBE_NAMESPACE` | The namespace used for deployments. When using certificate-based clusters, [this value should not be overwritten directly](../../user/project/clusters/deploy_to_cluster.md#custom-namespace). |
| `KUBECONFIG` | The kubeconfig to use for deployments. User-provided values take priority over GitLab-provided values. |
| `PRODUCTION_REPLICAS` | Number of replicas to deploy in the production environment. Takes precedence over `REPLICAS` and defaults to 1. For zero downtime upgrades, set to 2 or greater. |
| `PRODUCTION_REPLICAS` | Number of replicas to deploy in the production environment. Takes precedence over `REPLICAS` and defaults to 1. For zero-downtime upgrades, set to 2 or greater. |
| `REPLICAS` | Number of replicas to deploy. Defaults to 1. Change this variable instead of [modifying](customize.md#customize-helm-chart-values) `replicaCount`. |
| `ROLLOUT_RESOURCE_TYPE` | Allows specification of the resource type being deployed when using a custom Helm chart. Default value is `deployment`. |
| `ROLLOUT_STATUS_DISABLED` | Used to disable rollout status check because it does not support all resource types, for example, `cronjob`. |

View File

@ -34,7 +34,7 @@ GitLab package.
- For single node installations, GitLab is not available to users while an
upgrade is in progress. The user's web browser shows a `Deploy in progress` message or a `502` error.
- For multi-node installations, see how to perform
[zero downtime upgrades](../zero_downtime.md).
[zero-downtime upgrades](../zero_downtime.md).
- Upgrades to multi-node installations can also be performed
[with downtime](../with_downtime.md).

View File

@ -333,7 +333,7 @@ sudo -u git -H bundle exec rake "gitlab:workhorse:install[/home/git/gitlab-workh
### 13. Update Gitaly
If Gitaly is located on its own server, or you use Gitaly Cluster, see [Zero Downtime upgrades](zero_downtime.md).
If Gitaly is located on its own server, or you use Gitaly Cluster, see [Zero-downtime upgrades](zero_downtime.md).
#### Compile Gitaly

View File

@ -208,7 +208,7 @@ DETAILS:
- In GitLab 14.8, we are upgrading Redis from 6.0.16 to 6.2.6. This upgrade is expected to be fully backwards compatible.
Follow [the zero downtime instructions](../zero_downtime.md) for upgrading your Redis HA cluster.
Follow [the zero-downtime instructions](../zero_downtime.md) for upgrading your Redis HA cluster.
### Geo installations

View File

@ -881,7 +881,7 @@ Specific information applies to Linux package installations:
mentioning that the installed Redis version is different than the one running is
displayed at the end of reconfigure run until the restart is performed.
Follow [the zero downtime instructions](../zero_downtime.md) for upgrading your Redis HA cluster.
Follow [the zero-downtime instructions](../zero_downtime.md) for upgrading your Redis HA cluster.
### Self-compiled installations

View File

@ -23,7 +23,7 @@ relevant to your [upgrade path](index.md#upgrade-paths).
For a single node installation, you must only [upgrade the GitLab package](package/index.md).
The process for upgrading a number of components of a multi-node GitLab
installation is the same as for zero downtime upgrades.
installation is the same as for zero-downtime upgrades.
The differences relate to the servers running Rails (Puma/Sidekiq) and
the order of events.
@ -94,7 +94,7 @@ following principles when upgrading those servers:
## Upgrade the Gitaly nodes (Praefect / Gitaly Cluster)
If you're running Gitaly cluster, follow the [zero downtime process](zero_downtime.md)
If you're running Gitaly cluster, follow the [zero-downtime process](zero_downtime.md)
for Gitaly Cluster.
If you are using Amazon Machine Images (AMIs) on AWS, you can either upgrade the Gitaly nodes
@ -199,7 +199,7 @@ DETAILS:
**Tier:** Premium, Ultimate
**Offering:** Self-managed
Follow [the zero downtime instructions](zero_downtime.md)
Follow [the zero-downtime instructions](zero_downtime.md)
for upgrading your Redis HA cluster.
## Upgrade the Rails components

View File

@ -4,13 +4,13 @@ group: Distribution
info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments
---
# Zero downtime upgrades
# Zero-downtime upgrades
DETAILS:
**Tier:** Free, Premium, Ultimate
**Offering:** Self-managed
With Zero downtime upgrades, it's possible to upgrade a live GitLab environment without having to
With zero-downtime upgrades, it's possible to upgrade a live GitLab environment without having to
take it offline. This guide will take you through the core process of performing
such an upgrade.
@ -22,7 +22,7 @@ or management of third party services, such as AWS RDS, please refer to the resp
## Before you start
Achieving _true_ Zero Downtime as part of an upgrade is notably difficult for any distributed application. The process detailed in
Achieving _true_ zero downtime as part of an upgrade is notably difficult for any distributed application. The process detailed in
this guide has been tested as given against our HA [Reference Architectures](../administration/reference_architectures/index.md)
and was found to result in effectively no observable downtime, but please be aware your mileage may vary depending on the specific system makeup.
@ -34,16 +34,16 @@ or the [Support team](https://about.gitlab.com/support/).
## Requirements and considerations
The Zero downtime upgrade process has the following requirements:
The zero-downtime upgrade process has the following requirements:
- Zero downtime upgrades are only supported on multi-node GitLab environments built with the Linux package that have Load Balancing and HA mechanisms configured as follows:
- Zero-downtime upgrades are only supported on multi-node GitLab environments built with the Linux package that have Load Balancing and HA mechanisms configured as follows:
- External Load Balancer configured for Rails nodes with health checks enabled against the [Readiness](../administration/monitoring/health_check.md#readiness) (`/-/readiness`) endpoint.
- Internal Load Balancer configured for any PgBouncer and Praefect components with TCP health checks enabled.
- HA mechanisms configured for the Consul, Postgres and Redis components if present.
- Any of these components that are not deployed in an HA fashion will need to be upgraded separately with downtime.
- **You can only upgrade one minor release at a time**. So from `16.1` to `16.2`, not to `16.3`. If you skip releases, database modifications may be run in the wrong sequence [and leave the database schema in a broken state](https://gitlab.com/gitlab-org/gitlab/-/issues/321542).
- You have to use [post-deployment migrations](../development/database/post_deployment_migrations.md).
- [Zero Downtime Upgrades are not available with the GitLab Charts](https://docs.gitlab.com/charts/installation/upgrade.html). This in turn means this type of upgrade is not available for Cloud Native Hybrid environments.
- [Zero-downtime upgrades are not available with the GitLab Charts](https://docs.gitlab.com/charts/installation/upgrade.html). This in turn means this type of upgrade is not available for Cloud Native Hybrid environments.
In addition to the above, please be aware of the following considerations:
@ -52,8 +52,8 @@ In addition to the above, please be aware of the following considerations:
- Certain major or minor releases may require a set of background migrations to be finished. While this doesn't require downtime (if the above conditions are met), it's required that you [wait for background migrations to complete](index.md#check-for-background-migrations-before-upgrading) between each major or minor release upgrade.
- The time necessary to complete these migrations can be reduced by increasing the number of Sidekiq workers that can process jobs in the
`background_migration` queue. To see the size of this queue, [check for background migrations before upgrading](index.md#check-for-background-migrations-before-upgrading).
- [PostgreSQL major version upgrades](../administration/postgresql/replication_and_failover.md#near-zero-downtime-upgrade-of-postgresql-in-a-patroni-cluster) are a separate process and not covered by Zero Downtime upgrades (smaller upgrades are covered).
- Zero Downtime Upgrades are supported for any GitLab components you've deployed with the GitLab Linux package. If you've deployed select components via a supported third party service, such as PostgreSQL in AWS RDS or Redis in GCP Memorystore, upgrades for those services will need to be performed separately as per their standard processes.
- [PostgreSQL major version upgrades](../administration/postgresql/replication_and_failover.md#near-zero-downtime-upgrade-of-postgresql-in-a-patroni-cluster) are a separate process and not covered by zero-downtime upgrades (smaller upgrades are covered).
- Zero-downtime upgrades are supported for any GitLab components you've deployed with the GitLab Linux package. If you've deployed select components via a supported third-party service, such as PostgreSQL in AWS RDS or Redis in GCP Memorystore, upgrades for those services will need to be performed separately as per their standard processes.
- As a general guideline, the larger amount of data you have, the more time it will take for the upgrade to complete. In testing, any database smaller than 10 GB shouldn't generally take longer than an hour, but your mileage may vary.
NOTE:
@ -61,7 +61,7 @@ If you want to upgrade multiple releases or do not meet these requirements [upgr
## Upgrade order
We recommend a "back to front" approach for the order of what components to upgrade with Zero Downtime.
We recommend a "back to front" approach for the order of what components to upgrade with zero downtime.
Generally, this would be stateful backends first, their dependents next, and then the frontends accordingly.
While the order of deployment can be changed, it is best to deploy the components running GitLab application code (Rails, Sidekiq) together. If possible, upgrade the supporting infrastructure (PostgreSQL, PgBouncer, Consul, Gitaly, Praefect, Redis) separately since these components do not have dependencies on changes made in version updates within a major release.
As such, we generally recommend the following order:

View File

@ -10,8 +10,11 @@ DETAILS:
**Tier:** Premium, Ultimate
**Offering:** GitLab.com, Self-managed, GitLab Dedicated
An epic in GitLab describes a large body of work that can be broken down into smaller parts.
Epics organize issues and features into a high-level theme or goal.
When [issues](../../project/issues/index.md) share a theme across projects and
milestones, you can manage them by using epics.
milestones, you can group them by using epics.
You can also create child epics and assign start and end dates, which creates
a visual roadmap for you to view progress.

View File

@ -15,8 +15,11 @@ DETAILS:
> - Moved to GitLab Premium in 13.9.
> - [Generally available](https://gitlab.com/gitlab-org/gitlab/-/issues/221047) in GitLab 14.6. [Feature flag `group_iterations`](https://gitlab.com/gitlab-org/gitlab/-/issues/221047) removed.
Iterations are a way to track issues over a period of time. This allows teams
to track velocity and volatility metrics. For tracking the same item over multiple concurrent periods, you can use iterations with [milestones](../../project/milestones/index.md).
An iteration in GitLab refers to a time-boxed workflow that groups issues to be worked on during
a specific period of time, usually lasting 1-3 weeks.
Teams can use iterations to track velocity and volatility metrics.
For tracking the same item over multiple concurrent periods, you can use iterations with [milestones](../../project/milestones/index.md).
Create and manage various [iteration cadences](#iteration-cadences) in a group.
For example, you can use:

View File

@ -131,11 +131,7 @@ Custom headers appear in [recent deliveries](#recently-triggered-webhook-payload
## Custom webhook template
> - [Introduced](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/142738) in GitLab 16.10 [with a flag](../../../administration/feature_flags.md) named `custom_webhook_template`. Enabled by default.
FLAG:
On self-managed GitLab, by default this feature is available. To hide the feature, an administrator can
[disable the feature flag](../../../administration/feature_flags.md) named `custom_webhook_template`.
On GitLab.com and GitLab Dedicated, this feature is available.
> - [Generally available](https://gitlab.com/gitlab-org/gitlab/-/issues/439610) in GitLab 17.0. Feature flag `custom_webhook_template` removed.
You can set a custom payload template in the webhook configuration. The request body is rendered from the template
with the data for the current event. The template must render as valid JSON.

View File

@ -10,17 +10,21 @@ DETAILS:
**Tier:** Free, Premium, Ultimate
**Offering:** GitLab.com, Self-managed, GitLab Dedicated
As your count of issues, merge requests, and epics grows in GitLab, it gets more challenging
to keep track of those items. Especially as your organization grows from just a few people to
Labels are a way to categorize and filter issues, merge requests, and epics in GitLab.
As your projects grow in GitLab, it gets more challenging to keep track of the work that's done.
Especially as your organization grows from just a few people to
hundreds or thousands. With labels, you can organize and tag your work, and track the work items
you're interested in.
Labels are a key part of [issue boards](issue_board.md). With labels you can:
Labels are a key part of [issue boards](issue_board.md).
- Categorize [epics](../group/epics/index.md), issues, and merge requests using colors and descriptive titles like
`bug`, `feature request`, or `docs`.
- Dynamically filter and manage [epics](../group/epics/index.md), issues, and merge requests.
- Search lists of issues, merge requests, and epics, as well as issue boards.
Use labels on [epics](../group/epics/index.md), issues, and merge requests to:
- Categorize items using colors and descriptive titles like `bug`, `feature request`, or `docs`.
- Dynamically filter and manage items to view work that's in progress or completed.
- Search lists and boards.
- Communicate priority or severity of items, using [scoped labels](#scoped-labels).
## Types of labels

View File

@ -14,23 +14,21 @@ DETAILS:
> - [Creating, editing, and deleting tasks](https://gitlab.com/groups/gitlab-org/-/epics/7169) introduced in GitLab 15.0.
> - [Enabled on GitLab.com and self-managed](https://gitlab.com/gitlab-org/gitlab/-/issues/334812) in GitLab 15.3.
Known limitation:
- [Tasks cannot be accessed via REST API.](https://gitlab.com/gitlab-org/gitlab/-/issues/368055)
For the latest updates, check the [Tasks Roadmap](https://gitlab.com/groups/gitlab-org/-/epics/7103).
FLAG:
On self-managed GitLab, by default this feature is available. To hide the feature,
an administrator can [disable the feature flags](../administration/feature_flags.md) named `work_items`.
On GitLab.com, this feature is available. On GitLab Dedicated, this feature is not available.
Use tasks to track steps needed for the [issue](project/issues/index.md) to be closed.
A task in GitLab is a planning item that can be created in an issue.
Use tasks to break down user stories captured in [issues](project/issues/index.md) into
smaller, trackable items.
When planning an issue, you need a way to capture and break down technical
requirements or steps necessary to complete it. An issue with related tasks is better defined,
and so you can provide a more accurate issue weight and completion criteria.
For the latest updates, check the [Tasks roadmap](https://gitlab.com/groups/gitlab-org/-/epics/7103).
Tasks are a type of work item, a step towards [default issue types](https://gitlab.com/gitlab-org/gitlab/-/issues/323404)
in GitLab.
For the roadmap of migrating issues and [epics](group/epics/index.md)

View File

@ -33989,6 +33989,9 @@ msgstr ""
msgid "No milestone"
msgstr ""
msgid "No more seats in subscription"
msgstr ""
msgid "No more than %{max_issues} issues can be updated at the same time"
msgstr ""
@ -59791,6 +59794,9 @@ msgstr ""
msgid "Your name"
msgstr ""
msgid "Your namespace has used all the seats in your subscription and users can no longer be invited or added to the namespace."
msgstr ""
msgid "Your namespace storage is full. This merge request cannot be merged. To continue, %{link_start}manage your storage usage%{link_end}."
msgstr ""

View File

@ -16,7 +16,7 @@ start = Process.clock_gettime(Process::CLOCK_MONOTONIC)
# Build an array of filename globs to process.
# Only search file types that might use or define a helper.
#
extensions = %w[rb haml erb].map { |ext| "{ee/,}app/**/*.#{ext}" }
extensions = %w[rb haml erb].flat_map { |ext| ["{ee/,}app/**/*.#{ext}", "{ee/,}lib/**/*.#{ext}"] }
# Build a hash of all the source files to search.
# Key is filename, value is an array of the lines.

View File

@ -5,7 +5,7 @@ exports[`WorkItemRelationshipList renders linked item list 1`] = `
data-testid="work-item-linked-items-list"
>
<h4
class="gl-font-sm gl-font-weight-semibold gl-mb-0! gl-mt-3 gl-text-gray-700"
class="gl-font-sm gl-font-weight-semibold gl-mb-2 gl-ml-3 gl-mt-3 gl-text-gray-700"
data-testid="work-items-list-heading"
>
Blocking

View File

@ -404,20 +404,6 @@ RSpec.describe WebHookService, :request_store, :clean_gitlab_redis_shared_state,
.once
end
end
context 'when feature flag is disabled' do
before do
stub_feature_flags(custom_webhook_template: false)
end
it 'does not render custom template', :aggregate_failures do
service_instance.execute
expect(WebMock).to have_requested(:post, stubbed_hostname(project_hook.url))
.with(headers: headers, body: '{"before":"oldrev","after":"newrev","ref":"ref"}')
.once
end
end
end
context 'when template is invalid' do

View File

@ -802,7 +802,6 @@
- './ee/spec/graphql/types/vulnerabilities/container_image_type_spec.rb'
- './ee/spec/graphql/types/vulnerabilities_count_by_day_type_spec.rb'
- './ee/spec/graphql/types/vulnerabilities/link_type_spec.rb'
- './ee/spec/graphql/types/vulnerability_confidence_enum_spec.rb'
- './ee/spec/graphql/types/vulnerability_details/base_type_spec.rb'
- './ee/spec/graphql/types/vulnerability_details/boolean_type_spec.rb'
- './ee/spec/graphql/types/vulnerability_details/code_type_spec.rb'