+ .project-code-holder.d-none.d-sm-inline-block
= render "projects/buttons/code", dropdown_class: 'dropdown-menu-right', ref: @ref
- .project-code-holder.d-block.d-sm-none.mt-sm-2.mt-md-0.ml-md-2>
+ .project-code-holder.gl-display-flex.gl-gap-3{ class: 'gl-sm-display-none!' }
= render 'projects/buttons/download', project: @project, ref: @ref
= render "shared/mobile_clone_panel", ref: @ref
diff --git a/app/views/shared/_web_ide_button.html.haml b/app/views/shared/_web_ide_button.html.haml
index 803f6f9efce..5f281946d3c 100644
--- a/app/views/shared/_web_ide_button.html.haml
+++ b/app/views/shared/_web_ide_button.html.haml
@@ -1,5 +1,6 @@
- type = blob ? 'blob' : 'tree'
- button_data = web_ide_button_data({ blob: blob })
- fork_options = fork_modal_options(@project, blob)
+- css_classes = false unless local_assigns[:css_classes]
-.gl-display-inline-block{ data: { options: button_data.merge(fork_options).to_json, web_ide_promo_popover_img: image_path('web-ide-promo-popover.svg') }, id: "js-#{type}-web-ide-link" }
+.gl-display-inline-block{ data: { options: button_data.merge(fork_options).to_json, web_ide_promo_popover_img: image_path('web-ide-promo-popover.svg'), css_classes: css_classes }, id: "js-#{type}-web-ide-link" }
diff --git a/config/vite.json b/config/vite.json
index 14b5da38ab2..178f978687f 100644
--- a/config/vite.json
+++ b/config/vite.json
@@ -1,6 +1,13 @@
{
"all": {
"sourceCodeDir": "app/assets",
+ "watchAdditionalPaths": [
+ "app/graphql/queries",
+ "app/assets",
+ "ee/app/assets",
+ "jh/app/assets",
+ "vendor/assets"
+ ],
"entrypointsDir": "javascripts/entrypoints",
"port": 3038,
"publicOutputDir": "vite-dev",
diff --git a/doc/administration/gitaly/concurrency_limiting.md b/doc/administration/gitaly/concurrency_limiting.md
index 7150883123c..321bb9efe20 100644
--- a/doc/administration/gitaly/concurrency_limiting.md
+++ b/doc/administration/gitaly/concurrency_limiting.md
@@ -163,7 +163,7 @@ The adaptive limiter calibrates the limits every 30 seconds and:
or CPU throttled for 50% or more of the observation time.
Otherwise, the limits increase by one until reaching the upper bound. For more information about technical implementation
-of this system, please refer to [this blueprint](../../architecture/blueprints/gitaly_adaptive_concurrency_limit/index.md).
+of this system, refer to [this blueprint](../../architecture/blueprints/gitaly_adaptive_concurrency_limit/index.md).
Adaptive limiting is enabled for each RPC or pack-objects cache individually. However, limits are calibrated at the same time.
diff --git a/doc/administration/postgresql/replication_and_failover.md b/doc/administration/postgresql/replication_and_failover.md
index 67037a1f02d..f4ed9d99b45 100644
--- a/doc/administration/postgresql/replication_and_failover.md
+++ b/doc/administration/postgresql/replication_and_failover.md
@@ -275,7 +275,7 @@ gitlab_rails['auto_migrate'] = false
consul['services'] = %w(postgresql)
# START user configuration
-# Please set the real values as explained in Required Information section
+# Set the real values as explained in the Required Information section
#
# Replace PGBOUNCER_PASSWORD_HASH with a generated md5 value
postgresql['pgbouncer_user_password'] = 'PGBOUNCER_PASSWORD_HASH'
@@ -427,7 +427,7 @@ authentication mode (`patroni['tls_client_mode']`), must each have the same valu
consul['watchers'] = %w(postgresql)
# START user configuration
- # Please set the real values as explained in Required Information section
+ # Set the real values as explained in the Required Information section
# Replace CONSUL_PASSWORD_HASH with with a generated md5 value
# Replace PGBOUNCER_PASSWORD_HASH with with a generated md5 value
pgbouncer['users'] = {
diff --git a/doc/administration/silent_mode/index.md b/doc/administration/silent_mode/index.md
index da9c930c86c..f72b715ac5e 100644
--- a/doc/administration/silent_mode/index.md
+++ b/doc/administration/silent_mode/index.md
@@ -91,7 +91,7 @@ Outbound communications from the following features are silenced by Silent Mode.
| [Executable integrations](../../user/project/integrations/index.md) | The integrations are not executed. |
| [Service Desk](../../user/project/service_desk/index.md) | Incoming emails still raise issues, but the users who sent the emails to Service Desk are not notified of issue creation or comments on their issues. |
| Outbound emails | |
-| Outbound HTTP requests | Many HTTP requests are blocked where features are not blocked or skipped explicitly. These may produce errors. If a particular error is problematic for testing during Silent Mode, please consult [GitLab Support](https://about.gitlab.com/support/). |
+| Outbound HTTP requests | Many HTTP requests are blocked where features are not blocked or skipped explicitly. These may produce errors. If a particular error is problematic for testing during Silent Mode, consult [GitLab Support](https://about.gitlab.com/support/). |
### Outbound communications that are not silenced
diff --git a/doc/api/group_milestones.md b/doc/api/group_milestones.md
index 2552626f6c6..57994e069a5 100644
--- a/doc/api/group_milestones.md
+++ b/doc/api/group_milestones.md
@@ -35,7 +35,8 @@ Parameters:
| `title` | string | no | Return only the milestones having the given `title` |
| `search` | string | no | Return only milestones with a title or description matching the provided string |
| `include_parent_milestones` | boolean | no | [Deprecated](https://gitlab.com/gitlab-org/gitlab/-/issues/433298) in GitLab 16.7. Use `include_ancestors` instead. |
-| `include_ancestors` | boolean | no | Include milestones from all parent groups. [Introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/196066) in GitLab 13.4. |
+| `include_ancestors` | boolean | no | Include milestones for all parent groups. |
+| `include_descendants` | boolean | no | Include milestones for the group and its descendants. [Introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/421030) in GitLab 16.7. |
| `updated_before` | datetime | no | Return only milestones updated before the given datetime. Expected in ISO 8601 format (`2019-03-15T08:00:00Z`). Introduced in GitLab 15.10 |
| `updated_after` | datetime | no | Return only milestones updated after the given datetime. Expected in ISO 8601 format (`2019-03-15T08:00:00Z`). Introduced in GitLab 15.10 |
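For illustration, a minimal sketch of how the new `include_descendants` parameter might be used when listing group milestones; the group ID `5` and the token placeholder are assumed values:

```shell
# List milestones for group 5 and its descendant groups.
curl --header "PRIVATE-TOKEN: <your_access_token>" \
  "https://gitlab.example.com/api/v4/groups/5/milestones?include_descendants=true"
```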
diff --git a/doc/api/oauth2.md b/doc/api/oauth2.md
index 2eea6cec6e2..4ef09868565 100644
--- a/doc/api/oauth2.md
+++ b/doc/api/oauth2.md
@@ -72,7 +72,7 @@ parameter, which are securely bound to the user agent", with each request to the
### Use HTTPS in production
-For production, please use HTTPS for your `redirect_uri`.
+For production, use HTTPS for your `redirect_uri`.
For development, GitLab allows insecure HTTP redirect URIs.
As OAuth 2.0 bases its security entirely on the transport layer, you should not use unprotected
diff --git a/doc/api/project_vulnerabilities.md b/doc/api/project_vulnerabilities.md
index 82217fc37c0..771ff4c2e8e 100644
--- a/doc/api/project_vulnerabilities.md
+++ b/doc/api/project_vulnerabilities.md
@@ -16,7 +16,7 @@ info: "To determine the technical writer assigned to the Stage/Group associated
WARNING:
This API is in the process of being deprecated and considered unstable.
The response payload may be subject to change or breakage
-across GitLab releases. Please use the
+across GitLab releases. Use the
[GraphQL API](graphql/reference/index.md#queryvulnerabilities)
instead.
diff --git a/doc/architecture/blueprints/_template.md b/doc/architecture/blueprints/_template.md
index 18f88322906..8577054db83 100644
--- a/doc/architecture/blueprints/_template.md
+++ b/doc/architecture/blueprints/_template.md
@@ -13,7 +13,7 @@ Before you start:
- Copy this file to a sub-directory and call it `index.md` for it to appear in
the blueprint directory.
-- Please remove comment blocks for sections you've filled in.
+- Remove comment blocks for sections you've filled in.
When your blueprint ready for review, all of these comment blocks should be
removed.
diff --git a/doc/architecture/blueprints/capacity_planning/index.md b/doc/architecture/blueprints/capacity_planning/index.md
index ed014f545f9..31740d50368 100644
--- a/doc/architecture/blueprints/capacity_planning/index.md
+++ b/doc/architecture/blueprints/capacity_planning/index.md
@@ -13,21 +13,27 @@ approvers: [ "@swiskow", "@rnienaber", "@o-lluch" ]
## Summary
-This document outlines how we plan to set up infrastructure capacity planning for GitLab Dedicated tenant environments, which is a [FY24-Q3 OKR](https://gitlab.com/gitlab-com/gitlab-OKRs/-/work_items/3507).
+This document outlines how we plan to set up infrastructure capacity planning for GitLab Dedicated tenant environments, which started as a [FY24-Q3 OKR](https://gitlab.com/gitlab-com/gitlab-OKRs/-/work_items/3507).
-We make use of Tamland, a tool we built to provide saturation forecasting insights for GitLab.com infrastructure resources. We propose to include Tamland as a part of the GitLab Dedicated stack and execute forecasting from within the tenant environments.
+We make use of [Tamland](https://gitlab.com/gitlab-com/gl-infra/tamland), a tool we built to provide saturation forecasting insights for GitLab.com infrastructure resources.
+We propose to include Tamland as a part of the GitLab Dedicated stack and execute forecasting from within the tenant environments.
-Tamland predicts SLO violations and their respective dates, which need to be reviewed and acted upon. In terms of team organisation, the Dedicated team is proposed to own the tenant-side setup for Tamland and to own the predicted SLO violations, with the help and guidance of the Scalability::Projections team, which drives further development, documentation and overall guidance for capacity planning, including for Dedicated.
+Tamland predicts SLO violations and their respective dates, which need to be reviewed and acted upon.
+In terms of team organisation, the Dedicated team is proposed to own the tenant-side setup for Tamland and to own the predicted SLO violations, with the help and guidance of the Scalability::Projections team, which drives further development, documentation and overall guidance for capacity planning, including for Dedicated.
-With this setup, we aim to turn Tamland into a more generic tool, which can be used in various environments including but not limited to Dedicated tenants. Long-term, we think of including Tamland in self-managed installations and think of Tamland as a candidate for open source release.
+With this setup, we aim to turn Tamland into a more generic tool, which can be used in various environments including but not limited to Dedicated tenants.
+Long-term, we think of including Tamland in self-managed installations and consider it a candidate for open source release.
## Motivation
### Background: Capacity planning for GitLab.com
-[Tamland](https://gitlab.com/gitlab-com/gl-infra/tamland) is an infrastructure resource forecasting project owned by the [Scalability::Projections](https://about.gitlab.com/handbook/engineering/infrastructure/team/scalability/projections.html) group. It implements [capacity planning](https://about.gitlab.com/handbook/engineering/infrastructure/capacity-planning/) for GitLab.com, which is a [controlled activity covered by SOC 2](https://gitlab.com/gitlab-com/gl-security/security-assurance/security-compliance-commercial-and-dedicated/observation-management/-/issues/604). As of today, it is used exclusively for GitLab.com to predict upcoming SLO violations across hundreds of monitored infrastructure components.
+[Tamland](https://gitlab.com/gitlab-com/gl-infra/tamland) is an infrastructure resource forecasting project owned by the [Scalability::Observability](https://about.gitlab.com/handbook/engineering/infrastructure/team/scalability/#scalabilityobservability) group.
+It implements [capacity planning](https://about.gitlab.com/handbook/engineering/infrastructure/capacity-planning/) for GitLab.com, which is a [controlled activity covered by SOC 2](https://gitlab.com/gitlab-com/gl-security/security-assurance/security-compliance-commercial-and-dedicated/observation-management/-/issues/604).
+As of today, it is used exclusively for GitLab.com to predict upcoming SLO violations across hundreds of monitored infrastructure components.
-Tamland produces a [report](https://gitlab-com.gitlab.io/gl-infra/tamland/intro.html) (hosted on GitLab Pages) containing forecast plots, information around predicted violations and other information around the components monitored. Any predicted SLO violation result in a capacity warning issue being created in the [issue tracker for capacity planning](https://gitlab.com/gitlab-com/gl-infra/capacity-planning/-/boards/2816983) on GitLab.com.
+Tamland produces a [report](https://gitlab-com.gitlab.io/gl-infra/tamland/intro.html) (hosted on GitLab Pages) containing forecast plots, information around predicted violations and other information around the components monitored.
+Any predicted SLO violation results in a capacity warning issue being created in the [issue tracker for capacity planning](https://gitlab.com/gitlab-com/gl-infra/capacity-planning/-/boards/2816983) on GitLab.com.
At present, Tamland is quite tailor made and specific for GitLab.com:
@@ -36,21 +42,28 @@ At present, Tamland is quite tailor made and specific for GitLab.com:
[Turning Tamland into a tool](https://gitlab.com/groups/gitlab-com/gl-infra/-/epics/1106) we can use more generically and making it independent of GitLab.com specifics is subject of ongoing work.
-For illustration, we can see a saturation forecast plot below for the `disk_space` resource for a PostgreSQL service called `patroni-ci`. Within the 90 days forecast horizon, we predict a violation of the `soft` SLO (set at 85% saturation) and this resulted in the creation of a [capacity planning issue](https://gitlab.com/gitlab-com/gl-infra/capacity-planning/-/issues/1219) for further review and potential actions. At present, the Scalability::Projections group reviews those issues and engages with the respective DRI for the service in question to remedy a saturation concern.
+For illustration, we can see a saturation forecast plot below for the `disk_space` resource for a PostgreSQL service called `patroni-ci`.
+Within the 90-day forecast horizon, we predict a violation of the `soft` SLO (set at 85% saturation) and this resulted in the creation of a [capacity planning issue](https://gitlab.com/gitlab-com/gl-infra/capacity-planning/-/issues/1219) for further review and potential actions.
+At present, the Scalability::Projections group reviews those issues and engages with the respective DRI for the service in question to remedy a saturation concern.
-For GitLab.com capacity planning, we operate Tamland from a scheduled CI pipeline with access to the central Thanos, which provides saturation and utilization metrics for GitLab.com. The CI pipeline produces the desired report, exposes it on GitLab Pages and also creates capacity planning issues. Scalability::Projections runs a capacity planning triage rotation which entails reviewing and prioritizing any open issues and their respective saturation concerns.
+For GitLab.com capacity planning, we operate Tamland from a scheduled CI pipeline with access to the central Thanos, which provides saturation and utilization metrics for GitLab.com.
+The CI pipeline produces the desired report, exposes it on GitLab Pages and also creates capacity planning issues.
+Scalability::Projections runs a capacity planning triage rotation which entails reviewing and prioritizing any open issues and their respective saturation concerns.
### Problem Statement
-With the number of [GitLab Dedicated](https://about.gitlab.com/dedicated/) deployments increasing, we need to establish capacity planning processes for Dedicated tenants. This is going to help us notice any pending resource constraints soon enough to be able to upgrade the infrastructure for a given tenant before the resource saturates and causes an incident.
+With the number of [GitLab Dedicated](https://about.gitlab.com/dedicated/) deployments increasing, we need to establish capacity planning processes for Dedicated tenants.
+This is going to help us notice any pending resource constraints soon enough to be able to upgrade the infrastructure for a given tenant before the resource saturates and causes an incident.
-Each Dedicated tenant is an isolated GitLab environment, with a full set of metrics monitored. These metrics are standardized in the [metrics catalog](https://gitlab.com/gitlab-com/runbooks/-/blob/master/reference-architectures/get-hybrid/src/gitlab-metrics-config.libsonnet?ref_type=heads) and on top of these, we have defined saturation metrics along with respective SLOs.
+Each Dedicated tenant is an isolated GitLab environment, with a full set of metrics monitored.
+These metrics are standardized in the [metrics catalog](https://gitlab.com/gitlab-com/runbooks/-/blob/master/reference-architectures/get-hybrid/src/gitlab-metrics-config.libsonnet?ref_type=heads) and on top of these, we have defined saturation metrics along with respective SLOs.
In order to provide capacity planning and forecasts for saturation metrics for each tenant, we'd like to get Tamland set up for GitLab Dedicated.
-While Tamland is developed by the Scalability::Projections and this team also owns the capacity planning process for GitLab.com, they don't have access to any of the Dedicated infrastructure as we have strong isolation implemented for Dedicated environments. As such, the technical design choices are going to affect how those teams interact and vice versa. We include this consideration into this documentation as we think the organisational aspect is a crucial part of it.
+While Tamland is developed by the Scalability::Projections group and this team also owns the capacity planning process for GitLab.com, they don't have access to any of the Dedicated infrastructure as we have strong isolation implemented for Dedicated environments.
+As such, the technical design choices are going to affect how those teams interact and vice versa. We include this consideration in this documentation as we think the organisational aspect is a crucial part of it.
### Key questions
@@ -70,25 +83,34 @@ While Tamland is developed by the Scalability::Projections and this team also ow
##### Reporting
-As of today, it's not quite clear yet how we'd like to consume forecasting data across tenants. In contrast to GitLab.com, we generate forecasts across a potentially large number of tenants. At this point, we suspect that we're more interested in an aggregate report across tenants rather than individual, very detailed saturation forecasts. As such, this is subject to refinement in a further iteration once we have the underlying data available and gathered practical insight in how we consume this information.
+As of today, it's not quite clear yet how we'd like to consume forecasting data across tenants.
+In contrast to GitLab.com, we generate forecasts across a potentially large number of tenants.
+At this point, we suspect that we're more interested in an aggregate report across tenants rather than individual, very detailed saturation forecasts.
+As such, this is subject to refinement in a further iteration once we have the underlying data available and gathered practical insight in how we consume this information.
##### Issue management
-While each predicted SLO violation results in the creation of a GitLab issue, this may not be the right mode of raising awareness for Dedicated. Similar to the reporting side, this is subject to further discussion once we have data to look at.
+While each predicted SLO violation results in the creation of a GitLab issue, this may not be the right mode of raising awareness for Dedicated.
+Similar to the reporting side, this is subject to further discussion once we have data to look at.
##### Customizing forecasting models
-Forecasting models can and should be tuned and informed with domain knowledge to produce accurate forecasts. This information is a part of the Tamland manifest. In the first iteration, we don't support per-tenant customization, but this can be added later.
+Forecasting models can and should be tuned and informed with domain knowledge to produce accurate forecasts.
+This information is a part of the Tamland manifest.
+In the first iteration, we don't support per-tenant customization, but this can be added later.
## Proposed Design for Dedicated: A part of the Dedicated stack
-Dedicated environments are fully isolated and run their own Prometheus instance to capture metrics, including saturation metrics. Tamland will run from each individual Dedicated tenant environment, consume metrics from Prometheus and store the resulting data in S3. From there, we consume forecast data and act on it.
+Dedicated environments are fully isolated and run their own Prometheus instance to capture metrics, including saturation metrics.
+Tamland will run from each individual Dedicated tenant environment, consume metrics from Prometheus and store the resulting data in S3.
+From there, we consume forecast data and act on it.

### Storage for output and cache
-Any data Tamland relies on is stored in a S3 bucket. We use one bucket per tenant to clearly separate data between tenants.
+Any data Tamland relies on is stored in an S3 bucket.
+We use one bucket per tenant to clearly separate data between tenants.
1. Resulting forecast data and other outputs
1. Tamland's internal cache for Prometheus metrics data
@@ -97,9 +119,11 @@ There is no need for a persistent state across Tamland runs aside from the S3 bu
### Benefits of executing inside tenant environments
-Each Tamland run for a single environment (tenant) can take a few hours to execute. With the number of tenants expected to increase significantly, we need to consider scaling the execution environment for Tamland.
+Each Tamland run for a single environment (tenant) can take a few hours to execute.
+With the number of tenants expected to increase significantly, we need to consider scaling the execution environment for Tamland.
-In this design, Tamland becomes a part of the Dedicated stack and a component of the individual tenant environment. As such, scaling the execution environment for Tamland is solved by design, because tenant forecasts execute inherently parallel in their respective environments.
+In this design, Tamland becomes a part of the Dedicated stack and a component of the individual tenant environment.
+As such, scaling the execution environment for Tamland is solved by design, because tenant forecasts execute inherently in parallel in their respective environments.
### Distribution model: Docker
@@ -107,15 +131,18 @@ Tamland is released as a Docker image, see [Tamland's README](https://gitlab.com
### Tamland manifest
-The manifest contains information about which saturation metrics to forecast on (see this [manifest example](https://gitlab.com/gitlab-com/gl-infra/tamland/-/blob/62854e1afbc2ed3160a55a738ea587e0cf7f994f/saturation.json) for GitLab.com). This will be generated from the metrics catalog and will be the same for all tenants for starters.
+The manifest contains information about which saturation metrics to forecast on (see this [manifest example](https://gitlab.com/gitlab-com/gl-infra/tamland/-/blob/62854e1afbc2ed3160a55a738ea587e0cf7f994f/saturation.json) for GitLab.com).
+This will be generated from the metrics catalog and will be the same for all tenants for starters.
-In order to generate the manifest from the metrics catalog, we setup dedicated GitLab project `tamland-dedicated` . On a regular basis, a scheduled pipeline grabs the metrics catalog, generates the JSON manifest from it and commits this to the project.
+In order to generate the manifest from the metrics catalog, we set up a dedicated GitLab project, `tamland-dedicated`.
+On a regular basis, a scheduled pipeline grabs the metrics catalog, generates the JSON manifest from it and commits this to the project.
On the Dedicated tenants, we download the latest version of the committed JSON manifest from `tamland-dedicated` and use this as input to execute Tamland.
### Acting on forecast insights
-When Tamland forecast data is available for a tenant, the Dedicated teams consume this data and act on it accordingly. The Scalability::Projections group is going to support and guide this process to get started and help interpret data, along with implementing Tamland features required to streamline this process for Dedicated in further iterations.
+When Tamland forecast data is available for a tenant, the Dedicated teams consume this data and act on it accordingly.
+The Scalability::Observability group is going to support and guide this process to get started and help interpret data, along with implementing Tamland features required to streamline this process for Dedicated in further iterations.
## Alternative Solution
@@ -125,11 +152,14 @@ An alternative design, we don't consider an option at this point, is to setup Ta

-In this design, a central Prometheus/Thanos instance is needed to provide the metrics data for Tamland. Dedicated tenants use remote-write to push their Prometheus data to the central Thanos instance.
+In this design, a central Prometheus/Thanos instance is needed to provide the metrics data for Tamland.
+Dedicated tenants use remote-write to push their Prometheus data to the central Thanos instance.
-Tamland is set up to run on a regular basis and consume metrics data from the single Thanos instance. It stores its results and cache in S3, similar to the other design.
+Tamland is set up to run on a regular basis and consume metrics data from the single Thanos instance.
+It stores its results and cache in S3, similar to the other design.
-In order to execute forecasts regularly, we need to provide an execution environment to run Tamland in. With an increasing number of tenants, we'd need to scale up resources for this cluster.
+In order to execute forecasts regularly, we need to provide an execution environment to run Tamland in.
+With an increasing number of tenants, we'd need to scale up resources for this cluster.
This design **has not been chosen** because of both technical and organisational concerns:
diff --git a/doc/architecture/blueprints/consolidating_groups_and_projects/index.md b/doc/architecture/blueprints/consolidating_groups_and_projects/index.md
index c35488cfa4f..89e7f0a8a88 100644
--- a/doc/architecture/blueprints/consolidating_groups_and_projects/index.md
+++ b/doc/architecture/blueprints/consolidating_groups_and_projects/index.md
@@ -230,7 +230,7 @@ We should strive to do the code clean up as we move through the phases. However,
The initial iteration will provide a framework to house features under `Namespaces`. Stage groups will eventually need to migrate their own features and functionality over to `Namespaces`. This may impact these features in unexpected ways. Therefore, to minimize UX debt and maintain product consistency, stage groups will have to consider several factors when migrating their features over to `Namespaces`:
1. **Conceptual model**: What are the current and future state conceptual models of these features ([see object modeling for designers](https://hpadkisson.medium.com/object-modeling-for-designers-an-introduction-7871bdcf8baf))? These should be documented in Pajamas (example: [merge requests](https://design.gitlab.com/objects/merge-request/)).
-1. **Merge conflicts**: What inconsistencies are there across project, group, and administrator levels? How might these be addressed? For an example of how we rationalized this for labels, please see [this issue](https://gitlab.com/gitlab-org/gitlab/-/issues/338820).
+1. **Merge conflicts**: What inconsistencies are there across project, group, and administrator levels? How might these be addressed? For an example of how we rationalized this for labels, see [this issue](https://gitlab.com/gitlab-org/gitlab/-/issues/338820).
1. **Inheritance & information flow**: How is information inherited across our container hierarchy currently? How might this be impacted if complying with the new [inheritance behavior](https://gitlab.com/gitlab-org/gitlab/-/issues/343316) framework?
1. **Settings**: Where can settings for this feature be found currently? How will these be impacted by `Namespaces`?
1. **Access**: Who can access this feature and is that impacted by the new container structure? Are there any role or privacy considerations?
diff --git a/doc/architecture/blueprints/container_registry_metadata_database/index.md b/doc/architecture/blueprints/container_registry_metadata_database/index.md
index b62dd660643..66beac6cdb7 100644
--- a/doc/architecture/blueprints/container_registry_metadata_database/index.md
+++ b/doc/architecture/blueprints/container_registry_metadata_database/index.md
@@ -65,7 +65,7 @@ sequenceDiagram
Note right of C: Bearer token included in the Authorization header
```
-Please refer to the [Docker documentation](https://docs.docker.com/registry/spec/auth/token/) for more details.
+For more details, refer to the [Docker documentation](https://docs.docker.com/registry/spec/auth/token/).
##### Push and Pull
@@ -164,7 +164,7 @@ Although blobs are shared across repositories, manifest and tag metadata are sco
#### GitLab.com
-Due to scale, performance and isolation concerns, for GitLab.com the registry database will be on a separate dedicated PostgreSQL cluster. Please see [#93](https://gitlab.com/gitlab-org/container-registry/-/issues/93) and [GitLab-com/gl-infra/reliability#10109](https://gitlab.com/gitlab-com/gl-infra/reliability/-/issues/10109) for additional context.
+Due to scale, performance and isolation concerns, for GitLab.com the registry database will be on a separate dedicated PostgreSQL cluster. See [#93](https://gitlab.com/gitlab-org/container-registry/-/issues/93) and [GitLab-com/gl-infra/reliability#10109](https://gitlab.com/gitlab-com/gl-infra/reliability/-/issues/10109) for additional context.
The diagram below illustrates the architecture of the database cluster:
@@ -238,7 +238,7 @@ This is a list of all the registry HTTP API operations and how they depend on th
| [Complete blob upload](https://gitlab.com/gitlab-org/container-registry/-/blob/master/docs/spec/api.md#put-blob-upload) | `PUT` | `/v2//blobs/uploads/` | **{check-circle}** Yes | **{check-circle}** Yes | **{dotted-circle}** No |
| [Cancel blob upload](https://gitlab.com/gitlab-org/container-registry/-/blob/master/docs/spec/api.md#canceling-an-upload) | `DELETE` | `/v2//blobs/uploads/` | **{check-circle}** Yes | **{check-circle}** Yes | **{dotted-circle}** No |
-`*` Please refer to the [list of interactions between registry and Rails](#from-gitlab-rails-to-registry) to know why and how.
+`*` Refer to the [list of interactions between registry and Rails](#from-gitlab-rails-to-registry) to know why and how.
#### Failure Scenarios
diff --git a/doc/architecture/blueprints/container_registry_metadata_database_self_managed_rollout/index.md b/doc/architecture/blueprints/container_registry_metadata_database_self_managed_rollout/index.md
index 4e81761839c..bfc5c4a7133 100644
--- a/doc/architecture/blueprints/container_registry_metadata_database_self_managed_rollout/index.md
+++ b/doc/architecture/blueprints/container_registry_metadata_database_self_managed_rollout/index.md
@@ -88,7 +88,7 @@ The metadata database is in early beta for self-managed users. The core migratio
process for existing registries has been implemented, and online garbage collection
is fully implemented. Certain database enabled features are only enabled for GitLab.com
and automatic database provisioning for the registry database is not available.
-Please see the table below for the status of features related to the container
+See the table below for the status of features related to the container
registry database.
| Feature | Description | Status | Link |
diff --git a/doc/architecture/blueprints/gitaly_handle_upload_pack_in_http2_server/index.md b/doc/architecture/blueprints/gitaly_handle_upload_pack_in_http2_server/index.md
index acee83b2649..897f9f97365 100644
--- a/doc/architecture/blueprints/gitaly_handle_upload_pack_in_http2_server/index.md
+++ b/doc/architecture/blueprints/gitaly_handle_upload_pack_in_http2_server/index.md
@@ -28,7 +28,7 @@ provided. We are looking for a solution that won't require us to completely rewr
### How Git data transfer works
-Please skip this part if you are familiar with how Git data transfer architecture at GitLab.
+Skip this part if you are familiar with how Git data transfer works at GitLab.
Git data transfer is undeniably one of the crucial services that a Git server can offer. It is a fundamental feature of Git that was originally developed for Linux
kernel development. As Git gained popularity, it continued to be recognized as a distributed system. However, the emergence of centralized Git services like GitHub or
diff --git a/doc/architecture/blueprints/google_artifact_registry_integration/index.md b/doc/architecture/blueprints/google_artifact_registry_integration/index.md
index 78a158f7baa..0419601e266 100644
--- a/doc/architecture/blueprints/google_artifact_registry_integration/index.md
+++ b/doc/architecture/blueprints/google_artifact_registry_integration/index.md
@@ -18,7 +18,7 @@ As highlighted in the announcement, one key goal is the ability to "_use Google'
## Motivation
-Please refer to the [announcement](https://about.gitlab.com/blog/2023/08/29/gitlab-google-partnership-s3c/) blog post for more details about the motivation and long-term goals of the GitLab and Google Cloud partnership.
+Refer to the [announcement](https://about.gitlab.com/blog/2023/08/29/gitlab-google-partnership-s3c/) blog post for more details about the motivation and long-term goals of the GitLab and Google Cloud partnership.
Regarding the scope of this design document, our primary focus is to fulfill the Product requirement of providing users with visibility over their container images in GAR. The motivation for this specific goal is rooted in foundational research on the use of external registries as a complement to the GitLab container registry ([internal](https://gitlab.com/gitlab-org/ux-research/-/issues/2602)).
@@ -82,13 +82,13 @@ Regarding the GAR integration, since there is no equivalent entities for GitLab
GAR provides three APIs: Docker API, REST API, and RPC API.
-The [Docker API](https://cloud.google.com/artifact-registry/docs/reference/docker-api) is based on the [Docker Registry HTTP API V2](https://docs.docker.com/registry/spec/api), now superseded by the [OCI Distribution Specification API](https://github.com/opencontainers/distribution-spec/blob/main/spec.md) (from now on referred to as OCI API). This API is used for pushing/pulling images to/from GAR and also provides some discoverability operations. Please refer to [Alternative Solutions](#alternative-solutions) for the reasons why we don't intend to use it.
+The [Docker API](https://cloud.google.com/artifact-registry/docs/reference/docker-api) is based on the [Docker Registry HTTP API V2](https://docs.docker.com/registry/spec/api), now superseded by the [OCI Distribution Specification API](https://github.com/opencontainers/distribution-spec/blob/main/spec.md) (from now on referred to as OCI API). This API is used for pushing/pulling images to/from GAR and also provides some discoverability operations. Refer to [Alternative Solutions](#alternative-solutions) for the reasons why we don't intend to use it.
Among the proprietary GAR APIs, the [REST API](https://cloud.google.com/artifact-registry/docs/reference/rest) provides basic functionality for managing repositories. This includes [`list`](https://cloud.google.com/artifact-registry/docs/reference/rest/v1/projects.locations.repositories.dockerImages/list) and [`get`](https://cloud.google.com/artifact-registry/docs/reference/rest/v1/projects.locations.repositories.dockerImages/get) operations for container image repositories, which could be used for this integration. Both operations return the same data structure, represented by the [`DockerImage`](https://cloud.google.com/artifact-registry/docs/reference/rest/v1/projects.locations.repositories.dockerImages#DockerImage) object, so both provide the same level of detail.
Last but not least, there is also an [RPC API](https://cloud.google.com/artifact-registry/docs/reference/rpc/google.devtools.artifactregistry.v1), backed by gRPC and Protocol Buffers. This API provides the most functionality, covering all GAR features. From the available operations, we can make use of the [`ListDockerImagesRequest`](https://cloud.google.com/artifact-registry/docs/reference/rpc/google.devtools.artifactregistry.v1#listdockerimagesrequest) and [`GetDockerImageRequest`](https://cloud.google.com/artifact-registry/docs/reference/rpc/google.devtools.artifactregistry.v1#google.devtools.artifactregistry.v1.GetDockerImageRequest) operations. As with the REST API, both responses are composed of [`DockerImage`](https://cloud.google.com/artifact-registry/docs/reference/rpc/google.devtools.artifactregistry.v1#google.devtools.artifactregistry.v1.DockerImage) objects.
-Between the two proprietary API options, we chose the RPC one because it provides support not only for the operations we need today but also offers better coverage of all GAR features, which will be beneficial in future iterations. Finally, we do not intend to make direct use of this API but rather use it through the official Ruby client SDK. Please see [Client SDK](backend.md#client-sdk) below for more details.
+Between the two proprietary API options, we chose the RPC one because it provides support not only for the operations we need today but also offers better coverage of all GAR features, which will be beneficial in future iterations. Finally, we do not intend to make direct use of this API but rather use it through the official Ruby client SDK. See [Client SDK](backend.md#client-sdk) below for more details.
#### Backend Integration
diff --git a/doc/architecture/blueprints/observability_logging/index.md b/doc/architecture/blueprints/observability_logging/index.md
index d8259e0a736..bbe15cde58e 100644
--- a/doc/architecture/blueprints/observability_logging/index.md
+++ b/doc/architecture/blueprints/observability_logging/index.md
@@ -121,7 +121,7 @@ Hence the decision to only support Log objects seems like a boring and simple so
Similar to traces, logging data ingestion will be done at the Ingress level.
As part of [the forward-auth](https://doc.traefik.io/traefik/middlewares/http/forwardauth/) flow, Traefik will forward the request to Gatekeeper which in turn leverages Redis for counting.
This is currently done only for [the ingestion path](https://gitlab.com/gitlab-org/opstrace/opstrace/-/merge_requests/2236).
-Please check the MR description for more details on how it works.
+Check the MR description for more details on how it works.
The read path rate limiting implementation is tracked [here](https://gitlab.com/gitlab-org/opstrace/opstrace/-/issues/2356).
### Database schema
@@ -629,4 +629,4 @@ Long-term, we will need a way to monitor the number of user queries that failed
## Iterations
-Please refer to [Observability Group planning epic](https://gitlab.com/groups/gitlab-org/opstrace/-/epics/92) and its linked issues for up-to-date information.
+Refer to [Observability Group planning epic](https://gitlab.com/groups/gitlab-org/opstrace/-/epics/92) and its linked issues for up-to-date information.
diff --git a/doc/architecture/blueprints/runner_tokens/index.md b/doc/architecture/blueprints/runner_tokens/index.md
index 5e1300f9a3b..f2e9d624d20 100644
--- a/doc/architecture/blueprints/runner_tokens/index.md
+++ b/doc/architecture/blueprints/runner_tokens/index.md
@@ -440,7 +440,7 @@ scope.
## FAQ
-Please follow [the user documentation](../../../ci/runners/new_creation_workflow.md).
+Follow [the user documentation](../../../ci/runners/new_creation_workflow.md).
## Status
diff --git a/doc/cloud_seed/index.md b/doc/cloud_seed/index.md
index 70166b2d09f..8180669b1af 100644
--- a/doc/cloud_seed/index.md
+++ b/doc/cloud_seed/index.md
@@ -120,7 +120,7 @@ The following databases and versions are supported:
- 2019: Standard, Enterprise, Express, and Web
- 2017: Standard, Enterprise, Express, and Web
-Google Cloud pricing applies. Please refer to the [Cloud SQL pricing page](https://cloud.google.com/sql/pricing).
+Google Cloud pricing applies. Refer to the [Cloud SQL pricing page](https://cloud.google.com/sql/pricing).
1. [Create a database instance](#create-a-database-instance)
1. [Database setup through a background worker](#database-setup-through-a-background-worker)
diff --git a/doc/development/advanced_search.md b/doc/development/advanced_search.md
index 64127af58a1..a552b22226d 100644
--- a/doc/development/advanced_search.md
+++ b/doc/development/advanced_search.md
@@ -32,7 +32,7 @@ See the [Elasticsearch GDK setup instructions](https://gitlab.com/gitlab-org/git
- `gitlab:elastic:test:index_size`: Tells you how much space the current index is using, as well as how many documents are in the index.
- `gitlab:elastic:test:index_size_change`: Outputs index size, reindexes, and outputs index size again. Useful when testing improvements to indexing size.
-Additionally, if you need large repositories or multiple forks for testing, please consider [following these instructions](rake_tasks.md#extra-project-seed-options)
+Additionally, if you need large repositories or multiple forks for testing, consider [following these instructions](rake_tasks.md#extra-project-seed-options)
## How does it work?
@@ -40,7 +40,7 @@ The Elasticsearch integration depends on an external indexer. We ship an [indexe
After initial indexing is complete, create, update, and delete operations for all models except projects (see [#207494](https://gitlab.com/gitlab-org/gitlab/-/issues/207494)) are tracked in a Redis [`ZSET`](https://redis.io/docs/manual/data-types/#sorted-sets). A regular `sidekiq-cron` `ElasticIndexBulkCronWorker` processes this queue, updating many Elasticsearch documents at a time with the [Bulk Request API](https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html).
-Search queries are generated by the concerns found in [`ee/app/models/concerns/elastic`](https://gitlab.com/gitlab-org/gitlab/-/tree/master/ee/app/models/concerns/elastic). These concerns are also in charge of access control, and have been a historic source of security bugs so please pay close attention to them!
+Search queries are generated by the concerns found in [`ee/app/models/concerns/elastic`](https://gitlab.com/gitlab-org/gitlab/-/tree/master/ee/app/models/concerns/elastic). These concerns are also in charge of access control, and have been a historic source of security bugs, so pay close attention to them!
### Custom routing
@@ -62,13 +62,13 @@ The following analyzers and tokenizers are defined in [`ee/lib/elastic/latest/co
Used when indexing blobs' paths. Uses the `path_tokenizer` and the `lowercase` and `asciifolding` filters.
-Please see the `path_tokenizer` explanation below for an example.
+See the `path_tokenizer` explanation below for an example.
#### `sha_analyzer`
Used in blobs and commits. Uses the `sha_tokenizer` and the `lowercase` and `asciifolding` filters.
-Please see the `sha_tokenizer` explanation later below for an example.
+See the `sha_tokenizer` explanation below for an example.
#### `code_analyzer`
@@ -76,7 +76,7 @@ Used when indexing a blob's filename and content. Uses the `whitespace` tokenize
The `whitespace` tokenizer was selected to have more control over how tokens are split. For example the string `Foo::bar(4)` needs to generate tokens like `Foo` and `bar(4)` to be properly searched.
-Please see the `code` filter for an explanation on how tokens are split.
+See the `code` filter for an explanation on how tokens are split.
NOTE:
The [Elasticsearch `code_analyzer` doesn't account for all code cases](../integration/advanced_search/elasticsearch_troubleshooting.md#elasticsearch-code_analyzer-doesnt-account-for-all-code-cases).
diff --git a/doc/development/ai_architecture.md b/doc/development/ai_architecture.md
index e1ad9745fee..0717872a12b 100644
--- a/doc/development/ai_architecture.md
+++ b/doc/development/ai_architecture.md
@@ -19,7 +19,7 @@ The following diagram from the [architecture blueprint](../architecture/blueprin
## SaaS-based AI abstraction layer
-GitLab currently operates a cloud-hosted AI architecture. We will allow access to it for licensed self managed instances using the AI-gateway. Please see [the blueprint](../architecture/blueprints/ai_gateway) for details
+GitLab currently operates a cloud-hosted AI architecture. We will allow access to it for licensed self-managed instances using the AI-gateway. See [the blueprint](../architecture/blueprints/ai_gateway) for details.
There are two primary reasons for this: the best AI models are cloud-based as they often depend on specialized hardware designed for this purpose, and operating self-managed infrastructure capable of AI at-scale and with appropriate performance is a significant undertaking. We are actively [tracking self-managed customers interested in AI](https://gitlab.com/gitlab-org/gitlab/-/issues/409183).
diff --git a/doc/development/ai_features/glossary.md b/doc/development/ai_features/glossary.md
index 2228d6e231a..be856639b83 100644
--- a/doc/development/ai_features/glossary.md
+++ b/doc/development/ai_features/glossary.md
@@ -8,7 +8,7 @@ info: To determine the technical writer assigned to the Stage/Group associated w
This is a list of terms that may have a general meaning but also may have a
specific meaning at GitLab. If you encounter a piece of technical jargon related
-to AI that you think could benefit from being in this list, please add it!
+to AI that you think could benefit from being in this list, add it!
- **AI Gateway**: standalone service used to give access to AI features to
non-SaaS GitLab users. This logic will be moved to Cloud Connector when that
diff --git a/doc/development/ai_features/index.md b/doc/development/ai_features/index.md
index 35c329ff1e6..d1b530f00cf 100644
--- a/doc/development/ai_features/index.md
+++ b/doc/development/ai_features/index.md
@@ -93,7 +93,7 @@ For features that use the embedding database, additional setup is needed.
### Configure GCP Vertex access
-In order to obtain a GCP service key for local development, please follow the steps below:
+In order to obtain a GCP service key for local development, follow the steps below:
- Create a sandbox GCP project by visiting [this page](https://about.gitlab.com/handbook/infrastructure-standards/#individual-environment) and following the instructions, or by requesting access to our existing group GCP project by using [this template](https://gitlab.com/gitlab-com/it/infra/issue-tracker/-/issues/new?issuable_template=gcp_group_account_iam_update_request).
- If you are using an individual GCP project, you may also need to enable the Vertex AI API:
diff --git a/doc/development/api_graphql_styleguide.md b/doc/development/api_graphql_styleguide.md
index f6d39ff954b..ab291026fc6 100644
--- a/doc/development/api_graphql_styleguide.md
+++ b/doc/development/api_graphql_styleguide.md
@@ -58,7 +58,7 @@ For example, the one for [GitLab.com](https://gitlab.com/-/graphql-explorer).
The GraphQL framework has some specific gotchas to be aware of, and domain expertise is required to ensure they are satisfied.
-If you are asked to review a merge request that modifies any GraphQL files or adds an endpoint, please have a look at
+If you are asked to review a merge request that modifies any GraphQL files or adds an endpoint, have a look at
[our GraphQL review guide](graphql_guide/reviewing.md).
## Reading GraphQL logs
diff --git a/doc/development/api_styleguide.md b/doc/development/api_styleguide.md
index 7a5898c6521..714115d474a 100644
--- a/doc/development/api_styleguide.md
+++ b/doc/development/api_styleguide.md
@@ -10,7 +10,7 @@ This style guide recommends best practices for API development.
## Instance variables
-Please do not use instance variables, there is no need for them (we don't need
+Don't use instance variables, there is no need for them (we don't need
to access them as we do in Rails views), local variables are fine.
## Entities
@@ -321,7 +321,7 @@ it's own file in the [`validators`](https://gitlab.com/gitlab-org/gitlab/-/blob/
## Internal API
-The [internal API](internal_api/index.md) is documented for internal use. Please keep it up to date so we know what endpoints
+The [internal API](internal_api/index.md) is documented for internal use. Keep it up to date so we know what endpoints
different components are making use of.
## Avoiding N+1 problems
diff --git a/doc/development/application_secrets.md b/doc/development/application_secrets.md
index 3961cc08d97..3217f0500f8 100644
--- a/doc/development/application_secrets.md
+++ b/doc/development/application_secrets.md
@@ -46,4 +46,4 @@ GitLab.com environments prior to changing this file.
## Further iteration
We may either deprecate or remove this automatic secret generation `01_secret_token.rb` in the future.
-Please see [issue 222690](https://gitlab.com/gitlab-org/gitlab/-/issues/222690) for more information.
+See [issue 222690](https://gitlab.com/gitlab-org/gitlab/-/issues/222690) for more information.
diff --git a/doc/development/application_slis/index.md b/doc/development/application_slis/index.md
index 3eacfab7d7c..c5cdc6edc5a 100644
--- a/doc/development/application_slis/index.md
+++ b/doc/development/application_slis/index.md
@@ -179,7 +179,7 @@ In [this project](https://gitlab.com/groups/gitlab-com/gl-infra/-/epics/614)
we are extending this so alerts for SLIs with a `feature_category`
label in the source metrics can also be routed.
-For any question, please don't hesitate to create an issue in
+For any question, don't hesitate to create an issue in
[the Scalability issue tracker](https://gitlab.com/gitlab-com/gl-infra/scalability/-/issues)
or come find us in
[#g_scalability](https://gitlab.slack.com/archives/CMMF8TKR9) on Slack.
diff --git a/doc/development/application_slis/rails_request.md b/doc/development/application_slis/rails_request.md
index 981fc28c9b0..2d18e2a8a15 100644
--- a/doc/development/application_slis/rails_request.md
+++ b/doc/development/application_slis/rails_request.md
@@ -126,7 +126,7 @@ a case-by-case basis. Take the following into account:
view. We cannot scale up the fleet fast enough to accommodate for
the incoming slow requests alongside the regular traffic.
-When lowering the urgency for an existing endpoint, please involve a
+When lowering the urgency for an existing endpoint, involve a
[Scalability team member](https://about.gitlab.com/handbook/engineering/infrastructure/team/scalability/#team-members)
in the review. We can use request rates and durations available in the
logs to come up with a recommendation. You can pick a threshold
@@ -172,7 +172,7 @@ information in the logs to check:
the target duration we want to set.
As decreasing a threshold too much could result in alerts for the
-Apdex degradation, please also involve a Scalability team member in
+Apdex degradation, also involve a Scalability team member in
the merge request.
## How to adjust the urgency
diff --git a/doc/development/audit_event_guide/index.md b/doc/development/audit_event_guide/index.md
index df1d2dd03b5..28af4c3e0bb 100644
--- a/doc/development/audit_event_guide/index.md
+++ b/doc/development/audit_event_guide/index.md
@@ -23,7 +23,7 @@ While any events could trigger an Audit Event, not all events should. In general
- Are tracking information for product feature adoption.
- Are covered in the direction page's discussion on [what is not planned](https://about.gitlab.com/direction/govern/compliance/audit-events/#what-is-not-planned-right-now).
-If you have any questions, please reach out to `@gitlab-org/govern/compliance` to see if an Audit Event, or some other approach, may be best for your event.
+If you have any questions, reach out to `@gitlab-org/govern/compliance` to see if an Audit Event, or some other approach, may be best for your event.
## Audit Event Schemas
diff --git a/doc/development/avoiding_required_stops.md b/doc/development/avoiding_required_stops.md
index 551ffd3e12d..74db216a1f0 100644
--- a/doc/development/avoiding_required_stops.md
+++ b/doc/development/avoiding_required_stops.md
@@ -32,7 +32,7 @@ Wherever possible, a required stop should be avoided. If it can't be avoided,
the required stop should be aligned to a _scheduled_ required stop.
In cases where we are considering retroactively declaring an unplanned required stop,
-please contact the [Distribution team product manager](https://about.gitlab.com/handbook/product/categories/#distributionbuild-group) to advise on next steps. If there
+contact the [Distribution team product manager](https://about.gitlab.com/handbook/product/categories/#distributionbuild-group) to advise on next steps. If there
is uncertainty about whether we should declare a required stop, the Distribution product
manager may escalate to GitLab product leadership (VP or Chief Product Officer) to make
a final determination. This may happen, for example, if a change might require a stop for
diff --git a/doc/development/cicd/index.md b/doc/development/cicd/index.md
index 6e6156c8f52..18781f9315a 100644
--- a/doc/development/cicd/index.md
+++ b/doc/development/cicd/index.md
@@ -8,7 +8,7 @@ info: Any user with at least the Maintainer role can merge updates to this conte
Development guides that are specific to CI/CD are listed here:
-- If you are creating new CI/CD templates, please read [the development guide for GitLab CI/CD templates](templates.md).
+- If you are creating new CI/CD templates, read [the development guide for GitLab CI/CD templates](templates.md).
- If you are adding a new keyword or changing the CI schema, check the [CI schema guide](schema.md)
See the [CI/CD YAML reference documentation guide](cicd_reference_documentation_guide.md)
diff --git a/doc/development/cicd/templates.md b/doc/development/cicd/templates.md
index e9cfe0fc56f..dfa77362a9d 100644
--- a/doc/development/cicd/templates.md
+++ b/doc/development/cicd/templates.md
@@ -284,7 +284,7 @@ the user's `.gitlab-ci.yml` immediately causes a lint error because there
are no such jobs named `performance` in the included template anymore. Therefore,
users have to fix their `.gitlab-ci.yml` that could annoy their workflow.
-Please read [versioning](#versioning) section for introducing breaking change safely.
+Read the [versioning](#versioning) section for introducing breaking changes safely.
## Versioning
@@ -377,7 +377,7 @@ Each CI/CD template must be tested to make sure that it's safe to be published.
### Manual QA
It's always good practice to test the template in a minimal demo project.
-To do so, please follow the following steps:
+To do so, follow these steps:
1. Create a public sample project on .
1. Add a `.gitlab-ci.yml` to the project with the proposed template.
@@ -481,6 +481,6 @@ If you're unsure if it's secure or not, you must ask security experts for cross-
After your CI/CD template MR is created and labeled with `ci::templates`, DangerBot
suggests one reviewer and one maintainer that can review your code. When your merge
-request is ready for review, please [mention](../../user/discussions/index.md#mentions)
+request is ready for review, [mention](../../user/discussions/index.md#mentions)
the reviewer and ask them to review your CI/CD template changes. See details in the merge request that added
[a DangerBot task for CI/CD template MRs](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/44688).
diff --git a/doc/development/code_comments.md b/doc/development/code_comments.md
index b1a360a8a2f..51643f3e7ed 100644
--- a/doc/development/code_comments.md
+++ b/doc/development/code_comments.md
@@ -7,7 +7,7 @@ info: Any user with at least the Maintainer role can merge updates to this conte
# Code comments
Whenever you add comment to the code that is expected to be addressed at any time
-in future, please create a technical debt issue for it. Then put a link to it
+in future, create a technical debt issue for it. Then put a link to it
to the code comment you've created. This allows other developers to quickly
check if a comment is still relevant and what needs to be done to address it.
diff --git a/doc/development/code_review.md b/doc/development/code_review.md
index 401cca49429..32a7e394034 100644
--- a/doc/development/code_review.md
+++ b/doc/development/code_review.md
@@ -251,7 +251,7 @@ See the [test engineering process](https://about.gitlab.com/handbook/engineering
1. You have reviewed the documentation regarding [internal application security reviews](https://about.gitlab.com/handbook/security/#internal-application-security-reviews) for **when** and **how** to request a security review and requested a security review if this is warranted for this change.
1. If there are security scan results that are blocking the MR (due to the [scan result policies](https://gitlab.com/gitlab-com/gl-security/security-policies)):
- For true positive findings, they should be corrected before the merge request is merged. This will remove the AppSec approval required by the scan result policy.
- - For false positive findings, something that should be discussed for risk acceptance, or anything questionable, please ping `@gitlab-com/gl-security/appsec`.
+ - For false positive findings, something that should be discussed for risk acceptance, or anything questionable, ping `@gitlab-com/gl-security/appsec`.
##### Deployment
@@ -466,7 +466,7 @@ Here is a summary of the changes, also reflected in this section above.
### Having your merge request reviewed
-Please keep in mind that code review is a process that can take multiple
+Keep in mind that code review is a process that can take multiple
iterations, and reviewers may spot things later that they may not have seen the
first time.
diff --git a/doc/development/contributing/index.md b/doc/development/contributing/index.md
index f95d6abca54..39414f9c298 100644
--- a/doc/development/contributing/index.md
+++ b/doc/development/contributing/index.md
@@ -142,7 +142,7 @@ Lastly, keep the following in mind when submitting merge requests:
## Contributing to Premium/Ultimate features with an Enterprise Edition license
If you would like to work on GitLab features that are within a paid tier, also known as the code that lives in the [EE folder](https://gitlab.com/gitlab-org/gitlab/-/tree/master/ee), it requires a GitLab Enterprise Edition license.
-Please request an Enterprise Edition Developers License according to the [documented process](https://about.gitlab.com/handbook/marketing/developer-relations/contributor-success/community-contributors-workflows.html#contributing-to-the-gitlab-enterprise-edition-ee).
+Request an Enterprise Edition Developers License according to the [documented process](https://about.gitlab.com/handbook/marketing/developer-relations/contributor-success/community-contributors-workflows.html#contributing-to-the-gitlab-enterprise-edition-ee).
## Get help
diff --git a/doc/development/contributing/issue_workflow.md b/doc/development/contributing/issue_workflow.md
index b5a49ffa995..b59a77c1f52 100644
--- a/doc/development/contributing/issue_workflow.md
+++ b/doc/development/contributing/issue_workflow.md
@@ -34,7 +34,7 @@ In order to help track feature proposals, we use the
Users that are not members of the project cannot add labels via the UI.
Instead, use [reactive label commands](https://about.gitlab.com/handbook/engineering/quality/triage-operations/#reactive-label-and-unlabel-commands).
-Please keep feature proposals as small and simple as possible, complex ones
+Keep feature proposals as small and simple as possible; complex ones
might be edited to make them small and simple.
For changes to the user interface (UI), follow our [design and UI guidelines](design.md),
@@ -77,7 +77,7 @@ You are very welcome to help the GitLab team triage issues.
The most important thing is making sure valid issues receive feedback from the
development team. Therefore the priority is mentioning developers that can help
-on those issues. Please select someone with relevant experience from the
+on those issues. Select someone with relevant experience from the
[GitLab team](https://about.gitlab.com/company/team/).
If there is nobody mentioned with that expertise, look in the commit history for
the affected files to find someone.
@@ -120,7 +120,7 @@ with a reference to an issue describing the regression, and then to update that
note with a reference to the merge request that fixes it as it becomes available.
If you're a contributor who doesn't have the required permissions to update
-other users' notes, please post a new note with a reference to both the issue
+other users' notes, post a new note with a reference to both the issue
and the merge request.
The release manager will
diff --git a/doc/development/contributing/merge_request_workflow.md b/doc/development/contributing/merge_request_workflow.md
index c6376c083aa..30fe82aae58 100644
--- a/doc/development/contributing/merge_request_workflow.md
+++ b/doc/development/contributing/merge_request_workflow.md
@@ -14,7 +14,7 @@ label, but you are free to contribute to any issue you want.
## Working from issues
-If you find an issue, please submit a merge request with a fix or improvement,
+If you find an issue, submit a merge request with a fix or improvement,
if you can, and include tests.
If you want to add a new feature that is not labeled, it is best to first create
@@ -70,13 +70,13 @@ For a walkthrough of the contribution process, see [Tutorial: Make a GitLab cont
- If you would like quick feedback on your merge request feel free to mention someone
from the [core team](https://about.gitlab.com/community/core-team/) or one of the
[merge request coaches](https://about.gitlab.com/company/team/). When having your code reviewed
- and when reviewing merge requests, please keep the [code review guidelines](../code_review.md)
+ and when reviewing merge requests, keep the [code review guidelines](../code_review.md)
in mind. And if your code also makes changes to the database, or does expensive queries,
check the [database review guidelines](../database_review.md).
### Keep it simple
-*Live by smaller iterations.* Please keep the amount of changes in a single MR **as small as possible**.
+*Live by smaller iterations.* Keep the amount of changes in a single MR **as small as possible**.
If you want to contribute a large feature, think very carefully about what the
[minimum viable change](https://about.gitlab.com/handbook/product/#the-minimally-viable-change)
is. Can you split the functionality into two smaller MRs? Can you submit only the
@@ -156,7 +156,7 @@ Example commit message template that can be used on your machine that embodies t
## Contribution acceptance criteria
-To make sure that your merge request can be approved, please ensure that it meets
+To make sure that your merge request can be approved, verify that it meets
the contribution acceptance criteria below:
1. The change is as small as possible.
@@ -195,7 +195,7 @@ the contribution acceptance criteria below:
## Definition of done
-If you contribute to GitLab, please know that changes involve more than just
+If you contribute to GitLab, know that changes involve more than just
code. We use the following [definition of done](https://www.agilealliance.org/glossary/definition-of-done/).
To reach the definition of done, the merge request must create no regressions and meet all these criteria:
@@ -263,7 +263,7 @@ requirements.
1. For tests that use Capybara, read
[how to write reliable, asynchronous integration tests](https://thoughtbot.com/blog/write-reliable-asynchronous-integration-tests-with-capybara).
1. [Black-box tests/end-to-end tests](../testing_guide/testing_levels.md#black-box-tests-at-the-system-level-aka-end-to-end-tests)
- added if required. Please contact [the quality team](https://about.gitlab.com/handbook/engineering/quality/#teams)
+ added if required. Contact [the quality team](https://about.gitlab.com/handbook/engineering/quality/#teams)
with any questions.
1. The change is tested in a review app where possible and if appropriate.
1. Code affected by a feature flag is covered by [automated tests with the feature flag enabled and disabled](../feature_flags/index.md#feature-flags-in-tests), or both
@@ -275,7 +275,7 @@ requirements.
1. Use available components from the GitLab Design System,
[Pajamas](https://design.gitlab.com/).
1. The MR must include *Before* and *After* screenshots if UI changes are made.
-1. If the MR changes CSS classes, please include the list of affected pages, which
+1. If the MR changes CSS classes, include the list of affected pages, which
can be found by running `grep css-class ./app -R`.
### Description of changes
@@ -323,7 +323,7 @@ Contributions do not require approval from the [Product team](https://about.gitl
## Dependencies
-If you add a dependency in GitLab (such as an operating system package) please
+If you add a dependency in GitLab (such as an operating system package),
consider updating the following, and note the applicability of each in your merge
request:
diff --git a/doc/development/contributing/verify/index.md b/doc/development/contributing/verify/index.md
index 9cd44b6cb64..dbc48121dff 100644
--- a/doc/development/contributing/verify/index.md
+++ b/doc/development/contributing/verify/index.md
@@ -242,7 +242,7 @@ scenario relating to a software being built by one of our [early customers](http
> could generate a new particle that would then cause the universe to implode.
That would be quite an undesirable outcome of a small bug in GitLab CI/CD status
-processing. Please take extra care when you are working on CI/CD statuses,
+processing. Take extra care when you are working on CI/CD statuses;
we don't want to implode our Universe!
This is an extreme and unlikely scenario, but presenting data that is not accurate
diff --git a/doc/development/database/database_reviewer_guidelines.md b/doc/development/database/database_reviewer_guidelines.md
index 69b4b3b7a02..07785557813 100644
--- a/doc/development/database/database_reviewer_guidelines.md
+++ b/doc/development/database/database_reviewer_guidelines.md
@@ -84,7 +84,7 @@ topics and use cases. The most frequently required during database reviewing are
Database maintainership uses the same process as other projects for identifying maintainers.
[Follow the general process documented here](https://about.gitlab.com/handbook/engineering/workflow/code-review/#how-to-become-a-project-maintainer).
-For database specific requirements, please see [`Project maintainer process for gitlab-database`](https://about.gitlab.com/handbook/engineering/workflow/code-review/#project-maintainer-process-for-gitlab-database)
+For database-specific requirements, see [`Project maintainer process for gitlab-database`](https://about.gitlab.com/handbook/engineering/workflow/code-review/#project-maintainer-process-for-gitlab-database)
## What to do if you feel overwhelmed
diff --git a/doc/development/event_store.md b/doc/development/event_store.md
index ea7ccb9f726..0ffcf8704ff 100644
--- a/doc/development/event_store.md
+++ b/doc/development/event_store.md
@@ -184,7 +184,7 @@ Changes to the schema require multiple rollouts. While the new version is being
- Existing subscribers can consume events using the old version.
- Events get persisted in the Sidekiq queue as job arguments, so we could have 2 versions of the schema during deployments.
-As changing the schema ultimately impacts the Sidekiq arguments, please refer to our
+As changing the schema ultimately impacts the Sidekiq arguments, refer to our
[Sidekiq style guide](sidekiq/compatibility_across_updates.md#changing-the-arguments-for-a-worker) with regards to multiple rollouts.
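+For example, a new property can start out as optional, so subscribers still running the old code can consume events that do not carry it yet (the event class and property names below are illustrative):
+
+```ruby
+class Projects::ProjectArchivedEvent < Gitlab::EventStore::Event
+  def schema
+    {
+      'type' => 'object',
+      'required' => %w[project_id],
+      'properties' => {
+        'project_id' => { 'type' => 'integer' },
+        # Optional for now; make it required only after all consumers are updated.
+        'archived_by_user_id' => { 'type' => 'integer' }
+      }
+    }
+  end
+end
+```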
#### Add properties
diff --git a/doc/development/fe_guide/graphql.md b/doc/development/fe_guide/graphql.md
index 9bfc68bd2ac..22b977519be 100644
--- a/doc/development/fe_guide/graphql.md
+++ b/doc/development/fe_guide/graphql.md
@@ -104,7 +104,7 @@ Default client accepts two parameters: `resolvers` and `config`.
### Multiple client queries for the same object
-If you are making multiple queries to the same Apollo client object you might encounter the following error: `Cache data may be lost when replacing the someProperty field of a Query object. To address this problem, either ensure all objects of SomeEntityhave an id or a custom merge function`. We are already checking `id` presence for every GraphQL type that has an `id`, so this shouldn't be the case (unless you see this warning when running unit tests; in this case please ensure your mocked responses contain an `id` whenever it's requested).
+If you are making multiple queries to the same Apollo client object, you might encounter the following error: `Cache data may be lost when replacing the someProperty field of a Query object. To address this problem, either ensure all objects of SomeEntity have an id or a custom merge function`. We are already checking `id` presence for every GraphQL type that has an `id`, so this shouldn't be the case (unless you see this warning when running unit tests; in this case ensure your mocked responses contain an `id` whenever it's requested).
When `SomeEntity` type doesn't have an `id` property in the GraphQL schema, to fix this warning we need to define a custom merge function.
diff --git a/doc/development/fe_guide/index.md b/doc/development/fe_guide/index.md
index 3c7c000dbe6..40ff704edfa 100644
--- a/doc/development/fe_guide/index.md
+++ b/doc/development/fe_guide/index.md
@@ -11,7 +11,7 @@ across the GitLab frontend team.
## Overview
-GitLab is built on top of [Ruby on Rails](https://rubyonrails.org). It uses [Haml](https://haml.info/) and a JavaScript-based frontend with [Vue.js](https://vuejs.org). If you are not sure when to use Vue on top of Haml-page, please read [this explanation](vue.md#when-to-add-vue-application).
+GitLab is built on top of [Ruby on Rails](https://rubyonrails.org). It uses [Haml](https://haml.info/) and a JavaScript-based frontend with [Vue.js](https://vuejs.org). If you are not sure when to use Vue on top of a Haml page, read [this explanation](vue.md#when-to-add-vue-application).
@@ -19,7 +19,7 @@ Be wary of [the limitations that come with using Hamlit](https://github.com/k0ku
-When it comes to CSS, we use a utils-based CSS approach. GitLab has its own CSS utils which are packaged inside the `gitlab-ui` project and can be seen [in the repository](https://gitlab.com/gitlab-org/gitlab-ui/-/tree/main/src/scss/utility-mixins) or on [UNPKG](https://unpkg.com/browse/@gitlab/ui@latest/src/scss/utility-mixins/). Please favor using these before adding or using any SCSS classes.
+When it comes to CSS, we use a utils-based CSS approach. GitLab has its own CSS utils which are packaged inside the `gitlab-ui` project and can be seen [in the repository](https://gitlab.com/gitlab-org/gitlab-ui/-/tree/main/src/scss/utility-mixins) or on [UNPKG](https://unpkg.com/browse/@gitlab/ui@latest/src/scss/utility-mixins/). Favor using these before adding or using any SCSS classes.
We also use [SCSS](https://sass-lang.com) and plain JavaScript with
modern ECMAScript standards supported through [Babel](https://babeljs.io/) and ES module support through [webpack](https://webpack.js.org/).
@@ -31,7 +31,7 @@ We use [Apollo](https://www.apollographql.com/) as our global state manager and
You should **not** [use VueX and Apollo together](graphql.md#using-with-vuex),
and should [avoid adding new VueX stores](migrating_from_vuex.md) whenever possible.
-For copy strings and translations, we have frontend utilities available. Please see the JavaScript section of [Preparing a page for translation](../i18n/externalization.md#javascript-files) for more information.
+For copy strings and translations, we have frontend utilities available. See the JavaScript section of [Preparing a page for translation](../i18n/externalization.md#javascript-files) for more information.
Working with our frontend assets requires Node (v12.22.1 or greater) and Yarn
(v1.10.0 or greater). You can find information on how to install these on our
diff --git a/doc/development/fe_guide/onboarding_course/index.md b/doc/development/fe_guide/onboarding_course/index.md
index 082ab08fe58..d8d4cdad8a6 100644
--- a/doc/development/fe_guide/onboarding_course/index.md
+++ b/doc/development/fe_guide/onboarding_course/index.md
@@ -46,7 +46,7 @@ A fortnightly 1-on-1 mentoring sessions are also available to each participant.
There are 10 places available on the course.
The date will be set after the course material has been prepared.
-Please complete the [Frontend Onboarding Course Application Form](https://forms.gle/39Rs4w4ZxQuByhE4A) to apply.
+Complete the [Frontend Onboarding Course Application Form](https://forms.gle/39Rs4w4ZxQuByhE4A) to apply.
You may also participate in the course informally at your own pace, without the benefit of the synchronous office hours or mentoring session.
GitLab team members are happy to support you regardless.
diff --git a/doc/development/fe_guide/vue.md b/doc/development/fe_guide/vue.md
index 1886e9c7483..a50e3fea605 100644
--- a/doc/development/fe_guide/vue.md
+++ b/doc/development/fe_guide/vue.md
@@ -39,9 +39,9 @@ In the past, we added interactivity to the page piece-by-piece, adding multiple
- multiple applications lead to unpredictable user experience, increased page complexity, harder debugging process;
- the way apps communicate with each other affects Web Vitals numbers.
-Because of these reasons, we want to be cautious about adding new Vue applications to the pages where another Vue application is already present (this does not include old or new navigation). Before adding a new app, please make sure that it is absolutely impossible to extend an existing application to achieve a desired functionality. When in doubt, please feel free to ask for the architectural advise on `#frontend` or `#frontend-maintainers` Slack channel.
+For these reasons, we want to be cautious about adding new Vue applications to the pages where another Vue application is already present (this does not include old or new navigation). Before adding a new app, make sure that it is absolutely impossible to extend an existing application to achieve the desired functionality. When in doubt, feel free to ask for architectural advice in the `#frontend` or `#frontend-maintainers` Slack channels.
-If you still need to add a new application, please make sure it shares local state with existing applications (preferably via Apollo Client, or Vuex if we use REST API)
+If you still need to add a new application, make sure it shares local state with existing applications (preferably via Apollo Client, or Vuex if we use the REST API).
## Vue architecture
diff --git a/doc/development/fe_guide/vuex.md b/doc/development/fe_guide/vuex.md
index 1adbd892fc0..aff3588a503 100644
--- a/doc/development/fe_guide/vuex.md
+++ b/doc/development/fe_guide/vuex.md
@@ -6,7 +6,7 @@ info: Any user with at least the Maintainer role can merge updates to this conte
# Vuex
-[Vuex](https://vuex.vuejs.org) should no longer be considered a preferred path to store management and is currently in its legacy phase. This means it is acceptable to add upon existing `Vuex` stores, but we strongly recommend reducing store sizes over time and eventually [migrating away from VueX entirely](migrating_from_vuex.md). Before adding any new `Vuex` store to an application, first ensure that the `Vue` application you plan to add it into **does not use** `Apollo`. `Vuex` and `Apollo` should not be combined unless absolutely necessary. Please consider reading through [our GraphQL documentation](../fe_guide/graphql.md) for more guidelines on how you can build `Apollo` based applications.
+[Vuex](https://vuex.vuejs.org) should no longer be considered a preferred path to store management and is currently in its legacy phase. This means it is acceptable to add upon existing `Vuex` stores, but we strongly recommend reducing store sizes over time and eventually [migrating away from VueX entirely](migrating_from_vuex.md). Before adding any new `Vuex` store to an application, first ensure that the `Vue` application you plan to add it into **does not use** `Apollo`. `Vuex` and `Apollo` should not be combined unless absolutely necessary. Consider reading through [our GraphQL documentation](../fe_guide/graphql.md) for more guidelines on how you can build `Apollo` based applications.
The information included in this page is explained in more detail in the
official [Vuex documentation](https://vuex.vuejs.org).
diff --git a/doc/development/feature_flags/controls.md b/doc/development/feature_flags/controls.md
index 69822f0d03d..1fffd3c652b 100644
--- a/doc/development/feature_flags/controls.md
+++ b/doc/development/feature_flags/controls.md
@@ -42,7 +42,7 @@ The GitLab feature library (using
[Feature flags process](https://about.gitlab.com/handbook/product-development-flow/feature-flag-lifecycle/) guide) supports rolling out changes to a percentage of
time to users. This in turn can be controlled using [GitLab ChatOps](../../ci/chatops/index.md).
-For an up to date list of feature flag commands please see
+For an up-to-date list of feature flag commands, see
[the source code](https://gitlab.com/gitlab-com/chatops/blob/master/lib/chatops/commands/feature.rb).
Note that all the examples in that file must be preceded by
`/chatops run`.
@@ -104,7 +104,7 @@ Guidelines:
- Consider notifying `#support_gitlab-com` beforehand. So in case if the feature has any side effects on user experience, they can mitigate and disable the feature flag to reduce some impact.
- If the feature meets the requirements for creating a [Change Management](https://about.gitlab.com/handbook/engineering/infrastructure/change-management/#feature-flags-and-the-change-management-process) issue, create a Change Management issue per [criticality guidelines](https://about.gitlab.com/handbook/engineering/infrastructure/change-management/#change-request-workflows).
- For simple, low-risk, easily reverted features, proceed and [enable the feature in `#production`](#process).
-- For support requests to toggle feature flags for specific groups or projects, please follow the process outlined in the [support workflows](https://about.gitlab.com/handbook/support/workflows/saas_feature_flags.html).
+- For support requests to toggle feature flags for specific groups or projects, follow the process outlined in the [support workflows](https://about.gitlab.com/handbook/support/workflows/saas_feature_flags.html).
#### Guideline for which percentages to choose during the rollout
@@ -203,7 +203,7 @@ Before enabling a feature flag, verify that you are not violating any [Productio
The following `/chatops` commands should be performed in the Slack
`#production` channel.
-When you begin to enable the feature, please link to the relevant
+When you begin to enable the feature, link to the relevant
feature flag rollout issue within a Slack thread of the first `/chatops`
command you make so people can understand the change if they need to.
diff --git a/doc/development/feature_flags/index.md b/doc/development/feature_flags/index.md
index 3e20eb0ab53..17dd8432f7b 100644
--- a/doc/development/feature_flags/index.md
+++ b/doc/development/feature_flags/index.md
@@ -19,7 +19,7 @@ All newly-introduced feature flags should be [used with an actor](controls.md#pe
This document is the subject of continued work as part of an epic to [improve internal usage of feature flags](https://gitlab.com/groups/gitlab-org/-/epics/3551). Raise any suggestions as new issues and attach them to the epic.
-For an [overview of the feature flag lifecycle](https://about.gitlab.com/handbook/product-development-flow/feature-flag-lifecycle/#feature-flag-lifecycle), or if you need help deciding [if you should use a feature flag](https://about.gitlab.com/handbook/product-development-flow/feature-flag-lifecycle/#when-to-use-feature-flags) or not, please see the [feature flag lifecycle](https://about.gitlab.com/handbook/product-development-flow/feature-flag-lifecycle/) handbook page.
+For an [overview of the feature flag lifecycle](https://about.gitlab.com/handbook/product-development-flow/feature-flag-lifecycle/#feature-flag-lifecycle), or if you need help deciding [if you should use a feature flag](https://about.gitlab.com/handbook/product-development-flow/feature-flag-lifecycle/#when-to-use-feature-flags) or not, see the [feature flag lifecycle](https://about.gitlab.com/handbook/product-development-flow/feature-flag-lifecycle/) handbook page.
## When to use feature flags
@@ -604,7 +604,7 @@ with the untested code path should be manually tested before deployment to produ
When using the testing environment, all feature flags are enabled by default.
Flags can be disabled by default in the [`spec/spec_helper.rb` file](https://gitlab.com/gitlab-org/gitlab/-/blob/b61fba42eea2cf5bb1ca64e80c067a07ed5d1921/spec/spec_helper.rb#L274).
-Please add a comment inline to explain why the flag needs to be disabled. You can also attach the issue URL for reference if possible.
+Add a comment inline to explain why the flag needs to be disabled. You can also attach the issue URL for reference if possible.
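+For example, a disabled-by-default flag might look like this (the flag name and issue link are hypothetical):
+
+```ruby
+# spec/spec_helper.rb (illustrative sketch)
+RSpec.configure do |config|
+  config.before do
+    # Disabled by default because enabling it changes navigation in every spec.
+    # Rollout issue: https://gitlab.com/gitlab-org/gitlab/-/issues/123456 (hypothetical).
+    stub_feature_flags(my_experimental_navigation: false)
+  end
+end
+```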
WARNING:
This does not apply to end-to-end (QA) tests, which [do not enable feature flags by default](#end-to-end-qa-tests). There is a different [process for using feature flags in end-to-end tests](../testing_guide/end_to_end/feature_flags.md).
diff --git a/doc/development/features_inside_dot_gitlab.md b/doc/development/features_inside_dot_gitlab.md
index 889e1349485..1217c2bf596 100644
--- a/doc/development/features_inside_dot_gitlab.md
+++ b/doc/development/features_inside_dot_gitlab.md
@@ -7,7 +7,7 @@ info: Any user with at least the Maintainer role can merge updates to this conte
# Features inside the `.gitlab/` directory
We have implemented standard features that depend on configuration files in the `.gitlab/` directory. You can find `.gitlab/` in various GitLab repositories.
-When implementing new features, please refer to these existing features to avoid conflicts:
+When implementing new features, refer to these existing features to avoid conflicts:
- [Issue Templates](../user/project/description_templates.md#create-an-issue-template): `.gitlab/issue_templates/`.
- [Merge request Templates](../user/project/description_templates.md#create-a-merge-request-template): `.gitlab/merge_request_templates/`.
diff --git a/doc/development/file_storage.md b/doc/development/file_storage.md
index 6d236b6f1b7..550b56af520 100644
--- a/doc/development/file_storage.md
+++ b/doc/development/file_storage.md
@@ -8,7 +8,7 @@ info: Any user with at least the Maintainer role can merge updates to this conte
We use the [CarrierWave](https://github.com/carrierwaveuploader/carrierwave) gem to handle file upload, store and retrieval.
-File uploads should be accelerated by workhorse, for details please refer to [uploads development documentation](uploads/index.md).
+File uploads should be accelerated by Workhorse. For details, refer to the [uploads development documentation](uploads/index.md).
There are many places where file uploading is used, according to contexts:
diff --git a/doc/development/gitaly.md b/doc/development/gitaly.md
index 5f82344156c..a391daf8962 100644
--- a/doc/development/gitaly.md
+++ b/doc/development/gitaly.md
@@ -73,12 +73,12 @@ when Gitaly is called more than 30 times in a single Rails request or Sidekiq ex
As a temporary measure, export `GITALY_DISABLE_REQUEST_LIMITS=1` to suppress the error. This disables the n+1 detection
in your development environment.
-Please raise an issue in the GitLab CE or EE repositories to report the issue. Include the labels ~Gitaly
-~performance ~"technical debt". Please ensure that the issue contains the full stack trace and error message of the
+Raise an issue in the GitLab CE or EE repositories to report the problem. Include the labels ~Gitaly
+~performance ~"technical debt". Ensure that the issue contains the full stack trace and error message of the
`TooManyInvocationsError`. Also include any known failing tests if possible.
Isolate the source of the n+1 problem. This is usually a loop that results in Gitaly being called for each
-element in an array. If you are unable to isolate the problem, please contact a member
+element in an array. If you are unable to isolate the problem, contact a member
of the [Gitaly Team](https://gitlab.com/groups/gl-gitaly/group_members) for assistance.
After the source has been found, wrap it in an `allow_n_plus_1_calls` block, as follows:
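+A sketch of this pattern (the loop over `commit_shas` is a hypothetical n+1 source; wrap your actual loop):
+
+```ruby
+Gitlab::GitalyClient.allow_n_plus_1_calls do
+  commit_shas.each do |sha|
+    project.repository.commit(sha) # one FindCommit request per iteration
+  end
+end
+```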
diff --git a/doc/development/gitpod_internals.md b/doc/development/gitpod_internals.md
index 90d808e87c5..33e18dc2133 100644
--- a/doc/development/gitpod_internals.md
+++ b/doc/development/gitpod_internals.md
@@ -19,7 +19,7 @@ The current settings are:
A webhook that starts with `https://gitpod.io/` is created to enable prebuilds (see [Enabling Prebuilds](https://www.gitpod.io/docs/configure/authentication/gitlab#enabling-prebuilds) for more details). The webhook is maintained by an [Engineering Productivity team](https://about.gitlab.com/handbook/engineering/quality/engineering-productivity/).
-You can find this webhook in [Webhook Settings in `gitlab-org/gitlab`](https://gitlab.com/gitlab-org/gitlab/-/hooks). If you cannot access this setting, please chat to the [Engineering Productivity team](https://about.gitlab.com/handbook/engineering/quality/engineering-productivity/).
+You can find this webhook in [Webhook Settings in `gitlab-org/gitlab`](https://gitlab.com/gitlab-org/gitlab/-/hooks). If you cannot access this setting, contact the [Engineering Productivity team](https://about.gitlab.com/handbook/engineering/quality/engineering-productivity/).
### Troubleshooting a failed webhook
diff --git a/doc/development/go_guide/index.md b/doc/development/go_guide/index.md
index 97f863082a2..a9f4b22c778 100644
--- a/doc/development/go_guide/index.md
+++ b/doc/development/go_guide/index.md
@@ -26,7 +26,7 @@ can still have specifics. They are described in their respective
The Go upgrade documentation [provides an overview](go_upgrade.md#overview)
of how GitLab manages and ships Go binary support.
-If a GitLab component requires a newer version of Go, please
+If a GitLab component requires a newer version of Go,
follow the [upgrade process](go_upgrade.md#updating-go-version) to ensure no customer, team, or component is adversely impacted.
Sometimes, individual projects must also [manage builds with multiple versions of Go](go_upgrade.md#supporting-multiple-go-versions).
diff --git a/doc/development/gotchas.md b/doc/development/gotchas.md
index 6864676a719..0f872bfff01 100644
--- a/doc/development/gotchas.md
+++ b/doc/development/gotchas.md
@@ -76,7 +76,7 @@ When run, this spec doesn't do what we might expect:
This is because FactoryBot sequences are not reset for each example.
-Please remember that sequence-generated values exist only to avoid having to
+Remember that sequence-generated values exist only to avoid having to
explicitly set attributes that have a uniqueness constraint when using a factory.
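+For example, when a spec needs a specific value, set the attribute explicitly instead of asserting on whatever the sequence produced (the factory and attribute below are illustrative):
+
+```ruby
+# Set the unique attribute you care about; rely on sequences only for uniqueness.
+user = create(:user, username: 'a-specific-username')
+
+expect(user.username).to eq('a-specific-username')
+```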
### Solution
diff --git a/doc/development/i18n/translation.md b/doc/development/i18n/translation.md
index 63c325cfaa0..7149d431c30 100644
--- a/doc/development/i18n/translation.md
+++ b/doc/development/i18n/translation.md
@@ -96,7 +96,7 @@ For example, in German, the word _user_ can be translated into _Benutzer_ (male)
### Updating the glossary
-To propose additions to the glossary, please
+To propose additions to the glossary,
[open an issue](https://gitlab.com/gitlab-org/gitlab/-/issues?scope=all&utf8=✓&state=all&label_name[]=Category%3AInternationalization).
## French translation guidelines
diff --git a/doc/development/import_export.md b/doc/development/import_export.md
index 55db6a3ca3b..909c31c1b62 100644
--- a/doc/development/import_export.md
+++ b/doc/development/import_export.md
@@ -288,13 +288,13 @@ Fixtures used in Import/Export specs live in `spec/fixtures/lib/gitlab/import_ex
There are two versions of each of these fixtures:
- A human readable single JSON file with all objects, called either `project.json` or `group.json`.
-- A folder named `tree`, containing a tree of files in `ndjson` format. **Please do not edit files under this folder manually unless strictly necessary.**
+- A folder named `tree`, containing a tree of files in `ndjson` format. **Do not edit files under this folder manually unless strictly necessary.**
The tools to generate the NDJSON tree from the human-readable JSON files live in the [`gitlab-org/memory-team/team-tools`](https://gitlab.com/gitlab-org/memory-team/team-tools/-/blob/master/import-export/) project.
### Project
-**Please use `legacy-project-json-to-ndjson.sh` to generate the NDJSON tree.**
+**Use `legacy-project-json-to-ndjson.sh` to generate the NDJSON tree.**
The NDJSON tree looks like:
@@ -328,7 +328,7 @@ tree
### Group
-**Please use `legacy-group-json-to-ndjson.rb` to generate the NDJSON tree.**
+**Use `legacy-group-json-to-ndjson.rb` to generate the NDJSON tree.**
The NDJSON tree looks like this:
@@ -355,4 +355,4 @@ tree
```
WARNING:
-When updating these fixtures, please ensure you update both `json` files and `tree` folder, as the tests apply to both.
+When updating these fixtures, ensure you update both the `json` files and the `tree` folder, as the tests apply to both.
diff --git a/doc/development/integrations/secure_partner_integration.md b/doc/development/integrations/secure_partner_integration.md
index 082168fcfdc..53c333a6f13 100644
--- a/doc/development/integrations/secure_partner_integration.md
+++ b/doc/development/integrations/secure_partner_integration.md
@@ -104,7 +104,7 @@ and complete an integration with the Secure stage.
- If you specified `remediations` in your artifact, it is proposed through our [remediation](../../user/application_security/vulnerabilities/index.md#resolve-a-vulnerability)
interface.
1. Demo the integration to GitLab:
- - After you have tested and are ready to demo your integration please
+ - After you have tested and are ready to demo your integration,
[reach out](https://about.gitlab.com/partners/technology-partners/integrate/) to us. If you
skip this step you won't be able to do supported marketing.
1. Begin doing supported marketing of your GitLab integration.
@@ -119,4 +119,4 @@ that may be helpful as part of this process. This covers various topics related
tool.
If you have any issues while working through your integration or the steps
-above, please create an issue to discuss with us further.
+above, create an issue to discuss with us further.
diff --git a/doc/development/internal_analytics/internal_event_instrumentation/migration.md b/doc/development/internal_analytics/internal_event_instrumentation/migration.md
index f0e39b148ec..32a68011e5a 100644
--- a/doc/development/internal_analytics/internal_event_instrumentation/migration.md
+++ b/doc/development/internal_analytics/internal_event_instrumentation/migration.md
@@ -23,7 +23,7 @@ The event triggered by Internal Events has some special properties compared to p
1. The `label`, `property` and `value` attributes are not used within Internal Events and are always empty.
1. The `category` is automatically set to `InternalEventTracking`
-Please make sure that you are okay with this change before you migrate and dashboards are changed accordingly.
+Make sure that you are okay with this change before you migrate, and that dashboards are updated accordingly.
### Backend
diff --git a/doc/development/internal_analytics/internal_event_instrumentation/quick_start.md b/doc/development/internal_analytics/internal_event_instrumentation/quick_start.md
index c5388754bab..34be2080e95 100644
--- a/doc/development/internal_analytics/internal_event_instrumentation/quick_start.md
+++ b/doc/development/internal_analytics/internal_event_instrumentation/quick_start.md
@@ -44,7 +44,7 @@ Where:
## Trigger events
-Triggering an event and thereby updating a metric is slightly different on backend and frontend. Please refer to the relevant section below.
+Triggering an event and thereby updating a metric is slightly different on backend and frontend. Refer to the relevant section below.
### Backend tracking
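+A minimal backend call looks roughly like this (the event name is hypothetical, and `current_user` and `project` are assumed to be in scope):
+
+```ruby
+Gitlab::InternalEvents.track_event(
+  'create_my_hypothetical_record', # event name from a (hypothetical) event definition file
+  user: current_user,
+  project: project
+)
+```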
@@ -71,7 +71,7 @@ If a `project` but no `namespace` is provided, the `project.namespace` is used a
Any frontend tracking call automatically passes the values `user.id`, `namespace.id`, and `project.id` from the current context of the page.
-If you need to pass any further properties, such as `extra`, `context`, `label`, `property`, and `value`, you can use the [deprecated snowplow implementation](https://docs.gitlab.com/16.4/ee/development/internal_analytics/snowplow/implementation.html). In this case, please let us know about your specific use-case in our [feedback issue for Internal Events](https://gitlab.com/gitlab-org/analytics-section/analytics-instrumentation/internal/-/issues/690).
+If you need to pass any further properties, such as `extra`, `context`, `label`, `property`, and `value`, you can use the [deprecated snowplow implementation](https://docs.gitlab.com/16.4/ee/development/internal_analytics/snowplow/implementation.html). In this case, let us know about your specific use-case in our [feedback issue for Internal Events](https://gitlab.com/gitlab-org/analytics-section/analytics-instrumentation/internal/-/issues/690).
#### Vue components
diff --git a/doc/development/internal_analytics/metrics/metrics_lifecycle.md b/doc/development/internal_analytics/metrics/metrics_lifecycle.md
index cb734901c2e..681992b4379 100644
--- a/doc/development/internal_analytics/metrics/metrics_lifecycle.md
+++ b/doc/development/internal_analytics/metrics/metrics_lifecycle.md
@@ -24,7 +24,7 @@ As a result, if you need to change one of the following parts of a metric, you n
- **calculation logic**: This means any changes that can produce a different value than the previous implementation
- **YAML attributes**: The following attributes are directly used for analysis or calculation: `key_path`, `time_frame`, `value_type`, `data_source`.
-If you change the `performance_indicator_type` attribute of a metric or think your case needs an exception from the outlined rules then please notify the Customer Success Ops team (`@csops-team`), Analytics Engineers (`@gitlab-data/analytics-engineers`), and Product Analysts (`@gitlab-data/product-analysts`) teams by `@` mentioning those groups in a comment on the merge request or issue.
+If you change the `performance_indicator_type` attribute of a metric, or think your case needs an exception from the outlined rules, notify the Customer Success Ops team (`@csops-team`), Analytics Engineers (`@gitlab-data/analytics-engineers`), and Product Analysts (`@gitlab-data/product-analysts`) by `@` mentioning those groups in a comment on the merge request or issue.
You can change any other attributes without impact to the calculation or analysis. See [this video tutorial](https://youtu.be/bYf3c01KCls) for help updating metric attributes.
@@ -91,7 +91,7 @@ To remove a metric:
therefore continue to report the removed metric. The Analytics Instrumentation team
requires a record of all removed metrics to identify and filter them.
- For example please take a look at this [merge request](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/60149/diffs#b01f429a54843feb22265100c0e4fec1b7da1240_10_10).
+ For example, take a look at this [merge request](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/60149/diffs#b01f429a54843feb22265100c0e4fec1b7da1240_10_10).
1. After you verify the metric can be safely removed,
remove the metric's instrumentation from
@@ -99,7 +99,7 @@ To remove a metric:
or
[`ee/lib/ee/gitlab/usage_data.rb`](https://gitlab.com/gitlab-org/gitlab/-/blob/master/ee/lib/ee/gitlab/usage_data.rb).
- For example please take a look at this [merge request](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/60149/diffs#6335dc533bd21df26db9de90a02dd66278c2390d_167_167).
+ For example, take a look at this [merge request](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/60149/diffs#6335dc533bd21df26db9de90a02dd66278c2390d_167_167).
1. Remove any other records related to the metric:
- The feature flag YAML file at [`config/feature_flags/*/*.yaml`](https://gitlab.com/gitlab-org/gitlab/-/tree/master/config/feature_flags).
diff --git a/doc/development/labels/index.md b/doc/development/labels/index.md
index 90253d101d4..4288408baf6 100644
--- a/doc/development/labels/index.md
+++ b/doc/development/labels/index.md
@@ -24,9 +24,9 @@ Most issues will have labels for at least one of the following:
- Priority: `~"priority::1"`, `~"priority::2"`, `~"priority::3"`, `~"priority::4"`
- Severity: `~"severity::1"`, `~"severity::2"`, `~"severity::3"`, `~"severity::4"`
-Please add `~"breaking change"` label if the issue can be considered as a [breaking change](../deprecation_guidelines/index.md).
+Add the `~"breaking change"` label if the issue can be considered a [breaking change](../deprecation_guidelines/index.md).
-Please add `~security` label if the issue is related to application security.
+Add the `~security` label if the issue is related to application security.
All labels, their meaning and priority are defined on the
[labels page](https://gitlab.com/gitlab-org/gitlab/-/labels).
@@ -252,7 +252,7 @@ We have the following priority labels:
- `~"priority::3"`
- `~"priority::4"`
-Please refer to the issue triage [priority label](https://about.gitlab.com/handbook/engineering/quality/issue-triage/#priority) section in our handbook to see how it's used.
+Refer to the issue triage [priority label](https://about.gitlab.com/handbook/engineering/quality/issue-triage/#priority) section in our handbook to see how it's used.
## Severity labels
@@ -263,7 +263,7 @@ We have the following severity labels:
- `~"severity::3"`
- `~"severity::4"`
-Please refer to the issue triage [severity label](https://about.gitlab.com/handbook/engineering/quality/issue-triage/#severity) section in our handbook to see how it's used.
+Refer to the issue triage [severity label](https://about.gitlab.com/handbook/engineering/quality/issue-triage/#severity) section in our handbook to see how it's used.
## Label for community contributors
@@ -301,7 +301,7 @@ with the [label `~"Community Challenge"`](https://gitlab.com/gitlab-org/gitlab/-
If your MR for the `~"Community Challenge"` issue gets merged, you will also have a chance to win a custom
GitLab merchandise.
-If you've decided that you would like to work on an issue, please @-mention
+If you've decided that you would like to work on an issue, @-mention
the [appropriate product manager](https://about.gitlab.com/handbook/product/#who-to-talk-to-for-what)
as soon as possible. The product manager will then pull in appropriate GitLab team
members to further discuss scope, design, and technical considerations. This will
diff --git a/doc/development/licensing.md b/doc/development/licensing.md
index 2c11756acbc..c84f42270c3 100644
--- a/doc/development/licensing.md
+++ b/doc/development/licensing.md
@@ -44,7 +44,7 @@ To tell License Finder about a dependency's license if it isn't auto-detected:
license_finder licenses add my_unknown_dependency MIT
```
-For all of the above, please include `--why "Reason"` and `--who "My Name"` so the `decisions.yml` file can keep track of when, why, and who approved of a dependency.
+For all of the above, include `--why "Reason"` and `--who "My Name"` so the `decisions.yml` file can keep track of when, why, and by whom a dependency was approved.
More detailed information on how the gem and its commands work is available in the [License Finder README](https://github.com/pivotal/LicenseFinder).
@@ -77,4 +77,4 @@ Those projects are set to use a test license encryption key by default.
## Additional information
-Please see the [Open Source](https://about.gitlab.com/handbook/engineering/open-source/#using-open-source-libraries) page for more information on licensing.
+See the [Open Source](https://about.gitlab.com/handbook/engineering/open-source/#using-open-source-libraries) page for more information on licensing.
diff --git a/doc/development/logging.md b/doc/development/logging.md
index 123c5c46f92..2af914d76ef 100644
--- a/doc/development/logging.md
+++ b/doc/development/logging.md
@@ -361,7 +361,7 @@ class MyExampleWorker
end
```
-Please see [this example](https://gitlab.com/gitlab-org/gitlab/-/blob/16ecc33341a3f6b6bebdf78d863c5bce76b040d3/app/workers/ci/pipeline_artifacts/expire_artifacts_worker.rb#L20-21)
+See [this example](https://gitlab.com/gitlab-org/gitlab/-/blob/16ecc33341a3f6b6bebdf78d863c5bce76b040d3/app/workers/ci/pipeline_artifacts/expire_artifacts_worker.rb#L20-21)
which logs a count of how many artifacts are destroyed per run of the `ExpireArtifactsWorker`.
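+The pattern in the linked worker is roughly the following (the helper method is illustrative):
+
+```ruby
+def perform
+  destroyed_count = destroy_expired_artifacts # hypothetical helper
+
+  # Adds `destroyed_artifacts_count` to the structured log line that is
+  # emitted when the job finishes.
+  log_extra_metadata_on_done(:destroyed_artifacts_count, destroyed_count)
+end
+```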
## Exception Handling
diff --git a/doc/development/merge_request_concepts/performance.md b/doc/development/merge_request_concepts/performance.md
index 548bd6e1e55..7a1e33494d0 100644
--- a/doc/development/merge_request_concepts/performance.md
+++ b/doc/development/merge_request_concepts/performance.md
@@ -286,7 +286,7 @@ be clearly mentioned in the merge request description.
**Summary:** Iterating a single process to external services (for example, PostgreSQL, Redis, Object Storage)
should be executed in a **batch-style** to reduce connection overheads.
-For fetching rows from various tables in a batch-style, please see [Eager Loading](#eager-loading) section.
+For fetching rows from various tables in a batch-style, see the [Eager Loading](#eager-loading) section.
### Example: Delete multiple files from Object Storage
@@ -323,7 +323,7 @@ Using [`ReactiveCaching`](../utilities.md#reactivecaching) is one of the best so
transactions, otherwise it leads to severe contention problems
as an open transaction basically blocks the release of a PostgreSQL backend connection.
-For keeping transaction as minimal as possible, please consider using `AfterCommitQueue`
+To keep transactions as minimal as possible, consider using the `AfterCommitQueue`
module or `after_commit` AR hook.
Here is [an example](https://gitlab.com/gitlab-org/gitlab/-/issues/36154#note_247228859)
diff --git a/doc/development/migration_style_guide.md b/doc/development/migration_style_guide.md
index 25d48632bd6..30f598ef736 100644
--- a/doc/development/migration_style_guide.md
+++ b/doc/development/migration_style_guide.md
@@ -27,7 +27,7 @@ When writing your migrations, also consider that databases might have stale data
or inconsistencies and guard for that. Try to make as few assumptions as
possible about the state of the database.
-Please don't depend on GitLab-specific code since it can change in future
+Don't depend on GitLab-specific code since it can change in future
versions. If needed copy-paste GitLab code into the migration to make it forward
compatible.
@@ -140,7 +140,7 @@ Changes to the schema should be committed to `db/structure.sql`. This
file is automatically generated by Rails when you run
`bundle exec rails db:migrate`, so you typically should not
edit this file by hand. If your migration is adding a column to a
-table, that column is added at the bottom. Please do not reorder
+table, that column is added at the bottom. Do not reorder
columns manually for existing tables as this causes confusion to
other people using `db/structure.sql` generated by Rails.
@@ -1025,7 +1025,7 @@ steps in the [database dictionary guide](database/database_dictionary.md#droppin
Dropping a database table is uncommon, and the `drop_table` method
provided by Rails is generally considered safe. Before dropping the table,
-please consider the following:
+consider the following:
If your table has foreign keys on a [high-traffic table](#high-traffic-tables) (like `projects`), then
the `DROP TABLE` statement is likely to stall concurrent traffic until it fails with **statement timeout** error.
@@ -1370,7 +1370,7 @@ See the [Testing Rails migrations](testing_guide/testing_migrations_guide.md) st
## Data migration
-Please prefer Arel and plain SQL over usual ActiveRecord syntax. In case of
+Prefer Arel and plain SQL over the usual ActiveRecord syntax. When
using plain SQL, you need to quote all input manually with `quote_string` helper.
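+For example, a minimal sketch of quoting input for plain SQL inside a migration (table, column, and value are illustrative):
+
+```ruby
+name = connection.quote_string("O'Brien")
+
+execute("UPDATE users SET name = '#{name}' WHERE id = 1")
+```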
Example with Arel:
diff --git a/doc/development/multi_version_compatibility.md b/doc/development/multi_version_compatibility.md
index 4f1fc1b0f10..40a30aa4926 100644
--- a/doc/development/multi_version_compatibility.md
+++ b/doc/development/multi_version_compatibility.md
@@ -243,7 +243,7 @@ With all those details in mind, let's imagine we need to replace a query, and th
1. **contract**: from `Schema B` to `Schema C` (post-deployment migration). Nothing uses the old index anymore, we can safely remove it.
This is only an example. More complex migrations, especially when background migrations are needed may
-require more than one milestone. For details please refer to our [migration style guide](migration_style_guide.md).
+require more than one milestone. For details, refer to our [migration style guide](migration_style_guide.md).
## Examples of previous incidents
diff --git a/doc/development/pipelines/index.md b/doc/development/pipelines/index.md
index c7b6470f9b0..c4dfda9466a 100644
--- a/doc/development/pipelines/index.md
+++ b/doc/development/pipelines/index.md
@@ -105,7 +105,7 @@ In addition, there are a few circumstances where we would always run the full RS
#### Have you encountered a problem with backend predictive tests?
-If so, please have a look at [the Engineering Productivity RUNBOOK on predictive tests](https://gitlab.com/gitlab-org/quality/engineering-productivity/team/-/blob/main/runbooks/predictive-tests.md) for instructions on how to act upon predictive tests issues. Additionally, if you identified any test selection gaps, please let `@gl-quality/eng-prod` know so that we can take the necessary steps to optimize test selections.
+If so, have a look at [the Engineering Productivity RUNBOOK on predictive tests](https://gitlab.com/gitlab-org/quality/engineering-productivity/team/-/blob/main/runbooks/predictive-tests.md) for instructions on how to act upon predictive tests issues. Additionally, if you identified any test selection gaps, let `@gl-quality/eng-prod` know so that we can take the necessary steps to optimize test selections.
### Jest predictive jobs
@@ -130,7 +130,7 @@ The `rules` definitions for full Jest tests are defined at `.frontend:rules:jest
#### Have you encountered a problem with frontend predictive tests?
-If so, please have a look at [the Engineering Productivity RUNBOOK on predictive tests](https://gitlab.com/gitlab-org/quality/engineering-productivity/team/-/blob/main/runbooks/predictive-tests.md) for instructions on how to act upon predictive tests issues.
+If so, have a look at [the Engineering Productivity RUNBOOK on predictive tests](https://gitlab.com/gitlab-org/quality/engineering-productivity/team/-/blob/main/runbooks/predictive-tests.md) for instructions on how to act upon predictive tests issues.
### Fork pipelines
@@ -302,7 +302,7 @@ The intent is to ensure that a change doesn't introduce a failure after `gitlab-
#### What it is
This pipeline is also called [JiHu validation pipeline](https://about.gitlab.com/handbook/ceo/chief-of-staff-team/jihu-support/jihu-validation-pipelines.html),
-and it's currently allowed to fail. When that happens, please follow
+and it's currently allowed to fail. When that happens, follow
[What to do when the validation pipeline fails](https://about.gitlab.com/handbook/ceo/chief-of-staff-team/jihu-support/jihu-validation-pipelines.html#what-to-do-when-the-validation-pipeline-failed).
#### How we run it
diff --git a/doc/development/project_templates.md b/doc/development/project_templates.md
index d3114e7b2b6..7200927858c 100644
--- a/doc/development/project_templates.md
+++ b/doc/development/project_templates.md
@@ -8,11 +8,11 @@ info: "To determine the technical writer assigned to the Stage/Group associated
## Adding a new built-in project template
-If you'd like to contribute a new built-in project template to be distributed with GitLab, please do the following:
+If you'd like to contribute a new built-in project template to be distributed with GitLab, do the following:
1. Create a new public project with the project content you'd like to contribute in a namespace of your choosing. You can view a working example [here](https://gitlab.com/gitlab-org/project-templates/dotnetcore).
- Projects should be as simple as possible and free of any unnecessary assets or dependencies.
-1. When the project is ready for review, please create a new issue in [GitLab](https://gitlab.com/gitlab-org/gitlab/issues) with a link to your project.
+1. When the project is ready for review, create a new issue in [GitLab](https://gitlab.com/gitlab-org/gitlab/issues) with a link to your project.
- In your issue, `@` mention the relevant Backend Engineering Manager and Product Manager for the [Create:Source Code group](https://about.gitlab.com/handbook/product/categories/#source-code-group).
To make the project template available when creating a new project, the vendoring process will have to be completed:
@@ -59,7 +59,7 @@ To make the project template available when creating a new project, the vendorin
Existing templates are available in the [project-templates](https://gitlab.com/gitlab-org/project-templates)
group.
-To contribute a change, please open a merge request in the relevant project
+To contribute a change, open a merge request in the relevant project
and mention `@gitlab-org/manage/import/backend` when you are ready for a review.
Then, if your merge request gets accepted, either open an issue on
@@ -79,7 +79,7 @@ Complete the following steps to test the project template in your own GitLab Dev
## For GitLab team members
-Please ensure the merge request has been reviewed by the Security Counterpart before merging.
+Ensure the merge request has been reviewed by the Security Counterpart before merging.
To review a merge request which changes a vendored project template, run the `check-template-changes` script:
diff --git a/doc/development/rails_endpoints/index.md b/doc/development/rails_endpoints/index.md
index 5442e0313ac..adc17fde1d0 100644
--- a/doc/development/rails_endpoints/index.md
+++ b/doc/development/rails_endpoints/index.md
@@ -19,7 +19,7 @@ These Rails Endpoints:
## Proof of concept period: Feedback Request
-We are currently evaluating a new approach for documenting Rails endpoints. Please [check out the Feedback Issue](https://gitlab.com/gitlab-org/gitlab/-/issues/411605) and feel free to share your thoughts, suggestions, or concerns. We appreciate your participation in helping us improve the documentation!
+We are currently evaluating a new approach for documenting Rails endpoints. [Check out the Feedback Issue](https://gitlab.com/gitlab-org/gitlab/-/issues/411605) and feel free to share your thoughts, suggestions, or concerns. We appreciate your participation in helping us improve the documentation!
## SAST Scanners
diff --git a/doc/development/ruby3_gotchas.md b/doc/development/ruby3_gotchas.md
index 16ca804e6d6..d32f71948d7 100644
--- a/doc/development/ruby3_gotchas.md
+++ b/doc/development/ruby3_gotchas.md
@@ -150,7 +150,7 @@ We run automated detection for this warning in tests via `deprecation_toolkit`,
but it relies on the fact that `Kernel#warn` emits a warning, so stubbing out this call will effectively remove the call to warn, which means `deprecation_toolkit` will never see the deprecation warnings.
Stubbing out the implementation removes that warning, and we never pick it up, so the build is green.
-Please refer to [issue 364099](https://gitlab.com/gitlab-org/gitlab/-/issues/364099) for more context.
+Refer to [issue 364099](https://gitlab.com/gitlab-org/gitlab/-/issues/364099) for more context.
## Testing in `irb` and `rails console`
diff --git a/doc/development/search/advanced_search_migration_styleguide.md b/doc/development/search/advanced_search_migration_styleguide.md
index e02ae7372fe..0ad3b5dc7c9 100644
--- a/doc/development/search/advanced_search_migration_styleguide.md
+++ b/doc/development/search/advanced_search_migration_styleguide.md
@@ -425,7 +425,7 @@ being upgraded to, we do the following:
### Process for removing migrations
1. Select migrations that were marked as obsolete before the current major release
-1. If the step above includes all obsolete migrations, please keep one last migration as a safeguard for customers with unapplied migrations
+1. If the step above includes all obsolete migrations, keep one last migration as a safeguard for customers with unapplied migrations
1. Delete migration files and spec files for those migrations
1. Verify that there are no references of the migrations in the `.rubocop_todo/` directory.
1. Create a merge request and assign it to a team member from the global search team.
diff --git a/doc/development/sec/analyzer_development_guide.md b/doc/development/sec/analyzer_development_guide.md
index 956c7b464aa..eb59d8fcaf5 100644
--- a/doc/development/sec/analyzer_development_guide.md
+++ b/doc/development/sec/analyzer_development_guide.md
@@ -226,7 +226,7 @@ After the above steps have been completed, the automatic release process execute
### Steps to perform after releasing an analyzer
-1. After a new version of the analyzer Docker image has been tagged and deployed, please test it with the corresponding test project.
+1. After a new version of the analyzer Docker image has been tagged and deployed, test it with the corresponding test project.
1. Announce the release on the relevant group Slack channel. Example message:
> FYI I've just released `ANALYZER_NAME` `ANALYZER_VERSION`. `LINK_TO_RELEASE`
diff --git a/doc/development/sec/index.md b/doc/development/sec/index.md
index 5deb153fe9b..9e8486b26fa 100644
--- a/doc/development/sec/index.md
+++ b/doc/development/sec/index.md
@@ -71,7 +71,7 @@ For details on how GitLab processes the reports generated by the scanners, see
## CI/CD template development
While CI/CD templates are the responsibility of the Verify section, many are critical to the Sec Section's feature usage.
-If you are working with CI/CD templates, please read the [development guide for GitLab CI/CD templates](../cicd/templates.md).
+If you are working with CI/CD templates, read the [development guide for GitLab CI/CD templates](../cicd/templates.md).
## Importance of the primary identifier
diff --git a/doc/development/secure_coding_guidelines.md b/doc/development/secure_coding_guidelines.md
index c68a1a6bcab..a575d1ff890 100644
--- a/doc/development/secure_coding_guidelines.md
+++ b/doc/development/secure_coding_guidelines.md
@@ -14,17 +14,16 @@ goal of reducing the number of vulnerabilities released over time.
**Contributing**
If you would like to contribute to one of the existing documents, or add
-guidelines for a new vulnerability type, please open an MR! Please try to
+guidelines for a new vulnerability type, open an MR! Try to
include links to examples of the vulnerability found, and link to any resources
-used in defined mitigations. If you have questions or when ready for a review,
-please ping `gitlab-com/gl-security/appsec`.
+used in defined mitigations. If you have questions or are ready for a review, ping `gitlab-com/gl-security/appsec`.
## Permissions
### Description
Application permissions are used to determine who can access what and what actions they can perform.
-For more information about the permission model at GitLab, please see [the GitLab permissions guide](permissions.md) or the [EE docs on permissions](../../ee/user/permissions.md).
+For more information about the permission model at GitLab, see [the GitLab permissions guide](permissions.md) or the [EE docs on permissions](../../ee/user/permissions.md).
### Impact
@@ -340,7 +339,7 @@ The injected client-side code is executed on the victim's browser in the context
- potentially [obtain the victim's session tokens](https://youtu.be/2VFavqfDS6w?t=739)
- perform actions that lead to data loss/theft or account takeover
-Much of the impact is contingent upon the function of the application and the capabilities of the victim's session. For further impact possibilities, please check out [the beef project](https://beefproject.com/).
+Much of the impact is contingent upon the function of the application and the capabilities of the victim's session. For further impact possibilities, check out [the beef project](https://beefproject.com/).
For a demonstration of the impact on GitLab with a realistic attack scenario, see [this video on the GitLab Unfiltered channel](https://www.youtube.com/watch?v=t4PzHNycoKo) (internal, it requires being logged in with the GitLab Unfiltered account).
diff --git a/doc/development/sidekiq/idempotent_jobs.md b/doc/development/sidekiq/idempotent_jobs.md
index 30e41884870..029b18adb46 100644
--- a/doc/development/sidekiq/idempotent_jobs.md
+++ b/doc/development/sidekiq/idempotent_jobs.md
@@ -80,7 +80,7 @@ GitLab supports two deduplication strategies:
More [deduplication strategies have been suggested](https://gitlab.com/gitlab-com/gl-infra/scalability/-/issues/195).
If you are implementing a worker that could benefit from a different
-strategy, please comment in the issue.
+strategy, comment in the issue.
#### Until Executing
diff --git a/doc/development/sql.md b/doc/development/sql.md
index e62500e44b6..86892f86e0c 100644
--- a/doc/development/sql.md
+++ b/doc/development/sql.md
@@ -86,7 +86,7 @@ _can_ be used by `ILIKE` / `LIKE` and can lead to greatly improved performance.
One downside of these indexes is that they can easily get quite large (depending
on the amount of data indexed).
-To keep naming of these indexes consistent please use the following naming
+To keep naming of these indexes consistent, use the following naming
pattern:
```plaintext
diff --git a/doc/development/testing_guide/best_practices.md b/doc/development/testing_guide/best_practices.md
index e6155fa36bf..458a5efea42 100644
--- a/doc/development/testing_guide/best_practices.md
+++ b/doc/development/testing_guide/best_practices.md
@@ -385,7 +385,7 @@ NOTE:
`stub_method` does not support method existence and method arity checks.
WARNING:
-`stub_method` is supposed to be used in factories only. It's strongly discouraged to be used elsewhere. Please consider using [RSpec mocks](https://rspec.info/features/3-12/rspec-mocks/) if available.
+`stub_method` is supposed to be used in factories only. Using it elsewhere is strongly discouraged. Consider using [RSpec mocks](https://rspec.info/features/3-12/rspec-mocks/) if available.
#### Stubbing member access level
@@ -582,7 +582,7 @@ Use the coverage reports to ensure your tests cover 100% of your code.
NOTE:
Before writing a new system test,
-[please consider **not** writing one](testing_levels.md#consider-not-writing-a-system-test)!
+[consider **not** writing one](testing_levels.md#consider-not-writing-a-system-test)!
- Feature specs should be named `ROLE_ACTION_spec.rb`, such as
`user_changes_password_spec.rb`.
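As a hedged aside on the `stub_method` warning in the hunks above: a minimal sketch of the suggested alternative, plain RSpec mocks in an ordinary spec. The class and method names here are made up for illustration and are not part of the patch.

```ruby
# Sketch only: stubbing with standard RSpec mocks instead of stub_method.
RSpec.describe 'stubbing with RSpec mocks' do
  it 'stubs a method on a test double' do
    user = double('User')
    allow(user).to receive(:admin?).and_return(true)

    expect(user.admin?).to be(true)
  end
end
```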
diff --git a/doc/development/testing_guide/end_to_end/best_practices.md b/doc/development/testing_guide/end_to_end/best_practices.md
index 71d607f6f05..cd92f8656e9 100644
--- a/doc/development/testing_guide/end_to_end/best_practices.md
+++ b/doc/development/testing_guide/end_to_end/best_practices.md
@@ -361,7 +361,7 @@ When you add a new test that requires administrator access, apply the RSpec meta
When running tests locally or configuring a pipeline, the environment variable `QA_CAN_TEST_ADMIN_FEATURES` can be set to `false` to skip tests that have the `:requires_admin` tag.
NOTE:
-If the _only_ action in the test that requires administrator access is to toggle a feature flag, please use the `feature_flag` tag instead. More details can be found in [testing with feature flags](feature_flags.md).
+If the _only_ action in the test that requires administrator access is to toggle a feature flag, use the `feature_flag` tag instead. More details can be found in [testing with feature flags](feature_flags.md).
## Prefer `Commit` resource over `ProjectPush`
diff --git a/doc/development/testing_guide/end_to_end/feature_flags.md b/doc/development/testing_guide/end_to_end/feature_flags.md
index 0b1525fb973..e11119d2c0b 100644
--- a/doc/development/testing_guide/end_to_end/feature_flags.md
+++ b/doc/development/testing_guide/end_to_end/feature_flags.md
@@ -16,7 +16,7 @@ and `GITLAB_ADMIN_PASSWORD`.
## `feature_flag` RSpec tag
-Please be sure to include the `feature_flag` tag so that the test can be skipped on the appropriate environments.
+Be sure to include the `feature_flag` tag so that the test can be skipped on the appropriate environments.
**Optional metadata:**
@@ -181,7 +181,7 @@ active feature flag. To circumvent this behavior, add a wait for elements behind
It's also possible to run an entire scenario with a feature flag enabled, without having to edit
existing tests or write new ones.
-Please see the [QA README](https://gitlab.com/gitlab-org/gitlab/-/tree/master/qa#running-tests-with-a-feature-flag-enabled)
+See the [QA README](https://gitlab.com/gitlab-org/gitlab/-/tree/master/qa#running-tests-with-a-feature-flag-enabled)
for details.
## Confirming that end-to-end tests pass with a feature flag enabled
@@ -219,4 +219,4 @@ pass on the default branch. The end-to-end tests run on the default branch every
If the relevant tests do not enable the feature flag themselves, you can check if the tests will need to be updated by opening
a draft merge request that enables the flag by default via a [feature flag definition file](../../feature_flags/index.md#feature-flag-definition-and-validation).
That will [automatically execute the end-to-end test suite](#automatic-test-execution-when-a-feature-flag-definition-changes).
-The merge request can be closed once the tests pass. If you need assistance to update the tests, please contact the relevant [stable counterpart in the Quality department](https://about.gitlab.com/handbook/engineering/quality/#individual-contributors), or any Software Engineer in Test if there is no stable counterpart for your group.
+The merge request can be closed once the tests pass. If you need assistance to update the tests, contact the relevant [stable counterpart in the Quality department](https://about.gitlab.com/handbook/engineering/quality/#individual-contributors), or any Software Engineer in Test if there is no stable counterpart for your group.
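A hedged sketch of the `feature_flag` RSpec tag discussed in the hunks above. The flag name, describe block, and metadata shape shown here are illustrative assumptions, not taken from the codebase.

```ruby
# Illustrative only: tag an end-to-end spec with feature_flag metadata so it
# can be skipped on environments where the flag cannot be toggled.
module QA
  RSpec.describe 'Example area', feature_flag: { name: 'example_flag' } do
    it 'works with the flag enabled' do
      # test body omitted for brevity
    end
  end
end
```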
diff --git a/doc/development/testing_guide/end_to_end/index.md b/doc/development/testing_guide/end_to_end/index.md
index 7ab1e588a40..33465816ec1 100644
--- a/doc/development/testing_guide/end_to_end/index.md
+++ b/doc/development/testing_guide/end_to_end/index.md
@@ -266,7 +266,7 @@ use the [GitLab QA orchestrator](https://gitlab.com/gitlab-org/gitlab-qa/tree/ma
On the other hand, if you would like to run against a local development GitLab
environment, you can use the [GitLab Development Kit (GDK)](https://gitlab.com/gitlab-org/gitlab-development-kit/).
-Please refer to the instructions in the [QA README](https://gitlab.com/gitlab-org/gitlab/-/tree/master/qa/README.md#how-can-i-use-it)
+Refer to the instructions in the [QA README](https://gitlab.com/gitlab-org/gitlab/-/tree/master/qa/README.md#how-can-i-use-it)
and the section below.
### Running tests that require special setup
diff --git a/doc/development/testing_guide/end_to_end/page_objects.md b/doc/development/testing_guide/end_to_end/page_objects.md
index 6e05bb972f8..812d2724b72 100644
--- a/doc/development/testing_guide/end_to_end/page_objects.md
+++ b/doc/development/testing_guide/end_to_end/page_objects.md
@@ -306,5 +306,5 @@ from within the `qa` directory.
If you need more information, ask for help on `#test-platform` channel on Slack
(internal, GitLab Team only).
-If you are not a Team Member, and you still need help to contribute, please
+If you are not a Team Member, and you still need help to contribute,
open an issue in GitLab CE issue tracker with the `~QA` label.
diff --git a/doc/development/testing_guide/end_to_end/resources.md b/doc/development/testing_guide/end_to_end/resources.md
index 2b5d6fcf855..7ff44f8cddb 100644
--- a/doc/development/testing_guide/end_to_end/resources.md
+++ b/doc/development/testing_guide/end_to_end/resources.md
@@ -494,7 +494,7 @@ We have a mechanism to [collect](https://gitlab.com/gitlab-org/gitlab/-/blob/443
all resources created during test executions, and another to [handle](https://gitlab.com/gitlab-org/gitlab/-/blob/44345381e89d6bbd440f7b4c680d03e8b75b86de/qa/qa/tools/test_resources_handler.rb#L44)
these resources. On [dotcom environments](https://about.gitlab.com/handbook/engineering/infrastructure/environments/#environments), after a test suite finishes in the [QA pipelines](https://about.gitlab.com/handbook/engineering/quality/quality-engineering/debugging-qa-test-failures/#scheduled-qa-test-pipelines), resources from all passing test are
automatically deleted in the same pipeline run. Resources from all failed tests are reserved for investigation,
-and won't be deleted until the following Saturday by a scheduled pipeline. When introducing new resources, please
+and won't be deleted until the following Saturday by a scheduled pipeline. When introducing new resources,
also make sure to add any resource that cannot be deleted to the [IGNORED_RESOURCES](https://gitlab.com/gitlab-org/gitlab/-/blob/44345381e89d6bbd440f7b4c680d03e8b75b86de/qa/qa/tools/test_resources_handler.rb#L29)
list.
@@ -503,5 +503,5 @@ list.
If you need more information, ask for help on `#test-platform` channel on Slack
(internal, GitLab Team only).
-If you are not a Team Member, and you still need help to contribute, please
+If you are not a Team Member, and you still need help to contribute,
open an issue in GitLab CE issue tracker with the `~QA` label.
diff --git a/doc/development/testing_guide/end_to_end/running_tests_that_require_special_setup.md b/doc/development/testing_guide/end_to_end/running_tests_that_require_special_setup.md
index 83c194afc60..c2661a15c3d 100644
--- a/doc/development/testing_guide/end_to_end/running_tests_that_require_special_setup.md
+++ b/doc/development/testing_guide/end_to_end/running_tests_that_require_special_setup.md
@@ -10,7 +10,7 @@ info: Any user with at least the Maintainer role can merge updates to this conte
The [`jenkins_build_status_spec`](https://gitlab.com/gitlab-org/gitlab/-/blob/24a86debf49f3aed6f2ecfd6e8f9233b3a214181/qa/qa/specs/features/browser_ui/3_create/jenkins/jenkins_build_status_spec.rb)
spins up a Jenkins instance in a Docker container with the Jenkins GitLab plugin pre-installed. Due to a license restriction we are unable to distribute this image.
-To build a QA compatible image, please visit the [third party images project](https://gitlab.com/gitlab-org/quality/third-party-docker-public), where third party Dockerfiles can be found.
+To build a QA compatible image, visit the [third party images project](https://gitlab.com/gitlab-org/quality/third-party-docker-public), where third party Dockerfiles can be found.
The project also has instructions for forking and building the images automatically in CI.
Some extra environment variables for the location of the forked repository are also needed.
@@ -47,7 +47,7 @@ bin/qa Test::Instance::All http://localhost -- qa/specs/features/ee/browser_ui/3
The test automatically spins up a Docker container for Jenkins and tear down once the test completes.
-If you need to run Jenkins manually outside of the tests, please refer to the README for the
+If you need to run Jenkins manually outside of the tests, refer to the README for the
[third party images project](https://gitlab.com/gitlab-org/quality/third-party-docker-public/-/blob/main/jenkins/README.md)
### Troubleshooting
@@ -420,7 +420,7 @@ To run these tests locally against the GDK:
QA_LOG_LEVEL=debug WEBDRIVER_HEADLESS=false bin/qa Test::Instance::All http://localhost:3000 qa/specs/features/browser_ui/2_plan/email/trigger_email_notification_spec.rb -- --tag orchestrated
```
-For instructions on how to run these tests using the `gitlab-qa` gem, please refer to [the GitLab QA documentation](https://gitlab.com/gitlab-org/gitlab-qa/-/blob/master/docs/what_tests_can_be_run.md#testintegrationsmtp-ceeefull-image-address).
+For instructions on how to run these tests using the `gitlab-qa` gem, refer to [the GitLab QA documentation](https://gitlab.com/gitlab-org/gitlab-qa/-/blob/master/docs/what_tests_can_be_run.md#testintegrationsmtp-ceeefull-image-address).
## Guide to the mobile suite
diff --git a/doc/development/testing_guide/end_to_end/style_guide.md b/doc/development/testing_guide/end_to_end/style_guide.md
index 4ab0d06282e..966ed851115 100644
--- a/doc/development/testing_guide/end_to_end/style_guide.md
+++ b/doc/development/testing_guide/end_to_end/style_guide.md
@@ -71,7 +71,7 @@ We follow a simple formula roughly based on Hungarian notation.
- `_menu_item`
NOTE:
-If none of the listed types are suitable, please open a merge request to add an appropriate type to the list.
+If none of the listed types are suitable, open a merge request to add an appropriate type to the list.
### Examples
diff --git a/doc/development/testing_guide/review_apps.md b/doc/development/testing_guide/review_apps.md
index 51ca7e20300..d0a6fa8da5f 100644
--- a/doc/development/testing_guide/review_apps.md
+++ b/doc/development/testing_guide/review_apps.md
@@ -203,15 +203,15 @@ subgraph "CNG-mirror pipeline"
**Additional notes:**
- If the `review-deploy` job keeps failing (and a manual retry didn't help),
- please post a message in the `#g_qe_engineering_productivity` channel and/or create a `~"Engineering Productivity"` `~"ep::review apps"` `~"type::bug"`
+ post a message in the `#g_qe_engineering_productivity` channel and/or create a `~"Engineering Productivity"` `~"ep::review apps"` `~"type::bug"`
issue with a link to your merge request. The deployment failure can
reveal an actual problem introduced in your merge request (that is, this isn't
necessarily a transient failure)!
- If the `review-qa-smoke` or `review-qa-reliable` job keeps failing (we already retry them once),
- please check the job's logs: you could discover an actual problem introduced in
+ check the job's logs: you could discover an actual problem introduced in
your merge request. You can also download the artifacts to see screenshots of
the page at the time the failures occurred. If you don't find the cause of the
- failure or if it seems unrelated to your change, please post a message in the
+ failure or if it seems unrelated to your change, post a message in the
`#test-platform` channel and/or create a ~Quality ~"type::bug" issue with a link to your
merge request.
- The manual `review-stop` can be used to
diff --git a/doc/development/testing_guide/testing_migrations_guide.md b/doc/development/testing_guide/testing_migrations_guide.md
index 56d4b8b8ccb..ce6ba082f3b 100644
--- a/doc/development/testing_guide/testing_migrations_guide.md
+++ b/doc/development/testing_guide/testing_migrations_guide.md
@@ -369,7 +369,7 @@ end
## Testing a non-`ActiveRecord::Migration` class
To test a non-`ActiveRecord::Migration` test (a background migration),
-you must manually provide a required schema version. Please add a
+you must manually provide a required schema version. Add a
`schema` tag to a context that you want to switch the database schema within.
If not set, `schema` defaults to `:latest`.
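For reference, a minimal hedged sketch of the `schema` tag described above; the spec description and schema version are placeholders, and the tag only takes effect inside GitLab's own spec setup.

```ruby
# Sketch only: pin the database schema for a background migration spec.
require 'spec_helper'

RSpec.describe 'ExampleBackfill background migration', schema: 20230101000000 do
  # Examples in this context run against the 20230101000000 schema.
  # If the tag is omitted, `schema` defaults to :latest.
end
```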
diff --git a/doc/development/work_items.md b/doc/development/work_items.md
index 6ce35ea69f9..0c3bc4611f5 100644
--- a/doc/development/work_items.md
+++ b/doc/development/work_items.md
@@ -190,7 +190,7 @@ and incidents) into work items. Eventually (when these resources become regular
work items), `base_type` will be removed.
Until the architecture of WIT widgets is finalized, we are holding off on the creation of new work item
-types. If a new work item type is absolutely necessary, please reach out to a
+types. If a new work item type is absolutely necessary, reach out to a
member of the [Project Management Engineering Team](https://gitlab.com/gitlab-org/gitlab/-/issues/370599).
### Creating a new work item type in the database
diff --git a/doc/integration/advanced_search/elasticsearch_troubleshooting.md b/doc/integration/advanced_search/elasticsearch_troubleshooting.md
index 3e3de155b3f..77ffc584606 100644
--- a/doc/integration/advanced_search/elasticsearch_troubleshooting.md
+++ b/doc/integration/advanced_search/elasticsearch_troubleshooting.md
@@ -367,7 +367,7 @@ Elasticsearch administrator has more experience with.
## Issues with migrations
-Please ensure you've read about [Elasticsearch Migrations](../advanced_search/elasticsearch.md#advanced-search-migrations).
+Ensure you've read about [Elasticsearch Migrations](../advanced_search/elasticsearch.md#advanced-search-migrations).
If there is a halted migration and your [`elasticsearch.log`](../../administration/logs/index.md#elasticsearchlog) file contain errors, this could potentially be a bug/issue. Escalate to GitLab support if retrying migrations does not succeed.
@@ -395,7 +395,7 @@ see details in the [update guide](../../update/upgrading_from_source.md).
## `Elasticsearch::Transport::Transport::Errors::BadRequest`
-If you have this exception (just like in the case above but the actual message is different) please check if you have the correct Elasticsearch version and you met the other [requirements](elasticsearch.md#system-requirements).
+If you have this exception (just like in the case above but the actual message is different), check that you have the correct Elasticsearch version and that you meet the other [requirements](elasticsearch.md#system-requirements).
There is also an easy way to check it automatically with `sudo gitlab-rake gitlab:check` command.
## `Elasticsearch::Transport::Transport::Errors::RequestEntityTooLarge`
@@ -419,7 +419,7 @@ Set a custom `gitlab_rails['env']` environment variable, called [`no_proxy`](htt
WARNING:
Setting the number of replicas to `0` is discouraged (this is not allowed in the GitLab Elasticsearch Integration menu). If you are planning to add more Elasticsearch nodes (for a total of more than 1 Elasticsearch) the number of replicas needs to be set to an integer value larger than `0`. Failure to do so results in lack of redundancy (losing one node corrupts the index).
-If you have a **hard requirement to have a green status for your single node Elasticsearch cluster**, please make sure you understand the risks outlined in the previous paragraph and then run the following query to set the number of replicas to `0`(the cluster no longer tries to create any shard replicas):
+If you have a **hard requirement to have a green status for your single node Elasticsearch cluster**, make sure you understand the risks outlined in the previous paragraph and then run the following query to set the number of replicas to `0` (the cluster no longer tries to create any shard replicas):
```shell
curl --request PUT localhost:9200/gitlab-production/_settings --header 'Content-Type: application/json' \
@@ -438,7 +438,7 @@ If you're getting a `health check timeout: no Elasticsearch node available` erro
Gitlab::Elastic::Indexer::Error: time="2020-01-23T09:13:00Z" level=fatal msg="health check timeout: no Elasticsearch node available"
```
-You probably have not used either `http://` or `https://` as part of your value in the **"URL"** field of the Elasticsearch Integration Menu. Please make sure you are using either `http://` or `https://` in this field as the [Elasticsearch client for Go](https://github.com/olivere/elastic) that we are using [needs the prefix for the URL to be accepted as valid](https://github.com/olivere/elastic/commit/a80af35aa41856dc2c986204e2b64eab81ccac3a).
+You probably have not used either `http://` or `https://` as part of your value in the **"URL"** field of the Elasticsearch Integration Menu. Make sure you are using either `http://` or `https://` in this field as the [Elasticsearch client for Go](https://github.com/olivere/elastic) that we are using [needs the prefix for the URL to be accepted as valid](https://github.com/olivere/elastic/commit/a80af35aa41856dc2c986204e2b64eab81ccac3a).
After you have corrected the formatting of the URL, delete the index (via the [dedicated Rake task](elasticsearch.md#gitlab-advanced-search-rake-tasks)) and [reindex the content of your instance](elasticsearch.md#enable-advanced-search).
## My Elasticsearch cluster has a plugin and the integration is not working
diff --git a/doc/operations/error_tracking.md b/doc/operations/error_tracking.md
index 8825f3d7451..a46998c11d8 100644
--- a/doc/operations/error_tracking.md
+++ b/doc/operations/error_tracking.md
@@ -8,7 +8,7 @@ info: To determine the technical writer assigned to the Stage/Group associated w
Error Tracking allows developers to discover and view errors generated by their application. Because error information is surfaced where the code is developed, this increases efficiency and awareness. Users can choose between [GitLab Integrated error tracking](#integrated-error-tracking) and [Sentry based](#sentry-error-tracking) backends.
-To leave feedback about Error Tracking bugs or functionality, please comment in the [feedback issue](https://gitlab.com/gitlab-org/opstrace/opstrace/-/issues/2362) or open a [new issue](https://gitlab.com/gitlab-org/opstrace/opstrace/-/issues/new).
+To leave feedback about Error Tracking bugs or functionality, comment in the [feedback issue](https://gitlab.com/gitlab-org/opstrace/opstrace/-/issues/2362) or open a [new issue](https://gitlab.com/gitlab-org/opstrace/opstrace/-/issues/new).
## How error tracking works
diff --git a/doc/operations/tracing.md b/doc/operations/tracing.md
index 9ba32c6ab94..cb31fdd8025 100644
--- a/doc/operations/tracing.md
+++ b/doc/operations/tracing.md
@@ -16,7 +16,7 @@ The feature is not ready for production use.
With distributed tracing, you can troubleshoot application performance issues by inspecting how a request moves through different services and systems, the timing of each operation, and any errors or logs as they occur. Tracing is particularly useful in the context of microservice applications, which group multiple independent services collaborating to fulfill user requests.
-This feature is an [Experiment](../policy/experiment-beta-support.md). For more information, see the [group direction page](https://about.gitlab.com/direction/analytics/observability/). To leave feedback about tracing bugs or functionality, please comment in the [feedback issue](https://gitlab.com/gitlab-org/opstrace/opstrace/-/issues/2363) or open a [new issue](https://gitlab.com/gitlab-org/opstrace/opstrace/-/issues/new).
+This feature is an [Experiment](../policy/experiment-beta-support.md). For more information, see the [group direction page](https://about.gitlab.com/direction/analytics/observability/). To leave feedback about tracing bugs or functionality, comment in the [feedback issue](https://gitlab.com/gitlab-org/opstrace/opstrace/-/issues/2363) or open a [new issue](https://gitlab.com/gitlab-org/opstrace/opstrace/-/issues/new).
## Configure distributed tracing for a project
diff --git a/doc/security/index.md b/doc/security/index.md
index d3cb1d5dfa5..8fd55fd08ff 100644
--- a/doc/security/index.md
+++ b/doc/security/index.md
@@ -23,6 +23,7 @@ info: To determine the technical writer assigned to the Stage/Group associated w
- [Proxying images](asset_proxy.md)
- [CI/CD variables](../ci/variables/index.md#cicd-variable-security)
- [Token overview](token_overview.md)
+- [Rotate secrets of third-party integrations](rotate_integrations_secrets.md)
- [Maximum decompressed file size for imported archives](../administration/settings/import_and_export_settings.md#maximum-decompressed-file-size-for-imported-archives)
- [Responding to security incidents](responding_to_security_incidents.md)
diff --git a/doc/security/rotate_integrations_secrets.md b/doc/security/rotate_integrations_secrets.md
new file mode 100644
index 00000000000..a3370d7287a
--- /dev/null
+++ b/doc/security/rotate_integrations_secrets.md
@@ -0,0 +1,17 @@
+---
+stage: Govern
+group: Authentication
+info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments
+---
+
+# Rotate secrets of third-party integrations **(FREE SELF)**
+
+Rotating secrets of third-party integrations is an important security practice
+that helps mitigate the risks associated with leaked secrets, such as
+unauthorized access and potential data breaches.
+
+You should rotate the secrets of all third-party integrations at least yearly.
+An incomplete list of such secrets:
+
+- [FortiAuthenticator](../user/profile/account/two_factor_authentication.md#enable-one-time-password-using-fortiauthenticator)
+- [FortiToken Cloud](../user/profile/account/two_factor_authentication.md#enable-one-time-password-using-fortitoken-cloud)
diff --git a/doc/subscriptions/customers_portal.md b/doc/subscriptions/customers_portal.md
index 6298389dfd2..10de69242d9 100644
--- a/doc/subscriptions/customers_portal.md
+++ b/doc/subscriptions/customers_portal.md
@@ -77,7 +77,7 @@ Purchases in the Customers Portal require a credit card on record as a payment m
multiple credit cards to your account, so that purchases for different products are charged to the
correct card.
-If you would like to use an alternative method to pay, please
+If you would like to use an alternative method to pay,
[contact our Sales team](https://about.gitlab.com/sales/).
To change your payment method:
diff --git a/doc/subscriptions/gitlab_com/index.md b/doc/subscriptions/gitlab_com/index.md
index 6b4540c76a1..9299028e56a 100644
--- a/doc/subscriptions/gitlab_com/index.md
+++ b/doc/subscriptions/gitlab_com/index.md
@@ -479,7 +479,7 @@ executive dashboards to drive organizational visibility.
### Purchase additional Enterprise Agile Planning seats
-Please contact your [GitLab sales representative](https://about.gitlab.com/sales/) for more information.
+Contact your [GitLab sales representative](https://about.gitlab.com/sales/) for more information.
## Contact Support
diff --git a/doc/subscriptions/gitlab_dedicated/index.md b/doc/subscriptions/gitlab_dedicated/index.md
index 0864275a85b..0f2fdb43512 100644
--- a/doc/subscriptions/gitlab_dedicated/index.md
+++ b/doc/subscriptions/gitlab_dedicated/index.md
@@ -150,7 +150,7 @@ The following operational features are not available:
### Available AWS regions
-The following is a list of AWS regions verified for use in GitLab Dedicated. Regions must support io2 volumes and meet other requirements. If there is a region you are interested in that is not on this list, please reach out through your account representative or [GitLab Support](https://about.gitlab.com/support/) to inquire about its availability. This list will be updated from time to time as additional regions are verified.
+The following is a list of AWS regions verified for use in GitLab Dedicated. Regions must support io2 volumes and meet other requirements. If there is a region you are interested in that is not on this list, reach out through your account representative or [GitLab Support](https://about.gitlab.com/support/) to inquire about its availability. This list will be updated from time to time as additional regions are verified.
- Asia Pacific (Singapore)
- Asia Pacific (Sydney)
diff --git a/doc/subscriptions/quarterly_reconciliation.md b/doc/subscriptions/quarterly_reconciliation.md
index 339b556cdf3..87a4b65833c 100644
--- a/doc/subscriptions/quarterly_reconciliation.md
+++ b/doc/subscriptions/quarterly_reconciliation.md
@@ -81,13 +81,15 @@ sent and subject to your payment terms.
### Troubleshooting failed payment
-If your credit card is declined during the reconciliation process, an email will be sent with the subject `Your GitLab subscription failed to reconcile`. Please follow these instructions to update your payment information, and the reconciliation will be automatically retried:
+If your credit card is declined during the reconciliation process, an email will be sent with the subject `Your GitLab subscription failed to reconcile`. Follow these instructions to update your payment information, and the reconciliation will be automatically retried:
1. Log in to your account at `https://customers.gitlab.com`.
1. Go to **Payment Methods**.
1. Select **Add New Payment Method**.
1. Make sure that the payment method is set as **Default**.
+Reconciliation is retried automatically as soon as the payment method is updated.
+
## Quarterly reconciliation eligibility
### You are automatically enrolled in quarterly reconciliation if
diff --git a/doc/update/index.md b/doc/update/index.md
index b8da6a3b7e6..c0a6b64a1ac 100644
--- a/doc/update/index.md
+++ b/doc/update/index.md
@@ -218,7 +218,7 @@ upgrade stops allow required background migrations to finish.
During GitLab 16.x, we are scheduling required upgrade stops beforehand so users can better plan out appropriate upgrade stops and downtime when necessary.
-The first scheduled required upgrade stop has been announced for 16.3.x. When planning upgrades, please take this into account.
+The first scheduled required upgrade stop has been announced for 16.3.x. When planning upgrades, take this into account.
### Earlier GitLab versions
diff --git a/doc/user/analytics/value_streams_dashboard.md b/doc/user/analytics/value_streams_dashboard.md
index 7321ddfc15b..a50eab42a2d 100644
--- a/doc/user/analytics/value_streams_dashboard.md
+++ b/doc/user/analytics/value_streams_dashboard.md
@@ -10,7 +10,7 @@ info: To determine the technical writer assigned to the Stage/Group associated w
> - Released in GitLab 15.11 as an Open [Beta](../../policy/experiment-beta-support.md#beta) feature [with a flag](../../administration/feature_flags.md) named `group_analytics_dashboards_page`. Enabled by default.
> - [Generally available](https://gitlab.com/gitlab-org/gitlab/-/issues/392734) in GitLab 16.0. Feature flag `group_analytics_dashboards_page` removed.
-To help us improve the Value Streams Dashboard, please share feedback about your experience in this [survey](https://gitlab.fra1.qualtrics.com/jfe/form/SV_50guMGNU2HhLeT4).
+To help us improve the Value Streams Dashboard, share feedback about your experience in this [survey](https://gitlab.fra1.qualtrics.com/jfe/form/SV_50guMGNU2HhLeT4).
For more information, see also the [Value Stream Management category direction page](https://about.gitlab.com/direction/plan/value_stream_management/).
The Value Streams Dashboard is a customizable dashboard you can use to identify trends, patterns, and opportunities for digital transformation improvements.
diff --git a/doc/user/application_security/continuous_vulnerability_scanning/index.md b/doc/user/application_security/continuous_vulnerability_scanning/index.md
index 8d879ee6fe7..4d6d48012ae 100644
--- a/doc/user/application_security/continuous_vulnerability_scanning/index.md
+++ b/doc/user/application_security/continuous_vulnerability_scanning/index.md
@@ -25,7 +25,7 @@ Continuous Vulnerability Scanning detects vulnerabilities in the latest CycloneD
To enable Continuous Vulnerability Scanning:
- Enable [Dependency Scanning](../dependency_scanning/index.md#configuration) and ensure that its prerequisites are met.
-- On GitLab self-managed only, you can [choose package registry metadata to synchronize](../../../administration/settings/security_and_compliance.md#choose-package-registry-metadata-to-sync) in the Admin Area for the GitLab instance. For this data synchronization to work, you must allow outbound network traffic from your GitLab instance to the domain `storage.googleapis.com`. If you have limited or no network connectivity then please refer to the documentation section [running in an offline environment](#running-in-an-offline-environment) for further guidance.
+- On GitLab self-managed only, you can [choose package registry metadata to synchronize](../../../administration/settings/security_and_compliance.md#choose-package-registry-metadata-to-sync) in the Admin Area for the GitLab instance. For this data synchronization to work, you must allow outbound network traffic from your GitLab instance to the domain `storage.googleapis.com`. If you have limited or no network connectivity, refer to the documentation section [running in an offline environment](#running-in-an-offline-environment) for further guidance.
### Running in an offline environment
diff --git a/doc/user/application_security/dast/browser_based.md b/doc/user/application_security/dast/browser_based.md
index a683060fcfd..c0d71a95f91 100644
--- a/doc/user/application_security/dast/browser_based.md
+++ b/doc/user/application_security/dast/browser_based.md
@@ -89,7 +89,7 @@ A simplified timing attack works as follows:
Active scans do not use a browser to send HTTP requests in an effort to minimize scan time.
-Anti-CSRF tokens are not regenerated for attacks that submit forms. Please disable anti-CSRF tokens when running an active scan.
+Anti-CSRF tokens are not regenerated for attacks that submit forms. Disable anti-CSRF tokens when running an active scan.
## Getting started
diff --git a/doc/user/application_security/dast/checks/1336.1.md b/doc/user/application_security/dast/checks/1336.1.md
index f5a5d1eac4b..d20a67d34d4 100644
--- a/doc/user/application_security/dast/checks/1336.1.md
+++ b/doc/user/application_security/dast/checks/1336.1.md
@@ -17,7 +17,7 @@ system's integrity and confidentiality.
## Remediation
User-controlled data should always have special elements neutralized when used as part of
-constructing Expression Language statements. Please consult the documentation for the template
+constructing Expression Language statements. Consult the documentation for the template
system in use on how properly neutralize user-controlled data.
## Details
diff --git a/doc/user/application_security/dast/checks/16.11.md b/doc/user/application_security/dast/checks/16.11.md
index 6d72e5bf668..cdc0bd4e271 100644
--- a/doc/user/application_security/dast/checks/16.11.md
+++ b/doc/user/application_security/dast/checks/16.11.md
@@ -25,7 +25,7 @@ registry key:
- `HKEY_LOCAL_MACHINE\SYSTEM\CurrentControlSet\Services\W3SVC\Parameters`
-For all other server types, please consult your product's documentation on how to disable the TRACE method.
+For all other server types, consult your product's documentation on how to disable the TRACE method.
## Details
diff --git a/doc/user/application_security/dast/checks/16.2.md b/doc/user/application_security/dast/checks/16.2.md
index 2051b118009..fe6a882e3c4 100644
--- a/doc/user/application_security/dast/checks/16.2.md
+++ b/doc/user/application_security/dast/checks/16.2.md
@@ -27,7 +27,7 @@ IIS:
For IIS based websites version 10 and above you can use the `removeServerHeader` element to the `requestFiltering`
section of the `Web.config` file.
-For all other server types, please consult your product's documentation on how to redact the version information from
+For all other server types, consult your product's documentation on how to redact the version information from
the `Server` header.
## Details
diff --git a/doc/user/application_security/dast/checks/16.3.md b/doc/user/application_security/dast/checks/16.3.md
index d1799baa517..3179be7d691 100644
--- a/doc/user/application_security/dast/checks/16.3.md
+++ b/doc/user/application_security/dast/checks/16.3.md
@@ -20,7 +20,7 @@ We recommend that the version information be removed from the `X-Powered-By` hea
PHP:
For PHP based web sites, set the `expose_php` option to `off` in the `php.ini` configuration file.
-For all other server types, please consult your product's documentation on how to redact the version
+For all other server types, consult your product's documentation on how to redact the version
information from the `X-Powered-By` header.
## Details
diff --git a/doc/user/application_security/dast/checks/16.8.md b/doc/user/application_security/dast/checks/16.8.md
index b8faef75de7..816bc8bd873 100644
--- a/doc/user/application_security/dast/checks/16.8.md
+++ b/doc/user/application_security/dast/checks/16.8.md
@@ -13,7 +13,7 @@ hardening a website against various client side attacks such as Cross-Site Scrip
## Remediation
-If the target site is missing a CSP, please investigate the relevant URLs for enabling CSP. Otherwise,
+If the target site is missing a CSP, investigate the relevant URLs for enabling CSP. Otherwise,
follow the recommendations to determine if any actions are necessary.
## Details
diff --git a/doc/user/application_security/dast/checks/548.1.md b/doc/user/application_security/dast/checks/548.1.md
index 6cef8ccdb63..3bef3bdc744 100644
--- a/doc/user/application_security/dast/checks/548.1.md
+++ b/doc/user/application_security/dast/checks/548.1.md
@@ -28,7 +28,7 @@ IIS:
For IIS based websites version 7.0 and above you can use the `` element
in the `applicationHost.config` or `Web.config` files.
-For all other server types, please consult your product's documentation on how to disable directory
+For all other server types, consult your product's documentation on how to disable directory
indexing.
## Details
diff --git a/doc/user/application_security/dast/checks/917.1.md b/doc/user/application_security/dast/checks/917.1.md
index 68b9665e393..853407afe30 100644
--- a/doc/user/application_security/dast/checks/917.1.md
+++ b/doc/user/application_security/dast/checks/917.1.md
@@ -17,7 +17,7 @@ intended EL statement prior to it being executed by an interpreter.
## Remediation
User-controlled data should always have special elements neutralized when used as part of
-constructing Expression Language statements. Please consult the documentation for the EL
+constructing Expression Language statements. Consult the documentation for the EL
interpreter in use on how properly neutralize user controlled data.
## Details
diff --git a/doc/user/application_security/dast/proxy-based.md b/doc/user/application_security/dast/proxy-based.md
index 0cc016dfd1b..6127866b0a9 100644
--- a/doc/user/application_security/dast/proxy-based.md
+++ b/doc/user/application_security/dast/proxy-based.md
@@ -8,7 +8,7 @@ info: To determine the technical writer assigned to the Stage/Group associated w
WARNING:
Proxy-based DAST is [deprecated](https://gitlab.com/gitlab-org/gitlab/-/issues/430966).
-We plan to [remove support for Proxy-based DAST](../../../update/deprecations.md#proxy-based-dast-deprecated). Please migrate to [Browser-based DAST](browser_based.md)
+We plan to [remove support for Proxy-based DAST](../../../update/deprecations.md#proxy-based-dast-deprecated). Migrate to [Browser-based DAST](browser_based.md)
to continue analyzing your projects for security findings via dynamic analysis.
The DAST proxy-based analyzer can be added to your [GitLab CI/CD](../../../ci/index.md) pipeline.
diff --git a/doc/user/application_security/index.md b/doc/user/application_security/index.md
index fe7884baa23..76a4f501d86 100644
--- a/doc/user/application_security/index.md
+++ b/doc/user/application_security/index.md
@@ -592,7 +592,7 @@ variables:
This indicates to all GitLab analyzers that they are to output **all** messages. For more details,
see [logging level](#logging-level).
-
### Secure job failing with exit code 1
diff --git a/doc/user/compliance/license_scanning_of_cyclonedx_files/index.md b/doc/user/compliance/license_scanning_of_cyclonedx_files/index.md
index 40b9f6f7290..12380978719 100644
--- a/doc/user/compliance/license_scanning_of_cyclonedx_files/index.md
+++ b/doc/user/compliance/license_scanning_of_cyclonedx_files/index.md
@@ -38,7 +38,7 @@ To enable License scanning of CycloneDX files:
- Enable [Dependency Scanning](../../application_security/dependency_scanning/index.md#enabling-the-analyzer)
and ensure that its prerequisites are met.
-- On GitLab self-managed only, you can [choose package registry metadata to synchronize](../../../administration/settings/security_and_compliance.md#choose-package-registry-metadata-to-sync) in the Admin Area for the GitLab instance. For this data synchronization to work, you must allow outbound network traffic from your GitLab instance to the domain `storage.googleapis.com`. If you have limited or no network connectivity then please refer to the documentation section [running in an offline environment](#running-in-an-offline-environment) for further guidance.
+- On GitLab self-managed only, you can [choose package registry metadata to synchronize](../../../administration/settings/security_and_compliance.md#choose-package-registry-metadata-to-sync) in the Admin Area for the GitLab instance. For this data synchronization to work, you must allow outbound network traffic from your GitLab instance to the domain `storage.googleapis.com`. If you have limited or no network connectivity, refer to the documentation section [running in an offline environment](#running-in-an-offline-environment) for further guidance.
## Supported languages and package managers
diff --git a/doc/user/free_push_limit.md b/doc/user/free_push_limit.md
index d2b47228f31..53088cd4445 100644
--- a/doc/user/free_push_limit.md
+++ b/doc/user/free_push_limit.md
@@ -39,7 +39,7 @@ Git LFS is designed to work with Git to track large files.
## Feedback
-If you have any feedback to share about this limit, please do so in
+If you have any feedback to share about this limit, do so in
[issue 428188](https://gitlab.com/gitlab-org/gitlab/-/issues/428188).
## Related topics
diff --git a/doc/user/product_analytics/index.md b/doc/user/product_analytics/index.md
index 106da1d1d75..75e44471f92 100644
--- a/doc/user/product_analytics/index.md
+++ b/doc/user/product_analytics/index.md
@@ -222,7 +222,7 @@ You can [query the funnel data with the REST API](../../api/product_analytics.md
To do this, you can use the example query body below, where you need to replace `FUNNEL_NAME` with your funnel's name.
NOTE:
-The `afterDate` filter is not supported. Please use `beforeDate` or `inDateRange`.
+The `afterDate` filter is not supported. Use `beforeDate` or `inDateRange`.
```json
{
diff --git a/doc/user/project/git_attributes.md b/doc/user/project/git_attributes.md
index c34c489570a..61e73fcce81 100644
--- a/doc/user/project/git_attributes.md
+++ b/doc/user/project/git_attributes.md
@@ -108,7 +108,7 @@ with no arguments, it always returns a non-zero return code. This means that for
the files specified in `.gitattributes`, merges do nothing.
To use your own merge driver, replace the value in `driver` to point to an
-executable. For more details on how this command is invoked, please see the Git
+executable. For more details on how this command is invoked, see the Git
documentation on [custom merge drivers](https://git-scm.com/docs/gitattributes#_defining_a_custom_merge_driver).
### Use `.gitattributes` to set files custom merge driver applies to
diff --git a/lib/api/concerns/milestones/group_project_params.rb b/lib/api/concerns/milestones/group_project_params.rb
new file mode 100644
index 00000000000..72d07d7dcdb
--- /dev/null
+++ b/lib/api/concerns/milestones/group_project_params.rb
@@ -0,0 +1,54 @@
+# frozen_string_literal: true
+
+# This DRYs up some methods used by both the GraphQL and REST milestone APIs
+module API
+ module Concerns
+ module Milestones
+ module GroupProjectParams
+ extend ActiveSupport::Concern
+
+ private
+
+ def project_finder_params(parent, params)
+ return { project_ids: parent.id } unless params[:include_ancestors].present? && parent.group.present?
+
+ {
+ group_ids: parent.group.self_and_ancestors.select(:id),
+ project_ids: parent.id
+ }
+ end
+
+ def group_finder_params(parent, params)
+ include_ancestors = params[:include_ancestors].present?
+ include_descendants = params[:include_descendants].present?
+ return { group_ids: parent.id } unless include_ancestors || include_descendants
+
+ group_ids = if include_ancestors && include_descendants
+ parent.self_and_hierarchy
+ elsif include_ancestors
+ parent.self_and_ancestors
+ else
+ parent.self_and_descendants
+ end
+
+ if include_descendants
+ project_ids = group_projects(parent).with_issues_or_mrs_available_for_user(current_user)
+ end
+
+ {
+ group_ids: group_ids.public_or_visible_to_user(current_user).select(:id),
+ project_ids: project_ids
+ }
+ end
+
+ def group_projects(parent)
+ GroupProjectsFinder.new(
+ group: parent,
+ current_user: current_user,
+ options: { include_subgroups: true }
+ ).execute
+ end
+ end
+ end
+ end
+end
diff --git a/lib/api/group_milestones.rb b/lib/api/group_milestones.rb
index 0096e466bef..55c5ddfe557 100644
--- a/lib/api/group_milestones.rb
+++ b/lib/api/group_milestones.rb
@@ -19,6 +19,8 @@ module API
end
params do
use :list_params
+ optional :include_descendants, type: Grape::API::Boolean,
+ desc: 'Include milestones from all subgroups and subprojects'
end
get ":id/milestones" do
list_milestones_for(user_group)
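For context, a hedged usage sketch of the new `include_descendants` option on the group milestones endpoint; the host, group ID, and token handling are assumptions for illustration only.

```ruby
# Illustrative only: list a group's milestones including those from all
# subgroups and subprojects via the REST API.
require 'net/http'
require 'json'
require 'uri'

uri = URI('https://gitlab.example.com/api/v4/groups/42/milestones?include_descendants=true')
request = Net::HTTP::Get.new(uri)
request['PRIVATE-TOKEN'] = ENV.fetch('GITLAB_TOKEN') # assumed environment variable

response = Net::HTTP.start(uri.hostname, uri.port, use_ssl: true) { |http| http.request(request) }
JSON.parse(response.body).each { |milestone| puts "#{milestone['iid']}: #{milestone['title']}" }
```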
diff --git a/lib/api/milestone_responses.rb b/lib/api/milestone_responses.rb
index 85493dfc198..3ef30a5fcd3 100644
--- a/lib/api/milestone_responses.rb
+++ b/lib/api/milestone_responses.rb
@@ -6,6 +6,8 @@ module API
included do
helpers do
+ include ::API::Concerns::Milestones::GroupProjectParams
+
params :optional_params do
optional :description, type: String, desc: 'The description of the milestone'
optional :due_date, type: String, desc: 'The due date of the milestone. The ISO 8601 date format (%Y-%m-%d)'
@@ -90,12 +92,10 @@ module API
end
def parent_finder_params(parent)
- include_parent = params[:include_ancestors].present?
-
if parent.is_a?(Project)
- { project_ids: parent.id, group_ids: (include_parent ? project_group_ids(parent) : nil) }
+ project_finder_params(parent, params)
else
- { group_ids: (include_parent ? group_and_ancestor_ids(parent) : parent.id) }
+ group_finder_params(parent, params)
end
end
@@ -116,21 +116,6 @@ module API
[MergeRequestsFinder, Entities::MergeRequestBasic]
end
end
-
- def project_group_ids(parent)
- group = parent.group
- return unless group.present?
-
- group.self_and_ancestors.select(:id)
- end
-
- def group_and_ancestor_ids(group)
- return unless group.present?
-
- group.self_and_ancestors
- .public_or_visible_to_user(current_user)
- .select(:id)
- end
end
end
end
diff --git a/lib/api/ml_model_packages.rb b/lib/api/ml_model_packages.rb
index 8a7a8fc9525..85c8146dda8 100644
--- a/lib/api/ml_model_packages.rb
+++ b/lib/api/ml_model_packages.rb
@@ -23,8 +23,8 @@ module API
end
authenticate_with do |accept|
- accept.token_types(:personal_access_token, :deploy_token, :job_token)
- .sent_through(:http_token)
+ accept.token_types(:personal_access_token, :job_token)
+ .sent_through(:http_bearer_token)
end
helpers do
@@ -38,6 +38,15 @@ module API
def max_file_size_exceeded?
project.actual_limits.exceeded?(:ml_model_max_file_size, params[:file].size)
end
+
+ def find_model_version!
+ ::Ml::ModelVersion.by_project_id_name_and_version(project.id, params[:model_name], params[:model_version]) ||
+ not_found!
+ end
+
+ def model_version
+ @model_version ||= find_model_version!
+ end
end
params do
@@ -88,10 +97,12 @@ module API
end
put do
authorize_upload!(project)
+ not_found! unless can?(current_user, :write_model_registry, project)
bad_request!('File is too large') if max_file_size_exceeded?
create_package_file_params = declared(params).merge(
+ model_version: model_version,
build: current_authenticated_job,
package_name: params[:model_name],
package_version: params[:model_version]
@@ -123,9 +134,7 @@ module API
get do
authorize_read_package!(project)
- package = ::Packages::MlModel::PackageFinder.new(project)
- .execute!(params[:model_name], params[:model_version])
- package_file = ::Packages::PackageFileFinder.new(package, params[:file_name]).execute!
+ package_file = ::Packages::PackageFileFinder.new(model_version.package, params[:file_name]).execute!
present_package_file!(package_file)
end
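A hedged sketch of what the switch to `:http_bearer_token` in the hunk above implies for clients: the token moves to a Bearer `Authorization` header. The endpoint path shown is illustrative and not verified against the route definitions.

```ruby
# Illustrative only: send the token as a Bearer Authorization header
# (previously it was accepted as a plain HTTP token).
require 'net/http'
require 'uri'

uri = URI('https://gitlab.example.com/api/v4/projects/1/packages/ml_models/my_model/1.0.0/files/model.bin')
request = Net::HTTP::Put.new(uri)
request['Authorization'] = "Bearer #{ENV.fetch('GITLAB_TOKEN')}" # personal access token or CI job token
request.body = File.binread('model.bin')

response = Net::HTTP.start(uri.hostname, uri.port, use_ssl: true) { |http| http.request(request) }
puts response.code
```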
diff --git a/lib/tasks/gitlab/tw/codeowners.rake b/lib/tasks/gitlab/tw/codeowners.rake
index 2d004eca5f9..9def51c36a6 100644
--- a/lib/tasks/gitlab/tw/codeowners.rake
+++ b/lib/tasks/gitlab/tw/codeowners.rake
@@ -35,7 +35,7 @@ namespace :tw do
CodeOwnerRule.new('Composition Analysis', '@rdickenson'),
CodeOwnerRule.new('Container Registry', '@marcel.amirault'),
CodeOwnerRule.new('Contributor Experience', '@eread'),
- CodeOwnerRule.new('Database', '@aqualls'),
+ # CodeOwnerRule.new('Database', ''),
CodeOwnerRule.new('DataOps', '@sselhorn'),
# CodeOwnerRule.new('Delivery', ''),
CodeOwnerRule.new('Distribution', '@axil'),
diff --git a/locale/gitlab.pot b/locale/gitlab.pot
index 422f0173c21..0fd50bcc35b 100644
--- a/locale/gitlab.pot
+++ b/locale/gitlab.pot
@@ -1897,6 +1897,9 @@ msgstr ""
msgid "A rebase is already in progress."
msgstr ""
+msgid "A release with a date in the future is labeled as an %{linkStart}Upcoming Release%{linkEnd}."
+msgstr ""
+
msgid "A sign-in to your account has been made from the following IP address: %{ip}"
msgstr ""
@@ -33005,6 +33008,9 @@ msgstr ""
msgid "ObservabilityMetrics|Name"
msgstr ""
+msgid "ObservabilityMetrics|No data found for the selected metric."
+msgstr ""
+
msgid "ObservabilityMetrics|Search metrics starting with..."
msgstr ""
@@ -48537,7 +48543,7 @@ msgstr ""
msgid "The data in this pipeline is too old to be rendered as a graph. Please check the Jobs tab to access historical data."
msgstr ""
-msgid "The date when the release is ready. A release with a date in the future is labeled as an %{linkStart}Upcoming Release%{linkEnd}."
+msgid "The date when the release is ready."
msgstr ""
msgid "The default CI/CD configuration file and path for new projects."
diff --git a/qa/qa/specs/features/api/10_govern/group_access_token_spec.rb b/qa/qa/specs/features/api/10_govern/group_access_token_spec.rb
index 86590ced659..3e190608268 100644
--- a/qa/qa/specs/features/api/10_govern/group_access_token_spec.rb
+++ b/qa/qa/specs/features/api/10_govern/group_access_token_spec.rb
@@ -16,7 +16,7 @@ module QA
end
it(
- 'can be used to create a file via the project API',
+ 'can be used to create a file via the project API', :reliable,
testcase: 'https://gitlab.com/gitlab-org/gitlab/-/quality/test_cases/367064'
) do
expect do
@@ -31,7 +31,7 @@ module QA
end
it(
- 'can be used to commit via the API',
+ 'can be used to commit via the API', :reliable,
testcase: 'https://gitlab.com/gitlab-org/gitlab/-/quality/test_cases/367067'
) do
expect do
diff --git a/qa/qa/specs/features/api/10_govern/project_access_token_spec.rb b/qa/qa/specs/features/api/10_govern/project_access_token_spec.rb
index 594ea21cae9..8f4a04ef389 100644
--- a/qa/qa/specs/features/api/10_govern/project_access_token_spec.rb
+++ b/qa/qa/specs/features/api/10_govern/project_access_token_spec.rb
@@ -14,7 +14,7 @@ module QA
end
context 'for the same project' do
- it 'can be used to create a file via the project API',
+ it 'can be used to create a file via the project API', :reliable,
testcase: 'https://gitlab.com/gitlab-org/gitlab/-/quality/test_cases/347858' do
expect do
create(:file,
@@ -58,7 +58,7 @@ module QA
different_project.remove_via_api!
end
- it 'cannot be used to create a file via the project API',
+ it 'cannot be used to create a file via the project API', :reliable,
testcase: 'https://gitlab.com/gitlab-org/gitlab/-/quality/test_cases/347860' do
expect do
create(:file,
@@ -68,7 +68,7 @@ module QA
end.to raise_error(Resource::ApiFabricator::ResourceFabricationFailedError, /403 Forbidden/)
end
- it 'cannot be used to commit via the API',
+ it 'cannot be used to commit via the API', :reliable,
testcase: 'https://gitlab.com/gitlab-org/gitlab/-/quality/test_cases/347861' do
expect do
create(:commit,
diff --git a/qa/qa/specs/features/browser_ui/10_govern/group/group_access_token_spec.rb b/qa/qa/specs/features/browser_ui/10_govern/group/group_access_token_spec.rb
index 525b22c8a7d..6201ef7d58b 100644
--- a/qa/qa/specs/features/browser_ui/10_govern/group/group_access_token_spec.rb
+++ b/qa/qa/specs/features/browser_ui/10_govern/group/group_access_token_spec.rb
@@ -6,7 +6,7 @@ module QA
let(:group_access_token) { QA::Resource::GroupAccessToken.fabricate_via_browser_ui! }
it(
- 'can be created and revoked via the UI',
+ 'can be created and revoked via the UI', :reliable,
testcase: 'https://gitlab.com/gitlab-org/gitlab/-/quality/test_cases/367044'
) do
expect(group_access_token.token).not_to be_nil
diff --git a/qa/qa/specs/features/browser_ui/10_govern/user/impersonation_token_spec.rb b/qa/qa/specs/features/browser_ui/10_govern/user/impersonation_token_spec.rb
index 142d4857d10..9df86c82377 100644
--- a/qa/qa/specs/features/browser_ui/10_govern/user/impersonation_token_spec.rb
+++ b/qa/qa/specs/features/browser_ui/10_govern/user/impersonation_token_spec.rb
@@ -8,7 +8,7 @@ module QA
let!(:user) { create(:user, :hard_delete, api_client: admin_api_client) }
it(
- 'can be created and revoked via the UI',
+ 'can be created and revoked via the UI', :reliable,
testcase: 'https://gitlab.com/gitlab-org/gitlab/-/quality/test_cases/368888'
) do
impersonation_token = QA::Resource::ImpersonationToken.fabricate_via_browser_ui! do |impersonation_token|
diff --git a/spec/frontend/emoji/components/emoji_group_spec.js b/spec/frontend/emoji/components/emoji_group_spec.js
index 75397ce25ff..a2a46bedd7b 100644
--- a/spec/frontend/emoji/components/emoji_group_spec.js
+++ b/spec/frontend/emoji/components/emoji_group_spec.js
@@ -1,5 +1,6 @@
import { shallowMount } from '@vue/test-utils';
import Vue from 'vue';
+import { GlButton } from '@gitlab/ui';
import { extendedWrapper } from 'helpers/vue_test_utils_helper';
import EmojiGroup from '~/emoji/components/emoji_group.vue';
@@ -10,6 +11,9 @@ function factory(propsData = {}) {
wrapper = extendedWrapper(
shallowMount(EmojiGroup, {
propsData,
+ stubs: {
+ GlButton,
+ },
}),
);
}
@@ -19,7 +23,6 @@ describe('Emoji group component', () => {
factory({
emojis: [],
renderGroup: false,
- clickEmoji: jest.fn(),
});
expect(wrapper.findByTestId('emoji-button').exists()).toBe(false);
@@ -29,24 +32,20 @@ describe('Emoji group component', () => {
factory({
emojis: ['thumbsup', 'thumbsdown'],
renderGroup: true,
- clickEmoji: jest.fn(),
});
expect(wrapper.findAllByTestId('emoji-button').exists()).toBe(true);
expect(wrapper.findAllByTestId('emoji-button').length).toBe(2);
});
- it('calls clickEmoji', () => {
- const clickEmoji = jest.fn();
-
+ it('emits emoji-click', () => {
factory({
emojis: ['thumbsup', 'thumbsdown'],
renderGroup: true,
- clickEmoji,
});
- wrapper.findByTestId('emoji-button').trigger('click');
+ wrapper.findComponent(GlButton).vm.$emit('click');
- expect(clickEmoji).toHaveBeenCalledWith('thumbsup');
+ expect(wrapper.emitted('emoji-click')).toStrictEqual([['thumbsup']]);
});
});
diff --git a/spec/frontend/organizations/settings/general/components/change_url_spec.js b/spec/frontend/organizations/settings/general/components/change_url_spec.js
index 65289a83b7c..a4e3db0557c 100644
--- a/spec/frontend/organizations/settings/general/components/change_url_spec.js
+++ b/spec/frontend/organizations/settings/general/components/change_url_spec.js
@@ -4,10 +4,14 @@ import Vue, { nextTick } from 'vue';
import { mountExtended } from 'helpers/vue_test_utils_helper';
import ChangeUrl from '~/organizations/settings/general/components/change_url.vue';
-import resolvers from '~/organizations/shared/graphql/resolvers';
-import { updateOrganizationResponse } from '~/organizations/mock_data';
+import organizationUpdateMutation from '~/organizations/settings/general/graphql/mutations/organization_update.mutation.graphql';
+import {
+ organizationUpdateResponse,
+ organizationUpdateResponseWithErrors,
+} from '~/organizations/mock_data';
import { createAlert } from '~/alert';
import { visitUrlWithAlerts } from '~/lib/utils/url_utility';
+import FormErrorsAlert from '~/vue_shared/components/form/errors_alert.vue';
import createMockApollo from 'helpers/mock_apollo_helper';
import waitForPromises from 'helpers/wait_for_promises';
@@ -16,7 +20,6 @@ jest.mock('~/lib/utils/url_utility', () => ({
...jest.requireActual('~/lib/utils/url_utility'),
visitUrlWithAlerts: jest.fn(),
}));
-jest.useFakeTimers();
Vue.use(VueApollo);
@@ -34,8 +37,12 @@ describe('ChangeUrl', () => {
rootUrl: 'http://127.0.0.1:3000/',
};
- const createComponent = ({ mockResolvers = resolvers } = {}) => {
- mockApollo = createMockApollo([], mockResolvers);
+ const successfulResponseHandler = jest.fn().mockResolvedValue(organizationUpdateResponse);
+
+ const createComponent = ({
+ handlers = [[organizationUpdateMutation, successfulResponseHandler]],
+ } = {}) => {
+ mockApollo = createMockApollo(handlers);
wrapper = mountExtended(ChangeUrl, {
attachTo: document.body,
@@ -94,13 +101,11 @@ describe('ChangeUrl', () => {
describe('when API is loading', () => {
beforeEach(async () => {
- const mockResolvers = {
- Mutation: {
- updateOrganization: jest.fn().mockReturnValueOnce(new Promise(() => {})),
- },
- };
-
- createComponent({ mockResolvers });
+ createComponent({
+ handlers: [
+ [organizationUpdateMutation, jest.fn().mockReturnValueOnce(new Promise(() => {}))],
+ ],
+ });
await findOrganizationUrlField().setValue('foo-bar-baz');
await submitForm();
@@ -116,13 +121,18 @@ describe('ChangeUrl', () => {
createComponent();
await findOrganizationUrlField().setValue('foo-bar-baz');
await submitForm();
- jest.runAllTimers();
await waitForPromises();
});
- it('redirects user to new organization settings page and shows success alert', () => {
+ it('calls mutation with correct variables and redirects user to new organization settings page with success alert', () => {
+ expect(successfulResponseHandler).toHaveBeenCalledWith({
+ input: {
+ id: 'gid://gitlab/Organizations::Organization/1',
+ path: 'foo-bar-baz',
+ },
+ });
expect(visitUrlWithAlerts).toHaveBeenCalledWith(
- `${updateOrganizationResponse.organization.webUrl}/settings/general`,
+ `${organizationUpdateResponse.data.organizationUpdate.organization.webUrl}/settings/general`,
[
{
id: 'organization-url-successfully-changed',
@@ -135,27 +145,45 @@ describe('ChangeUrl', () => {
});
describe('when API request is not successful', () => {
- const error = new Error();
+ describe('when there is a network error', () => {
+ const error = new Error();
- beforeEach(async () => {
- const mockResolvers = {
- Mutation: {
- updateOrganization: jest.fn().mockRejectedValueOnce(error),
- },
- };
+ beforeEach(async () => {
+ createComponent({
+ handlers: [[organizationUpdateMutation, jest.fn().mockRejectedValue(error)]],
+ });
+ await findOrganizationUrlField().setValue('foo-bar-baz');
+ await submitForm();
+ await waitForPromises();
+ });
- createComponent({ mockResolvers });
- await findOrganizationUrlField().setValue('foo-bar-baz');
- await submitForm();
- jest.runAllTimers();
- await waitForPromises();
+ it('displays error alert', () => {
+ expect(createAlert).toHaveBeenCalledWith({
+ message: 'An error occurred changing your organization URL. Please try again.',
+ error,
+ captureError: true,
+ });
+ });
});
- it('displays error alert', () => {
- expect(createAlert).toHaveBeenCalledWith({
- message: 'An error occurred changing your organization URL. Please try again.',
- error,
- captureError: true,
+ describe('when there are GraphQL errors', () => {
+ beforeEach(async () => {
+ createComponent({
+ handlers: [
+ [
+ organizationUpdateMutation,
+ jest.fn().mockResolvedValue(organizationUpdateResponseWithErrors),
+ ],
+ ],
+ });
+ await submitForm();
+ await waitForPromises();
+ });
+
+ it('displays form errors alert', () => {
+ expect(wrapper.findComponent(FormErrorsAlert).props('errors')).toEqual(
+ organizationUpdateResponseWithErrors.data.organizationUpdate.errors,
+ );
});
});
});
diff --git a/spec/frontend/organizations/settings/general/components/organization_settings_spec.js b/spec/frontend/organizations/settings/general/components/organization_settings_spec.js
index 7645b41e3bd..d1c637331a8 100644
--- a/spec/frontend/organizations/settings/general/components/organization_settings_spec.js
+++ b/spec/frontend/organizations/settings/general/components/organization_settings_spec.js
@@ -6,14 +6,26 @@ import OrganizationSettings from '~/organizations/settings/general/components/or
import SettingsBlock from '~/vue_shared/components/settings/settings_block.vue';
import NewEditForm from '~/organizations/shared/components/new_edit_form.vue';
import { FORM_FIELD_NAME, FORM_FIELD_ID } from '~/organizations/shared/constants';
-import resolvers from '~/organizations/shared/graphql/resolvers';
-import { createAlert, VARIANT_INFO } from '~/alert';
+import organizationUpdateMutation from '~/organizations/settings/general/graphql/mutations/organization_update.mutation.graphql';
+import {
+ organizationUpdateResponse,
+ organizationUpdateResponseWithErrors,
+} from '~/organizations/mock_data';
+import { createAlert } from '~/alert';
+import { visitUrlWithAlerts } from '~/lib/utils/url_utility';
+import FormErrorsAlert from '~/vue_shared/components/form/errors_alert.vue';
import createMockApollo from 'helpers/mock_apollo_helper';
import waitForPromises from 'helpers/wait_for_promises';
+import { useMockLocationHelper } from 'helpers/mock_window_location_helper';
Vue.use(VueApollo);
-jest.useFakeTimers();
jest.mock('~/alert');
+jest.mock('~/lib/utils/url_utility', () => ({
+ ...jest.requireActual('~/lib/utils/url_utility'),
+ visitUrlWithAlerts: jest.fn(),
+}));
+
+useMockLocationHelper();
describe('OrganizationSettings', () => {
let wrapper;
@@ -26,8 +38,12 @@ describe('OrganizationSettings', () => {
},
};
- const createComponent = ({ mockResolvers = resolvers } = {}) => {
- mockApollo = createMockApollo([], mockResolvers);
+ const successfulResponseHandler = jest.fn().mockResolvedValue(organizationUpdateResponse);
+
+ const createComponent = ({
+ handlers = [[organizationUpdateMutation, successfulResponseHandler]],
+ } = {}) => {
+ mockApollo = createMockApollo(handlers);
wrapper = shallowMountExtended(OrganizationSettings, {
provide: defaultProvide,
@@ -66,13 +82,11 @@ describe('OrganizationSettings', () => {
describe('when form is submitted', () => {
describe('when API is loading', () => {
beforeEach(async () => {
- const mockResolvers = {
- Mutation: {
- updateOrganization: jest.fn().mockReturnValueOnce(new Promise(() => {})),
- },
- };
-
- createComponent({ mockResolvers });
+ createComponent({
+ handlers: [
+ [organizationUpdateMutation, jest.fn().mockReturnValueOnce(new Promise(() => {}))],
+ ],
+ });
await submitForm();
});
@@ -86,39 +100,65 @@ describe('OrganizationSettings', () => {
beforeEach(async () => {
createComponent();
await submitForm();
- jest.runAllTimers();
await waitForPromises();
});
- it('displays info alert', () => {
- expect(createAlert).toHaveBeenCalledWith({
- message: 'Organization was successfully updated.',
- variant: VARIANT_INFO,
+ it('calls mutation with correct variables and displays info alert', () => {
+ expect(successfulResponseHandler).toHaveBeenCalledWith({
+ input: {
+ id: 'gid://gitlab/Organizations::Organization/1',
+ name: 'Foo bar',
+ },
});
+ expect(visitUrlWithAlerts).toHaveBeenCalledWith(window.location.href, [
+ {
+ id: 'organization-successfully-updated',
+ message: 'Organization was successfully updated.',
+ variant: 'info',
+ },
+ ]);
});
});
describe('when API request is not successful', () => {
- const error = new Error();
+ describe('when there is a network error', () => {
+ const error = new Error();
- beforeEach(async () => {
- const mockResolvers = {
- Mutation: {
- updateOrganization: jest.fn().mockRejectedValueOnce(error),
- },
- };
+ beforeEach(async () => {
+ createComponent({
+ handlers: [[organizationUpdateMutation, jest.fn().mockRejectedValue(error)]],
+ });
+ await submitForm();
+ await waitForPromises();
+ });
- createComponent({ mockResolvers });
- await submitForm();
- jest.runAllTimers();
- await waitForPromises();
+ it('displays error alert', () => {
+ expect(createAlert).toHaveBeenCalledWith({
+ message: 'An error occurred updating your organization. Please try again.',
+ error,
+ captureError: true,
+ });
+ });
});
- it('displays error alert', () => {
- expect(createAlert).toHaveBeenCalledWith({
- message: 'An error occurred updating your organization. Please try again.',
- error,
- captureError: true,
+ describe('when there are GraphQL errors', () => {
+ beforeEach(async () => {
+ createComponent({
+ handlers: [
+ [
+ organizationUpdateMutation,
+ jest.fn().mockResolvedValue(organizationUpdateResponseWithErrors),
+ ],
+ ],
+ });
+ await submitForm();
+ await waitForPromises();
+ });
+
+ it('displays form errors alert', () => {
+ expect(wrapper.findComponent(FormErrorsAlert).props('errors')).toEqual(
+ organizationUpdateResponseWithErrors.data.organizationUpdate.errors,
+ );
});
});
});
diff --git a/spec/graphql/resolvers/group_milestones_resolver_spec.rb b/spec/graphql/resolvers/group_milestones_resolver_spec.rb
index b9b8ef1870b..e9caf91ecb7 100644
--- a/spec/graphql/resolvers/group_milestones_resolver_spec.rb
+++ b/spec/graphql/resolvers/group_milestones_resolver_spec.rb
@@ -102,76 +102,7 @@ RSpec.describe Resolvers::GroupMilestonesResolver, feature_category: :team_plann
end
end
- context 'when including descendant milestones in a public group' do
- let_it_be(:group) { create(:group, :public) }
-
- let(:args) { { include_descendants: true } }
-
- it 'finds milestones only in accessible projects and groups' do
- accessible_group = create(:group, :private, parent: group)
- accessible_project = create(:project, group: accessible_group)
- accessible_group.add_developer(current_user)
- inaccessible_group = create(:group, :private, parent: group)
- inaccessible_project = create(:project, :private, group: group)
- milestone1 = create(:milestone, group: group)
- milestone2 = create(:milestone, group: accessible_group)
- milestone3 = create(:milestone, project: accessible_project)
- create(:milestone, group: inaccessible_group)
- create(:milestone, project: inaccessible_project)
-
- expect(resolve_group_milestones(args: args)).to match_array([milestone1, milestone2, milestone3])
- end
- end
-
- describe 'include_descendants and include_ancestors' do
- let_it_be(:parent_group) { create(:group, :public) }
- let_it_be(:group) { create(:group, :public, parent: parent_group) }
- let_it_be(:accessible_group) { create(:group, :private, parent: group) }
- let_it_be(:accessible_project) { create(:project, group: accessible_group) }
- let_it_be(:inaccessible_group) { create(:group, :private, parent: group) }
- let_it_be(:inaccessible_project) { create(:project, :private, group: group) }
- let_it_be(:milestone1) { create(:milestone, group: group) }
- let_it_be(:milestone2) { create(:milestone, group: accessible_group) }
- let_it_be(:milestone3) { create(:milestone, project: accessible_project) }
- let_it_be(:milestone4) { create(:milestone, group: inaccessible_group) }
- let_it_be(:milestone5) { create(:milestone, project: inaccessible_project) }
- let_it_be(:milestone6) { create(:milestone, group: parent_group) }
-
- before do
- accessible_group.add_developer(current_user)
- end
-
- context 'when including neither ancestor or descendant milestones in a public group' do
- let(:args) { {} }
-
- it 'finds milestones only in accessible projects and groups' do
- expect(resolve_group_milestones(args: args)).to match_array([milestone1])
- end
- end
-
- context 'when including descendant milestones in a public group' do
- let(:args) { { include_descendants: true } }
-
- it 'finds milestones only in accessible projects and groups' do
- expect(resolve_group_milestones(args: args)).to match_array([milestone1, milestone2, milestone3])
- end
- end
-
- context 'when including ancestor milestones in a public group' do
- let(:args) { { include_ancestors: true } }
-
- it 'finds milestones only in accessible projects and groups' do
- expect(resolve_group_milestones(args: args)).to match_array([milestone1, milestone6])
- end
- end
-
- context 'when including both ancestor or descendant milestones in a public group' do
- let(:args) { { include_descendants: true, include_ancestors: true } }
-
- it 'finds milestones only in accessible projects and groups' do
- expect(resolve_group_milestones(args: args)).to match_array([milestone1, milestone2, milestone3, milestone6])
- end
- end
- end
+ # Testing for include_descendants and include_ancestors was moved into
+ # `spec/requests/api/graphql/milestone_spec.rb`
end
end
diff --git a/spec/requests/api/graphql/milestone_spec.rb b/spec/requests/api/graphql/milestone_spec.rb
index 2cea9fd0408..0dc2eabc3e1 100644
--- a/spec/requests/api/graphql/milestone_spec.rb
+++ b/spec/requests/api/graphql/milestone_spec.rb
@@ -151,4 +151,18 @@ RSpec.describe 'Querying a Milestone', feature_category: :team_planning do
end
end
end
+
+ context 'for common GraphQL/REST' do
+ it_behaves_like 'group milestones including ancestors and descendants'
+
+ def query_group_milestone_ids(params)
+ query = graphql_query_for('group', { 'fullPath' => group.full_path },
+ query_graphql_field('milestones', params, query_graphql_path([:nodes], :id))
+ )
+
+ post_graphql(query, current_user: current_user)
+
+ graphql_data_at(:group, :milestones, :nodes).pluck('id').map { |gid| GlobalID.parse(gid).model_id.to_i }
+ end
+ end
end
diff --git a/spec/requests/api/group_milestones_spec.rb b/spec/requests/api/group_milestones_spec.rb
index 2b17ce1d40f..82a4311f7d0 100644
--- a/spec/requests/api/group_milestones_spec.rb
+++ b/spec/requests/api/group_milestones_spec.rb
@@ -30,91 +30,103 @@ RSpec.describe API::GroupMilestones, feature_category: :team_planning do
it_behaves_like 'group and project milestones', "/groups/:id/milestones"
describe 'GET /groups/:id/milestones' do
- let_it_be(:ancestor_group) { create(:group, :private) }
- let_it_be(:ancestor_group_milestone) { create(:milestone, group: ancestor_group, updated_at: 2.days.ago) }
+ context 'for REST only' do
+ let_it_be(:ancestor_group) { create(:group, :private) }
+ let_it_be(:ancestor_group_milestone) { create(:milestone, group: ancestor_group, updated_at: 2.days.ago) }
- before_all do
- group.update!(parent: ancestor_group)
- end
+ before_all do
+ group.update!(parent: ancestor_group)
+ end
- context 'when include_ancestors is true' do
- let(:params) { { include_ancestors: true } }
+ context 'when include_ancestors is true' do
+ let(:params) { { include_ancestors: true } }
- context 'when user has access to ancestor groups' do
- let(:milestones) { [ancestor_group_milestone, milestone, closed_milestone] }
+ context 'when user has access to ancestor groups' do
+ let(:milestones) { [ancestor_group_milestone, milestone, closed_milestone] }
- before do
- ancestor_group.add_guest(user)
- group.add_guest(user)
- end
-
- it_behaves_like 'listing all milestones'
-
- context 'when deprecated include_parent_milestones is true' do
- let(:params) { { include_parent_milestones: true } }
+ before do
+ ancestor_group.add_guest(user)
+ group.add_guest(user)
+ end
it_behaves_like 'listing all milestones'
- end
- context 'when both include_parent_milestones and include_ancestors are specified' do
- let(:params) { { include_ancestors: true, include_parent_milestones: true } }
+ context 'when deprecated include_parent_milestones is true' do
+ let(:params) { { include_parent_milestones: true } }
- it 'returns 400' do
- get api(route, user), params: params
+ it_behaves_like 'listing all milestones'
+ end
- expect(response).to have_gitlab_http_status(:bad_request)
+ context 'when both include_parent_milestones and include_ancestors are specified' do
+ let(:params) { { include_ancestors: true, include_parent_milestones: true } }
+
+ it 'returns 400' do
+ get api(route, user), params: params
+
+ expect(response).to have_gitlab_http_status(:bad_request)
+ end
+ end
+
+ context 'when iids param is present' do
+ let(:params) { { include_ancestors: true, iids: [milestone.iid] } }
+
+ it_behaves_like 'listing all milestones'
+ end
+
+ context 'when updated_before param is present' do
+ let(:params) { { updated_before: 1.day.ago.iso8601, include_ancestors: true } }
+
+ it_behaves_like 'listing all milestones' do
+ let(:milestones) { [ancestor_group_milestone, milestone] }
+ end
+ end
+
+ context 'when updated_after param is present' do
+ let(:params) { { updated_after: 1.day.ago.iso8601, include_ancestors: true } }
+
+ it_behaves_like 'listing all milestones' do
+ let(:milestones) { [closed_milestone] }
+ end
end
end
- context 'when iids param is present' do
- let(:params) { { include_ancestors: true, iids: [milestone.iid] } }
+ context 'when user has no access to ancestor groups' do
+ let(:user) { create(:user) }
- it_behaves_like 'listing all milestones'
- end
-
- context 'when updated_before param is present' do
- let(:params) { { updated_before: 1.day.ago.iso8601, include_ancestors: true } }
-
- it_behaves_like 'listing all milestones' do
- let(:milestones) { [ancestor_group_milestone, milestone] }
+ before do
+ group.add_guest(user)
end
- end
-
- context 'when updated_after param is present' do
- let(:params) { { updated_after: 1.day.ago.iso8601, include_ancestors: true } }
it_behaves_like 'listing all milestones' do
- let(:milestones) { [closed_milestone] }
+ let(:milestones) { [milestone, closed_milestone] }
end
end
end
- context 'when user has no access to ancestor groups' do
- let(:user) { create(:user) }
-
- before do
- group.add_guest(user)
- end
+ context 'when updated_before param is present' do
+ let(:params) { { updated_before: 1.day.ago.iso8601 } }
it_behaves_like 'listing all milestones' do
- let(:milestones) { [milestone, closed_milestone] }
+ let(:milestones) { [milestone] }
+ end
+ end
+
+ context 'when updated_after param is present' do
+ let(:params) { { updated_after: 1.day.ago.iso8601 } }
+
+ it_behaves_like 'listing all milestones' do
+ let(:milestones) { [closed_milestone] }
end
end
end
- context 'when updated_before param is present' do
- let(:params) { { updated_before: 1.day.ago.iso8601 } }
+ context 'for common GraphQL/REST' do
+ it_behaves_like 'group milestones including ancestors and descendants'
- it_behaves_like 'listing all milestones' do
- let(:milestones) { [milestone] }
- end
- end
+ def query_group_milestone_ids(params)
+ get api(route, current_user), params: params
- context 'when updated_after param is present' do
- let(:params) { { updated_after: 1.day.ago.iso8601 } }
-
- it_behaves_like 'listing all milestones' do
- let(:milestones) { [closed_milestone] }
+ json_response.pluck('id')
end
end
end
diff --git a/spec/requests/api/ml_model_packages_spec.rb b/spec/requests/api/ml_model_packages_spec.rb
index 3166298b430..894127cac78 100644
--- a/spec/requests/api/ml_model_packages_spec.rb
+++ b/spec/requests/api/ml_model_packages_spec.rb
@@ -16,6 +16,8 @@ RSpec.describe ::API::MlModelPackages, feature_category: :mlops do
let_it_be(:deploy_token) { create(:deploy_token, read_package_registry: true, write_package_registry: true) }
let_it_be(:project_deploy_token) { create(:project_deploy_token, deploy_token: deploy_token, project: project) }
let_it_be(:another_project, reload: true) { create(:project) }
+ let_it_be(:model) { create(:ml_models, user: project.owner, project: project) }
+ let_it_be(:model_version) { create(:ml_model_versions, :with_package, model: model, version: '0.1.0') }
let_it_be(:tokens) do
{
@@ -70,10 +72,6 @@ RSpec.describe ::API::MlModelPackages, feature_category: :mlops do
:private | :guest | false | :job_token | true | :not_found
:private | :developer | false | :job_token | false | :unauthorized
:private | :guest | false | :job_token | false | :unauthorized
- :public | :developer | true | :deploy_token | true | :success
- :public | :developer | true | :deploy_token | false | :unauthorized
- :private | :developer | true | :deploy_token | true | :success
- :private | :developer | true | :deploy_token | false | :unauthorized
end
# :visibility, :user_role, :member, :token_type, :valid_token, :expected_status
@@ -112,10 +110,6 @@ RSpec.describe ::API::MlModelPackages, feature_category: :mlops do
:private | :guest | false | :job_token | true | :not_found
:private | :developer | false | :job_token | false | :unauthorized
:private | :guest | false | :job_token | false | :unauthorized
- :public | :developer | true | :deploy_token | true | :success
- :public | :developer | true | :deploy_token | false | :unauthorized
- :private | :developer | true | :deploy_token | true | :success
- :private | :developer | true | :deploy_token | false | :unauthorized
end
# rubocop:enable Metrics/AbcSize
end
@@ -128,14 +122,15 @@ RSpec.describe ::API::MlModelPackages, feature_category: :mlops do
include_context 'ml model authorize permissions table'
let(:token) { tokens[:personal_access_token] }
- let(:user_headers) { { 'HTTP_AUTHORIZATION' => token } }
+ let(:user_headers) { { 'Authorization' => "Bearer #{token}" } }
let(:headers) { user_headers.merge(workhorse_headers) }
let(:request) { authorize_upload_file(headers) }
- let(:model_name) { 'my_package' }
+ let(:model_name) { model_version.name }
+ let(:version) { model_version.version }
let(:file_name) { 'myfile.tar.gz' }
subject(:api_response) do
- url = "/projects/#{project.id}/packages/ml_models/#{model_name}/0.0.1/#{file_name}/authorize"
+ url = "/projects/#{project.id}/packages/ml_models/#{model_name}/#{version}/#{file_name}/authorize"
put api(url), headers: headers
@@ -149,7 +144,7 @@ RSpec.describe ::API::MlModelPackages, feature_category: :mlops do
with_them do
let(:token) { valid_token ? tokens[token_type] : 'invalid-token123' }
- let(:user_headers) { user_role == :anonymous ? {} : { 'HTTP_AUTHORIZATION' => token } }
+ let(:user_headers) { user_role == :anonymous ? {} : { 'Authorization' => "Bearer #{token}" } }
before do
project.update_column(:visibility_level, Gitlab::VisibilityLevel.level_value(visibility.to_s))
@@ -183,15 +178,16 @@ RSpec.describe ::API::MlModelPackages, feature_category: :mlops do
let_it_be(:file_name) { 'model.md5' }
let(:token) { tokens[:personal_access_token] }
- let(:user_headers) { { 'HTTP_AUTHORIZATION' => token } }
+ let(:user_headers) { { 'Authorization' => "Bearer #{token}" } }
let(:headers) { user_headers.merge(workhorse_headers) }
let(:params) { { file: temp_file(file_name) } }
let(:file_key) { :file }
let(:send_rewritten_field) { true }
- let(:model_name) { 'my_package' }
+ let(:model_name) { model_version.name }
+ let(:version) { model_version.version }
subject(:api_response) do
- url = "/projects/#{project.id}/packages/ml_models/#{model_name}/0.0.1/#{file_name}"
+ url = "/projects/#{project.id}/packages/ml_models/#{model_name}/#{version}/#{file_name}"
workhorse_finalize(
api(url),
@@ -219,7 +215,7 @@ RSpec.describe ::API::MlModelPackages, feature_category: :mlops do
with_them do
let(:token) { valid_token ? tokens[token_type] : 'invalid-token123' }
- let(:user_headers) { user_role == :anonymous ? {} : { 'HTTP_AUTHORIZATION' => token } }
+ let(:user_headers) { user_role == :anonymous ? {} : { 'Authorization' => "Bearer #{token}" } }
before do
project.update_column(:visibility_level, Gitlab::VisibilityLevel.level_value(visibility.to_s))
@@ -233,25 +229,27 @@ RSpec.describe ::API::MlModelPackages, feature_category: :mlops do
end
it_behaves_like 'Endpoint not found if read_model_registry not available'
+ it_behaves_like 'Endpoint not found if write_model_registry not available'
+ it_behaves_like 'Not found when model version does not exist'
end
end
describe 'GET /api/v4/projects/:project_id/packages/ml_models/:model_name/:model_version/:file_name' do
include_context 'ml model authorize permissions table'
- let_it_be(:package) { create(:ml_model_package, project: project, name: 'model', version: '0.0.1') }
+ let_it_be(:package) { model_version.package }
let_it_be(:package_file) { create(:package_file, :generic, package: package, file_name: 'model.md5') }
- let(:model_name) { package.name }
- let(:model_version) { package.version }
+ let(:model_name) { model_version.name }
+ let(:version) { model_version.version }
let(:file_name) { package_file.file_name }
let(:token) { tokens[:personal_access_token] }
- let(:user_headers) { { 'HTTP_AUTHORIZATION' => token } }
+ let(:user_headers) { { 'Authorization' => "Bearer #{token}" } }
let(:headers) { user_headers.merge(workhorse_headers) }
subject(:api_response) do
- url = "/projects/#{project.id}/packages/ml_models/#{model_name}/#{model_version}/#{file_name}"
+ url = "/projects/#{project.id}/packages/ml_models/#{model_name}/#{version}/#{file_name}"
get api(url), headers: headers
@@ -265,7 +263,7 @@ RSpec.describe ::API::MlModelPackages, feature_category: :mlops do
with_them do
let(:token) { valid_token ? tokens[token_type] : 'invalid-token123' }
- let(:user_headers) { user_role == :anonymous ? {} : { 'HTTP_AUTHORIZATION' => token } }
+ let(:user_headers) { user_role == :anonymous ? {} : { 'Authorization' => "Bearer #{token}" } }
before do
project.update_column(:visibility_level, Gitlab::VisibilityLevel.level_value(visibility.to_s))
diff --git a/spec/services/ml/find_or_create_model_version_service_spec.rb b/spec/services/ml/find_or_create_model_version_service_spec.rb
index e5ca7c3a450..88647f23ad9 100644
--- a/spec/services/ml/find_or_create_model_version_service_spec.rb
+++ b/spec/services/ml/find_or_create_model_version_service_spec.rb
@@ -35,21 +35,29 @@ RSpec.describe ::Ml::FindOrCreateModelVersionService, feature_category: :mlops d
end
end
- context 'when model version does not exist' do
+ context 'when model does not exist' do
let(:project) { existing_version.project }
let(:name) { 'a_new_model' }
let(:version) { '2.0.0' }
+
+ it 'does not create a new model version', :aggregate_failures do
+ expect { model_version }.to change { Ml::ModelVersion.count }.by(0)
+ end
+ end
+
+ context 'when model exists and model version does not' do
+ let(:project) { existing_version.project }
+ let(:name) { existing_version.name }
+ let(:version) { '2.0.0' }
let(:description) { 'A model version' }
let(:package) { create(:ml_model_package, project: project, name: name, version: version) }
it 'creates a new model version', :aggregate_failures do
- expect { model_version }.to change { Ml::ModelVersion.count }.by(1).and change { Ml::Candidate.count }.by(1)
+ expect { model_version }.to change { Ml::ModelVersion.count }.by(1)
- expect(model_version.name).to eq(name)
expect(model_version.version).to eq(version)
- expect(model_version.package).to eq(package)
- expect(model_version.candidate.model_version_id).to eq(model_version.id)
+ expect(model_version.model).to eq(existing_version.model)
expect(model_version.description).to eq(description)
end
end
diff --git a/spec/services/packages/ml_model/create_package_file_service_spec.rb b/spec/services/packages/ml_model/create_package_file_service_spec.rb
index 30a6bedd07b..505c8038976 100644
--- a/spec/services/packages/ml_model/create_package_file_service_spec.rb
+++ b/spec/services/packages/ml_model/create_package_file_service_spec.rb
@@ -8,6 +8,8 @@ RSpec.describe Packages::MlModel::CreatePackageFileService, feature_category: :m
let_it_be(:user) { create(:user) }
let_it_be(:pipeline) { create(:ci_pipeline, user: user, project: project) }
let_it_be(:file_name) { 'myfile.tar.gz.1' }
+ let_it_be(:model) { create(:ml_models, user: user, project: project) }
+ let_it_be(:model_version) { create(:ml_model_versions, :with_package, model: model, version: '0.1.0') }
let(:build) { instance_double(Ci::Build, pipeline: pipeline) }
@@ -26,47 +28,24 @@ RSpec.describe Packages::MlModel::CreatePackageFileService, feature_category: :m
FileUtils.rm_f(temp_file)
end
- context 'without existing package' do
+ context 'when model version is nil' do
let(:params) do
{
- package_name: 'new_model',
- package_version: '1.0.0',
+ model_version: nil,
file: file,
file_name: file_name
}
end
- it 'creates package file', :aggregate_failures do
- expect { execute_service }
- .to change { Packages::MlModel::Package.count }.by(1)
- .and change { Packages::PackageFile.count }.by(1)
- .and change { Packages::PackageFileBuildInfo.count }.by(0)
- .and change { Ml::ModelVersion.count }.by(1)
-
- new_model = Packages::MlModel::Package.last
- package_file = new_model.package_files.last
- new_model_version = Ml::ModelVersion.last
-
- expect(new_model.name).to eq('new_model')
- expect(new_model.version).to eq('1.0.0')
- expect(new_model.status).to eq('default')
- expect(package_file.package).to eq(new_model)
- expect(package_file.file_name).to eq(file_name)
- expect(package_file.size).to eq(file.size)
- expect(package_file.file_sha256).to eq(sha256)
- expect(new_model_version.name).to eq('new_model')
- expect(new_model_version.version).to eq('1.0.0')
- expect(new_model_version.package).to eq(new_model)
+ it 'does not create package file', :aggregate_failures do
+ expect(execute_service).to be(nil)
end
end
- context 'with existing package' do
- let_it_be(:model) { create(:ml_model_package, creator: user, project: project, version: '0.1.0') }
-
+ context 'with existing model version' do
let(:params) do
{
- package_name: model.name,
- package_version: model.version,
+ model_version: model_version,
file: file,
file_name: file_name,
status: :hidden,
@@ -76,18 +55,16 @@ RSpec.describe Packages::MlModel::CreatePackageFileService, feature_category: :m
it 'adds the package file and updates status and ci_build', :aggregate_failures do
expect { execute_service }
- .to change { project.packages.ml_model.count }.by(0)
- .and change { model.package_files.count }.by(1)
+ .to change { model_version.package.package_files.count }.by(1)
.and change { Packages::PackageFileBuildInfo.count }.by(1)
- model.reload
+ package = model_version.reload.package
+ package_file = package.package_files.last
- package_file = model.package_files.last
+ expect(package.build_infos.first.pipeline).to eq(build.pipeline)
+ expect(package.status).to eq('hidden')
- expect(model.build_infos.first.pipeline).to eq(build.pipeline)
- expect(model.status).to eq('hidden')
-
- expect(package_file.package).to eq(model)
+ expect(package_file.package).to eq(package)
expect(package_file.file_name).to eq(file_name)
expect(package_file.size).to eq(file.size)
expect(package_file.file_sha256).to eq(sha256)
diff --git a/spec/support/shared_examples/requests/api/graphql_rest/milestones_shared_examples.rb b/spec/support/shared_examples/requests/api/graphql_rest/milestones_shared_examples.rb
new file mode 100644
index 00000000000..8e147f43091
--- /dev/null
+++ b/spec/support/shared_examples/requests/api/graphql_rest/milestones_shared_examples.rb
@@ -0,0 +1,83 @@
+# frozen_string_literal: true
+
+# Examples for both GraphQL and REST APIs
+RSpec.shared_examples 'group milestones including ancestors and descendants' do
+ context 'for group milestones' do
+ let_it_be(:current_user) { create(:user) }
+
+ context 'when including descendant milestones in a public group' do
+ let_it_be(:group) { create(:group, :public) }
+
+ let(:params) { { include_descendants: true } }
+
+ it 'finds milestones only in accessible projects and groups' do
+ accessible_group = create(:group, :private, parent: group)
+ accessible_project = create(:project, group: accessible_group)
+ accessible_group.add_developer(current_user)
+ inaccessible_group = create(:group, :private, parent: group)
+ inaccessible_project = create(:project, :private, group: group)
+ milestone1 = create(:milestone, group: group)
+ milestone2 = create(:milestone, group: accessible_group)
+ milestone3 = create(:milestone, project: accessible_project)
+ create(:milestone, group: inaccessible_group)
+ create(:milestone, project: inaccessible_project)
+
+ milestone_ids = query_group_milestone_ids(params)
+
+ expect(milestone_ids).to match_array([milestone1, milestone2, milestone3].pluck(:id))
+ end
+ end
+
+ describe 'include_descendants and include_ancestors' do
+ let_it_be(:parent_group) { create(:group, :public) }
+ let_it_be(:group) { create(:group, :public, parent: parent_group) }
+ let_it_be(:accessible_group) { create(:group, :private, parent: group) }
+ let_it_be(:accessible_project) { create(:project, group: accessible_group) }
+ let_it_be(:inaccessible_group) { create(:group, :private, parent: group) }
+ let_it_be(:inaccessible_project) { create(:project, :private, group: group) }
+ let_it_be(:milestone1) { create(:milestone, group: group) }
+ let_it_be(:milestone2) { create(:milestone, group: accessible_group) }
+ let_it_be(:milestone3) { create(:milestone, project: accessible_project) }
+ let_it_be(:milestone4) { create(:milestone, group: inaccessible_group) }
+ let_it_be(:milestone5) { create(:milestone, project: inaccessible_project) }
+ let_it_be(:milestone6) { create(:milestone, group: parent_group) }
+
+ before_all do
+ accessible_group.add_developer(current_user)
+ end
+
+ context 'when including neither ancestor nor descendant milestones in a public group' do
+ let(:params) { {} }
+
+ it 'finds milestones only in accessible projects and groups' do
+ expect(query_group_milestone_ids(params)).to match_array([milestone1.id])
+ end
+ end
+
+ context 'when including descendant milestones in a public group' do
+ let(:params) { { include_descendants: true } }
+
+ it 'finds milestones only in accessible projects and groups' do
+ expect(query_group_milestone_ids(params)).to match_array([milestone1, milestone2, milestone3].pluck(:id))
+ end
+ end
+
+ context 'when including ancestor milestones in a public group' do
+ let(:params) { { include_ancestors: true } }
+
+ it 'finds milestones only in accessible projects and groups' do
+ expect(query_group_milestone_ids(params)).to match_array([milestone1, milestone6].pluck(:id))
+ end
+ end
+
+ context 'when including both ancestor and descendant milestones in a public group' do
+ let(:params) { { include_descendants: true, include_ancestors: true } }
+
+ it 'finds milestones only in accessible projects and groups' do
+ expect(query_group_milestone_ids(params))
+ .to match_array([milestone1, milestone2, milestone3, milestone6].pluck(:id))
+ end
+ end
+ end
+ end
+end
diff --git a/spec/support/shared_examples/requests/api/ml_model_packages_shared_examples.rb b/spec/support/shared_examples/requests/api/ml_model_packages_shared_examples.rb
index 47cbd268a65..30a1398bf94 100644
--- a/spec/support/shared_examples/requests/api/ml_model_packages_shared_examples.rb
+++ b/spec/support/shared_examples/requests/api/ml_model_packages_shared_examples.rb
@@ -15,11 +15,31 @@ RSpec.shared_examples 'Endpoint not found if read_model_registry not available'
end
end
-RSpec.shared_examples 'creates model experiments package files' do
+RSpec.shared_examples 'Endpoint not found if write_model_registry not available' do
+ context 'when write_model_registry is disabled for current project' do
+ before do
+ allow(Ability).to receive(:allowed?).and_call_original
+ allow(Ability).to receive(:allowed?)
+ .with(user, :write_model_registry, project)
+ .and_return(false)
+ end
+
+ it { is_expected.to have_gitlab_http_status(:not_found) }
+ end
+end
+
+RSpec.shared_examples 'Not found when model version does not exist' do
+ context 'when model version does not exist' do
+ let(:version) { "#{non_existing_record_id}.0.0" }
+
+ it { is_expected.to have_gitlab_http_status(:not_found) }
+ end
+end
+
+RSpec.shared_examples 'creates package files for model versions' do
it 'creates package files', :aggregate_failures do
expect { api_response }
- .to change { project.packages.count }.by(1)
- .and change { Packages::PackageFile.count }.by(1)
+ .to change { Packages::PackageFile.count }.by(1)
expect(api_response).to have_gitlab_http_status(:created)
package_file = project.packages.last.package_files.reload.last
@@ -59,7 +79,7 @@ RSpec.shared_examples 'process ml model package upload' do
context 'with correct params' do
it_behaves_like 'package workhorse uploads'
- it_behaves_like 'creates model experiments package files'
+ it_behaves_like 'creates package files for model versions'
# To be reactivated with https://gitlab.com/gitlab-org/gitlab/-/issues/414270
# it_behaves_like 'a package tracking event', '::API::MlModelPackages', 'push_package'
end
@@ -81,7 +101,7 @@ RSpec.shared_examples 'process ml model package upload' do
stub_package_file_object_storage(direct_upload: true)
end
- it_behaves_like 'creates model experiments package files'
+ it_behaves_like 'creates package files for model versions'
['123123', '../../123123'].each do |remote_id|
context "with invalid remote_id: #{remote_id}" do
@@ -102,7 +122,7 @@ RSpec.shared_examples 'process ml model package upload' do
stub_package_file_object_storage(direct_upload: false)
end
- it_behaves_like 'creates model experiments package files'
+ it_behaves_like 'creates package files for model versions'
end
end
end
@@ -112,13 +132,5 @@ RSpec.shared_examples 'process ml model package download' do
it { is_expected.to have_gitlab_http_status(:success) }
end
- context 'when record does not exist' do
- it 'response is not found' do
- expect_next_instance_of(::Packages::MlModel::PackageFinder) do |instance|
- expect(instance).to receive(:execute!).and_raise(ActiveRecord::RecordNotFound)
- end
-
- expect(api_response).to have_gitlab_http_status(:not_found)
- end
- end
+ it_behaves_like 'Not found when model version does not exist'
end
diff --git a/spec/tooling/danger/project_helper_spec.rb b/spec/tooling/danger/project_helper_spec.rb
index 2da90ddbd67..a41aba17f56 100644
--- a/spec/tooling/danger/project_helper_spec.rb
+++ b/spec/tooling/danger/project_helper_spec.rb
@@ -8,7 +8,7 @@ require 'gitlab/dangerfiles/spec_helper'
require_relative '../../../danger/plugins/project_helper'
-RSpec.describe Tooling::Danger::ProjectHelper do
+RSpec.describe Tooling::Danger::ProjectHelper, feature_category: :tooling do
include StubENV
include_context "with dangerfile"
@@ -130,6 +130,7 @@ RSpec.describe Tooling::Danger::ProjectHelper do
'lib/gitlab/background_migration.rb' | [:database, :backend]
'lib/gitlab/background_migration/foo' | [:database, :backend]
'ee/lib/gitlab/background_migration/foo' | [:database, :backend]
+ 'ee/lib/ee/gitlab/background_migration/foo' | [:database, :backend]
'lib/gitlab/database.rb' | [:database, :backend]
'lib/gitlab/database/foo' | [:database, :backend]
'ee/lib/gitlab/database/foo' | [:database, :backend]
@@ -238,7 +239,7 @@ RSpec.describe Tooling::Danger::ProjectHelper do
it { is_expected.to eq(expected_categories) }
end
- context 'having specific changes' do
+ context 'when having specific changes' do
where(:expected_categories, :patch, :changed_files) do
[:analytics_instrumentation] | '+data-track-action' | ['components/welcome.vue']
[:analytics_instrumentation] | '+ data: { track_label:' | ['admin/groups/_form.html.haml']
diff --git a/tooling/danger/project_helper.rb b/tooling/danger/project_helper.rb
index 2b781b58a64..e0953d59dad 100644
--- a/tooling/danger/project_helper.rb
+++ b/tooling/danger/project_helper.rb
@@ -100,7 +100,7 @@ module Tooling
%r{\A((ee|jh)/)?db/(geo/)?(?!click_house|fixtures)[^/]+} => [:database],
%r{\A((ee|jh)/)?db/[^/]+\z} => [:database], # db/ root files
- %r{\A((ee|jh)/)?lib/gitlab/(database|background_migration|sql)(/|\.rb)} => [:database, :backend],
+ %r{\A((ee|jh)/)?lib/(ee/)?gitlab/(database|background_migration|sql)(/|\.rb)} => [:database, :backend],
%r{\A(app/services/authorized_project_update/find_records_due_for_refresh_service)(/|\.rb)} => [:database, :backend],
%r{\A(app/models/project_authorization|app/services/users/refresh_authorized_projects_service)(/|\.rb)} => [:database, :backend],
%r{\A((ee|jh)/)?app/finders/} => [:database, :backend],
diff --git a/vite.config.js b/vite.config.js
index 88d264531a6..b70478abe96 100644
--- a/vite.config.js
+++ b/vite.config.js
@@ -128,5 +128,21 @@ export default defineConfig({
protocol: 'ws',
},
https: false,
+ watch: {
+ ignored: [
+ '**/*.stories.js',
+ function ignoreRootFolder(x) {
+ /*
`vite` watches the root folder of gitlab and all of its subfolders.
This is not what we want, because we have temp files and all kinds
of other stuff. As vite starts its watchers recursively, we just
ignore the path if it matches exactly the root folder.

Additional folders like `ee/app/assets` are defined in `config/vite.json`.
+ */
+ return x === __dirname;
+ },
+ ],
+ },
},
});