Add latest changes from gitlab-org/gitlab@master
This commit is contained in:
parent
2b2746757e
commit
f39371cf6c
|
|
@ -61,9 +61,7 @@ module RapidDiffs
|
|||
def stream_diff_files(options, view_context)
|
||||
return unless resource
|
||||
|
||||
diffs = resource.diffs_for_streaming(options)
|
||||
|
||||
if params.permit(:offset)[:offset].blank? && diffs.diff_files.empty?
|
||||
if params.permit(:offset)[:offset].blank? && resource.first_diffs_slice(1, options).empty?
|
||||
empty_state_component = ::RapidDiffs::EmptyStateComponent.new
|
||||
response.stream.write empty_state_component.render_in(view_context)
|
||||
return
|
||||
|
|
@ -73,11 +71,13 @@ module RapidDiffs
|
|||
if !!ActiveModel::Type::Boolean.new.cast(params.permit(:diff_blobs)[:diff_blobs])
|
||||
stream_diff_blobs(options, view_context)
|
||||
else
|
||||
stream_diff_collection(diffs.diff_files(sorted: sorted?), view_context)
|
||||
stream_diff_collection(options, view_context)
|
||||
end
|
||||
end
|
||||
|
||||
def stream_diff_collection(diff_files, view_context)
|
||||
def stream_diff_collection(options, view_context)
|
||||
diff_files = resource.diffs_for_streaming(options).diff_files(sorted: sorted?)
|
||||
|
||||
each_growing_slice(diff_files, 5, 2) do |slice|
|
||||
response.stream.write(render_diff_files_collection(slice, view_context))
|
||||
end
|
||||
|
|
|
|||
|
|
@ -15,12 +15,14 @@ module Types
|
|||
field :closing_merge_requests,
|
||||
::Types::WorkItems::ClosingMergeRequestType.connection_type,
|
||||
null: true,
|
||||
description: 'Merge requests that will close the work item when merged.'
|
||||
description: 'Merge requests that will close the work item when merged.',
|
||||
complexity: 10
|
||||
field :related_branches,
|
||||
::Types::WorkItems::RelatedBranchType.connection_type,
|
||||
calls_gitaly: true,
|
||||
description: 'Branches that have referred to the work item, but do not have an associated merge request.',
|
||||
null: true do
|
||||
null: true,
|
||||
complexity: 10 do
|
||||
extension ::Gitlab::Graphql::Limit::FieldCallCount, limit: 1
|
||||
end
|
||||
field :related_merge_requests, # rubocop:disable GraphQL/ExtractType -- no need to extract to related
|
||||
|
|
@ -29,13 +31,15 @@ module Types
|
|||
resolver: ::Resolvers::MergeRequests::WorkItemRelatedResolver,
|
||||
description: 'Merge requests where the work item has been mentioned. ' \
|
||||
'This field can only be resolved for one work item in any single request.',
|
||||
experiment: { milestone: '17.6' } do
|
||||
experiment: { milestone: '17.6' },
|
||||
complexity: 10 do
|
||||
extension ::Gitlab::Graphql::Limit::FieldCallCount, limit: 1
|
||||
end
|
||||
field :will_auto_close_by_merge_request,
|
||||
GraphQL::Types::Boolean,
|
||||
null: false,
|
||||
description: 'Whether the work item will automatically be closed when a closing merge request is merged.'
|
||||
description: 'Whether the work item will automatically be closed when a closing merge request is merged.',
|
||||
complexity: 1
|
||||
|
||||
def related_branches
|
||||
return [] unless object.work_item.project
|
||||
|
|
|
|||
|
|
@ -600,9 +600,7 @@ class Commit
|
|||
end
|
||||
|
||||
def first_diffs_slice(limit, diff_options = {})
|
||||
diff_options[:max_files] = limit
|
||||
|
||||
diffs(diff_options).diff_files
|
||||
diffs(diff_options.merge(max_files: limit)).diff_files
|
||||
end
|
||||
|
||||
private
|
||||
|
|
|
|||
|
|
@ -118,4 +118,8 @@ class Compare
|
|||
end
|
||||
paths.to_a
|
||||
end
|
||||
|
||||
def first_diffs_slice(limit, diff_options = {})
|
||||
diffs(diff_options.merge(max_files: limit)).diff_files
|
||||
end
|
||||
end
|
||||
|
|
|
|||
|
|
@ -1,14 +0,0 @@
|
|||
---
|
||||
redirect_to: ../code_coverage/_index.md
|
||||
remove_date: "2025-04-30"
|
||||
---
|
||||
|
||||
<!-- markdownlint-disable -->
|
||||
<!-- vale off -->
|
||||
|
||||
このドキュメントは[コードカバレッジ](../code_coverage/_index.md)に移動しました。
|
||||
|
||||
<!-- This redirect file can be deleted after 2025-04-30. -->
|
||||
<!-- Redirects that point to other docs in the same project expire in three months. -->
|
||||
<!-- Redirects that point to docs in a different project or site (for example, link is not relative and starts with `https:`) expire in one year. -->
|
||||
<!-- Before deletion, see: https://docs.gitlab.com/ee/development/documentation/redirects.html -->
|
||||
|
|
@ -1,14 +0,0 @@
|
|||
---
|
||||
redirect_to: ../code_coverage/cobertura.md
|
||||
remove_date: "2025-04-30"
|
||||
---
|
||||
|
||||
<!-- markdownlint-disable -->
|
||||
<!-- vale off -->
|
||||
|
||||
このドキュメントは、[Coberturaカバレッジレポート](../code_coverage/cobertura.md)に移動しました。
|
||||
|
||||
<!-- This redirect file can be deleted after 2025-04-30. -->
|
||||
<!-- Redirects that point to other docs in the same project expire in three months. -->
|
||||
<!-- Redirects that point to docs in a different project or site (for example, link is not relative and starts with `https:`) expire in one year. -->
|
||||
<!-- Before deletion, see: https://docs.gitlab.com/ee/development/documentation/redirects.html -->
|
||||
|
|
@ -1,14 +0,0 @@
|
|||
---
|
||||
redirect_to: ../code_coverage/jacoco.md
|
||||
remove_date: "2025-04-30"
|
||||
---
|
||||
|
||||
<!-- markdownlint-disable -->
|
||||
<!-- vale off -->
|
||||
|
||||
このドキュメントは、[JaCoCoカバレッジレポート](../code_coverage/jacoco.md)に移動しました。
|
||||
|
||||
<!-- This redirect file can be deleted after 2025-04-30. -->
|
||||
<!-- Redirects that point to other docs in the same project expire in three months. -->
|
||||
<!-- Redirects that point to docs in a different project or site (for example, link is not relative and starts with `https:`) expire in one year. -->
|
||||
<!-- Before deletion, see: https://docs.gitlab.com/ee/development/documentation/redirects.html -->
|
||||
|
|
@ -114,6 +114,7 @@ Buildah
|
|||
Buildkite
|
||||
buildpack
|
||||
buildpacks
|
||||
Buildx
|
||||
bundler
|
||||
bundlers
|
||||
burndown
|
||||
|
|
|
|||
|
|
@ -98,7 +98,7 @@ This [problem is usually encountered when upgrading to GitLab 15.1](../../../upd
|
|||
|
||||
## Recovering from a partial failover
|
||||
|
||||
The partial failover to a secondary Geo *site* may be the result of a temporary/transient issue. Therefore, first attempt to run the promote command again.
|
||||
The partial failover to a secondary Geo site may be the result of a temporary/transient issue. Therefore, first attempt to run the promote command again.
|
||||
|
||||
1. SSH into every Sidekiq, PostgreSQL, Gitaly, and Rails node in the **secondary** site and run one of the following commands:
|
||||
|
||||
|
|
|
|||
|
|
@ -50,7 +50,7 @@ We provide [example diagrams and statements](#examples) to demonstrate correct u
|
|||
|
||||
## Replicator terms
|
||||
|
||||
Geo uses *replicators* to replicate data of individual GitLab components
|
||||
Geo uses replicators to replicate data of individual GitLab components
|
||||
between primary and secondary sites. They define how the individual [data types](replication/datatypes.md#data-types)
|
||||
of these components have to be processed and verified. For example, data of the
|
||||
GitLab container registry has to be handled differently than CI job artifacts.
|
||||
|
|
|
|||
|
|
@ -46,7 +46,7 @@ Any change that requires access to the **Admin area** needs to be done in the
|
|||
## Step 1. Manually replicate secret GitLab values
|
||||
|
||||
GitLab stores a number of secret values in the `/etc/gitlab/gitlab-secrets.json`
|
||||
file which *must* be the same on all of a site's nodes. Until there is
|
||||
file which must be the same on all of a site's nodes. Until there is
|
||||
a means of automatically replicating these between sites (see [issue #3789](https://gitlab.com/gitlab-org/gitlab/-/issues/3789)),
|
||||
they must be manually replicated to **all nodes of the secondary site**.
|
||||
|
||||
|
|
@ -310,7 +310,7 @@ Install the correct certificate based on your certificate type:
|
|||
|
||||
A copy of the self-signed certificate for the external service needs to be added to the trust store on all the **primary** site's nodes that require access to the service.
|
||||
|
||||
For the **secondary** site to be able to access the same external services, these certificates *must* be added to the **secondary** site's trust store.
|
||||
For the **secondary** site to be able to access the same external services, these certificates must be added to the **secondary** site's trust store.
|
||||
|
||||
If your **primary** site is using a [custom or self-signed certificate for inbound HTTPS connections](#custom-or-self-signed-certificate-for-inbound-connections), the **primary** site's certificate needs to be added to the **secondary** site's trust store:
|
||||
|
||||
|
|
|
|||
|
|
@ -273,7 +273,7 @@ from [owasp.org](https://owasp.org/).
|
|||
|
||||
### What user authorization requirements have been defined?
|
||||
|
||||
- **Secondary** sites must only be able to *read* data. They cannot mutate data on the **primary** site.
|
||||
- **Secondary** sites must only be able to read data. They cannot mutate data on the **primary** site.
|
||||
|
||||
### What session management requirements have been defined?
|
||||
|
||||
|
|
|
|||
|
|
@ -407,7 +407,7 @@ When missing files or inconsistencies are present, you can encounter entries in
|
|||
}
|
||||
```
|
||||
|
||||
The same errors are also reflected in the UI under **Admin > Geo > Sites** when reviewing the synchronization status of specific replicables. In this scenario, a specific *upload* is missing:
|
||||
The same errors are also reflected in the UI under **Admin > Geo > Sites** when reviewing the synchronization status of specific replicables. In this scenario, a specific upload is missing:
|
||||
|
||||

|
||||
|
||||
|
|
@ -423,7 +423,7 @@ Ensure you have a recent and working backup at hand before issuing any deletion
|
|||
|
||||
To remove those errors, first identify which particular resources are affected. Then, run the appropriate `destroy` commands to ensure the deletion is propagated across all Geo sites and their databases. Based on the previous scenario, an **upload** is causing those errors which is used as an example below.
|
||||
|
||||
1. Map the identified inconsistencies to their respective [Geo Model class](#geo-data-type-model-classes) name. The class name is needed in the following steps. In this scenario, for *uploads* it corresponds to `Upload`.
|
||||
1. Map the identified inconsistencies to their respective [Geo Model class](#geo-data-type-model-classes) name. The class name is needed in the following steps. In this scenario, for uploads it corresponds to `Upload`.
|
||||
1. Start a [Rails console](../../../operations/rails_console.md#starting-a-rails-console-session) on the **Geo primary site**.
|
||||
1. Query all resources where verification failed due to missing files based on the *Geo Model class* of the previous step. Adjust or remove the `limit(20)` to display more results. Observe how the listed resources should match the failed ones shown in the UI:
|
||||
|
||||
|
|
|
|||
|
|
@ -50,7 +50,7 @@ the **primary** node is listed first as `Primary site`.
|
|||
and always attempt to synchronize those changes as quickly as possible.
|
||||
|
||||
Backfill is the act of populating the **secondary** site with repositories and files that
|
||||
existed *before* the **secondary** site was added to the database. Because there may be
|
||||
existed before the **secondary** site was added to the database. Because there may be
|
||||
extremely large numbers of repositories and files, it's not feasible to attempt to
|
||||
download them all at once; so, GitLab places an upper limit on the concurrency of
|
||||
these operations.
|
||||
|
|
|
|||
|
|
@ -245,6 +245,13 @@ go_cloud_url = "s3://<bucket>?region=us-west-1"
|
|||
|
||||
#### Configure S3-compatible servers
|
||||
|
||||
{{< history >}}
|
||||
|
||||
- `awssdk` parameter introduced in GitLab 15.10.
|
||||
- `use_path_style` and `disable_https` parameters [introduced](https://gitlab.com/groups/gitlab-org/-/epics/8939) in GitLab 17.4.
|
||||
|
||||
{{< /history >}}
|
||||
|
||||
S3-compatible servers such as MinIO are configured similarly to S3 with the
|
||||
addition of the `endpoint` parameter.
|
||||
|
||||
|
|
@ -252,8 +259,19 @@ The following parameters are supported:
|
|||
|
||||
- `region`: The AWS region.
|
||||
- `endpoint`: The endpoint URL.
|
||||
- `disabledSSL`: A value of `true` disables SSL.
|
||||
- `s3ForcePathStyle`: A value of `true` forces path-style addressing.
|
||||
- `disableSSL`: Set to `true` to disable SSL. Available for GitLab 17.4.0 and earlier. For GitLab versions after 17.4.0, use `disable_https`.
|
||||
- `disable_https`: Set to `true` to disable HTTPS in the endpoint options.
|
||||
- `s3ForcePathStyle`: Set to `true` to force path-style URLs for S3 objects. Unavailable in GitLab versions 17.4.0 to 17.4.3. In those versions, use `use_path_style` instead.
|
||||
- `use_path_style`: Set to `true` to enable path-style S3 URLs (`https://<host>/<bucket>` instead of `https://<bucket>.<host>`).
|
||||
- `awssdk`: Force a particular version of AWS SDK. Set to `v1` to force AWS SDK v1 or `v2` to force AWS SDK v2. If:
|
||||
- Set to `v1`, you must use `disableSSL` instead of `disable_https`.
|
||||
- Not set, defaults to `v2`.
|
||||
|
||||
`use_path_style` was introduced when the Go Cloud Development Kit dependency was updated from v0.38.0 to v0.39.0, which switched from AWS SDK v1 to v2. However, the `s3ForcePathStyle` parameter was restored in GitLab 17.4.4 after the gocloud.dev maintainers added backward compatibility support. For more information, see [issue 6489](https://gitlab.com/gitlab-org/gitaly/-/issues/6489).
|
||||
|
||||
`disable_https` was introduced in the Go Cloud Development Kit v0.40.0 (AWS SDK v2).
|
||||
|
||||
`awssdk` was introduced in the Go Cloud Development Kit v0.24.0.
|
||||
|
||||
{{< tabs >}}
|
||||
|
||||
|
|
@ -268,7 +286,7 @@ gitaly['env'] = {
|
|||
}
|
||||
gitaly['configuration'] = {
|
||||
bundle_uri: {
|
||||
go_cloud_url: 's3://<bucket>?region=minio&endpoint=my.minio.local:8080&disableSSL=true&s3ForcePathStyle=true'
|
||||
go_cloud_url: 's3://<bucket>?region=minio&endpoint=my.minio.local:8080&disable_https=true&use_path_style=true'
|
||||
}
|
||||
}
|
||||
```
|
||||
|
|
@ -281,7 +299,7 @@ Edit `/home/git/gitaly/config.toml` and configure `go_cloud_url`:
|
|||
|
||||
```toml
|
||||
[bundle_uri]
|
||||
go_cloud_url = "s3://<bucket>?region=minio&endpoint=my.minio.local:8080&disableSSL=true&s3ForcePathStyle=true"
|
||||
go_cloud_url = "s3://<bucket>?region=minio&endpoint=my.minio.local:8080&disable_https=true&use_path_style=true"
|
||||
```
|
||||
|
||||
{{< /tab >}}
|
||||
|
|
|
|||
|
|
@ -859,7 +859,7 @@ Record the following when you configure your OAuth 2.0 application:
|
|||
|
||||
For MailRoom to work as a service account, the application you create
|
||||
in Azure Active Directory requires that you set the `Mail.ReadWrite` property
|
||||
to read/write mail in *all* mailboxes.
|
||||
to read/write mail in all mailboxes.
|
||||
|
||||
To mitigate security concerns, we recommend configuring an application access
|
||||
policy which limits the mailbox access for all accounts, as described in
|
||||
|
|
|
|||
|
|
@ -113,7 +113,7 @@ If you self-compiled your installation, you may need to make some changes to you
|
|||
for more details.
|
||||
|
||||
To disable web terminal support in GitLab, stop passing
|
||||
the `Connection` and `Upgrade` hop-by-hop headers in the *first* HTTP reverse
|
||||
the `Connection` and `Upgrade` hop-by-hop headers in the first HTTP reverse
|
||||
proxy in the chain. For most users, this is the NGINX server bundled with
|
||||
Linux package installations. In this case, you need to:
|
||||
|
||||
|
|
|
|||
|
|
@ -28,7 +28,7 @@ options:
|
|||
- Each application node terminates SSL
|
||||
- The load balancers terminate SSL and communication is not secure between
|
||||
the load balancers and the application nodes
|
||||
- The load balancers terminate SSL and communication is *secure* between the
|
||||
- The load balancers terminate SSL and communication is secure between the
|
||||
load balancers and the application nodes
|
||||
|
||||
### Application nodes terminate SSL
|
||||
|
|
|
|||
|
|
@ -77,9 +77,9 @@ From left to right, the performance bar displays:
|
|||
report of the current URL.
|
||||
- **Flamegraph** with mode: a link to generate a flamegraph
|
||||
of the current URL with the selected [Stackprof mode](https://github.com/tmm1/stackprof#sampling):
|
||||
- The **Wall** mode samples every *interval* of the time on a clock on a wall. The interval is set to `10100` microseconds.
|
||||
- The **CPU** mode samples every *interval* of CPU activity. The interval is set to `10100` microseconds.
|
||||
- The **Object** mode samples every *interval*. The interval is set to `100` allocations.
|
||||
- The **Wall** mode samples every interval of the time on a clock on a wall. The interval is set to `10100` microseconds.
|
||||
- The **CPU** mode samples every interval of CPU activity. The interval is set to `10100` microseconds.
|
||||
- The **Object** mode samples every interval. The interval is set to `100` allocations.
|
||||
- **Request Selector**: a select box displayed on the right-hand side of the
|
||||
Performance Bar which enables you to view these metrics for any requests made while
|
||||
the current page was open. Only the first two requests per unique URL are captured.
|
||||
|
|
|
|||
|
|
@ -1473,7 +1473,7 @@ Assume the following Geo scenario:
|
|||
- Both systems have been migrated to object storage
|
||||
- The secondary uses the same object storage as the primary
|
||||
- The option `Allow this secondary site to replicate content on Object Storage` is deactivated
|
||||
- Multiple *uploads* were manually deleted before the object storage migration
|
||||
- Multiple uploads were manually deleted before the object storage migration
|
||||
- For this example, two images which were uploaded to an issue
|
||||
|
||||
In such a scenario, the secondary does no longer need to replicate any data as
|
||||
|
|
@ -1484,9 +1484,9 @@ On the primary site:
|
|||
|
||||
1. On the left sidebar, at the bottom, select **Admin**.
|
||||
1. Select **Geo > Sites**.
|
||||
1. Look at the **primary site** and check the verification information. All *uploads* were verified:
|
||||
1. Look at the **primary site** and check the verification information. All uploads were verified:
|
||||

|
||||
1. Look at the **secondary site** and check the verification information. Notice that two *uploads* are still being synced, even though the secondary should use the same object storage. Meaning it should not have to synchronize any uploads:
|
||||
1. Look at the **secondary site** and check the verification information. Notice that two uploads are still being synced, even though the secondary should use the same object storage. Meaning it should not have to synchronize any uploads:
|
||||

|
||||
|
||||
#### Clean up inconsistencies
|
||||
|
|
|
|||
|
|
@ -95,13 +95,13 @@ Depending on the init system, this `WARNING` can be one of:
|
|||
/sbin/init: unrecognized option '--version'
|
||||
```
|
||||
|
||||
when the underlying init system *is not* upstart.
|
||||
when the underlying init system is not upstart.
|
||||
|
||||
```plaintext
|
||||
-.mount loaded active mounted /
|
||||
```
|
||||
|
||||
when the underlying init system *IS* systemd.
|
||||
when the underlying init system is systemd.
|
||||
|
||||
These warnings _can be safely ignored_. They are not suppressed because this
|
||||
allows everyone to debug possible detection issues faster.
|
||||
|
|
|
|||
|
|
@ -65,7 +65,7 @@ Removal target:
|
|||
For regular configuration, removal target should always be the date of the **next major** release. If the date is not known, you can reference the next major version.
|
||||
|
||||
For sensitive configuration things are a bit more complicated.
|
||||
We should aim to not remove sensitive configuration in the *next major* release if the next major release is 2 minor releases away (This number is chosen to match our security backport release policy).
|
||||
We should aim to not remove sensitive configuration in the next major release if the next major release is 2 minor releases away (This number is chosen to match our security backport release policy).
|
||||
|
||||
See the table below for some examples:
|
||||
|
||||
|
|
|
|||
|
|
@ -81,7 +81,7 @@ Some drawbacks of a package with bundled dependencies:
|
|||
## Why would you install a package from the Linux package when you can use a system package?
|
||||
|
||||
The answer can be simplified to: less maintenance required. Instead of handling
|
||||
multiple packages that *can* break existing functionality if the versions are
|
||||
multiple packages that can break existing functionality if the versions are
|
||||
not compatible, only handle one.
|
||||
|
||||
Multiple packages require correct configuration in multiple locations.
|
||||
|
|
|
|||
|
|
@ -609,7 +609,7 @@ adding a GitLab-controlled verification code to the DNS records for that domain.
|
|||
{{< alert type="warning" >}}
|
||||
|
||||
Disabling domain verification is unsafe and can lead to various vulnerabilities.
|
||||
If you *do* disable it, either ensure that the Pages root domain itself does not point to the
|
||||
If you do disable it, either ensure that the Pages root domain itself does not point to the
|
||||
secondary IP or add the root domain as custom domain to a project; otherwise, any user can add this
|
||||
domain as a custom domain to their project.
|
||||
|
||||
|
|
|
|||
|
|
@ -60,7 +60,7 @@ specifically the [Before you start](_index.md#before-you-start) and [Deciding wh
|
|||
6. Gitaly Cluster provides the benefits of fault tolerance, but comes with additional complexity of setup and management.
|
||||
Review the existing [technical limitations and considerations before deploying Gitaly Cluster](../gitaly/_index.md#before-deploying-gitaly-cluster). If you want sharded Gitaly, use the same specs listed above for `Gitaly`.
|
||||
7. Gitaly specifications are based on high percentiles of both usage patterns and repository sizes in good health.
|
||||
However, if you have [large monorepos](_index.md#large-monorepos) (larger than several gigabytes) or [additional workloads](_index.md#additional-workloads) these can *significantly* impact Git and Gitaly performance and further adjustments will likely be required.
|
||||
However, if you have [large monorepos](_index.md#large-monorepos) (larger than several gigabytes) or [additional workloads](_index.md#additional-workloads) these can significantly impact Git and Gitaly performance and further adjustments will likely be required.
|
||||
8. Can be placed in Auto Scaling Groups (ASGs) as the component doesn't store any [stateful data](_index.md#autoscaling-of-stateful-nodes).
|
||||
However, [Cloud Native Hybrid setups](#cloud-native-hybrid-reference-architecture-with-helm-charts-alternative) are generally preferred as certain components
|
||||
such as like [migrations](#gitlab-rails-post-configuration) and [Mailroom](../incoming_email.md) can only be run on one node, which is handled better in Kubernetes.
|
||||
|
|
@ -336,7 +336,7 @@ There are several different options:
|
|||
- [The load balancer terminates SSL without backend SSL](#load-balancer-terminates-ssl-without-backend-ssl)
|
||||
and communication is not secure between the load balancer and the application node.
|
||||
- [The load balancer terminates SSL with backend SSL](#load-balancer-terminates-ssl-with-backend-ssl)
|
||||
and communication is *secure* between the load balancer and the application node.
|
||||
and communication is secure between the load balancer and the application node.
|
||||
|
||||
#### Application node terminates SSL
|
||||
|
||||
|
|
@ -1208,7 +1208,7 @@ designated the primary, and failover occurs automatically if the primary node go
|
|||
{{< alert type="warning" >}}
|
||||
|
||||
**Gitaly specifications are based on high percentiles of both usage patterns and repository sizes in good health**.
|
||||
**However, if you have [large monorepos](_index.md#large-monorepos) (larger than several gigabytes) or [additional workloads](_index.md#additional-workloads) these can *significantly* impact the performance of the environment and further adjustments may be required**.
|
||||
**However, if you have [large monorepos](_index.md#large-monorepos) (larger than several gigabytes) or [additional workloads](_index.md#additional-workloads) these can significantly impact the performance of the environment and further adjustments may be required**.
|
||||
If you believe this applies to you, contact us for additional guidance as required.
|
||||
|
||||
{{< /alert >}}
|
||||
|
|
@ -1557,7 +1557,7 @@ requirements that are dependent on data and load.
|
|||
{{< alert type="warning" >}}
|
||||
|
||||
**Gitaly specifications are based on high percentiles of both usage patterns and repository sizes in good health**.
|
||||
**However, if you have [large monorepos](_index.md#large-monorepos) (larger than several gigabytes) or [additional workloads](_index.md#additional-workloads) these can *significantly* impact the performance of the environment and further adjustments may be required**.
|
||||
**However, if you have [large monorepos](_index.md#large-monorepos) (larger than several gigabytes) or [additional workloads](_index.md#additional-workloads) these can significantly impact the performance of the environment and further adjustments may be required**.
|
||||
If you believe this applies to you, contact us for additional guidance as required.
|
||||
|
||||
{{< /alert >}}
|
||||
|
|
@ -2377,7 +2377,7 @@ services where applicable):
|
|||
6. Gitaly Cluster provides the benefits of fault tolerance, but comes with additional complexity of setup and management.
|
||||
Review the existing [technical limitations and considerations before deploying Gitaly Cluster](../gitaly/_index.md#before-deploying-gitaly-cluster). If you want sharded Gitaly, use the same specs listed above for `Gitaly`.
|
||||
7. Gitaly specifications are based on high percentiles of both usage patterns and repository sizes in good health.
|
||||
However, if you have [large monorepos](_index.md#large-monorepos) (larger than several gigabytes) or [additional workloads](_index.md#additional-workloads) these can *significantly* impact Git and Gitaly performance and further adjustments will likely be required.
|
||||
However, if you have [large monorepos](_index.md#large-monorepos) (larger than several gigabytes) or [additional workloads](_index.md#additional-workloads) these can significantly impact Git and Gitaly performance and further adjustments will likely be required.
|
||||
<!-- markdownlint-enable MD029 -->
|
||||
|
||||
{{< alert type="note" >}}
|
||||
|
|
|
|||
|
|
@ -81,7 +81,7 @@ Before proceeding, review the [requirements](_index.md#requirements) for the ref
|
|||
{{< alert type="warning" >}}
|
||||
|
||||
**The node's specifications are based on high percentiles of both usage patterns and repository sizes in good health**.
|
||||
**However, if you have [large monorepos](_index.md#large-monorepos) (larger than several gigabytes) or [additional workloads](_index.md#additional-workloads), they might *significantly* impact the performance of the environment**.
|
||||
**However, if you have [large monorepos](_index.md#large-monorepos) (larger than several gigabytes) or [additional workloads](_index.md#additional-workloads), they might significantly impact the performance of the environment**.
|
||||
If this applies to you, [further adjustments might be required](_index.md#scaling-an-environment). See the linked documentation and contact us if required for further guidance.
|
||||
|
||||
{{< /alert >}}
|
||||
|
|
|
|||
|
|
@ -60,7 +60,7 @@ specifically the [Before you start](_index.md#before-you-start) and [Deciding wh
|
|||
6. Gitaly Cluster provides the benefits of fault tolerance, but comes with additional complexity of setup and management.
|
||||
Review the existing [technical limitations and considerations before deploying Gitaly Cluster](../gitaly/_index.md#before-deploying-gitaly-cluster). If you want sharded Gitaly, use the same specs listed in the previous table for `Gitaly`.
|
||||
7. Gitaly specifications are based on high percentiles of both usage patterns and repository sizes in good health.
|
||||
However, if you have [large monorepos](_index.md#large-monorepos) (larger than several gigabytes) or [additional workloads](_index.md#additional-workloads) these can *significantly* impact Git and Gitaly performance and further adjustments will likely be required.
|
||||
However, if you have [large monorepos](_index.md#large-monorepos) (larger than several gigabytes) or [additional workloads](_index.md#additional-workloads) these can significantly impact Git and Gitaly performance and further adjustments will likely be required.
|
||||
8. Can be placed in Auto Scaling Groups (ASGs) as the component doesn't store any [stateful data](_index.md#autoscaling-of-stateful-nodes).
|
||||
However, [Cloud Native Hybrid setups](#cloud-native-hybrid-reference-architecture-with-helm-charts-alternative) are generally preferred as certain components
|
||||
such as like [migrations](#gitlab-rails-post-configuration) and [Mailroom](../incoming_email.md) can only be run on one node, which is handled better in Kubernetes.
|
||||
|
|
@ -338,7 +338,7 @@ There are several different options:
|
|||
- [The load balancer terminates SSL without backend SSL](#load-balancer-terminates-ssl-without-backend-ssl)
|
||||
and communication is not secure between the load balancer and the application node.
|
||||
- [The load balancer terminates SSL with backend SSL](#load-balancer-terminates-ssl-with-backend-ssl)
|
||||
and communication is *secure* between the load balancer and the application node.
|
||||
and communication is secure between the load balancer and the application node.
|
||||
|
||||
#### Application node terminates SSL
|
||||
|
||||
|
|
@ -1216,7 +1216,7 @@ designated the primary, and failover occurs automatically if the primary node go
|
|||
{{< alert type="warning" >}}
|
||||
|
||||
**Gitaly specifications are based on high percentiles of both usage patterns and repository sizes in good health**.
|
||||
**However, if you have [large monorepos](_index.md#large-monorepos) (larger than several gigabytes) or [additional workloads](_index.md#additional-workloads) these can *significantly* impact the performance of the environment and further adjustments may be required**.
|
||||
**However, if you have [large monorepos](_index.md#large-monorepos) (larger than several gigabytes) or [additional workloads](_index.md#additional-workloads) these can significantly impact the performance of the environment and further adjustments may be required**.
|
||||
If you believe this applies to you, contact us for additional guidance as required.
|
||||
|
||||
{{< /alert >}}
|
||||
|
|
@ -1563,7 +1563,7 @@ requirements that are dependent on data and load.
|
|||
{{< alert type="warning" >}}
|
||||
|
||||
**Gitaly specifications are based on high percentiles of both usage patterns and repository sizes in good health**.
|
||||
**However, if you have [large monorepos](_index.md#large-monorepos) (larger than several gigabytes) or [additional workloads](_index.md#additional-workloads) these can *significantly* impact the performance of the environment and further adjustments may be required**.
|
||||
**However, if you have [large monorepos](_index.md#large-monorepos) (larger than several gigabytes) or [additional workloads](_index.md#additional-workloads) these can significantly impact the performance of the environment and further adjustments may be required**.
|
||||
If you believe this applies to you, contact us for additional guidance as required.
|
||||
|
||||
{{< /alert >}}
|
||||
|
|
@ -2384,7 +2384,7 @@ services where applicable):
|
|||
6. Gitaly Cluster provides the benefits of fault tolerance, but comes with additional complexity of setup and management.
|
||||
Review the existing [technical limitations and considerations before deploying Gitaly Cluster](../gitaly/_index.md#before-deploying-gitaly-cluster). If you want sharded Gitaly, use the same specs listed in the previous table for `Gitaly`.
|
||||
7. Gitaly specifications are based on high percentiles of both usage patterns and repository sizes in good health.
|
||||
However, if you have [large monorepos](_index.md#large-monorepos) (larger than several gigabytes) or [additional workloads](_index.md#additional-workloads) these can *significantly* impact Git and Gitaly performance and further adjustments will likely be required.
|
||||
However, if you have [large monorepos](_index.md#large-monorepos) (larger than several gigabytes) or [additional workloads](_index.md#additional-workloads) these can significantly impact Git and Gitaly performance and further adjustments will likely be required.
|
||||
<!-- markdownlint-enable MD029 -->
|
||||
|
||||
{{< alert type="note" >}}
|
||||
|
|
|
|||
|
|
@ -234,7 +234,7 @@ There are several different options:
|
|||
- [The load balancer terminates SSL without backend SSL](#load-balancer-terminates-ssl-without-backend-ssl)
|
||||
and communication is not secure between the load balancer and the application node.
|
||||
- [The load balancer terminates SSL with backend SSL](#load-balancer-terminates-ssl-with-backend-ssl)
|
||||
and communication is *secure* between the load balancer and the application node.
|
||||
and communication is secure between the load balancer and the application node.
|
||||
|
||||
#### Application node terminates SSL
|
||||
|
||||
|
|
@ -444,7 +444,7 @@ specifically the number of projects and those projects' sizes.
|
|||
{{< alert type="warning" >}}
|
||||
|
||||
**Gitaly specifications are based on high percentiles of both usage patterns and repository sizes in good health**.
|
||||
**However, if you have [large monorepos](_index.md#large-monorepos) (larger than several gigabytes) or [additional workloads](_index.md#additional-workloads) these can *significantly* impact the performance of the environment and further adjustments may be required**.
|
||||
**However, if you have [large monorepos](_index.md#large-monorepos) (larger than several gigabytes) or [additional workloads](_index.md#additional-workloads) these can significantly impact the performance of the environment and further adjustments may be required**.
|
||||
If you believe this applies to you, contact us for additional guidance as required.
|
||||
|
||||
{{< /alert >}}
|
||||
|
|
|
|||
|
|
@ -55,7 +55,7 @@ For a full list of reference architectures, see
|
|||
6. Gitaly Cluster provides the benefits of fault tolerance, but comes with additional complexity of setup and management.
|
||||
Review the existing [technical limitations and considerations before deploying Gitaly Cluster](../gitaly/_index.md#before-deploying-gitaly-cluster). If you want sharded Gitaly, use the same specs listed in the previous table for `Gitaly`.
|
||||
7. Gitaly specifications are based on high percentiles of both usage patterns and repository sizes in good health.
|
||||
However, if you have [large monorepos](_index.md#large-monorepos) (larger than several gigabytes) or [additional workloads](_index.md#additional-workloads) these can *significantly* impact Git and Gitaly performance and further adjustments will likely be required.
|
||||
However, if you have [large monorepos](_index.md#large-monorepos) (larger than several gigabytes) or [additional workloads](_index.md#additional-workloads) these can significantly impact Git and Gitaly performance and further adjustments will likely be required.
|
||||
8. Can be placed in Auto Scaling Groups (ASGs) as the component doesn't store any [stateful data](_index.md#autoscaling-of-stateful-nodes).
|
||||
However, [Cloud Native Hybrid setups](#cloud-native-hybrid-reference-architecture-with-helm-charts-alternative) are generally preferred as certain components
|
||||
such as like [migrations](#gitlab-rails-post-configuration) and [Mailroom](../incoming_email.md) can only be run on one node, which is handled better in Kubernetes.
|
||||
|
|
@ -323,7 +323,7 @@ There are several different options:
|
|||
- [The load balancer terminates SSL without backend SSL](#load-balancer-terminates-ssl-without-backend-ssl)
|
||||
and communication is not secure between the load balancer and the application node.
|
||||
- [The load balancer terminates SSL with backend SSL](#load-balancer-terminates-ssl-with-backend-ssl)
|
||||
and communication is *secure* between the load balancer and the application node.
|
||||
and communication is secure between the load balancer and the application node.
|
||||
|
||||
#### Application node terminates SSL
|
||||
|
||||
|
|
@ -1044,7 +1044,7 @@ designated the primary, and failover occurs automatically if the primary node go
|
|||
{{< alert type="warning" >}}
|
||||
|
||||
**Gitaly specifications are based on high percentiles of both usage patterns and repository sizes in good health**.
|
||||
**However, if you have [large monorepos](_index.md#large-monorepos) (larger than several gigabytes) or [additional workloads](_index.md#additional-workloads) these can *significantly* impact the performance of the environment and further adjustments may be required**.
|
||||
**However, if you have [large monorepos](_index.md#large-monorepos) (larger than several gigabytes) or [additional workloads](_index.md#additional-workloads) these can significantly impact the performance of the environment and further adjustments may be required**.
|
||||
If you believe this applies to you, contact us for additional guidance as required.
|
||||
|
||||
{{< /alert >}}
|
||||
|
|
@ -1389,7 +1389,7 @@ requirements that are dependent on data and load.
|
|||
{{< alert type="warning" >}}
|
||||
|
||||
**Gitaly specifications are based on high percentiles of both usage patterns and repository sizes in good health**.
|
||||
**However, if you have [large monorepos](_index.md#large-monorepos) (larger than several gigabytes) or [additional workloads](_index.md#additional-workloads) these can *significantly* impact the performance of the environment and further adjustments may be required**.
|
||||
**However, if you have [large monorepos](_index.md#large-monorepos) (larger than several gigabytes) or [additional workloads](_index.md#additional-workloads) these can significantly impact the performance of the environment and further adjustments may be required**.
|
||||
If you believe this applies to you, contact us for additional guidance as required.
|
||||
|
||||
{{< /alert >}}
|
||||
|
|
@ -2274,7 +2274,7 @@ services where applicable):
|
|||
6. Gitaly Cluster provides the benefits of fault tolerance, but comes with additional complexity of setup and management.
|
||||
Review the existing [technical limitations and considerations before deploying Gitaly Cluster](../gitaly/_index.md#before-deploying-gitaly-cluster). If you want sharded Gitaly, use the same specs listed in the previous table for `Gitaly`.
|
||||
7. Gitaly specifications are based on high percentiles of both usage patterns and repository sizes in good health.
|
||||
However, if you have [large monorepos](_index.md#large-monorepos) (larger than several gigabytes) or [additional workloads](_index.md#additional-workloads) these can *significantly* impact Git and Gitaly performance and further adjustments will likely be required.
|
||||
However, if you have [large monorepos](_index.md#large-monorepos) (larger than several gigabytes) or [additional workloads](_index.md#additional-workloads) these can significantly impact Git and Gitaly performance and further adjustments will likely be required.
|
||||
<!-- markdownlint-enable MD029 -->
|
||||
|
||||
{{< alert type="note" >}}
|
||||
|
|
|
|||
|
|
@ -59,7 +59,7 @@ specifically the [Before you start](_index.md#before-you-start) and [Deciding wh
|
|||
6. Gitaly Cluster provides the benefits of fault tolerance, but comes with additional complexity of setup and management.
|
||||
Review the existing [technical limitations and considerations before deploying Gitaly Cluster](../gitaly/_index.md#before-deploying-gitaly-cluster). If you want sharded Gitaly, use the same specs listed in the previous table for `Gitaly`.
|
||||
7. Gitaly specifications are based on high percentiles of both usage patterns and repository sizes in good health.
|
||||
However, if you have [large monorepos](_index.md#large-monorepos) (larger than several gigabytes) or [additional workloads](_index.md#additional-workloads) these can *significantly* impact Git and Gitaly performance and further adjustments will likely be required.
|
||||
However, if you have [large monorepos](_index.md#large-monorepos) (larger than several gigabytes) or [additional workloads](_index.md#additional-workloads) these can significantly impact Git and Gitaly performance and further adjustments will likely be required.
|
||||
8. Can be placed in Auto Scaling Groups (ASGs) as the component doesn't store any [stateful data](_index.md#autoscaling-of-stateful-nodes).
|
||||
However, [Cloud Native Hybrid setups](#cloud-native-hybrid-reference-architecture-with-helm-charts-alternative) are generally preferred as certain components
|
||||
such as like [migrations](#gitlab-rails-post-configuration) and [Mailroom](../incoming_email.md) can only be run on one node, which is handled better in Kubernetes.
|
||||
|
|
@ -344,7 +344,7 @@ There are several different options:
|
|||
- [The load balancer terminates SSL without backend SSL](#load-balancer-terminates-ssl-without-backend-ssl)
|
||||
and communication is not secure between the load balancer and the application node.
|
||||
- [The load balancer terminates SSL with backend SSL](#load-balancer-terminates-ssl-with-backend-ssl)
|
||||
and communication is *secure* between the load balancer and the application node.
|
||||
and communication is secure between the load balancer and the application node.
|
||||
|
||||
#### Application node terminates SSL
|
||||
|
||||
|
|
@ -1223,7 +1223,7 @@ designated the primary, and failover occurs automatically if the primary node go
|
|||
{{< alert type="warning" >}}
|
||||
|
||||
**Gitaly specifications are based on high percentiles of both usage patterns and repository sizes in good health**.
|
||||
**However, if you have [large monorepos](_index.md#large-monorepos) (larger than several gigabytes) or [additional workloads](_index.md#additional-workloads) these can *significantly* impact the performance of the environment and further adjustments may be required**.
|
||||
**However, if you have [large monorepos](_index.md#large-monorepos) (larger than several gigabytes) or [additional workloads](_index.md#additional-workloads) these can significantly impact the performance of the environment and further adjustments may be required**.
|
||||
If you believe this applies to you, contact us for additional guidance as required.
|
||||
|
||||
{{< /alert >}}
|
||||
|
|
@ -1568,7 +1568,7 @@ requirements that are dependent on data and load.
|
|||
{{< alert type="warning" >}}
|
||||
|
||||
**Gitaly specifications are based on high percentiles of both usage patterns and repository sizes in good health**.
|
||||
**However, if you have [large monorepos](_index.md#large-monorepos) (larger than several gigabytes) or [additional workloads](_index.md#additional-workloads) these can *significantly* impact the performance of the environment and further adjustments may be required**.
|
||||
**However, if you have [large monorepos](_index.md#large-monorepos) (larger than several gigabytes) or [additional workloads](_index.md#additional-workloads) these can significantly impact the performance of the environment and further adjustments may be required**.
|
||||
If you believe this applies to you, contact us for additional guidance as required.
|
||||
|
||||
{{< /alert >}}
|
||||
|
|
@ -2397,7 +2397,7 @@ services where applicable):
|
|||
6. Gitaly Cluster provides the benefits of fault tolerance, but comes with additional complexity of setup and management.
|
||||
Review the existing [technical limitations and considerations before deploying Gitaly Cluster](../gitaly/_index.md#before-deploying-gitaly-cluster). If you want sharded Gitaly, use the same specs listed in the previous table for `Gitaly`.
|
||||
7. Gitaly specifications are based on high percentiles of both usage patterns and repository sizes in good health.
|
||||
However, if you have [large monorepos](_index.md#large-monorepos) (larger than several gigabytes) or [additional workloads](_index.md#additional-workloads) these can *significantly* impact Git and Gitaly performance and further adjustments will likely be required.
|
||||
However, if you have [large monorepos](_index.md#large-monorepos) (larger than several gigabytes) or [additional workloads](_index.md#additional-workloads) these can significantly impact Git and Gitaly performance and further adjustments will likely be required.
|
||||
<!-- markdownlint-enable MD029 -->
|
||||
|
||||
{{< alert type="note" >}}
|
||||
|
|
|
|||
|
|
@ -58,7 +58,7 @@ specifically the [Before you start](_index.md#before-you-start) and [Deciding wh
|
|||
6. Gitaly Cluster provides the benefits of fault tolerance, but comes with additional complexity of setup and management.
|
||||
Review the existing [technical limitations and considerations before deploying Gitaly Cluster](../gitaly/_index.md#before-deploying-gitaly-cluster). If you want sharded Gitaly, use the same specs listed in the previous table for `Gitaly`.
|
||||
7. Gitaly specifications are based on high percentiles of both usage patterns and repository sizes in good health.
|
||||
However, if you have [large monorepos](_index.md#large-monorepos) (larger than several gigabytes) or [additional workloads](_index.md#additional-workloads) these can *significantly* impact Git and Gitaly performance and further adjustments will likely be required.
|
||||
However, if you have [large monorepos](_index.md#large-monorepos) (larger than several gigabytes) or [additional workloads](_index.md#additional-workloads) these can significantly impact Git and Gitaly performance and further adjustments will likely be required.
|
||||
8. Can be placed in Auto Scaling Groups (ASGs) as the component doesn't store any [stateful data](_index.md#autoscaling-of-stateful-nodes).
|
||||
However, [Cloud Native Hybrid setups](#cloud-native-hybrid-reference-architecture-with-helm-charts-alternative) are generally preferred as certain components
|
||||
such as like [migrations](#gitlab-rails-post-configuration) and [Mailroom](../incoming_email.md) can only be run on one node, which is handled better in Kubernetes.
|
||||
|
|
@ -326,7 +326,7 @@ There are several different options:
|
|||
- [The load balancer terminates SSL without backend SSL](#load-balancer-terminates-ssl-without-backend-ssl)
|
||||
and communication is not secure between the load balancer and the application node.
|
||||
- [The load balancer terminates SSL with backend SSL](#load-balancer-terminates-ssl-with-backend-ssl)
|
||||
and communication is *secure* between the load balancer and the application node.
|
||||
and communication is secure between the load balancer and the application node.
|
||||
|
||||
#### Application node terminates SSL
|
||||
|
||||
|
|
@ -1047,7 +1047,7 @@ designated the primary, and failover occurs automatically if the primary node go
|
|||
{{< alert type="warning" >}}
|
||||
|
||||
**Gitaly specifications are based on high percentiles of both usage patterns and repository sizes in good health**.
|
||||
**However, if you have [large monorepos](_index.md#large-monorepos) (larger than several gigabytes) or [additional workloads](_index.md#additional-workloads) these can *significantly* impact the performance of the environment and further adjustments may be required**.
|
||||
**However, if you have [large monorepos](_index.md#large-monorepos) (larger than several gigabytes) or [additional workloads](_index.md#additional-workloads) these can significantly impact the performance of the environment and further adjustments may be required**.
|
||||
If you believe this applies to you, contact us for additional guidance as required.
|
||||
|
||||
{{< /alert >}}
|
||||
|
|
@ -1394,7 +1394,7 @@ requirements that are dependent on data and load.
|
|||
{{< alert type="warning" >}}
|
||||
|
||||
**Gitaly specifications are based on high percentiles of both usage patterns and repository sizes in good health**.
|
||||
**However, if you have [large monorepos](_index.md#large-monorepos) (larger than several gigabytes) or [additional workloads](_index.md#additional-workloads) these can *significantly* impact the performance of the environment and further adjustments may be required**.
|
||||
**However, if you have [large monorepos](_index.md#large-monorepos) (larger than several gigabytes) or [additional workloads](_index.md#additional-workloads) these can significantly impact the performance of the environment and further adjustments may be required**.
|
||||
If you believe this applies to you, contact us for additional guidance as required.
|
||||
|
||||
{{< /alert >}}
|
||||
|
|
@ -2248,7 +2248,7 @@ services where applicable):
|
|||
6. Gitaly Cluster provides the benefits of fault tolerance, but comes with additional complexity of setup and management.
|
||||
Review the existing [technical limitations and considerations before deploying Gitaly Cluster](../gitaly/_index.md#before-deploying-gitaly-cluster). If you want sharded Gitaly, use the same specs listed in the previous table for `Gitaly`.
|
||||
7. Gitaly specifications are based on high percentiles of both usage patterns and repository sizes in good health.
|
||||
However, if you have [large monorepos](_index.md#large-monorepos) (larger than several gigabytes) or [additional workloads](_index.md#additional-workloads) these can *significantly* impact Git and Gitaly performance and further adjustments will likely be required.
|
||||
However, if you have [large monorepos](_index.md#large-monorepos) (larger than several gigabytes) or [additional workloads](_index.md#additional-workloads) these can significantly impact Git and Gitaly performance and further adjustments will likely be required.
|
||||
<!-- markdownlint-enable MD029 -->
|
||||
|
||||
{{< alert type="note" >}}
|
||||
|
|
|
|||
|
|
@ -1050,7 +1050,7 @@ A line code is of the form `<SHA>_<old>_<new>`, like this: `adc83b19e793491b1c6e
|
|||
- `<new>` is the line number after the change.
|
||||
|
||||
For example, if a commit (`<COMMIT_ID>`) deletes line 463 in the README, you can comment
|
||||
on the deletion by referencing line 463 in the *old* file:
|
||||
on the deletion by referencing line 463 in the old file:
|
||||
|
||||
```shell
|
||||
curl --request POST \
|
||||
|
|
@ -1063,7 +1063,7 @@ curl --request POST \
|
|||
```
|
||||
|
||||
If a commit (`<COMMIT_ID>`) adds line 157 to `hello.rb`, you can comment on the
|
||||
addition by referencing line 157 in the *new* file:
|
||||
addition by referencing line 157 in the new file:
|
||||
|
||||
```shell
|
||||
curl --request POST \
|
||||
|
|
|
|||
|
|
@ -98,7 +98,7 @@ Supported attributes:
|
|||
| `view` | string | No | If `simple`, returns the `iid`, URL, title, description, and basic state of merge request. |
|
||||
| `with_labels_details` | boolean | No | If `true`, response returns more details for each label in labels field: `:name`, `:color`, `:description`, `:description_html`, `:text_color`. Default is `false`. |
|
||||
| `with_merge_status_recheck` | boolean | No | If `true`, this projection requests (but does not guarantee) an asynchronous recalculation of the `merge_status` field. Default is `false`. In GitLab 15.11 and later, enable the `restrict_merge_status_recheck` feature [flag](../administration/feature_flags.md) to ignore this attribute when requested by users without at least the Developer role. |
|
||||
| `wip` | string | No | Filter merge requests against their `wip` status. Use `yes` to return *only* draft merge requests, `no` to return *non-draft* merge requests. |
|
||||
| `wip` | string | No | Filter merge requests against their `wip` status. Use `yes` to return only draft merge requests, `no` to return non-draft merge requests. |
|
||||
|
||||
Example response:
|
||||
|
||||
|
|
@ -283,7 +283,7 @@ Supported attributes:
|
|||
| `updated_after` | datetime | No | Returns merge requests updated on or after the given time. Expected in ISO 8601 format (`2019-03-15T08:00:00Z`). |
|
||||
| `updated_before` | datetime | No | Returns merge requests updated on or before the given time. Expected in ISO 8601 format (`2019-03-15T08:00:00Z`). |
|
||||
| `view` | string | No | If `simple`, returns the `iid`, URL, title, description, and basic state of merge request. |
|
||||
| `wip` | string | No | Filter merge requests against their `wip` status. `yes` to return *only* draft merge requests, `no` to return *non-draft* merge requests. |
|
||||
| `wip` | string | No | Filter merge requests against their `wip` status. `yes` to return only draft merge requests, `no` to return non-draft merge requests. |
|
||||
| `with_labels_details` | boolean | No | If `true`, response returns more details for each label in labels field: `:name`, `:color`, `:description`, `:description_html`, `:text_color`. Default is `false`. |
|
||||
| `with_merge_status_recheck` | boolean | No | If `true`, this projection requests (but does not guarantee) the asynchronous recalculation of the `merge_status` field. Default is `false`. In GitLab 15.11 and later, enable the `restrict_merge_status_recheck` feature [flag](../administration/feature_flags.md) to ignore this attribute when requested by users without at least the Developer role. |
|
||||
|
||||
|
|
|
|||
|
|
@ -14,16 +14,13 @@ title: Docker integration
|
|||
|
||||
You can incorporate [Docker](https://www.docker.com) into your CI/CD workflow in two primary ways:
|
||||
|
||||
- **[Run your CI/CD jobs](using_docker_images.md) in Docker containers**.
|
||||
- [Run your CI/CD jobs](using_docker_images.md) in Docker containers.
|
||||
|
||||
You can create CI/CD jobs to do things like test, build, or publish
|
||||
an application. These jobs can run in Docker containers.
|
||||
Create jobs to test, build, or publish applications that run in Docker containers.
|
||||
For example, use a Node image from Docker Hub so your job runs in a container
|
||||
with all the Node dependencies you need.
|
||||
|
||||
For example, you can tell GitLab CI/CD to use a Node image that's hosted on Docker Hub
|
||||
or in the GitLab container registry. Your job then runs in a container that's based on the image.
|
||||
The container has all the Node dependencies you need to build your app.
|
||||
- Use [Docker Build](using_docker_build.md) or [BuildKit](using_buildkit.md) to build Docker images.
|
||||
|
||||
- **Use [Docker](using_docker_build.md) to build Docker images**.
|
||||
|
||||
You can create CI/CD jobs to build Docker images and publish
|
||||
them to a container registry.
|
||||
Create jobs that build Docker images and publish them to a container registry.
|
||||
BuildKit provides multiple approaches including rootless builds.
|
||||
|
|
|
|||
|
|
@ -0,0 +1,440 @@
|
|||
---
|
||||
stage: Verify
|
||||
group: Pipeline Execution
|
||||
info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments
|
||||
title: Build Docker images with BuildKit
|
||||
---
|
||||
|
||||
{{< details >}}
|
||||
|
||||
- Tier: Free, Premium, Ultimate
|
||||
- Offering: GitLab.com, GitLab Self-Managed, GitLab Dedicated
|
||||
|
||||
{{< /details >}}
|
||||
|
||||
[BuildKit](https://docs.docker.com/build/buildkit/) is the build engine used by Docker
|
||||
and provides multi-platform builds and build caching.
|
||||
|
||||
## BuildKit methods
|
||||
|
||||
BuildKit offers the following methods to build Docker images:
|
||||
|
||||
| Method | Security requirement | Commands | Use when you need |
|
||||
| ----------------- | ------------------------ | ------------------------ | ----------------- |
|
||||
| BuildKit rootless | No privileged containers | `buildctl-daemonless.sh` | Maximum security or a replacement for Kaniko |
|
||||
| Docker Buildx | Requires `docker:dind` | `docker buildx` | Familiar Docker workflow |
|
||||
| Native BuildKit | Requires `docker:dind` | `buildctl` | Advanced BuildKit control |
|
||||
|
||||
## Prerequisites
|
||||
|
||||
- GitLab Runner with Docker executor
|
||||
- Docker 19.03 or later to use Docker Buildx
|
||||
- A project with a `Dockerfile`
|
||||
|
||||
## BuildKit rootless
|
||||
|
||||
BuildKit in standalone mode provides rootless image builds without Docker daemon dependency.
|
||||
This method eliminates privileged containers entirely and provides a direct replacement for Kaniko builds.
|
||||
|
||||
Key differences from other methods:
|
||||
|
||||
- Uses the `moby/buildkit:rootless` image
|
||||
- Includes `BUILDKITD_FLAGS: --oci-worker-no-process-sandbox` for rootless operation
|
||||
- Uses `buildctl-daemonless.sh` to manage BuildKit daemon automatically
|
||||
- No Docker daemon or privileged container dependency
|
||||
- Requires manual registry authentication setup
|
||||
|
||||
### Authenticate with container registries
|
||||
|
||||
GitLab CI/CD provides automatic authentication for the GitLab container registry through
|
||||
predefined variables. For BuildKit rootless, you must manually create the Docker
|
||||
configuration file.
|
||||
|
||||
#### Authenticate with the GitLab container registry
|
||||
|
||||
GitLab automatically provides these predefined variables:
|
||||
|
||||
- `CI_REGISTRY`: Registry URL
|
||||
- `CI_REGISTRY_USER`: Registry username
|
||||
- `CI_REGISTRY_PASSWORD`: Registry password
|
||||
|
||||
To configure authentication for rootless builds, add a `before_script` configuration
|
||||
to your jobs. For example:
|
||||
|
||||
```yaml
|
||||
before_script:
|
||||
- mkdir -p ~/.docker
|
||||
- echo "{\"auths\":{\"$CI_REGISTRY\":{\"username\":\"$CI_REGISTRY_USER\",\"password\":\"$CI_REGISTRY_PASSWORD\"}}}" > ~/.docker/config.json
|
||||
```
|
||||
|
||||
#### Authenticate with multiple registries
|
||||
|
||||
To authenticate with additional container registries, combine authentication entries
|
||||
in your `before_script` section. For example:
|
||||
|
||||
```yaml
|
||||
before_script:
|
||||
- mkdir -p ~/.docker
|
||||
- |
|
||||
echo "{
|
||||
\"auths\": {
|
||||
\"${CI_REGISTRY}\": {
|
||||
\"auth\": \"$(printf "%s:%s" "${CI_REGISTRY_USER}" "${CI_REGISTRY_PASSWORD}" | base64 | tr -d '\n')\"
|
||||
},
|
||||
\"docker.io\": {
|
||||
\"auth\": \"$(printf "%s:%s" "${DOCKER_HUB_USER}" "${DOCKER_HUB_PASSWORD}" | base64 | tr -d '\n')\"
|
||||
}
|
||||
}
|
||||
}" > ~/.docker/config.json
|
||||
```
|
||||
|
||||
#### Authenticate with the dependency proxy
|
||||
|
||||
To pull images through the GitLab dependency proxy, configure the authentication
|
||||
in your `before_script` section. For example:
|
||||
|
||||
```yaml
|
||||
before_script:
|
||||
- mkdir -p ~/.docker
|
||||
- |
|
||||
echo "{
|
||||
\"auths\": {
|
||||
\"${CI_REGISTRY}\": {
|
||||
\"auth\": \"$(printf "%s:%s" "${CI_REGISTRY_USER}" "${CI_REGISTRY_PASSWORD}" | base64 | tr -d '\n')\"
|
||||
},
|
||||
\"$(echo -n $CI_DEPENDENCY_PROXY_SERVER | awk -F[:] '{print $1}')\": {
|
||||
\"auth\": \"$(printf "%s:%s" ${CI_DEPENDENCY_PROXY_USER} "${CI_DEPENDENCY_PROXY_PASSWORD}" | base64 | tr -d '\n')\"
|
||||
}
|
||||
}
|
||||
}" > ~/.docker/config.json
|
||||
```
|
||||
|
||||
For more information, see [authenticate within CI/CD](../../user/packages/dependency_proxy/_index.md#authenticate-within-cicd).
|
||||
|
||||
### Build images in rootless mode
|
||||
|
||||
To build images without Docker daemon dependency, add a job similar to this example:
|
||||
|
||||
```yaml
|
||||
build-rootless:
|
||||
image: moby/buildkit:rootless
|
||||
stage: build
|
||||
variables:
|
||||
BUILDKITD_FLAGS: --oci-worker-no-process-sandbox
|
||||
before_script:
|
||||
- mkdir -p ~/.docker
|
||||
- echo "{\"auths\":{\"$CI_REGISTRY\":{\"username\":\"$CI_REGISTRY_USER\",\"password\":\"$CI_REGISTRY_PASSWORD\"}}}" > ~/.docker/config.json
|
||||
script:
|
||||
- |
|
||||
buildctl-daemonless.sh build \
|
||||
--frontend dockerfile.v0 \
|
||||
--local context=. \
|
||||
--local dockerfile=. \
|
||||
--output type=image,name=$CI_REGISTRY_IMAGE:$CI_COMMIT_SHA,push=true
|
||||
```
|
||||
|
||||
### Build multi-platform images in rootless mode
|
||||
|
||||
To build images for multiple architectures in rootless mode, configure your job
|
||||
to specify the target platforms. For example:
|
||||
|
||||
```yaml
|
||||
build-multiarch-rootless:
|
||||
image: moby/buildkit:rootless
|
||||
stage: build
|
||||
variables:
|
||||
BUILDKITD_FLAGS: --oci-worker-no-process-sandbox
|
||||
before_script:
|
||||
- mkdir -p ~/.docker
|
||||
- echo "{\"auths\":{\"$CI_REGISTRY\":{\"username\":\"$CI_REGISTRY_USER\",\"password\":\"$CI_REGISTRY_PASSWORD\"}}}" > ~/.docker/config.json
|
||||
script:
|
||||
- |
|
||||
buildctl-daemonless.sh build \
|
||||
--frontend dockerfile.v0 \
|
||||
--local context=. \
|
||||
--local dockerfile=. \
|
||||
--opt platform=linux/amd64,linux/arm64 \
|
||||
--output type=image,name=$CI_REGISTRY_IMAGE:$CI_COMMIT_SHA,push=true
|
||||
```
|
||||
|
||||
### Use caching in rootless mode
|
||||
|
||||
To enable registry-based caching for faster subsequent builds, configure cache
|
||||
import and export in your build job. For example:
|
||||
|
||||
```yaml
|
||||
build-cached-rootless:
|
||||
image: moby/buildkit:rootless
|
||||
stage: build
|
||||
variables:
|
||||
BUILDKITD_FLAGS: --oci-worker-no-process-sandbox
|
||||
CACHE_IMAGE: $CI_REGISTRY_IMAGE:cache
|
||||
before_script:
|
||||
- mkdir -p ~/.docker
|
||||
- echo "{\"auths\":{\"$CI_REGISTRY\":{\"username\":\"$CI_REGISTRY_USER\",\"password\":\"$CI_REGISTRY_PASSWORD\"}}}" > ~/.docker/config.json
|
||||
script:
|
||||
- |
|
||||
buildctl-daemonless.sh build \
|
||||
--frontend dockerfile.v0 \
|
||||
--local context=. \
|
||||
--local dockerfile=. \
|
||||
--export-cache type=registry,ref=$CACHE_IMAGE \
|
||||
--import-cache type=registry,ref=$CACHE_IMAGE \
|
||||
--output type=image,name=$CI_REGISTRY_IMAGE:$CI_COMMIT_SHA,push=true
|
||||
```
|
||||
|
||||
### Configure proxy settings
|
||||
|
||||
If your GitLab Runner operates behind an HTTP(S) proxy, configure proxy settings
|
||||
as variables in your job. For example:
|
||||
|
||||
```yaml
|
||||
build-behind-proxy:
|
||||
image: moby/buildkit:rootless
|
||||
stage: build
|
||||
variables:
|
||||
BUILDKITD_FLAGS: --oci-worker-no-process-sandbox
|
||||
http_proxy: <your-proxy>
|
||||
https_proxy: <your-proxy>
|
||||
no_proxy: <your-no-proxy>
|
||||
before_script:
|
||||
- mkdir -p ~/.docker
|
||||
- echo "{\"auths\":{\"$CI_REGISTRY\":{\"username\":\"$CI_REGISTRY_USER\",\"password\":\"$CI_REGISTRY_PASSWORD\"}}}" > ~/.docker/config.json
|
||||
script:
|
||||
- |
|
||||
buildctl-daemonless.sh build \
|
||||
--frontend dockerfile.v0 \
|
||||
--local context=. \
|
||||
--local dockerfile=. \
|
||||
--build-arg http_proxy=$http_proxy \
|
||||
--build-arg https_proxy=$https_proxy \
|
||||
--build-arg no_proxy=$no_proxy \
|
||||
--output type=image,name=$CI_REGISTRY_IMAGE:$CI_COMMIT_SHA,push=true
|
||||
```
|
||||
|
||||
In this example, replace `<your-proxy>` and `<your-no-proxy>` with your proxy configuration.
|
||||
|
||||
### Add custom certificates
|
||||
|
||||
To push to a registry using custom CA certificates, add the certificate to the
|
||||
container's certificate store before building. For example:
|
||||
|
||||
```yaml
|
||||
build-with-custom-certs:
|
||||
image: moby/buildkit:rootless
|
||||
stage: build
|
||||
variables:
|
||||
BUILDKITD_FLAGS: --oci-worker-no-process-sandbox
|
||||
before_script:
|
||||
- |
|
||||
echo "-----BEGIN CERTIFICATE-----
|
||||
...
|
||||
-----END CERTIFICATE-----" >> /etc/ssl/certs/ca-certificates.crt
|
||||
- mkdir -p ~/.docker
|
||||
- echo "{\"auths\":{\"$CI_REGISTRY\":{\"username\":\"$CI_REGISTRY_USER\",\"password\":\"$CI_REGISTRY_PASSWORD\"}}}" > ~/.docker/config.json
|
||||
script:
|
||||
- |
|
||||
buildctl-daemonless.sh build \
|
||||
--frontend dockerfile.v0 \
|
||||
--local context=. \
|
||||
--local dockerfile=. \
|
||||
--output type=image,name=$CI_REGISTRY_IMAGE:$CI_COMMIT_SHA,push=true
|
||||
```
|
||||
|
||||
In this example, replace the certificate placeholder with your actual certificate content.
|
||||
|
||||
## Migrate from Kaniko to BuildKit
|
||||
|
||||
BuildKit rootless is a secure alternative to Kaniko.
|
||||
It offers improved performance, better caching, and enhanced security features while
|
||||
maintaining rootless operation.
|
||||
|
||||
### Update your configuration
|
||||
|
||||
Update your existing Kaniko configuration to use the BuildKit rootless method. For example:
|
||||
|
||||
Before, with Kaniko:
|
||||
|
||||
```yaml
|
||||
build:
|
||||
image:
|
||||
name: gcr.io/kaniko-project/executor:debug
|
||||
entrypoint: [""]
|
||||
script:
|
||||
- /kaniko/executor
|
||||
--context $CI_PROJECT_DIR
|
||||
--dockerfile $CI_PROJECT_DIR/Dockerfile
|
||||
--destination $CI_REGISTRY_IMAGE:$CI_COMMIT_SHA
|
||||
```
|
||||
|
||||
After, with BuildKit rootless:
|
||||
|
||||
```yaml
|
||||
build:
|
||||
image: moby/buildkit:rootless
|
||||
variables:
|
||||
BUILDKITD_FLAGS: --oci-worker-no-process-sandbox
|
||||
before_script:
|
||||
- mkdir -p ~/.docker
|
||||
- echo "{\"auths\":{\"$CI_REGISTRY\":{\"username\":\"$CI_REGISTRY_USER\",\"password\":\"$CI_REGISTRY_PASSWORD\"}}}" > ~/.docker/config.json
|
||||
script:
|
||||
- |
|
||||
buildctl-daemonless.sh build \
|
||||
--frontend dockerfile.v0 \
|
||||
--local context=. \
|
||||
--local dockerfile=. \
|
||||
--output type=image,name=$CI_REGISTRY_IMAGE:$CI_COMMIT_SHA,push=true
|
||||
```
|
||||
|
||||
## Alternative BuildKit methods
|
||||
|
||||
If you don't need rootless builds, BuildKit offers additional methods that require
|
||||
the `docker:dind` service but provide familiar workflows or advanced features.
|
||||
|
||||
### Docker Buildx
|
||||
|
||||
Docker Buildx extends Docker build capabilities with BuildKit features while maintaining
|
||||
familiar command syntax. This method requires the `docker:dind` service.
|
||||
|
||||
#### Build basic images
|
||||
|
||||
To build Docker images with Buildx, configure your job with the `docker:dind` service
|
||||
and create a `buildx` builder. For example:
|
||||
|
||||
```yaml
|
||||
variables:
|
||||
DOCKER_TLS_CERTDIR: "/certs"
|
||||
|
||||
build-image:
|
||||
image: docker:latest
|
||||
services:
|
||||
- docker:dind
|
||||
stage: build
|
||||
before_script:
|
||||
- docker login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY
|
||||
- docker buildx create --use --driver docker-container --name builder
|
||||
- docker buildx inspect --bootstrap
|
||||
script:
|
||||
- docker buildx build --tag $CI_REGISTRY_IMAGE:$CI_COMMIT_SHA --push .
|
||||
after_script:
|
||||
- docker buildx rm builder
|
||||
```
|
||||
|
||||
#### Build multi-platform images
|
||||
|
||||
Multi-platform builds create images for different architectures in a single build command.
|
||||
The resulting manifest supports multiple architectures,
|
||||
and Docker automatically selects the appropriate image for each deployment target.
|
||||
|
||||
To build images for multiple architectures, add the `--platform` flag to specify
|
||||
target architectures. For example:
|
||||
|
||||
```yaml
|
||||
variables:
|
||||
DOCKER_TLS_CERTDIR: "/certs"
|
||||
|
||||
build-multiplatform:
|
||||
image: docker:latest
|
||||
services:
|
||||
- docker:dind
|
||||
stage: build
|
||||
before_script:
|
||||
- docker login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY
|
||||
- docker buildx create --use --driver docker-container --name multibuilder
|
||||
- docker buildx inspect --bootstrap
|
||||
script:
|
||||
- docker buildx build
|
||||
--platform linux/amd64,linux/arm64
|
||||
--tag $CI_REGISTRY_IMAGE:$CI_COMMIT_SHA
|
||||
--push .
|
||||
after_script:
|
||||
- docker buildx rm multibuilder
|
||||
```
|
||||
|
||||
#### Use build caching
|
||||
|
||||
Registry-based caching stores build layers in a container registry for reuse across builds.
|
||||
|
||||
The `mode=max` option exports all layers to the cache
|
||||
and provides maximum reuse potential for subsequent builds.
|
||||
|
||||
To use build caching, add cache options to your build command. For example:
|
||||
|
||||
```yaml
|
||||
variables:
|
||||
DOCKER_TLS_CERTDIR: "/certs"
|
||||
CACHE_IMAGE: $CI_REGISTRY_IMAGE:cache
|
||||
|
||||
build-with-cache:
|
||||
image: docker:latest
|
||||
services:
|
||||
- docker:dind
|
||||
stage: build
|
||||
before_script:
|
||||
- docker login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY
|
||||
- docker buildx create --use --driver docker-container --name cached-builder
|
||||
- docker buildx inspect --bootstrap
|
||||
script:
|
||||
- docker buildx build
|
||||
--cache-from type=registry,ref=$CACHE_IMAGE
|
||||
--cache-to type=registry,ref=$CACHE_IMAGE,mode=max
|
||||
--tag $CI_REGISTRY_IMAGE:$CI_COMMIT_SHA
|
||||
--push .
|
||||
after_script:
|
||||
- docker buildx rm cached-builder
|
||||
```
|
||||
|
||||
### Native BuildKit
|
||||
|
||||
Use native BuildKit `buildctl` commands for more control over the build process.
|
||||
This method requires the `docker:dind` service.
|
||||
|
||||
To use BuildKit directly, configure your job with the BuildKit image and `docker:dind` service. For example:
|
||||
|
||||
```yaml
|
||||
variables:
|
||||
DOCKER_TLS_CERTDIR: "/certs"
|
||||
|
||||
build-with-buildkit:
|
||||
image: moby/buildkit:latest
|
||||
services:
|
||||
- docker:dind
|
||||
stage: build
|
||||
before_script:
|
||||
- mkdir -p ~/.docker
|
||||
- echo "{\"auths\":{\"$CI_REGISTRY\":{\"username\":\"$CI_REGISTRY_USER\",\"password\":\"$CI_REGISTRY_PASSWORD\"}}}" > ~/.docker/config.json
|
||||
script:
|
||||
- |
|
||||
buildctl build \
|
||||
--frontend dockerfile.v0 \
|
||||
--local context=. \
|
||||
--local dockerfile=. \
|
||||
--output type=image,name=$CI_REGISTRY_IMAGE:$CI_COMMIT_SHA,push=true
|
||||
```
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### Build fails with authentication errors
|
||||
|
||||
If you encounter registry authentication failures:
|
||||
|
||||
- Verify that `CI_REGISTRY_USER` and `CI_REGISTRY_PASSWORD` variables are available.
|
||||
- Check that you have push permissions to the target registry.
|
||||
- For external registries, ensure authentication credentials are correctly configured
|
||||
in your project's CI/CD variables.
|
||||
|
||||
### Rootless build fails with permission errors
|
||||
|
||||
For permission-related issues in rootless mode:
|
||||
|
||||
- Ensure `BUILDKITD_FLAGS: --oci-worker-no-process-sandbox` is set.
|
||||
- Verify that the GitLab Runner has sufficient resources allocated.
|
||||
- Check that no privileged operations are attempted in your `Dockerfile`.
|
||||
|
||||
### Multi-platform builds fail
|
||||
|
||||
For multi-platform build issues:
|
||||
|
||||
- Verify that base images in your `Dockerfile` support the target architectures.
|
||||
- Check that architecture-specific dependencies are available for all target platforms.
|
||||
- Consider using conditional statements in your `Dockerfile` for architecture-specific logic.
|
||||
|
|
@ -748,8 +748,10 @@ and [using the OverlayFS storage driver](https://docs.docker.com/storage/storage
|
|||
|
||||
## Docker alternatives
|
||||
|
||||
To build Docker images without enabling privileged mode on the runner,
|
||||
use [`buildah`](#buildah-example).
|
||||
You can build container images without enabling privileged mode on your runner:
|
||||
|
||||
- [BuildKit](using_buildkit.md): Includes rootless BuildKit options that eliminate Docker daemon dependency.
|
||||
- [Buildah](#buildah-example): Build OCI-compliant images without requiring a Docker daemon.
|
||||
|
||||
### Buildah example
|
||||
|
||||
|
|
|
|||
|
|
@ -241,7 +241,7 @@ configured:
|
|||
deployment ruleset.
|
||||
- Developers should be given no more than the Developer role
|
||||
for the top-level group, or explicitly given the Owner role for a child project.
|
||||
They do *not* have access to the CI/CD configurations in the
|
||||
They do not have access to the CI/CD configurations in the
|
||||
top-level group, so operators can ensure that the critical configuration won't
|
||||
be accidentally changed by the developers.
|
||||
- For subgroups and child projects:
|
||||
|
|
|
|||
|
|
@ -67,7 +67,7 @@ When running manual jobs you can supply additional job specific variables.
|
|||
|
||||
You can do this from the job page of the manual job you want to run with
|
||||
additional variables. To access this page, select the **name** of the manual job in
|
||||
the pipeline view, *not* **Run** ({{< icon name="play" >}}).
|
||||
the pipeline view, not **Run** ({{< icon name="play" >}}).
|
||||
|
||||
Define CI/CD variables here when you want to alter the execution of a job that uses
|
||||
[CI/CD variables](../variables/_index.md).
|
||||
|
|
|
|||
|
|
@ -287,7 +287,7 @@ job:
|
|||
- when: always
|
||||
```
|
||||
|
||||
This job does not run when `$CUSTOM_VARIABLE` is false, but it *does* run in **all**
|
||||
This job does not run when `$CUSTOM_VARIABLE` is false, but it does run in **all**
|
||||
other pipelines, including **both** push (branch) and merge request pipelines. With
|
||||
this configuration, every push to an open merge request's source branch
|
||||
causes duplicated pipelines.
|
||||
|
|
@ -420,7 +420,7 @@ Variable pattern matching with regular expressions uses the
|
|||
Expressions evaluate as `true` if:
|
||||
|
||||
- Matches are found when using `=~`.
|
||||
- Matches are *not* found when using `!~`.
|
||||
- Matches are not found when using `!~`.
|
||||
|
||||
For example:
|
||||
|
||||
|
|
|
|||
|
|
@ -17,9 +17,9 @@ Downstream pipelines run independently and concurrently to the upstream pipeline
|
|||
that triggered them.
|
||||
|
||||
- A [parent-child pipeline](downstream_pipelines.md#parent-child-pipelines) is a downstream pipeline
|
||||
triggered in the *same* project as the first pipeline.
|
||||
triggered in the same project as the first pipeline.
|
||||
- A [multi-project pipeline](#multi-project-pipelines) is a downstream pipeline triggered
|
||||
in a *different* project than the first pipeline.
|
||||
in a different project than the first pipeline.
|
||||
|
||||
You can sometimes use parent-child pipelines and multi-project pipelines for similar purposes,
|
||||
but there are [key differences](pipeline_architectures.md).
|
||||
|
|
|
|||
|
|
@ -485,7 +485,7 @@ Project runners process jobs by using a first in, first out ([FIFO](https://en.w
|
|||
{{< alert type="note" >}}
|
||||
|
||||
Project runners do not get shared with forked projects automatically.
|
||||
A fork *does* copy the CI/CD settings of the cloned repository.
|
||||
A fork does copy the CI/CD settings of the cloned repository.
|
||||
|
||||
{{< /alert >}}
|
||||
|
||||
|
|
|
|||
|
|
@ -219,7 +219,7 @@ attached to the resulting Vault token.
|
|||
[Bound claims](https://developer.hashicorp.com/vault/docs/auth/jwt#bound-claims) are predefined
|
||||
values that are matched to the JWT claims. With bounded claims, you can restrict access
|
||||
to specific GitLab users, specific projects, or even jobs running for specific Git
|
||||
references. You can have as many bounded claims you need, but they must *all* match
|
||||
references. You can have as many bounded claims as you need, but they must all match
|
||||
for authentication to be successful.
|
||||
|
||||
Combining bounded claims with GitLab features like [user roles](../../user/permissions.md)
|
||||
|
|
|
|||
|
|
@ -5704,7 +5704,7 @@ job:
|
|||
- postgres
|
||||
```
|
||||
|
||||
In this example, only runners with *both* the `ruby` and `postgres` tags can run the job.
|
||||
In this example, only runners with both the `ruby` and `postgres` tags can run the job.
|
||||
|
||||
**Additional details**:
|
||||
|
||||
|
|
|
|||
|
|
@ -44,7 +44,7 @@ This cascading behavior means:
|
|||
1. When the setting is `true` at a parent level, all child entities are reset to `true`
|
||||
1. When the setting is `false` at a parent level, all child entities are reset to `false`
|
||||
1. A parent entity can "lock" the setting using `lock_duo_features_enabled: true` (displayed as "Always off" in the UI)
|
||||
- When locked, child entities *cannot* override the parent setting
|
||||
- When locked, child entities cannot override the parent setting
|
||||
- This effectively disables GitLab Duo features for the entire hierarchy below that point
|
||||
|
||||
## Feature Accessibility By Context
|
||||
|
|
|
|||
|
|
@ -1854,7 +1854,7 @@ The three states a mutation response can be in are:
|
|||
|
||||
#### Success
|
||||
|
||||
In the happy path, errors *may* be returned, along with the anticipated payload, but
|
||||
In the happy path, errors may be returned, along with the anticipated payload, but
|
||||
if everything was successful, then `errors` should be an empty array, because
|
||||
there are no problems we need to inform the user of.
|
||||
|
||||
|
|
@ -1899,14 +1899,14 @@ need to be told what is wrong, so they understand the reason for the failure and
|
|||
what they can do to achieve their intent. For example, they might only need to retry the
|
||||
request.
|
||||
|
||||
It is possible to return *recoverable* errors alongside mutation data. For example, if
|
||||
It is possible to return recoverable errors alongside mutation data. For example, if
|
||||
a user uploads 10 files and 3 of them fail and the rest succeed, the errors for the
|
||||
failures can be made available to the user, alongside the information about
|
||||
the successes.
|
||||
|
||||
#### Failure (irrelevant to the user)
|
||||
|
||||
One or more *non-recoverable* errors can be returned at the _top level_. These
|
||||
One or more non-recoverable errors can be returned at the _top level_. These
|
||||
are things over which the **user** has little to no control, and should mainly
|
||||
be system or programming problems, that a **developer** needs to know about.
|
||||
In this case there is no `data`:
|
||||
|
|
|
|||
|
|
@ -280,9 +280,9 @@ the `update` method in controllers. With Grape, the framework we use to write
|
|||
the GitLab API, you must explicitly set the `PATCH` or `PUT` HTTP verb for an
|
||||
endpoint that does updates.
|
||||
|
||||
If the endpoint updates *all* attributes of a given resource, use the
|
||||
If the endpoint updates all attributes of a given resource, use the
|
||||
[`PUT`](https://developer.mozilla.org/en-US/docs/Web/HTTP/Methods/PUT) request
|
||||
method. If the endpoint updates *some* attributes of a given resource, use the
|
||||
method. If the endpoint updates some attributes of a given resource, use the
|
||||
[`PATCH`](https://developer.mozilla.org/en-US/docs/Web/HTTP/Methods/PATCH)
|
||||
request method.
|
||||
|
||||
|
|
|
|||
|
|
@ -91,7 +91,7 @@ The attribute reader method (`delayed_project_removal`) returns the correct
|
|||
cascaded value using the following criteria:
|
||||
|
||||
1. Returns the dirty value, if the attribute has changed. This allows standard
|
||||
Rails validators to be used on the attribute, though `nil` values *must* be allowed.
|
||||
Rails validators to be used on the attribute, though `nil` values must be allowed.
|
||||
1. Return locked ancestor value.
|
||||
1. Return locked instance-level application settings value.
|
||||
1. Return this namespace's attribute, if not nil.
|
||||
|
|
|
|||
|
|
@ -260,7 +260,7 @@ job1:
|
|||
### Backward compatibility
|
||||
|
||||
A template might be dynamically included with the `include:template:` keyword. If
|
||||
you make a change to an *existing* template, you **must** make sure that it doesn't break
|
||||
you make a change to an existing template, you **must** make sure that it doesn't break
|
||||
CI/CD in existing projects.
|
||||
|
||||
For example, changing a job name in a template could break pipelines in an existing project.
|
||||
|
|
|
|||
|
|
@ -203,7 +203,7 @@ To reach the definition of done, the merge request must create no regressions an
|
|||
- Verified as supporting [Geo](../../administration/geo/_index.md) through the [self-service framework](../geo/framework.md). For more information, see [Geo is a requirement in the definition of done](../geo/framework.md#geo-is-a-requirement-in-the-definition-of-done).
|
||||
|
||||
If a regression occurs, we prefer you revert the change.
|
||||
Your contribution is *incomplete* until you have made sure it meets all of these
|
||||
Your contribution is incomplete until you have made sure it meets all of these
|
||||
requirements.
|
||||
|
||||
### Functionality
|
||||
|
|
@ -277,7 +277,7 @@ requirements.
|
|||
|
||||
1. Use available components from the GitLab Design System,
|
||||
[Pajamas](https://design.gitlab.com/).
|
||||
1. The MR must include *Before* and *After* screenshots if UI changes are made.
|
||||
1. The MR must include "Before" and "After" screenshots if UI changes are made.
|
||||
1. If the MR changes CSS classes, include the list of affected pages, which
|
||||
can be found by running `grep css-class ./app -R`.
|
||||
|
||||
|
|
|
|||
|
|
@ -114,7 +114,7 @@ In order to extract it from `users` into a new table, we'll have to do the follo
|
|||
- Update the application to read from the new table, and fallback to the original column when there is no data yet.
|
||||
- Start to back-fill the new table
|
||||
1. Release N [example](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/141833)
|
||||
- Finalize the background migration doing the back-fill. This should be done in the next release *after* a [required stop](../../update/upgrade_paths.md).
|
||||
- Finalize the background migration doing the back-fill. This should be done in the next release after a [required stop](../../update/upgrade_paths.md).
|
||||
1. Release N + 1 [example](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/141835)
|
||||
- Update the application to read and write from the new table only.
|
||||
- Ignore the original column. This starts the process of safely removing database columns, as described in our [guides](avoiding_downtime_in_migrations.md#dropping-columns).
|
||||
|
|
|
|||
|
|
@ -291,7 +291,7 @@ The process required follows:
|
|||
|
||||
We are not adding them at the beginning as they are adding overhead to each insert and they
|
||||
would slow down the initial backfilling of the table (in this case for more than half a billion
|
||||
records, which can add up significantly). So we create a lightweight, *vanilla* version of the
|
||||
records, which can add up significantly). So we create a lightweight, vanilla version of the
|
||||
table, copy all the data and then add any remaining indexes and foreign keys.
|
||||
|
||||
1. Swap the base table with partitioned copy: this is when the partitioned table
|
||||
|
|
|
|||
|
|
@ -23,7 +23,7 @@ For example:
|
|||
with an expected release date of 2021-10-22.
|
||||
1. It is merged on 2021-10-19 and available online the same day at <https://docs.gitlab.com>.
|
||||
1. GitLab 14.4 is released on 2021-10-22, based on the `gitlab` codebase from 2021-10-18
|
||||
(one day *before* the update was merged).
|
||||
(one day before the update was merged).
|
||||
1. The change shows up in the 14.5 GitLab Self-Managed release, due to missing the release cutoff
|
||||
for 14.4.
|
||||
|
||||
|
|
|
|||
|
|
@ -308,7 +308,7 @@ Documentation quality might be lower, because:
|
|||
|
||||
- Having documentation in a separate MR will mean far fewer people will see and
|
||||
verify them, increasing the likelihood that issues will be missed.
|
||||
- In a *split* workflow, engineers might only create the documentation MR after
|
||||
- In a split workflow, engineers might only create the documentation MR after
|
||||
the feature MR is ready, or almost ready. This gives the technical writer
|
||||
little time to learn about the feature to do a good review. It also
|
||||
increases pressure on them to review and merge faster than desired, letting
|
||||
|
|
@ -321,7 +321,7 @@ process) has many benefits:
|
|||
|
||||
- There are no timing issues connected to releases:
|
||||
- If a feature slips to the next release, the documentation slips too.
|
||||
- If the feature *just* makes it into a release, the documentation *just*
|
||||
- If the feature just makes it into a release, the documentation just
|
||||
makes it in too.
|
||||
- If a feature makes it to GitLab.com early, the documentation will be ready
|
||||
for our early adopters.
|
||||
|
|
@ -338,7 +338,7 @@ process) has many benefits:
|
|||
to catch issues with examples, and background or concepts that the
|
||||
technical writer may not be aware of.
|
||||
- Increasing visibility of the documentation also has the side effect of
|
||||
improving *other* engineers' documentation. By reviewing each other's MRs,
|
||||
improving other engineers' documentation. By reviewing each other's MRs,
|
||||
each engineer's own documentation skills will improve.
|
||||
- Thinking about the documentation early can help engineers generate better
|
||||
examples, as they will need to think about what examples a user will want,
|
||||
|
|
|
|||
|
|
@ -255,7 +255,7 @@ if a context should even be considered as something we should include in the exp
|
|||
and track events toward. Exclusion means we don't care about the events in relation
|
||||
to the given context.
|
||||
|
||||
These examples exclude all users named `'Richard'`, *and* any account
|
||||
These examples exclude all users named `'Richard'`, and any account
|
||||
older than 2 weeks old. Not only are they given the control behavior - which could
|
||||
be nothing - but no events are tracked in these cases as well.
|
||||
|
||||
|
|
|
|||
|
|
@ -14,7 +14,7 @@ Use semantic HTML, which has accessibility semantics baked in, and ideally test
|
|||
|
||||
In [WebAIM's accessibility analysis of the top million home pages](https://webaim.org/projects/million/#aria),
|
||||
they found that "ARIA correlated to higher detectable errors".
|
||||
It is likely that *misuse* of ARIA is a big cause of increased errors,
|
||||
It is likely that misuse of ARIA is a big cause of increased errors,
|
||||
so when in doubt don't use `aria-*`, `role`, and `tabindex` and stick with semantic HTML.
|
||||
|
||||
## Enable keyboard navigation on macOS
|
||||
|
|
|
|||
|
|
@ -342,7 +342,7 @@ Previously, to enable a feature 25% of the time, we would run the following in S
|
|||
/chatops run feature set new_navigation_bar 25 --random
|
||||
```
|
||||
|
||||
This command enables the `new_navigation_bar` feature for GitLab.com. However, this command does *not* enable the feature for 25% of the total users.
|
||||
This command enables the `new_navigation_bar` feature for GitLab.com. However, this command does not enable the feature for 25% of the total users.
|
||||
Instead, when the feature is checked with `enabled?`, it returns `true` 25% of the time.
|
||||
|
||||
Percentage of time feature flags are now deprecated in favor of [percentage of actors](#percentage-based-actor-selection)
|
||||
|
|
|
|||
|
|
@ -426,7 +426,7 @@ system for which FIPS Linux packages are available.
|
|||
### Nightly Omnibus FIPS builds
|
||||
|
||||
The Distribution team has created [nightly FIPS Omnibus builds](https://packages.gitlab.com/gitlab/nightly-fips-builds),
|
||||
which can be used for *testing* purposes. These should never be used for production environments.
|
||||
which can be used for testing purposes. These should never be used for production environments.
|
||||
|
||||
## Runner
|
||||
|
||||
|
|
|
|||
|
|
@ -166,7 +166,7 @@ the Geo team if you are unsure.
|
|||
|
||||
Models that use [CarrierWave's](https://github.com/carrierwaveuploader/carrierwave) `Uploader::Base` are supported by Geo with the `Geo::BlobReplicatorStrategy` module. For example, see how [Geo replication was implemented for Pipeline Artifacts](https://gitlab.com/gitlab-org/gitlab/-/issues/238464).
|
||||
|
||||
Each file is expected to have its own primary ID and model. Geo strongly recommends treating *every single file* as a first-class citizen, because in our experience this greatly simplifies tracking replication and verification state.
|
||||
Each file is expected to have its own primary ID and model. Geo strongly recommends treating every single file as a first-class citizen, because in our experience this greatly simplifies tracking replication and verification state.
|
||||
|
||||
To implement Geo replication of a new blob-type Model, [open an issue with the provided issue template](https://gitlab.com/gitlab-org/gitlab/-/issues/new?issuable_template=Geo%20Replicate%20a%20new%20blob%20type).
|
||||
|
||||
|
|
|
|||
|
|
@ -42,7 +42,7 @@ in `lib/gitlab/git` changes have to be made to Gitaly.
|
|||
|
||||
There should be no new code that touches Git repositories by using disk access
|
||||
anywhere in the `gitlab` repository. Anything that
|
||||
needs direct access to the Git repository *must* be implemented in Gitaly, and
|
||||
needs direct access to the Git repository must be implemented in Gitaly, and
|
||||
exposed through an RPC.
|
||||
|
||||
It's often easier to develop a new feature in Gitaly if you make the changes to
|
||||
|
|
|
|||
|
|
@ -10,7 +10,7 @@ source-based instead of artifact-based. In an artifact-based dependency
|
|||
management system, packages consist of artifacts generated from source code and
|
||||
are stored in a separate repository system from source code. For example, many
|
||||
NodeJS packages use `npmjs.org` as a package repository and `github.com` as a
|
||||
source repository. On the other hand, packages in Go *are* source code and
|
||||
source repository. On the other hand, packages in Go are source code and
|
||||
releasing a package does not involve artifact generation or a separate
|
||||
repository. Go packages must be stored in a version control repository on a VCS
|
||||
server. Dependencies are fetched directly from their VCS server or via an
|
||||
|
|
@ -51,7 +51,7 @@ on the official Go website.
|
|||
|
||||
- A package is a folder containing `*.go` files.
|
||||
- A module is a folder containing a `go.mod` file.
|
||||
- A module is *usually* also a package, that is a folder containing a `go.mod`
|
||||
- A module is usually also a package, that is a folder containing a `go.mod`
|
||||
file and `*.go` files.
|
||||
- A module may have subdirectories, which may be packages.
|
||||
- Modules usually come in the form of a VCS repository (Git, SVN, Hg, and so on).
|
||||
|
|
|
|||
|
|
@ -17,7 +17,7 @@ This guide describes the dictionary and how it's implemented.
|
|||
|
||||
We are using [JSON Schema](https://gitlab.com/gitlab-org/gitlab/-/blob/master/config/metrics/schema.json) to validate the metrics definition.
|
||||
|
||||
This process is meant to ensure consistent and valid metrics defined for Service Ping. All metrics *must*:
|
||||
This process is meant to ensure consistent and valid metrics defined for Service Ping. All metrics must:
|
||||
|
||||
- Comply with the defined [JSON schema](https://gitlab.com/gitlab-org/gitlab/-/blob/master/config/metrics/schema.json).
|
||||
- Have a unique `key_path`.
|
||||
|
|
|
|||
|
|
@ -462,7 +462,7 @@ are run. It's important to maintain a rough correlation between:
|
|||
1. When a migration is added to the GitLab codebase.
|
||||
1. The timestamp of the migration itself.
|
||||
|
||||
A new migration's timestamp should *never* be before the previous [required upgrade stop](database/required_stops.md).
|
||||
A new migration's timestamp should never be before the previous [required upgrade stop](database/required_stops.md).
|
||||
Migrations are occasionally squashed, and if a migration is added whose timestamp
|
||||
falls before the previous required stop, a problem like what happened in
|
||||
[issue 408304](https://gitlab.com/gitlab-org/gitlab/-/issues/408304) can occur.
|
||||
|
|
|
|||
|
|
@ -9,7 +9,7 @@ This document is a collection of techniques and best practices to consider while
|
|||
|
||||
## Pinning tests
|
||||
|
||||
Pinning tests help you ensure that you don't unintentionally change the output or behavior of the entity you're refactoring. This even includes preserving any existing *buggy* behavior, since consumers may rely on those bugs implicitly.
|
||||
Pinning tests help you ensure that you don't unintentionally change the output or behavior of the entity you're refactoring. This even includes preserving any existing buggy behavior, since consumers may rely on those bugs implicitly.
|
||||
|
||||
### Example steps
|
||||
|
||||
|
|
|
|||
|
|
@ -7,7 +7,7 @@ title: SBoM dependency graph ingestion overview
|
|||
|
||||
## Overview
|
||||
|
||||
The process starts *after* all `SBoM::Occurence` models have been ingested because we ingest them in slices and it would be tricky to process that in slices as well.
|
||||
The process starts after all `SBoM::Occurrence` models have been ingested because we ingest them in slices and it would be tricky to process the graph in slices as well.
|
||||
|
||||
All work happens in a background worker, which will be added in a subsequent MR, so that we do not increase the time it takes to ingest an SBoM report. This means that there will be a delay between when the SBoM report is ingested and when the dependency graph is updated.
|
||||
|
||||
|
|
@ -22,7 +22,7 @@ All record pertaining to dependency graphs are stored in `sbom_graph_paths` data
|
|||
1. After it's done, we fire off [Sbom::BuildDependencyGraphWorker](https://gitlab.com/gitlab-org/gitlab/-/blob/master/ee/app/workers/sbom/build_dependency_graph_worker.rb) which kicks off the dependency graph calculation to a background worker.
|
||||
1. [Sbom::BuildDependencyGraph](https://gitlab.com/gitlab-org/gitlab/-/blob/master/ee/app/services/sbom/build_dependency_graph.rb) does the actual heavy lifting for us. The class is documented so the details are omitted here.
|
||||
1. We will [skip calculation](https://gitlab.com/groups/gitlab-org/-/epics/17340) of the dependency graph if the SBoM report did not change.
|
||||
1. [Sbom::PathFinder](https://gitlab.com/gitlab-org/gitlab/-/blob/master/ee/app/finders/sbom/path_finder.rb) returns *all possible* paths to reach target dependency. Do note that this accepts an `Sbom::Occurrence` because `(name, version)` pair is not precise enough when working with monorepos.
|
||||
1. [Sbom::PathFinder](https://gitlab.com/gitlab-org/gitlab/-/blob/master/ee/app/finders/sbom/path_finder.rb) returns all possible paths to reach target dependency. Do note that this accepts an `Sbom::Occurrence` because `(name, version)` pair is not precise enough when working with monorepos.
|
||||
|
||||
## Details
|
||||
|
||||
|
|
|
|||
|
|
@ -2000,7 +2000,7 @@ helper method to redirect the user to a warning page first. For example:
|
|||
```ruby
|
||||
# Bad :(
|
||||
# This URL comes from User-Land and may not be safe...
|
||||
# We need the user to *see* where they are going.
|
||||
# We need the user to see where they are going.
|
||||
link_to foo_social_url(@user), title: "Foo Social" do
|
||||
sprite_icon('question-o')
|
||||
end
|
||||
|
|
|
|||
|
|
@ -229,7 +229,7 @@ use them as an argument for another query. In general, moving query logic out of
|
|||
and into Ruby is detrimental because PostgreSQL has a query optimizer that performs better
|
||||
when it has relatively more context about the desired operation.
|
||||
|
||||
If, for some reason, you need to `pluck` and use the results in a *single* query then,
|
||||
If, for some reason, you need to `pluck` and use the results in a single query then,
|
||||
most likely, a materialized CTE will be a better choice:
|
||||
|
||||
```sql
|
||||
|
|
@ -638,7 +638,7 @@ Using transactions does not solve this problem.
|
|||
To solve this we've added the `ApplicationRecord.safe_find_or_create_by`.
|
||||
|
||||
This method can be used the same way as
|
||||
`find_or_create_by`, but it wraps the call in a *new* transaction (or a subtransaction) and
|
||||
`find_or_create_by`, but it wraps the call in a new transaction (or a subtransaction) and
|
||||
retries if it were to fail because of an
|
||||
`ActiveRecord::RecordNotUnique` error.
|
||||
|
||||
|
|
|
|||
|
|
@ -292,7 +292,7 @@ end
|
|||
|
||||
Our framework includes a couple of parallelization mechanisms that work by executing spec files in parallel.
|
||||
|
||||
However, because tests are parallelized by spec *file* and not by test/example, we can't achieve greater parallelization if a new test is added to an existing file.
|
||||
However, because tests are parallelized by spec file and not by test/example, we can't achieve greater parallelization if a new test is added to an existing file.
|
||||
|
||||
Nonetheless, there could be other reasons to add a new test to an existing file.
|
||||
|
||||
|
|
|
|||
|
|
@ -23,6 +23,6 @@ def wait(max: 60, time: 0.1, reload: true)
|
|||
end
|
||||
```
|
||||
|
||||
- `max` : Specifies the max amount of *seconds* to wait until the block given is satisfied
|
||||
- `max` : Specifies the max amount of seconds to wait until the block given is satisfied
|
||||
- `time` : The interval/poll time to sleep *in seconds*. If this time reaches `max`, the wait returns `false`
|
||||
- `reload` : If the wait is not satiated, the test will sleep then reload the page if `:reload` is set to `true`
|
||||
|
|
|
|||
|
|
@ -107,7 +107,7 @@ graph RL
|
|||
- **Vuex mutations**:
|
||||
For complex Vuex mutations, you should separate the tests from other parts of the Vuex store to simplify problem-solving.
|
||||
|
||||
#### When *not* to use unit tests
|
||||
#### When not to use unit tests
|
||||
|
||||
- **Non-exported functions or classes**:
|
||||
Anything not exported from a module can be considered private or an implementation detail, and doesn't need to be tested.
|
||||
|
|
@ -130,7 +130,7 @@ graph RL
|
|||
- **Asynchronous background operations**:
|
||||
Background operations cannot be stopped or waited on, so they continue running in the following tests and cause side effects.
|
||||
|
||||
#### What *not* to mock in unit tests
|
||||
#### What not to mock in unit tests
|
||||
|
||||
- **Non-exported functions or classes**:
|
||||
Everything that is not exported can be considered private to the module, and is implicitly tested through the exported classes and functions.
|
||||
|
|
@ -184,7 +184,7 @@ graph RL
|
|||
|
||||
- **Vue components**
|
||||
|
||||
#### When *not* to use component tests
|
||||
#### When not to use component tests
|
||||
|
||||
- **Vue applications**:
|
||||
Vue applications may contain many components.
|
||||
|
|
@ -202,7 +202,7 @@ graph RL
|
|||
Every component is tested individually, so child components are mocked.
|
||||
See also [`shallowMount()`](https://v1.test-utils.vuejs.org/api/#shallowmount)
|
||||
|
||||
#### What *not* to mock in component tests
|
||||
#### What not to mock in component tests
|
||||
|
||||
- **Methods or computed properties of the component under test**:
|
||||
By mocking part of the component under test, the mocks are tested and not the real component.
|
||||
|
|
@ -290,7 +290,7 @@ graph RL
|
|||
Background operations that affect the page must be tested on this level.
|
||||
All other background operations cannot be stopped or waited on, so they continue running in the following tests and cause side effects.
|
||||
|
||||
#### What *not* to mock in integration tests
|
||||
#### What not to mock in integration tests
|
||||
|
||||
- **DOM**:
|
||||
Testing on the real DOM ensures your components work in the intended environment.
|
||||
|
|
@ -322,8 +322,8 @@ Formal definitions:
|
|||
- <https://en.wikipedia.org/wiki/System_testing>
|
||||
- <https://en.wikipedia.org/wiki/White-box_testing>
|
||||
|
||||
These kind of tests ensure the GitLab *Rails* application (for example,
|
||||
`gitlab-foss`/`gitlab`) works as expected from a *browser* point of view.
|
||||
These kind of tests ensure the GitLab Rails application (for example,
|
||||
`gitlab-foss`/`gitlab`) works as expected from a browser point of view.
|
||||
|
||||
Note that:
|
||||
|
||||
|
|
@ -334,7 +334,7 @@ Note that:
|
|||
These tests should only be used when:
|
||||
|
||||
- the functionality/component being tested is small
|
||||
- the internal state of the objects/database *needs* to be tested
|
||||
- the internal state of the objects/database needs to be tested
|
||||
- it cannot be tested at a lower level
|
||||
|
||||
For instance, to test the breadcrumbs on a given page, writing a system test
|
||||
|
|
|
|||
|
|
@ -261,7 +261,7 @@ To get the access credentials that your application needs to communicate with Gi
|
|||
1. Select **Configure** to view the following:
|
||||
- **API URL**: URL where the client (application) connects to get a list of feature flags.
|
||||
- **Instance ID**: Unique token that authorizes the retrieval of the feature flags.
|
||||
- **Application name**: The name of the *environment* the application runs in
|
||||
- **Application name**: The name of the environment the application runs in
|
||||
(not the name of the application itself).
|
||||
|
||||
For example, if the application runs for a production server, the **Application name**
|
||||
|
|
|
|||
|
|
@ -273,7 +273,7 @@ namespace.
|
|||
|
||||
{{< alert type="warning" >}}
|
||||
|
||||
Your apps should *not* be manipulated outside of Helm (using Kubernetes directly).
|
||||
Your apps should not be manipulated outside of Helm (using Kubernetes directly).
|
||||
This can cause confusion with Helm not detecting the change and subsequent
|
||||
deploys with Auto DevOps can undo your changes. Also, if you change something
|
||||
and want to undo it by deploying again, Helm may not detect that anything changed
|
||||
|
|
@ -411,7 +411,7 @@ namespace.
|
|||
|
||||
{{< alert type="warning" >}}
|
||||
|
||||
Your apps should *not* be manipulated outside of Helm (using Kubernetes directly).
|
||||
Your apps should not be manipulated outside of Helm (using Kubernetes directly).
|
||||
This can cause confusion with Helm not detecting the change and subsequent
|
||||
deploys with Auto DevOps can undo your changes. Also, if you change something
|
||||
and want to undo it by deploying again, Helm may not detect that anything changed
|
||||
|
|
|
|||
|
|
@ -111,7 +111,7 @@ methods are:
|
|||
|
||||
If you receive this error, you can do one of the following actions:
|
||||
|
||||
- You can *safely* ignore the warning and continue using the channel 1 PostgreSQL
|
||||
- You can safely ignore the warning and continue using the channel 1 PostgreSQL
|
||||
database by setting `AUTO_DEVOPS_POSTGRES_CHANNEL` to `1` and redeploying.
|
||||
|
||||
- You can delete the channel 1 PostgreSQL database and install a fresh channel 2
|
||||
|
|
|
|||
|
|
@ -122,7 +122,7 @@ create-pages: # a user-defined job that builds your pages and saves them to the
|
|||
|
||||
- `image` specifies an image from the GitLab Registry that contains Hugo. This image is used to create the environment where your site is built.
|
||||
- The `GIT_SUBMODULE_STRATEGY` variable ensures GitLab also looks at your Git submodules, which are sometimes used for Hugo themes.
|
||||
- `test` is a job where you can run tests on your Hugo site before it's deployed. The test job runs in all cases, *except* if you're committing a change to your default branch. You place any commands under `script`. The command in this job - `hugo`- builds your site so it can be tested.
|
||||
- `test` is a job where you can run tests on your Hugo site before it's deployed. The test job runs in all cases, except if you're committing a change to your default branch. You place any commands under `script`. The command in this job - `hugo`- builds your site so it can be tested.
|
||||
- `deploy-pages` is a user-defined job for creating pages from Static Site Generators. Again, this job uses
|
||||
[user-defined job names](../../user/project/pages/_index.md#user-defined-job-names) and runs the `hugo` command to
|
||||
build your site. Then `pages: true` specifies that this is a Pages job and `artifacts` specifies that those resulting pages are added to a directory called `public`. With
|
||||
|
|
|
|||
|
|
@ -19,7 +19,7 @@ Prerequisites:
|
|||
|
||||
{{< alert type="warning" >}}
|
||||
|
||||
**Never** run fuzz testing against a production server. Not only can it perform *any* function that
|
||||
**Never** run fuzz testing against a production server. Not only can it perform any function that
|
||||
the API can, it may also trigger bugs in the API. This includes actions like modifying and deleting
|
||||
data. Only run fuzzing against a test server.
|
||||
|
||||
|
|
|
|||
|
|
@ -40,7 +40,7 @@ For an overview, see [Dynamic Application Security Testing (DAST)](https://www.y
|
|||
|
||||
{{< alert type="warning" >}}
|
||||
|
||||
Do not run DAST scans against a production server. Not only can it perform *any* function that a
|
||||
Do not run DAST scans against a production server. Not only can it perform any function that a
|
||||
user can, such as clicking buttons or submitting forms, but it may also trigger bugs, leading to
|
||||
modification or loss of production data. Only run DAST scans against a test server.
|
||||
|
||||
|
|
|
|||
|
|
@ -36,13 +36,13 @@ Consider using built-in framework capabilities for automatically encoding user i
|
|||
automatically encode input, be careful to use the proper output encoding. The following recommendations
|
||||
are a best effort, and might not work in all circumstances.
|
||||
|
||||
- Encode the following inside HTML tags, *excluding* `script`:
|
||||
- Encode the following inside HTML tags, excluding `script`:
|
||||
- `<` to `<`
|
||||
- `>` to `>`
|
||||
- `'` to `'`
|
||||
- `"` to `"`
|
||||
- `=` to `=`
|
||||
- Encode the following inside attributes, *excluding* event attributes:
|
||||
- Encode the following inside attributes, excluding event attributes:
|
||||
- `<` to `<`
|
||||
- `>` to `>`
|
||||
- `'` to `'`
|
||||
|
|
@ -70,7 +70,7 @@ are a best effort, and might not work in all circumstances.
|
|||
- Literal open brace (`{`) to `\u007b`
|
||||
- Literal close brace (`}`) to `\u007d`
|
||||
- Literal back slash (`\`) to `\\`
|
||||
|
||||
|
||||
This list is not exhaustive. You might need to encode additional characters depending on context.
|
||||
- Inside URLs:
|
||||
- Never allow user input to be printed in URLs. Attackers could inject `javascript:...` code or malicious links.
|
||||
|
|
|
|||
|
|
@ -14,7 +14,7 @@ title: DAST on-demand scan
|
|||
|
||||
{{< alert type="warning" >}}
|
||||
|
||||
Do not run DAST scans against a production server. Not only can it perform *any* function that a user can, such
|
||||
Do not run DAST scans against a production server. Not only can it perform any function that a user can, such
|
||||
as clicking buttons or submitting forms, but it may also trigger bugs, leading to modification or loss of production data.
|
||||
Only run DAST scans against a test server.
|
||||
|
||||
|
|
|
|||
|
|
@ -226,7 +226,7 @@ After about 10 minutes, your cluster is ready.
|
|||
|
||||
When connecting a cluster through GitLab integration, you may specify whether the
|
||||
cluster is RBAC-enabled or not. This affects how GitLab interacts with the
|
||||
cluster for certain operations. If you did *not* check the **RBAC-enabled cluster**
|
||||
cluster for certain operations. If you did not check the **RBAC-enabled cluster**
|
||||
checkbox at creation time, GitLab assumes RBAC is disabled for your cluster
|
||||
when interacting with it. If so, you must disable RBAC on your cluster for the
|
||||
integration to work properly.
|
||||
|
|
|
|||
|
|
@ -101,7 +101,7 @@ an environment's deployment job. This includes `KUBECONFIG`, which gives access
|
|||
any secret available to the associated service account in your cluster.
|
||||
To keep your production credentials safe, consider using
|
||||
[protected environments](../../../ci/environments/protected_environments.md),
|
||||
combined with *one* of the following:
|
||||
combined with one of the following:
|
||||
|
||||
- A GitLab-managed cluster and namespace per environment.
|
||||
- An environment-scoped cluster per protected environment. The same cluster
|
||||
|
|
|
|||
|
|
@ -345,12 +345,12 @@ Sidekiq workers that process the following queues:
|
|||
- `github_importer_advance_stage`
|
||||
|
||||
For an optimal experience, it's recommended having at least 4 Sidekiq processes (each running a number of threads equal
|
||||
to the number of CPU cores) that *only* process these queues. It's also recommended that these processes run on separate
|
||||
to the number of CPU cores) that only process these queues. It's also recommended that these processes run on separate
|
||||
servers. For 4 servers with 8 cores this means you can import up to 32 objects (for example, issues) in parallel.
|
||||
|
||||
Reducing the time spent in cloning a repository can be done by increasing network throughput, CPU capacity, and disk
|
||||
performance (by using high performance SSDs, for example) of the disks that store the Git repositories (for your GitLab instance).
|
||||
Increasing the number of Sidekiq workers does *not* reduce the time spent cloning repositories.
|
||||
Increasing the number of Sidekiq workers does not reduce the time spent cloning repositories.
|
||||
|
||||
### Enable GitHub OAuth using a GitHub Enterprise Cloud OAuth App
|
||||
|
||||
|
|
|
|||
|
|
@ -59,7 +59,7 @@ To create a confidential merge request:
|
|||
1. Select the item that meets your needs:
|
||||
- To create both a branch and a merge request, select
|
||||
**Create confidential merge request and branch**. Your merge request will
|
||||
target the default branch of your fork, *not* the default branch of the
|
||||
target the default branch of your fork, not the default branch of the
|
||||
public upstream project.
|
||||
- To create only a branch, select **Create branch**.
|
||||
1. Select a **Project** to use. These projects have merge requests enabled, and
|
||||
|
|
|
|||
|
|
@ -33,7 +33,7 @@ Differences between burndown and burnup charts are:
|
|||
- Burnup charts reflect the difference between an issue being moved to another milestone
|
||||
(**Total** issues line goes down) and an issue being closed (**Total** issues line remains unchanged).
|
||||
- Burndown charts measure "total issues minus closed issues" for each day while burnup charts measure
|
||||
the total issues (open *and* closed) separately from the issues resolved for each day.
|
||||
the total issues (open and closed) separately from the issues resolved for each day.
|
||||
|
||||
### Switch between number of issues and issue weight
|
||||
|
||||
|
|
@ -43,7 +43,7 @@ When sorting by weight, make sure all your issues have a weight assigned, becaus
|
|||
|
||||
### When to use burndown and burnup charts
|
||||
|
||||
Burndown and burnup charts provide valuable insights when tracking milestone progress.
|
||||
Burndown and burnup charts provide valuable insights when tracking milestone progress.
|
||||
Their use depends on [how you structure your milestones](_index.md) in your workflow.
|
||||
|
||||
These charts help teams:
|
||||
|
|
|
|||
|
|
@ -251,7 +251,7 @@ public/
|
|||
Pages supports reaching each of these files through several different URLs. In
|
||||
particular, it always looks for an `index.html` file if the URL only
|
||||
specifies the directory. If the URL references a file that doesn't exist, but
|
||||
adding `.html` to the URL leads to a file that *does* exist, it's served
|
||||
adding `.html` to the URL leads to a file that does exist, it's served
|
||||
instead. Here are some examples of what happens given the previous Pages site:
|
||||
|
||||
| URL path | HTTP response |
|
||||
|
|
|
|||
|
|
@ -30,7 +30,7 @@ The main difference is the way GitLab determines whether or not the developer's
|
|||
|
||||
GitLab uses its own certificate store and therefore defines the
|
||||
[trust chain](https://www.ssl.com/article/what-is-a-certificate-authority-ca/).
|
||||
For a commit or tag to be *verified* by GitLab:
|
||||
For a commit or tag to be verified by GitLab:
|
||||
|
||||
- The signing certificate email must match a verified email address in GitLab.
|
||||
- The GitLab instance requires a full trust chain
|
||||
|
|
|
|||
|
|
@ -19,7 +19,7 @@ stakeholders, system, software, or anything else you find important to capture.
|
|||
A requirement is an artifact in GitLab which describes the specific behavior of your product.
|
||||
Requirements are long-lived and don't disappear unless manually cleared.
|
||||
|
||||
If an industry standard *requires* that your application has a certain feature or behavior, you can
|
||||
If an industry standard requires that your application has a certain feature or behavior, you can
|
||||
[create a requirement](#create-a-requirement) to reflect this.
|
||||
When a feature is no longer necessary, you can [archive the related requirement](#archive-a-requirement).
|
||||
|
||||
|
|
|
|||
|
|
@ -70,6 +70,7 @@ RSpec.describe RapidDiffs::StreamingResource, type: :controller, feature_categor
|
|||
params: ActionController::Parameters.new)
|
||||
allow(controller_instance).to receive_message_chain(:helpers, :diff_view).and_return('inline')
|
||||
allow(mock_resource).to receive(:diffs_for_streaming).and_return(mock_diffs)
|
||||
allow(mock_resource).to receive(:first_diffs_slice).with(1, any_args).and_return(diff_files)
|
||||
allow(RapidDiffs::DiffFileComponent).to receive_message_chain(:with_collection, :render_in)
|
||||
.and_return(diffs_html)
|
||||
end
|
||||
|
|
|
|||
|
|
@ -9,7 +9,7 @@ RSpec.describe 'Work item detail', :js, feature_category: :team_planning do
|
|||
let_it_be_with_reload(:user2) { create(:user, name: 'John') }
|
||||
|
||||
let_it_be(:group) { create(:group) }
|
||||
let_it_be(:project) { create(:project, :public, group: group) }
|
||||
let_it_be(:project) { create(:project, :public, :repository, group: group) }
|
||||
let_it_be(:label) { create(:label, project: project, title: "testing-label") }
|
||||
let_it_be(:label2) { create(:label, project: project, title: "another-label") }
|
||||
let_it_be(:work_item) { create(:work_item, project: project, labels: [label]) }
|
||||
|
|
@ -177,4 +177,50 @@ RSpec.describe 'Work item detail', :js, feature_category: :team_planning do
|
|||
|
||||
it_behaves_like 'change type action is not displayed'
|
||||
end
|
||||
|
||||
context 'for development widget' do
|
||||
let_it_be(:merge_request) do
|
||||
create(
|
||||
:merge_request,
|
||||
source_project: project,
|
||||
source_branch: "#{work_item.iid}-feature",
|
||||
target_project: project,
|
||||
target_branch: "master",
|
||||
title: "Related Merge Request",
|
||||
description: "Merge request description, fixes ##{work_item.iid}"
|
||||
)
|
||||
end
|
||||
|
||||
before_all do
|
||||
project.add_developer(user)
|
||||
end
|
||||
|
||||
context 'for user signed in' do
|
||||
before do
|
||||
sign_in(user)
|
||||
visit work_items_path
|
||||
|
||||
wait_for_all_requests
|
||||
end
|
||||
|
||||
it 'shows development widget with merge request' do
|
||||
within_testid('work-item-development') do
|
||||
expect(page.find('li a')[:href]).to include(merge_request_path(merge_request))
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
context 'for user not signed in' do
|
||||
before do
|
||||
visit work_items_path
|
||||
wait_for_all_requests
|
||||
end
|
||||
|
||||
it 'shows development widget with merge request' do
|
||||
within_testid('work-item-development') do
|
||||
expect(page.find('li a')[:href]).to include(merge_request_path(merge_request))
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
||||
|
|
|
|||
|
|
@ -66,7 +66,7 @@ RSpec.describe 'Cursor based batched background migrations', feature_category: :
|
|||
|
||||
let(:runner) { Gitlab::Database::BackgroundMigration::BatchedMigrationRunner.new(connection: connection) }
|
||||
|
||||
it 'migrates correctly' do
|
||||
it 'migrates correctly', quarantine: 'https://gitlab.com/gitlab-org/gitlab/-/issues/505354' do
|
||||
runner.run_entire_migration(migration)
|
||||
expect(model.where(backfilled: 1).count).to eq(model.count)
|
||||
|
||||
|
|
|
|||
|
|
@ -220,4 +220,20 @@ RSpec.describe Compare, feature_category: :source_code_management do
|
|||
let(:resource) { compare }
|
||||
end
|
||||
end
|
||||
|
||||
describe '#first_diffs_slice' do
|
||||
let(:limit) { 5 }
|
||||
|
||||
subject(:first_diffs_slice) { compare.first_diffs_slice(limit) }
|
||||
|
||||
it 'returns limited diffs' do
|
||||
expect(first_diffs_slice.count).to eq(limit)
|
||||
end
|
||||
|
||||
it 'passes the correct options to diffs' do
|
||||
expect(compare).to receive(:diffs).with(hash_including(max_files: limit)).and_call_original
|
||||
|
||||
first_diffs_slice
|
||||
end
|
||||
end
|
||||
end
|
||||
|
|
|
|||
Loading…
Reference in New Issue