Add latest changes from gitlab-org/gitlab@master

This commit is contained in:
GitLab Bot 2025-07-02 15:10:08 +00:00
parent 36aae58f55
commit b29b091f52
34 changed files with 456 additions and 319 deletions

View File

@ -249,16 +249,12 @@ export default {
'ee/app/assets/javascripts/boards/components/epics_swimlanes.vue',
'ee/app/assets/javascripts/boards/components/group_select.vue',
'ee/app/assets/javascripts/boards/components/issue_board_filtered_search.vue',
'ee/app/assets/javascripts/burndown_chart/components/burn_charts.vue',
'ee/app/assets/javascripts/burndown_chart/components/open_timebox_summary.vue',
'ee/app/assets/javascripts/ci/job_details/components/job_log_top_bar.vue',
'ee/app/assets/javascripts/ci/merge_trains/components/merge_trains_table.vue',
'ee/app/assets/javascripts/ci/runner/components/shared_runner_limit_block.vue',
'ee/app/assets/javascripts/ci/secrets/components/secret_details/secret_details.vue',
'ee/app/assets/javascripts/ci/secrets/components/secret_details/secret_details_wrapper.vue',
'ee/app/assets/javascripts/ci/secrets/components/secret_form/secret_form.vue',
'ee/app/assets/javascripts/dependencies/components/app.vue',
'ee/app/assets/javascripts/dependencies/components/dependency_project_count.vue',
'ee/app/assets/javascripts/environments_dashboard/components/dashboard/dashboard.vue',
'ee/app/assets/javascripts/environments_dashboard/components/dashboard/environment.vue',
'ee/app/assets/javascripts/geo_sites/components/header/geo_site_last_updated.vue',

View File

@ -1,4 +1,5 @@
<script>
import { GlAlert } from '@gitlab/ui';
import MergeRequestsWidget from './merge_requests_widget.vue';
import WorkItemsWidget from './work_items_widget.vue';
import ActivityWidget from './activity_widget.vue';
@ -7,6 +8,7 @@ import TodosWidget from './todos_widget.vue';
export default {
components: {
GlAlert,
MergeRequestsWidget,
WorkItemsWidget,
ActivityWidget,
@ -31,6 +33,11 @@ export default {
required: true,
},
},
data() {
return {
hasMergeRequestsMetadataError: false,
};
},
};
</script>
@ -39,10 +46,22 @@ export default {
<h1>{{ __("Today's highlights") }}</h1>
<div class="gl-grid gl-grid-cols-1 gl-gap-6 md:gl-grid-cols-3">
<div class="gl-flex gl-flex-col gl-gap-4 md:gl-col-span-2">
<gl-alert
v-if="hasMergeRequestsMetadataError"
variant="warning"
data-testid="merge-requests-fetch-metadata-error"
@dismiss="hasMergeRequestsMetadataError = false"
>{{
s__(
'Homepage|The number of merge requests is not available. Please refresh the page to try again.',
)
}}</gl-alert
>
<div class="gl-grid gl-grid-cols-1 gl-gap-5 lg:gl-grid-cols-2">
<merge-requests-widget
:review-requested-path="reviewRequestedPath"
:assigned-to-you-path="assignedMergeRequestsPath"
@fetch-metadata-error="hasMergeRequestsMetadataError = true"
/>
<work-items-widget
:assigned-to-you-path="assignedWorkItemsPath"

View File

@ -1,8 +1,7 @@
<script>
import { GlIcon, GlLink, GlBadge } from '@gitlab/ui';
import timeagoMixin from '~/vue_shared/mixins/timeago';
import { createAlert, VARIANT_WARNING } from '~/alert';
import { __ } from '~/locale';
import * as Sentry from '~/sentry/sentry_browser_wrapper';
import mergeRequestsWidgetMetadataQuery from '../graphql/queries/merge_requests_widget_metadata.query.graphql';
export default {
@ -27,7 +26,6 @@ export default {
data() {
return {
metadata: {},
hasFetchError: false,
};
},
apollo: {
@ -42,15 +40,8 @@ export default {
return currentUser;
},
error(error) {
this.hasFetchError = true;
createAlert({
title: __('Number of merge requests not available'),
message: __(
'The number of merge requests is not available. Please refresh the page to try again.',
),
variant: VARIANT_WARNING,
error,
});
this.$emit('fetch-metadata-error');
Sentry.captureException(error);
},
},
},
@ -59,25 +50,13 @@ export default {
return this.$apollo.queries.metadata.loading;
},
reviewRequestedCount() {
if (
this.isLoadingMetadata ||
this.hasFetchError ||
this.metadata.reviewRequestedMergeRequests?.count === undefined
)
return '-';
return this.metadata.reviewRequestedMergeRequests.count;
return this.metadata.reviewRequestedMergeRequests?.count ?? '-';
},
reviewRequestedLastUpdatedAt() {
return this.metadata?.reviewRequestedMergeRequests?.nodes?.[0]?.updatedAt ?? null;
},
assignedCount() {
if (
this.isLoadingMetadata ||
this.hasFetchError ||
this.metadata.assignedMergeRequests?.count === undefined
)
return '-';
return this.metadata.assignedMergeRequests.count;
return this.metadata.assignedMergeRequests?.count ?? '-';
},
assignedLastUpdatedAt() {
return this.metadata?.assignedMergeRequests?.nodes?.[0]?.updatedAt ?? null;

View File

@ -8,7 +8,7 @@ module MilestoneActions
format.html { redirect_to milestone_redirect_path }
format.json do
render json: tabs_json("shared/milestones/_issues_tab", {
issues: @milestone.sorted_issues(current_user), # rubocop:disable Gitlab/ModuleWithInstanceVariables
issues: @milestone.milestone_issues(current_user), # rubocop:disable Gitlab/ModuleWithInstanceVariables
show_project_name: Gitlab::Utils.to_boolean(params[:show_project_name])
})
end
@ -20,7 +20,7 @@ module MilestoneActions
format.html { redirect_to milestone_redirect_path }
format.json do
render json: tabs_json("shared/milestones/_merge_requests_tab", {
merge_requests: @milestone.sorted_merge_requests(current_user).preload_milestoneish_associations, # rubocop:disable Gitlab/ModuleWithInstanceVariables
merge_requests: @milestone.merge_requests_visible_to_user(current_user).preload_milestoneish_associations, # rubocop:disable Gitlab/ModuleWithInstanceVariables
show_project_name: Gitlab::Utils.to_boolean(params[:show_project_name])
})
end

View File

@ -30,6 +30,14 @@ module Mutations
required: false,
description: 'Input for milestone widget.',
experiment: { milestone: '18.2' }
argument :state_event, ::Types::WorkItems::StateEventEnum,
required: false,
description: 'Close or reopen multiple work items at once.',
experiment: { milestone: '18.2' }
argument :subscription_event, ::Types::WorkItems::SubscriptionEventEnum,
required: false,
description: 'Subscribe or unsubscribe from the work items.',
experiment: { milestone: '18.2' }
argument :parent_id, ::Types::GlobalIDType[::WorkItems::Parent],
required: true,

View File

@ -0,0 +1,13 @@
# frozen_string_literal: true

module Types
  module WorkItems
    # GraphQL enum listing the subscription actions that can be applied to
    # work items (consumed by the bulk-update mutation's `subscriptionEvent`
    # argument, which references ::Types::WorkItems::SubscriptionEventEnum).
    # NOTE(review): BaseEnum is project-defined; assumed to expose each
    # declared value under its GraphQL name — confirm against Types::BaseEnum.
    class SubscriptionEventEnum < BaseEnum
      graphql_name 'WorkItemSubscriptionEvent'
      description 'Values for work item subscription events'

      # External GraphQL value SUBSCRIBE maps to the internal string 'subscribe'.
      value 'SUBSCRIBE', 'Subscribe to a work item.', value: 'subscribe'
      # External GraphQL value UNSUBSCRIBE maps to the internal string 'unsubscribe'.
      value 'UNSUBSCRIBE', 'Unsubscribe from a work item.', value: 'unsubscribe'
    end
  end
end

View File

@ -61,7 +61,7 @@ module Milestoneish
.distinct
end
def sorted_issues(user)
def milestone_issues(user)
work_items_finder_params = issues_finder_params
work_items_finder_params[:include_descendants] = true if work_items_finder_params[:include_subgroups]
@ -71,18 +71,12 @@ module Milestoneish
.execute.preload_associated_models
.where(milestone_id: milestoneish_id)
.limit(DISPLAY_ISSUES_LIMIT)
.sort_by_attribute('label_priority')
WorkItem.where(id: WorkItem.select(:id).from(work_item_ids))
.preload_associated_models
.sort_by_attribute('label_priority')
.preload(namespace: :route)
end
def sorted_merge_requests(user)
merge_requests_visible_to_user(user).sort_by_attribute('label_priority')
end
def merge_requests_visible_to_user(user)
memoize_per_user(user, :merge_requests_visible_to_user) do
MergeRequestsFinder.new(user, issues_finder_params)

View File

@ -79,7 +79,7 @@
%span= milestone.issues_visible_to_user(current_user).count
.title.hide-collapsed
= s_('MilestoneSidebar|Work items')
= gl_badge_tag milestone.sorted_issues(current_user).length, variant: :muted
= gl_badge_tag milestone.milestone_issues(current_user).length, variant: :muted
- if show_new_issue_link?(project)
= link_to new_project_issue_path(project, issue: { milestone_id: milestone.id }), class: "gl-float-right", title: s_('MilestoneSidebar|New Issue') do
= s_('MilestoneSidebar|New issue')
@ -87,11 +87,11 @@
%span.milestone-stat
= link_to milestones_browse_issuables_path(milestone, type: :issues) do
= s_('MilestoneSidebar|Open:')
= milestone.sorted_issues(current_user).opened.length
= milestone.milestone_issues(current_user).opened.length
%span.milestone-stat
= link_to milestones_browse_issuables_path(milestone, type: :issues, state: 'closed') do
= s_('MilestoneSidebar|Closed:')
= milestone.sorted_issues(current_user).closed.length
= milestone.milestone_issues(current_user).closed.length
.block
.js-sidebar-time-tracking-root{ data: { time_estimate: @milestone.total_time_estimate,

View File

@ -8,7 +8,7 @@
= gl_tabs_nav({ class: %w[scrolling-tabs js-milestone-tabs] }) do
= gl_tab_link_to '#tab-issues', item_active: true, data: { endpoint: milestone_tab_path(milestone, 'issues', show_project_name: show_project_name) } do
= _('Work items')
= gl_tab_counter_badge milestone.sorted_issues(current_user).length
= gl_tab_counter_badge milestone.milestone_issues(current_user).length
- if milestone.merge_requests_enabled?
= gl_tab_link_to '#tab-merge-requests', data: { endpoint: milestone_tab_path(milestone, 'merge_requests', show_project_name: show_project_name) } do
= _('Merge requests')

View File

@ -5,6 +5,9 @@ module Ci
include ApplicationWorker
data_consistency :sticky
concurrency_limit -> {
1000 if Feature.enabled?(:concurrency_limit_ci_archive_trace_worker, Feature.current_request)
}
sidekiq_options retry: 3
include PipelineBackgroundQueue

View File

@ -0,0 +1,10 @@
---
# Feature flag definition for concurrency_limit_ci_archive_trace_worker.
# When enabled, it gates the Sidekiq concurrency limit (1000) applied in
# the Ci archive-trace worker's `concurrency_limit` block.
name: concurrency_limit_ci_archive_trace_worker
description:
feature_issue_url:
introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/196373
rollout_issue_url: https://gitlab.com/gitlab-org/gitlab/-/issues/553177
milestone: '18.2'
group: group::pipeline execution
# gitlab_com_derisk flags default to disabled and are rolled out gradually.
type: gitlab_com_derisk
default_enabled: false

View File

@ -113,6 +113,8 @@
- 2
- - authz_user_group_member_roles_destroy_for_group
- 1
- - authz_user_group_member_roles_destroy_for_shared_group
- 1
- - auto_devops
- 2
- - auto_merge

View File

@ -156,12 +156,12 @@ or for users to re-add their keys.
This overview is brief. Refer to the previous instructions for more context.
1. [Rebuild the `authorized_keys` file](../raketasks/maintenance.md#rebuild-authorized_keys-file).
1. Enable writes to the `authorized_keys` file.
1. On the left sidebar, at the bottom, select **Admin**.
1. On the left sidebar, select **Settings > Network**.
1. Expand **Performance optimization**.
1. Select the **Use `authorized_keys` file to authenticate SSH keys** checkbox.
1. [Rebuild the `authorized_keys` file](../raketasks/maintenance.md#rebuild-authorized_keys-file).
1. Remove the `AuthorizedKeysCommand` lines from `/etc/ssh/sshd_config` or from `/assets/sshd_config` if you are using Docker
from a Linux package installation.
1. Reload `sshd`: `sudo service sshd reload`.

View File

@ -1352,7 +1352,7 @@ only starts it again after garbage collection completes. If you prefer to avoid
you can manually set the container registry to [read-only mode and bypass `gitlab-ctl`](#performing-garbage-collection-without-downtime).
This command proceeds only if the metadata is in object storage. This command does not proceed
if the [container registry metadata database](#container-registry-metadata-database) is enabled.
if the [container registry metadata database](#container-registry-metadata-database) is enabled.
{{< /alert >}}
@ -1677,52 +1677,59 @@ To configure GitLab and the container registry on separate nodes:
1. Ensure file permissions are correct.
1. Run `sudo gitlab-ctl reconfigure` on both nodes.
## Architecture of GitLab container registry
## Container registry architecture
The GitLab registry is what users use to store their own Docker images.
Because of that the Registry is client facing, meaning that we expose it directly
on the web server (or load balancers, LB for short).
Users can store their own Docker images in the container registry. Because the registry
is client facing, the registry is directly exposed
on the web server or load balancer (LB).
```mermaid
%%{init: { "fontFamily": "GitLab Sans" }}%%
flowchart LR
A[User] --->|1: Docker login<br>on port 443| C{Frontend load<br>balancer}
C --->|2: connection attempt<br>without token fails| D[Registry]
C --->|5: connect with <br>token succeeds| D[Registry]
C --->|3: Docker<br>requests token| E[API frontend]
E --->|4:API returns<br>signed token| C
accTitle: Container registry authentication flow
accDescr: Shows how users authenticate with the container registry with GitLab API to push and pull Docker images
A[User] --->|1: Docker login<br>on port 443| C{Frontend load<br>balancer}
C --->|2: connection attempt<br>without token fails| D[Container registry]
C --->|5: connect with token succeeds| D[Container registry]
C --->|3: Docker<br>requests token| E[API frontend]
E --->|4: API returns<br>signed token| C
linkStyle 1 stroke-width:4px,stroke:red
linkStyle 2 stroke-width:4px,stroke:green
```
The flow described by the previous diagram:
The authentication flow includes these steps:
1. A user runs `docker login registry.gitlab.example` on their client. This reaches the web server (or LB) on port 443.
1. Web server connects to the Registry backend pool (by default, using port 5000). Because the user
didn't provide a valid token, the Registry returns a 401 HTTP code and the URL (`token_realm` from
Registry configuration) where to get one. This points to the GitLab API.
1. The Docker client then connects to the GitLab API and obtains a token.
1. The API signs the token with the registry key and hands it to the Docker client
1. The Docker client now logs in again with the token received from the API. It can now push and pull Docker images.
1. A user runs `docker login registry.gitlab.example` on their client. This request reaches the web server (or LB) on port 443.
1. The web server connects to the registry backend pool (port 5000 by default). Because the user does not have a valid token, the registry returns a `401 Unauthorized` HTTP code and a URL to get a token. The URL is defined by the [`token_realm`](#registry-node-settings) setting in the registry configuration and points to the GitLab API.
1. The Docker client connects to the GitLab API and obtains a token.
1. The API signs the token with the registry key and sends it to the Docker client.
1. The Docker client logs in again with the token received from the API. The authenticated client can now push and pull Docker images.
Reference: <https://distribution.github.io/distribution/spec/auth/token/>
### Communication between GitLab and Registry
### Communication between GitLab and the container registry
Registry doesn't have a way to authenticate users internally so it relies on
GitLab to validate credentials. The connection between Registry and GitLab is
TLS encrypted. The key is used by GitLab to sign the tokens while the certificate
is used by Registry to validate the signature. By default, a self-signed certificate key pair is generated
for all installations. This can be overridden as needed.
The container registry cannot authenticate users internally, so it validates credentials through GitLab.
The connection between the registry and GitLab is
TLS encrypted.
GitLab interacts with the Registry using the Registry private key. When a Registry
request goes out, a new short-living (10 minutes) namespace limited token is generated
GitLab uses the private key to sign tokens, and the registry uses the public key provided
by the certificate to validate the signature.
By default, a self-signed certificate key pair is generated
for all installations. You can override this behavior using the [`internal_key`](#registry-node-settings) setting in the registry configuration.
The following steps describe the communication flow:
1. GitLab interacts with the registry using the registry's private key. When a registry
request is sent, a short-lived (10 minutes), namespace-limited token is generated
and signed with the private key.
The Registry then verifies that the signature matches the registry certificate
1. The registry verifies that the signature matches the registry certificate
specified in its configuration and allows the operation.
GitLab background jobs processing (through Sidekiq) also interacts with Registry.
These jobs talk directly to Registry to handle image deletion.
1. GitLab processes background jobs through Sidekiq, which also interacts with the registry.
These jobs communicate directly with the registry to handle image deletion.
## Migrate from a third-party registry

View File

@ -71,7 +71,7 @@ Additional information about this: [issue 18239](https://gitlab.com/gitlab-org/g
## Docker login attempt fails with: 'token signed by untrusted key'
[Registry relies on GitLab to validate credentials](container_registry.md#architecture-of-gitlab-container-registry)
[Registry relies on GitLab to validate credentials](container_registry.md#container-registry-architecture)
If the registry fails to authenticate valid login attempts, you get the following error message:
```shell

View File

@ -103,8 +103,7 @@ To prevent this from happening, you can set a hard limit for your repositories'
This limit can be set globally, per group, or per project, with per project limits
taking the highest priority.
The repository size limit applies to both private and public projects. It includes repository files and LFS,
but does not include:
The repository size limit applies to both private and public projects. It includes repository files and Git LFS objects (even when stored in external object storage), but does not include:
- Artifacts
- Containers
@ -148,7 +147,21 @@ The first push of a new project, including LFS objects, is checked for size.
If the sum of their sizes exceeds the maximum allowed repository size, the push
is rejected.
For details on manually purging files, see [reducing the repository size using Git](../../user/project/repository/repository_size.md#methods-to-reduce-repository-size).
### Check repository size
To determine if a project is nearing its configured repository size limit:
1. [View your storage usage](../../user/storage_usage_quotas.md#view-storage).
The **Repository** size includes both Git repository files and
[Git LFS](../../topics/git/lfs/_index.md) objects.
1. Compare the current usage to your configured repository size limit to estimate
remaining capacity.
You can also use the [Projects API](../../api/projects.md) to retrieve repository
statistics.
To reduce repository size, see
[methods to reduce repository size](../../user/project/repository/repository_size.md#methods-to-reduce-repository-size).
## Session duration

View File

@ -13006,6 +13006,8 @@ Input type: `WorkItemBulkUpdateInput`
| <a id="mutationworkitembulkupdatelabelswidget"></a>`labelsWidget` | [`WorkItemWidgetLabelsUpdateInput`](#workitemwidgetlabelsupdateinput) | Input for labels widget. |
| <a id="mutationworkitembulkupdatemilestonewidget"></a>`milestoneWidget` {{< icon name="warning-solid" >}} | [`WorkItemWidgetMilestoneInput`](#workitemwidgetmilestoneinput) | **Deprecated**: **Status**: Experiment. Introduced in GitLab 18.2. |
| <a id="mutationworkitembulkupdateparentid"></a>`parentId` | [`WorkItemsParentID!`](#workitemsparentid) | Global ID of the parent to which the bulk update will be scoped. The parent can be a project. The parent can also be a group (Premium and Ultimate only). Example `WorkItemsParentID` are `"gid://gitlab/Project/1"` and `"gid://gitlab/Group/1"`. |
| <a id="mutationworkitembulkupdatestateevent"></a>`stateEvent` {{< icon name="warning-solid" >}} | [`WorkItemStateEvent`](#workitemstateevent) | **Deprecated**: **Status**: Experiment. Introduced in GitLab 18.2. |
| <a id="mutationworkitembulkupdatesubscriptionevent"></a>`subscriptionEvent` {{< icon name="warning-solid" >}} | [`WorkItemSubscriptionEvent`](#workitemsubscriptionevent) | **Deprecated**: **Status**: Experiment. Introduced in GitLab 18.2. |
#### Fields
@ -43925,6 +43927,7 @@ The category of the additional context.
| <a id="aiadditionalcontextcategoryrepository"></a>`REPOSITORY` | Repository content category. |
| <a id="aiadditionalcontextcategorysnippet"></a>`SNIPPET` | Snippet content category. |
| <a id="aiadditionalcontextcategoryterminal"></a>`TERMINAL` | Terminal content category. |
| <a id="aiadditionalcontextcategoryuser_rule"></a>`USER_RULE` | User_rule content category. |
### `AiCatalogItemType`
@ -47881,6 +47884,15 @@ Category of the work item status.
| <a id="workitemstatuscategoryenumto_do"></a>`TO_DO` | To do status category. |
| <a id="workitemstatuscategoryenumtriage"></a>`TRIAGE` | Triage status category. |
### `WorkItemSubscriptionEvent`
Values for work item subscription events.
| Value | Description |
| ----- | ----------- |
| <a id="workitemsubscriptioneventsubscribe"></a>`SUBSCRIBE` | Subscribe to a work item. |
| <a id="workitemsubscriptioneventunsubscribe"></a>`UNSUBSCRIBE` | Unsubscribe from a work item. |
### `WorkItemTodoUpdateAction`
Values for work item to-do update enum.

View File

@ -178,14 +178,7 @@ all variables become available to the pipeline.
### For a project
{{< history >}}
- [Introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/362227) in GitLab 15.7, projects can have a maximum of 200 CI/CD variables.
- [Updated](https://gitlab.com/gitlab-org/gitlab/-/issues/373289) in GitLab 15.9, projects can have a maximum of 8000 CI/CD variables.
{{< /history >}}
You can add CI/CD variables to a project's settings.
You can add CI/CD variables to a project's settings. Projects can have a maximum of 8000 CI/CD variables.
Prerequisites:
@ -213,14 +206,7 @@ or in [job scripts](job_scripts.md).
### For a group
{{< history >}}
- [Introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/362227) in GitLab 15.7, groups can have a maximum of 200 CI/CD variables.
- [Updated](https://gitlab.com/gitlab-org/gitlab/-/issues/373289) in GitLab 15.9, groups can have a maximum of 30000 CI/CD variables.
{{< /history >}}
You can make a CI/CD variable available to all projects in a group.
You can make a CI/CD variable available to all projects in a group. Groups can have a maximum of 30000 CI/CD variables.
Prerequisites:
@ -386,13 +372,7 @@ variable has the same value, that value is also masked, including when a variabl
references a masked variable. The string `[MASKED]` is shown instead of the value,
possibly with some trailing `x` characters.
Different versions of [GitLab Runner](../runners/_index.md) have different masking limitations:
| Version | Limitations |
| ------------------- | ----------- |
| v14.1.0 and earlier | Masking of large secrets (greater than 4 KiB) could potentially be [revealed](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/28128). No sensitive URL parameter masking. |
| v14.2.0 to v15.3.0 | The tail of a large secret (greater than 4 KiB) could potentially be [revealed](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/28128). No sensitive URL parameter masking. |
| v15.7.0 and later | Secrets could be revealed when `CI_DEBUG_SERVICES` is enabled. For details, read about [service container logging](../services/_index.md#capturing-service-container-logs). |
Secrets could be revealed when `CI_DEBUG_SERVICES` is enabled. For details, read about [service container logging](../services/_index.md#capturing-service-container-logs).
### Hide a CI/CD variable
@ -476,14 +456,6 @@ as a `--certificate-authority` option, which accepts a path to a file:
kubectl config set-cluster e2e --server="$KUBE_URL" --certificate-authority="$KUBE_CA_PEM"
```
{{< alert type="warning" >}}
Be careful when assigning the value of a file variable to another variable in GitLab 15.6 or older.
The other variable takes the content of the file as its value, **not** the path to the file.
In GitLab 15.7 and later, this behavior [was fixed](https://gitlab.com/gitlab-org/gitlab/-/issues/29407) and the other variable now takes the path to the file as the value.
{{< /alert >}}
#### Use a `.gitlab-ci.yml` variable as a file type variable
You cannot set a CI/CD variable [defined in the `.gitlab-ci.yml` file](#define-a-cicd-variable-in-the-gitlab-ciyml-file)
@ -507,12 +479,6 @@ job:
## Prevent CI/CD variable expansion
{{< history >}}
- [Introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/217309) in GitLab 15.7.
{{< /history >}}
Expanded variables treat values with the `$` character as a reference to another variable.
CI/CD variables are expanded by default. To treat variables with a `$` character as raw strings,
disable variable expansion for the variable

View File

@ -228,7 +228,7 @@ The administrator is notified that 2FA has been disabled.
#### Non-administrators
In GitLab 15.2 and later, you can use either the Rails console or the
You can use either the Rails console or the
[API endpoint](../api/users.md#disable-two-factor-authentication-for-a-user) to disable 2FA
for a non-administrator.
@ -245,12 +245,6 @@ You cannot use the API endpoint to disable 2FA for administrators.
{{< /details >}}
{{< history >}}
- [Introduced](https://gitlab.com/groups/gitlab-org/-/epics/9484) in GitLab 15.8.
{{< /history >}}
Top-level group Owners can disable two-factor authentication (2FA) for enterprise users.
To disable 2FA:
@ -271,16 +265,9 @@ You can also [use the API](../api/group_enterprise_users.md#disable-two-factor-a
{{< /details >}}
{{< history >}}
- It's deployed behind a feature flag, disabled by default.
- Push notification support [introduced](https://gitlab.com/gitlab-org/gitlab-shell/-/issues/506) in GitLab 15.3.
{{< /history >}}
{{< alert type="flag" >}}
On GitLab Self-Managed, by default this feature is not available. To make it available, an administrator can [enable the feature flag](../administration/feature_flags/_index.md) named `two_factor_for_cli`. On GitLab.com and GitLab Dedicated, this feature is not available. This feature is not ready for production use. This feature flag also affects [session duration for Git Operations when 2FA is enabled](../administration/settings/account_and_limit_settings.md#customize-session-duration-for-git-operations-when-2fa-is-enabled).
By default this feature is not available. To make it available, an administrator can [enable the feature flag](../administration/feature_flags/_index.md) named `two_factor_for_cli`. This feature is not ready for production use. This feature flag also affects [session duration for Git Operations when 2FA is enabled](../administration/settings/account_and_limit_settings.md#customize-session-duration-for-git-operations-when-2fa-is-enabled).
{{< /alert >}}
@ -296,7 +283,7 @@ ssh git@<hostname> 2fa_verify
Then authenticate by either:
- Entering the correct OTP.
- In GitLab 15.3 and later, responding to a device push notification if
- Responding to a device push notification if
[FortiAuthenticator is enabled](../user/profile/account/two_factor_authentication.md#enable-a-one-time-password-authenticator-using-fortiauthenticator).
After successful authentication, you can perform Git over SSH operations for 15 minutes (default) with the associated

View File

@ -53,6 +53,88 @@ JavaScript, such as single-page applications. See
To add the analyzer to your CI/CD pipeline, see [enabling the analyzer](configuration/enabling_the_analyzer.md).
## Getting started
If you're new to DAST, get started by enabling it for a project.
Prerequisites:
- You have a [GitLab Runner](../../../../ci/runners/_index.md) with the
[`docker` executor](https://docs.gitlab.com/runner/executors/docker.html) on Linux/amd64.
- You have a deployed target application. For more details, see the [deployment options](application_deployment_options.md).
- The `dast` stage is added to the CI/CD pipeline definition, after the `deploy` stage. For example:
```yaml
stages:
- build
- test
- deploy
- dast
```
- You have a network connection between the runner and your target application.
How you connect depends on your DAST configuration:
- If `DAST_TARGET_URL` and `DAST_AUTH_URL` specify port numbers, use those ports.
- If ports are not specified, use the standard port numbers for HTTP and HTTPS.
You might need to open both an HTTP and HTTPS port. For example, if the target URL uses HTTP, but the application links to resources using HTTPS. Always test your connection when you configure a scan.
To enable DAST in a project:
- [Add a DAST job to your CI/CD configuration](configuration/enabling_the_analyzer.md#create-a-dast-cicd-job).
## Understanding the results
You can review vulnerabilities in a pipeline:
1. On the left sidebar, select **Search or go to** and find your project.
1. On the left sidebar, select **Build > Pipelines**.
1. Select the pipeline.
1. Select the **Security** tab.
1. Select a vulnerability to view its details, including:
- Status: Indicates whether the vulnerability has been triaged or resolved.
- Description: Explains the cause of the vulnerability, its potential impact, and recommended remediation steps.
- Severity: Categorized into six levels based on impact.
[Learn more about severity levels](../../vulnerabilities/severities.md).
- Scanner: Identifies which analyzer detected the vulnerability.
- Method: Establishes the vulnerable server interaction type.
- URL: Shows the location of the vulnerability.
- Evidence: Describes the test case used to prove the presence of a given vulnerability.
- Identifiers: A list of references used to classify the vulnerability, such as CWE identifiers.
You can also download the security scan results:
- In the pipeline's **Security** tab, select **Download results**.
For more details, see [Pipeline security report](../../vulnerability_report/pipeline.md).
{{< alert type="note" >}}
Findings are generated on feature branches. When they are merged into the default branch, they become vulnerabilities. This distinction is important when evaluating your security posture.
{{< /alert >}}
## Optimization
For information about configuring DAST for a specific application or environment, see the [configuration options](configuration/_index.md).
## Roll out
After you configure DAST for a single project, you can extend the configuration to other projects:
- Take care if your pipeline is configured to deploy to the same web server in each run. Running a DAST scan while a server is being updated leads to inaccurate and non-deterministic results.
- Configure runners to use the [always pull policy](https://docs.gitlab.com/runner/executors/docker.html#using-the-always-pull-policy) to run the latest versions of the analyzers.
- By default, DAST downloads all artifacts defined by previous jobs in the pipeline. If
your DAST job does not rely on `environment_url.txt` to define the URL under test or any other files created
in previous jobs, you shouldn't download artifacts. To avoid downloading
artifacts, extend the analyzer CI/CD job to specify no dependencies. For example, for the DAST proxy-based analyzer add the following to your `.gitlab-ci.yml` file:
```yaml
dast:
dependencies: []
```
## How DAST scans an application
A scan performs the following steps:

View File

@ -0,0 +1,77 @@
---
type: reference, howto
stage: Application Security Testing
group: Dynamic Analysis
info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments
title: Application deployment options
---
DAST requires a deployed application to be available to scan.
Depending on the complexity of the target application, there are a few options as to how to deploy and configure
the DAST template. A set of example applications have been provided with their configurations in the
[DAST demonstrations](https://gitlab.com/gitlab-org/security-products/demos/dast/) project.
## Review apps
Review apps are the most involved method of deploying your DAST target application. To assist in the process,
GitLab created a review app deployment using Google Kubernetes Engine (GKE). This example can be found in the
[Review apps - GKE](https://gitlab.com/gitlab-org/security-products/demos/dast/review-app-gke) project, plus detailed
instructions to configure review apps for DAST in the [README](https://gitlab.com/gitlab-org/security-products/demos/dast/review-app-gke/-/blob/master/README.md).
## Docker Services
If your application uses Docker containers you have another option for deploying and scanning with DAST.
After your Docker build job completes and your image is added to your container registry, you can use the image as a
[service](../../../../ci/services/_index.md).
By using service definitions in your `.gitlab-ci.yml`, you can scan services with the DAST analyzer.
When adding a `services` section to the job, the `alias` is used to define the hostname that can be used to access the service. In the following example, the `alias: yourapp` portion of the `dast` job definition means that the URL to the deployed application uses `yourapp` as the hostname (`https://yourapp/`).
```yaml
stages:
- build
- dast
include:
- template: DAST.gitlab-ci.yml
# Deploys the container to the GitLab container registry
deploy:
services:
- name: docker:dind
alias: dind
image: docker:20.10.16
stage: build
script:
- docker login -u gitlab-ci-token -p $CI_JOB_TOKEN $CI_REGISTRY
- docker pull $CI_REGISTRY_IMAGE:latest || true
- docker build --tag $CI_REGISTRY_IMAGE:$CI_COMMIT_SHA --tag $CI_REGISTRY_IMAGE:latest .
- docker push $CI_REGISTRY_IMAGE:$CI_COMMIT_SHA
- docker push $CI_REGISTRY_IMAGE:latest
dast:
services: # use services to link your app container to the dast job
- name: $CI_REGISTRY_IMAGE:$CI_COMMIT_SHA
alias: yourapp
variables:
DAST_TARGET_URL: https://yourapp
DAST_FULL_SCAN: "true" # do a full scan
DAST_BROWSER_SCAN: "true" # use the browser-based GitLab DAST crawler
```
Most applications depend on multiple services such as databases or caching services. By default, services defined in the `services` fields cannot communicate
with one another. To allow communication between services, enable the `FF_NETWORK_PER_BUILD` [feature flag](https://docs.gitlab.com/runner/configuration/feature-flags.html#available-feature-flags).
```yaml
variables:
FF_NETWORK_PER_BUILD: "true" # enable network per build so all services can communicate on the same network
services: # use services to link the container to the dast job
- name: mongo:latest
alias: mongo
- name: $CI_REGISTRY_IMAGE:$CI_COMMIT_SHA
alias: yourapp
```

View File

@ -6,7 +6,7 @@ info: To determine the technical writer assigned to the Stage/Group associated w
title: Configuration
---
- [Requirements](requirements.md)
- [Requirements](../_index.md)
- [Enabling the analyzer](enabling_the_analyzer.md)
- [Customize analyzer settings](customize_settings.md)
- [Overriding analyzer jobs](overriding_analyzer_jobs.md)

View File

@ -8,7 +8,7 @@ title: Enabling the analyzer
To run a DAST scan:
- Read the [requirements](requirements.md) conditions for running a DAST scan.
- Read the [requirements](../_index.md) conditions for running a DAST scan.
- Create a [DAST job](#create-a-dast-cicd-job) in your CI/CD pipeline.
- [Authenticate](authentication.md) as a user if your application requires it.

View File

@ -21,7 +21,7 @@ successfully run. For more information, see [Offline environments](../../../offl
You can use any version of DAST in an offline environment. To do this, you need:
- GitLab Runner with the [`docker` or `kubernetes` executor](requirements.md).
- GitLab Runner with the [`docker` or `kubernetes` executor](../_index.md).
The runner must have network access to the target application.
- Docker container registry with a locally available copy of the DAST
[container image](https://gitlab.com/security-products/dast), found in the

View File

@ -1,115 +1,13 @@
---
type: reference, howto
stage: Application Security Testing
group: Dynamic Analysis
info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments
title: Requirements
redirect_to: '../application_deployment_options.md'
remove_date: '2025-09-17'
---
- [GitLab Runner](../../../../../ci/runners/_index.md) available, with the
[`docker` executor](https://docs.gitlab.com/runner/executors/docker.html) on Linux/amd64.
- Target application deployed. For more details, read [Deployment options](#application-deployment-options).
- `dast` stage added to the CI/CD pipeline definition. This should be added after the deploy step, for example:
<!-- markdownlint-disable -->
```yaml
stages:
- build
- test
- deploy
- dast
```
This document was moved to [another location](../application_deployment_options.md).
- A network connection between the runner and your target application.
How you connect depends on your DAST configuration:
- If `DAST_TARGET_URL` and `DAST_AUTH_URL` specify port numbers, use those ports.
- If ports are not specified, use the standard port numbers for HTTP and HTTPS.
You might need to open both an HTTP and HTTPS port. For example, if the target URL uses HTTP, but the application links to resources using HTTPS. Always test your connection when you configure a scan.
## Recommendations
- Take care if your pipeline is configured to deploy to the same web server in each run. Running a DAST scan while a server is being updated leads to inaccurate and non-deterministic results.
- Configure runners to use the [always pull policy](https://docs.gitlab.com/runner/executors/docker.html#using-the-always-pull-policy) to run the latest versions of the analyzers.
- By default, DAST downloads all artifacts defined by previous jobs in the pipeline. If
your DAST job does not rely on `environment_url.txt` to define the URL under test or any other files created
in previous jobs, we recommend you don't download artifacts. To avoid downloading
artifacts, extend the analyzer CI/CD job to specify no dependencies. For example, for the DAST proxy-based analyzer add the following to your `.gitlab-ci.yml` file:
```yaml
dast:
dependencies: []
```
## Application deployment options
DAST requires a deployed application to be available to scan.
Depending on the complexity of the target application, there are a few options as to how to deploy and configure
the DAST template. A set of example applications have been provided with their configurations in the
[DAST demonstrations](https://gitlab.com/gitlab-org/security-products/demos/dast/) project.
### Review apps
Review apps are the most involved method of deploying your DAST target application. To assist in the process,
we created a Review App deployment using Google Kubernetes Engine (GKE). This example can be found in our
[Review apps - GKE](https://gitlab.com/gitlab-org/security-products/demos/dast/review-app-gke) project, along with detailed
instructions in the [README.md](https://gitlab.com/gitlab-org/security-products/demos/dast/review-app-gke/-/blob/master/README.md)
on how to configure review apps for DAST.
### Docker Services
If your application uses Docker containers you have another option for deploying and scanning with DAST.
After your Docker build job completes and your image is added to your container registry, you can use the image as a
[service](../../../../../ci/services/_index.md).
By using service definitions in your `.gitlab-ci.yml`, you can scan services with the DAST analyzer.
When adding a `services` section to the job, the `alias` is used to define the hostname that can be used to access the service. In the following example, the `alias: yourapp` portion of the `dast` job definition means that the URL to the deployed application uses `yourapp` as the hostname (`https://yourapp/`).
```yaml
stages:
- build
- dast
include:
- template: DAST.gitlab-ci.yml
# Deploys the container to the GitLab container registry
deploy:
services:
- name: docker:dind
alias: dind
image: docker:20.10.16
stage: build
script:
- docker login -u gitlab-ci-token -p $CI_JOB_TOKEN $CI_REGISTRY
- docker pull $CI_REGISTRY_IMAGE:latest || true
- docker build --tag $CI_REGISTRY_IMAGE:$CI_COMMIT_SHA --tag $CI_REGISTRY_IMAGE:latest .
- docker push $CI_REGISTRY_IMAGE:$CI_COMMIT_SHA
- docker push $CI_REGISTRY_IMAGE:latest
dast:
services: # use services to link your app container to the dast job
- name: $CI_REGISTRY_IMAGE:$CI_COMMIT_SHA
alias: yourapp
variables:
DAST_TARGET_URL: https://yourapp
DAST_FULL_SCAN: "true" # do a full scan
DAST_BROWSER_SCAN: "true" # use the browser-based GitLab DAST crawler
```
Most applications depend on multiple services such as databases or caching services. By default, services defined in the `services` fields cannot communicate
with one another. To allow communication between services, enable the `FF_NETWORK_PER_BUILD` [feature flag](https://docs.gitlab.com/runner/configuration/feature-flags.html#available-feature-flags).
```yaml
variables:
FF_NETWORK_PER_BUILD: "true" # enable network per build so all services can communicate on the same network
services: # use services to link the container to the dast job
- name: mongo:latest
alias: mongo
- name: $CI_REGISTRY_IMAGE:$CI_COMMIT_SHA
alias: yourapp
```
<!-- This redirect file can be deleted after <2025-09-17>. -->
<!-- Redirects that point to other docs in the same project expire in three months. -->
<!-- Redirects that point to docs in a different project or site (for example, link is not relative and starts with `https:`) expire in one year. -->
<!-- Before deletion, see: https://docs.gitlab.com/development/documentation/redirects -->

View File

@ -606,6 +606,7 @@ the only jobs that run are the pipeline execution policy jobs.
{{< history >}}
- Updated handling of workflow rules [introduced](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/175088) in GitLab 17.8 [with a flag](../../../administration/feature_flags/_index.md) named `policies_always_override_project_ci`. Enabled by default.
- Updated [handling of `override_project_ci`](https://gitlab.com/gitlab-org/gitlab/-/issues/504434) to allow scan execution policies to run together with pipeline execution policies, in GitLab 17.9.
- Updated handling of workflow rules [generally available](https://gitlab.com/gitlab-org/gitlab/-/issues/512877) in GitLab 17.10. Feature flag `policies_always_override_project_ci` removed.
{{< /history >}}
@ -614,6 +615,9 @@ This strategy replaces the project's existing CI/CD configuration with a new one
The strategy takes precedence over other policies that use the `inject_ci` or `inject_policy` strategy. If a policy with `override_project_ci` applies, the project CI/CD configuration is ignored. However, other security policy configurations are not overridden.
When you use `override_project_ci` in a pipeline execution policy together with a scan execution policy,
the CI/CD configurations are merged and both policies are applied to the resulting pipeline.
Alternatively, you can merge the project's CI/CD configuration with the project's `.gitlab-ci.yml` instead of overriding it. To merge the configuration, use `include:project`. This strategy allows users to include the project CI/CD configuration in the pipeline execution policy configuration, enabling the users to customize the policy jobs. For example, they can combine the policy and project CI/CD configuration into one YAML file to override the `before_script` configuration or define required variables, such as `CS_IMAGE`, to define the required path to the container to scan. Here's a [short demo](https://youtu.be/W8tubneJ1X8) of this behavior.
The following diagram illustrates how variables defined at the project and policy levels are selected in the resulting pipeline:
@ -792,23 +796,6 @@ By default, to prevent a regular pipeline from triggering, users can push a comm
For more flexible control over `[skip ci]` behavior, see the [`skip_ci` type](#skip_ci-type) section.
## Interaction with scan execution policies
Using pipeline execution policies with the `override_project_ci` strategy can affect the behavior of [scan execution policies](scan_execution_policies.md).
The scan execution policy can be overridden if all of these are true:
- A scan execution policy is configured for the project.
- A pipeline execution policy is configured for the project.
- The pipeline execution policy uses the `override_project_ci` strategy.
The scan execution policy is ignored because the `override_project_ci` strategy removes all CI/CD configuration that is defined for the project, including policies.
To ensure that both pipeline execution policies and scan execution policies are applied:
- Consider using a different strategy for pipeline execution policies, such as `inject_policy`.
- If you must use `override_project_ci`, include the scanner templates that you require in your pipeline execution policy to maintain the desired security scans.
## Examples
These examples demonstrate what you can achieve with pipeline execution policies.

View File

@ -48,7 +48,6 @@ If any of the following cases are true, use [pipeline execution policies](pipeli
- You can assign a maximum of five rules to each policy.
- You can assign a maximum of five scan execution policies to each security policy project.
- Local project YAML files cannot override scan execution policies. These policies take precedence over any configurations defined for a pipeline, even if you use the same job name in your project's CI/CD configuration.
- Pipeline execution policies can [override](pipeline_execution_policies.md#interaction-with-scan-execution-policies) scan execution policies when you use the `override_project_ci` strategy.
## Jobs

View File

@ -20,15 +20,31 @@ This page describes where the AI gateway is deployed, and answers questions abou
## Region support
For GitLab Self-Managed and Dedicated customers, the ability to choose the region is planned for future implementation. Currently, the process for region selection is managed internally by GitLab.
### GitLab Self-Managed and GitLab Dedicated
Runway is currently not available to external customers. GitLab is working on expanding support to include GitLab Self-Managed instances in the future (Epic: [Expand Platform Engineering to more runtimes](https://gitlab.com/groups/gitlab-com/gl-infra/-/epics/1330)).
For GitLab Self-Managed and GitLab Dedicated customers, region selection
is managed internally by GitLab.
[View the available regions](https://gitlab-com.gitlab.io/gl-infra/platform/runway/runwayctl/manifest.schema.html#spec_regions).
[View the available regions](https://gitlab-com.gitlab.io/gl-infra/platform/runway/runwayctl/manifest.schema.html#spec_regions) in the [Runway](https://gitlab.com/gitlab-com/gl-infra/platform/runway) service manifest.
For GitLab.com customers, the current routing mechanism is based on the location of the GitLab instance, not the user's location. As GitLab.com is currently single-homed in `us-east1`, requests to the AI gateway are routed to `us-east4` in almost all cases. This means that the routing may not always result in the absolute nearest deployment for every user.
Runway is the GitLab internal developer platform. It is not available to external
customers. Support for improvements to GitLab Self-Managed instances is proposed in
[epic 1330](https://gitlab.com/groups/gitlab-com/gl-infra/-/epics/1330).
The IDE communicates directly with the AI gateway by default, bypassing the GitLab monolith. This direct connection improves routing efficiency. To change this, you can [configure direct and indirect connections](../project/repository/code_suggestions/_index.md#direct-and-indirect-connections).
### GitLab.com
For GitLab.com customers, the routing mechanism is based on the GitLab instance
location, instead of the user's instance location.
Because GitLab.com is currently single-homed in `us-east1`, requests to the AI gateway
are routed to `us-east4` in almost all cases. This means that the routing might
not always result in the absolute nearest deployment for every user.
### Direct and indirect connections
The IDE communicates directly with the AI gateway by default, bypassing the GitLab
monolith. This direct connection improves routing efficiency. To change this, you can
[configure direct and indirect connections](../project/repository/code_suggestions/_index.md#direct-and-indirect-connections).
### Automatic routing
@ -36,7 +52,8 @@ GitLab leverages Cloudflare and Google Cloud Platform (GCP) load balancers to ro
gateway requests to the nearest available deployment automatically. This routing
mechanism prioritizes low latency and efficient processing of user requests.
You cannot manually control this routing process. The system dynamically selects the optimal region based on factors like network conditions and server load.
You cannot manually control this routing process. The system dynamically selects the
optimal region based on factors like network conditions and server load.
### Tracing requests to specific regions

View File

@ -115,7 +115,7 @@ To use CI/CD to authenticate with the container registry, you can use:
### `docker login` command fails with `access forbidden`
The container registry [returns the GitLab API URL to the Docker client](../../../administration/packages/container_registry.md#architecture-of-gitlab-container-registry)
The container registry [returns the GitLab API URL to the Docker client](../../../administration/packages/container_registry.md#container-registry-architecture)
to validate credentials. The Docker client uses basic auth, so the request contains
the `Authorization` header. If the `Authorization` header is missing in the request to the
`/jwt/auth` endpoint configured in the `token_realm` for the registry configuration,

View File

@ -31597,6 +31597,9 @@ msgstr ""
msgid "HomepageActivityWidget|The activity feed is not available. Please refresh the page to try again."
msgstr ""
msgid "Homepage|The number of merge requests is not available. Please refresh the page to try again."
msgstr ""
msgid "Hook execution failed. %{error}"
msgstr ""
@ -42400,9 +42403,6 @@ msgstr ""
msgid "Number of files touched"
msgstr ""
msgid "Number of merge requests not available"
msgstr ""
msgid "Number of namespaces per indexing rollout"
msgstr ""
@ -62625,9 +62625,6 @@ msgstr ""
msgid "The number of direct members in the current group. Members in subgroups are not included. %{link_start}What is a direct member%{link_end}?"
msgstr ""
msgid "The number of merge requests is not available. Please refresh the page to try again."
msgstr ""
msgid "The number of merge requests merged by month."
msgstr ""

View File

@ -1,4 +1,5 @@
import { shallowMount } from '@vue/test-utils';
import { nextTick } from 'vue';
import { shallowMountExtended } from 'helpers/vue_test_utils_helper';
import HomepageApp from '~/homepage/components/homepage_app.vue';
import MergeRequestsWidget from '~/homepage/components/merge_requests_widget.vue';
import WorkItemsWidget from '~/homepage/components/work_items_widget.vue';
@ -13,9 +14,11 @@ describe('HomepageApp', () => {
const findMergeRequestsWidget = () => wrapper.findComponent(MergeRequestsWidget);
const findWorkItemsWidget = () => wrapper.findComponent(WorkItemsWidget);
const findMergeRequestsFetchMetadataError = () =>
wrapper.findByTestId('merge-requests-fetch-metadata-error');
function createWrapper() {
wrapper = shallowMount(HomepageApp, {
wrapper = shallowMountExtended(HomepageApp, {
propsData: {
reviewRequestedPath: MOCK_MERGE_REQUESTS_REVIEW_REQUESTED_PATH,
assignedMergeRequestsPath: MOCK_ASSIGNED_MERGE_REQUESTS_PATH,
@ -29,10 +32,32 @@ describe('HomepageApp', () => {
createWrapper();
});
it('passes the correct props to the `MergeRequestsWidget` component', () => {
expect(findMergeRequestsWidget().props()).toEqual({
reviewRequestedPath: MOCK_MERGE_REQUESTS_REVIEW_REQUESTED_PATH,
assignedToYouPath: MOCK_ASSIGNED_MERGE_REQUESTS_PATH,
describe('MergeRequestsWidget', () => {
it('passes the correct props to the `MergeRequestsWidget` component', () => {
expect(findMergeRequestsWidget().props()).toEqual({
reviewRequestedPath: MOCK_MERGE_REQUESTS_REVIEW_REQUESTED_PATH,
assignedToYouPath: MOCK_ASSIGNED_MERGE_REQUESTS_PATH,
});
});
it('shows an alert of if `MergeRequestsWidget` fails to fetch the metadata', async () => {
expect(findMergeRequestsFetchMetadataError().exists()).toBe(false);
findMergeRequestsWidget().vm.$emit('fetch-metadata-error');
await nextTick();
expect(findMergeRequestsFetchMetadataError().text()).toBe(
'The number of merge requests is not available. Please refresh the page to try again.',
);
});
it('hides the alert on dismiss', async () => {
findMergeRequestsWidget().vm.$emit('fetch-metadata-error');
await nextTick();
findMergeRequestsFetchMetadataError().vm.$emit('dismiss');
await nextTick();
expect(findMergeRequestsFetchMetadataError().exists()).toBe(false);
});
});

View File

@ -6,11 +6,12 @@ import waitForPromises from 'helpers/wait_for_promises';
import createMockApollo from 'helpers/mock_apollo_helper';
import { useFakeDate } from 'helpers/fake_date';
import MergeRequestsWidget from '~/homepage/components/merge_requests_widget.vue';
import * as Sentry from '~/sentry/sentry_browser_wrapper';
import mergeRequestsWidgetMetadataQuery from '~/homepage/graphql/queries/merge_requests_widget_metadata.query.graphql';
import { createAlert, VARIANT_WARNING } from '~/alert';
import { withItems, withoutItems } from './mocks/merge_requests_widget_metadata_query_mocks';
jest.mock('~/alert');
jest.mock('~/sentry/sentry_browser_wrapper');
describe('MergeRequestsWidget', () => {
Vue.use(VueApollo);
@ -101,25 +102,22 @@ describe('MergeRequestsWidget', () => {
expect(findAssignedCount().text()).toBe('0');
});
it('shows an alert if the query errors out', async () => {
it('emits the `fetch-metadata-error` event if the query errors out', async () => {
createWrapper({
mergeRequestsWidgetMetadataQueryHandler: () => jest.fn().mockRejectedValue(),
});
expect(wrapper.emitted('fetch-metadata-error')).toBeUndefined();
await waitForPromises();
expect(wrapper.emitted('fetch-metadata-error')).toHaveLength(1);
expect(Sentry.captureException).toHaveBeenCalled();
expect(findReviewRequestedLastUpdatedAt().exists()).toBe(false);
expect(findAssignedLastUpdatedAt().exists()).toBe(false);
expect(findReviewRequestedCount().text()).toBe('-');
expect(findAssignedCount().text()).toBe('-');
expect(createAlert).toHaveBeenCalledWith({
error: expect.any(Object),
title: 'Number of merge requests not available',
message:
'The number of merge requests is not available. Please refresh the page to try again.',
variant: VARIANT_WARNING,
});
});
});
});

View File

@ -31,25 +31,19 @@ RSpec.describe Milestone, 'Milestoneish', factory_default: :keep do
project.add_guest(guest)
end
describe '#sorted_issues' do
before do
issue.labels << label_1
security_issue_1.labels << label_2
closed_issue_1.labels << label_3
end
it 'sorts issues by label priority' do
issues = milestone.sorted_issues(member)
describe '#milestone_issues' do
it 'returns issues' do
issues = milestone.milestone_issues(member)
expect(issues.first).to eq(issue)
expect(issues.second).to eq(security_issue_1)
expect(issues.third).not_to eq(closed_issue_1)
end
it 'limits issue count and keeps the ordering' do
it 'limits issue count' do
stub_const('Milestoneish::DISPLAY_ISSUES_LIMIT', 4)
issues = milestone.sorted_issues(member)
issues = milestone.milestone_issues(member)
# Cannot use issues.count here because it is sorting
# by a virtual column 'highest_priority' and it will break
# the query.
@ -57,9 +51,6 @@ RSpec.describe Milestone, 'Milestoneish', factory_default: :keep do
expect(issues.length).to eq(4)
expect(total_issues_count).to eq(4)
expect(issues.first).to eq(issue)
expect(issues.second).to eq(security_issue_1)
expect(issues.third).not_to eq(closed_issue_1)
end
end
@ -152,20 +143,6 @@ RSpec.describe Milestone, 'Milestoneish', factory_default: :keep do
end
end
describe '#sorted_merge_requests' do
it 'sorts merge requests by label priority' do
merge_request_1 = create(:labeled_merge_request, labels: [label_2], source_project: project, source_branch: 'branch_1', milestone: milestone)
merge_request_2 = create(:labeled_merge_request, labels: [label_1], source_project: project, source_branch: 'branch_2', milestone: milestone)
merge_request_3 = create(:labeled_merge_request, labels: [label_3], source_project: project, source_branch: 'branch_3', milestone: milestone)
merge_requests = milestone.sorted_merge_requests(member)
expect(merge_requests.first).to eq(merge_request_2)
expect(merge_requests.second).to eq(merge_request_1)
expect(merge_requests.third).to eq(merge_request_3)
end
end
describe '#merge_requests_visible_to_user' do
context 'when project is private' do
before do

View File

@ -139,6 +139,73 @@ RSpec.describe 'Bulk update work items', feature_category: :team_planning do
end
end
context 'when updating state_event' do
let(:additional_arguments) do
{
'stateEvent' => 'CLOSE'
}
end
it 'closes all work items' do
expect do
post_graphql_mutation(mutation, current_user: current_user)
updatable_work_items.each(&:reload)
end.to change { updatable_work_items.map(&:state) }.from(%w[opened opened]).to(%w[closed closed])
expect(mutation_response).to include(
'updatedWorkItemCount' => updatable_work_items.count
)
end
end
context 'when updating subscription_event' do
let(:additional_arguments) do
{
'subscriptionEvent' => 'SUBSCRIBE'
}
end
it 'subscribes current user to all work items' do
post_graphql_mutation(mutation, current_user: current_user)
expect(mutation_response).to include(
'updatedWorkItemCount' => updatable_work_items.count
)
expect(Subscription.where(user: current_user, subscribed: true, subscribable: updatable_work_items).count)
.to eq(updatable_work_items.count)
expect(updatable_work_items.all? { |w| w.subscribed?(current_user, project) }).to be_truthy
end
context 'when unsubscribing' do
before do
updatable_work_items.each do |work_item|
work_item.subscribe(current_user, project)
end
end
let(:additional_arguments) do
{
'subscriptionEvent' => 'UNSUBSCRIBE'
}
end
it 'unsubscribes current user from all work items' do
post_graphql_mutation(mutation, current_user: current_user)
expect(mutation_response).to include(
'updatedWorkItemCount' => updatable_work_items.count
)
updatable_work_items.each do |work_item|
work_item.reload
expect(work_item.subscriptions.where(user: current_user, subscribed: true)).not_to exist
expect(work_item.subscribed?(current_user, project)).to be_falsy
end
end
end
end
context 'when updating multiple attributes simultaneously' do
let_it_be(:assignee) { create(:user, developer_of: group) }
let_it_be(:milestone) { create(:milestone, project: project) }
@ -154,7 +221,9 @@ RSpec.describe 'Bulk update work items', feature_category: :team_planning do
},
'labelsWidget' => {
'addLabelIds' => [label2.to_gid.to_s]
}
},
'subscriptionEvent' => 'SUBSCRIBE',
'stateEvent' => 'CLOSE'
}
end
@ -166,6 +235,8 @@ RSpec.describe 'Bulk update work items', feature_category: :team_planning do
.and change { updatable_work_items.flat_map(&:assignee_ids) }.from([]).to([assignee.id] * 2)
.and change { updatable_work_items.map(&:milestone_id) }.from([nil, nil]).to([milestone.id] * 2)
.and change { updatable_work_items.flat_map(&:label_ids) }.from([label1.id] * 2).to([label1.id, label2.id] * 2)
.and change { updatable_work_items.map(&:state) }.from(%w[opened opened]).to(%w[closed closed])
.and change { updatable_work_items.all? { |wi| wi.subscribed?(current_user, project) } }.from(false).to(true)
expect(mutation_response).to include(
'updatedWorkItemCount' => updatable_work_items.count