Add latest changes from gitlab-org/gitlab@master

Author: GitLab Bot
Date: 2021-06-09 12:10:27 +00:00
Parent: 82a546b14c
Commit: f4d6d3ec77
43 changed files with 297 additions and 271 deletions

View File

@ -1 +1 @@
1.39.0
1.40.0

View File

@ -36,6 +36,7 @@ export const ListTypeTitles = {
milestone: __('Milestone'),
iteration: __('Iteration'),
label: __('Label'),
backlog: __('Open'),
};
export const formType = {

View File

@ -12,6 +12,7 @@ import {
updateListQueries,
issuableTypes,
FilterFields,
ListTypeTitles,
} from 'ee_else_ce/boards/constants';
import createBoardListMutation from 'ee_else_ce/boards/graphql/board_list_create.mutation.graphql';
import issueMoveListMutation from 'ee_else_ce/boards/graphql/issue_move_list.mutation.graphql';
@ -169,8 +170,11 @@ export default {
});
},
addList: ({ commit }, list) => {
addList: ({ commit, dispatch, getters }, list) => {
commit(types.RECEIVE_ADD_LIST_SUCCESS, updateListPosition(list));
dispatch('fetchItemsForList', {
listId: getters.getListByTitle(ListTypeTitles.backlog).id,
});
},
fetchLabels: ({ state, commit, getters }, searchTerm) => {
@ -261,7 +265,7 @@ export default {
commit(types.TOGGLE_LIST_COLLAPSED, { listId, collapsed });
},
removeList: ({ state: { issuableType, boardLists }, commit }, listId) => {
removeList: ({ state: { issuableType, boardLists }, commit, dispatch, getters }, listId) => {
const listsBackup = { ...boardLists };
commit(types.REMOVE_LIST, listId);
@ -281,6 +285,10 @@ export default {
}) => {
if (errors.length > 0) {
commit(types.REMOVE_LIST_FAILURE, listsBackup);
} else {
dispatch('fetchItemsForList', {
listId: getters.getListByTitle(ListTypeTitles.backlog).id,
});
}
},
)
@ -290,6 +298,9 @@ export default {
},
fetchItemsForList: ({ state, commit }, { listId, fetchNext = false }) => {
if (!fetchNext) {
commit(types.RESET_ITEMS_FOR_LIST, listId);
}
commit(types.REQUEST_ITEMS_FOR_LIST, { listId, fetchNext });
const { fullPath, fullBoardId, boardType, filterParams } = state;

View File

@ -15,6 +15,7 @@ export const UPDATE_LIST_FAILURE = 'UPDATE_LIST_FAILURE';
export const TOGGLE_LIST_COLLAPSED = 'TOGGLE_LIST_COLLAPSED';
export const REMOVE_LIST = 'REMOVE_LIST';
export const REMOVE_LIST_FAILURE = 'REMOVE_LIST_FAILURE';
export const RESET_ITEMS_FOR_LIST = 'RESET_ITEMS_FOR_LIST';
export const REQUEST_ITEMS_FOR_LIST = 'REQUEST_ITEMS_FOR_LIST';
export const RECEIVE_ITEMS_FOR_LIST_FAILURE = 'RECEIVE_ITEMS_FOR_LIST_FAILURE';
export const RECEIVE_ITEMS_FOR_LIST_SUCCESS = 'RECEIVE_ITEMS_FOR_LIST_SUCCESS';

View File

@ -117,6 +117,11 @@ export default {
state.boardLists = listsBackup;
},
[mutationTypes.RESET_ITEMS_FOR_LIST]: (state, listId) => {
Vue.set(state, 'backupItemsList', state.boardItemsByListId[listId]);
Vue.set(state.boardItemsByListId, listId, []);
},
[mutationTypes.REQUEST_ITEMS_FOR_LIST]: (state, { listId, fetchNext }) => {
Vue.set(state.listsFlags, listId, { [fetchNext ? 'isLoadingMore' : 'isLoading']: true });
},
@ -138,6 +143,7 @@ export default {
'Boards|An error occurred while fetching the board issues. Please reload the page.',
);
Vue.set(state.listsFlags, listId, { isLoading: false, isLoadingMore: false });
Vue.set(state.boardItemsByListId, listId, state.backupItemsList);
},
[mutationTypes.RESET_ISSUES]: (state) => {

View File

@ -11,6 +11,7 @@ export default () => ({
boardLists: {},
listsFlags: {},
boardItemsByListId: {},
backupItemsList: [],
isSettingAssignees: false,
pageInfoByListId: {},
boardItems: {},

View File

@ -39,4 +39,12 @@ module TimeFrameArguments
raise Gitlab::Graphql::Errors::ArgumentError, error_message
end
end
def transform_timeframe_parameters(args)
if args[:timeframe]
args[:timeframe].transform_keys { |k| :"#{k}_date" }
else
args.slice(:start_date, :end_date)
end
end
end
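
For context, the helper hoisted into `TimeFrameArguments` only normalizes the two supported argument shapes onto the legacy `*_date` keys. A minimal sketch with hypothetical values (the logic is copied from the concern above; plain strings stand in for real GraphQL-coerced dates):

```ruby
# Sketch only: logic copied from the concern above, inputs are hypothetical.
def transform_timeframe_parameters(args)
  if args[:timeframe]
    args[:timeframe].transform_keys { |k| :"#{k}_date" }
  else
    args.slice(:start_date, :end_date)
  end
end

transform_timeframe_parameters(timeframe: { start: '2021-06-01', end: '2021-06-30' })
# => { start_date: "2021-06-01", end_date: "2021-06-30" }

transform_timeframe_parameters(start_date: '2021-06-01', end_date: '2021-06-30', state: 'opened')
# => { start_date: "2021-06-01", end_date: "2021-06-30" }
```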

View File

@ -44,15 +44,7 @@ module Resolvers
title: args[:title],
search_title: args[:search_title],
containing_date: args[:containing_date]
}.merge!(timeframe_parameters(args)).merge!(parent_id_parameters(args))
end
def timeframe_parameters(args)
if args[:timeframe]
args[:timeframe].transform_keys { |k| :"#{k}_date" }
else
args.slice(:start_date, :end_date)
end
}.merge!(transform_timeframe_parameters(args)).merge!(parent_id_parameters(args))
end
def parent

View File

@ -220,6 +220,10 @@ module IssuablesHelper
@show_full_reference ? issuable.to_reference(full: true) : issuable.to_reference(@group || @project)
end
def issuable_project_reference(issuable)
"#{issuable.project.full_name} #{issuable.to_reference}"
end
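
For illustration, the new `issuable_project_reference` helper simply joins the project's full name with the issuable's short reference. With hypothetical records it produces output like the following (the spec added later in this commit asserts the same shape):

```ruby
# Hypothetical values; `to_reference` yields `#iid` for issues and `!iid` for merge requests.
issuable_project_reference(issue)          # => "Gitlab Org / Gitlab Test #1"
issuable_project_reference(merge_request)  # => "Gitlab Org / Gitlab Test !3"
```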
def issuable_initial_data(issuable)
data = {
endpoint: issuable_path(issuable),

View File

@ -27,7 +27,6 @@ module MergeRequests
merge_requests_for_source_branch.each do |mr|
outdate_suggestions(mr)
refresh_pipelines_on_merge_requests(mr)
abort_auto_merges(mr)
mark_pending_todos_done(mr)
end
@ -44,6 +43,8 @@ module MergeRequests
notify_about_push(mr)
mark_mr_as_draft_from_commits(mr)
execute_mr_web_hooks(mr)
# Run at the end of the loop to avoid any potential contention on the MR object
refresh_pipelines_on_merge_requests(mr)
merge_request_activity_counter.track_mr_including_ci_config(user: mr.author, merge_request: mr)
end

View File

@ -46,7 +46,7 @@
%ul.content-list.all-branches
- @branches.each do |branch|
= render "projects/branches/branch", branch: branch, merged: @merged_branch_names.include?(branch.name), commit_status: @branch_pipeline_statuses[branch.name], show_commit_status: @branch_pipeline_statuses.any?
- if Feature.enabled?(:branches_pagination_without_count, @project, default_enabled: true)
- if Feature.enabled?(:branches_pagination_without_count, @project, default_enabled: :yaml)
= render('kaminari/gitlab/without_count', previous_path: @prev_path, next_path: @next_path)
- else
= paginate @branches, theme: 'gitlab'

View File

@ -5,6 +5,10 @@
= link_to issuable_path(issuable), data: { track_event: 'click_text', track_label: "#{issuable.class.name.downcase}_title", track_property: 'search_result' }, class: 'gl-w-full' do
%span.term.str-truncated.gl-font-weight-bold.gl-ml-2= issuable.title
.gl-text-gray-500.gl-my-3
= sprintf(s_(' %{project_name}#%{issuable_iid} · created %{issuable_created} by %{author} · updated %{issuable_updated}'), { project_name: issuable.project.full_name, issuable_iid: issuable.iid, issuable_created: time_ago_with_tooltip(issuable.created_at, placement: 'bottom'), issuable_updated: time_ago_with_tooltip(issuable.updated_at, placement: 'bottom'), author: link_to_member(@project, issuable.author, avatar: false) }).html_safe
= issuable_project_reference(issuable)
·
= sprintf(s_('created %{issuable_created} by %{author}'), { issuable_created: time_ago_with_tooltip(issuable.created_at, placement: 'bottom'), author: link_to_member(@project, issuable.author, avatar: false) }).html_safe
·
= sprintf(s_('updated %{time_ago}'), { time_ago: time_ago_with_tooltip(issuable.updated_at, placement: 'bottom') }).html_safe
.description.term.col-sm-10.gl-px-0
= highlight_and_truncate_issuable(issuable, @search_term, @search_highlight)

View File

@ -10,6 +10,7 @@ class BuildQueueWorker # rubocop:disable Scalability/IdempotentWorker
feature_category :continuous_integration
urgency :high
worker_resource_boundary :cpu
data_consistency :sticky, feature_flag: :load_balancing_for_build_queue_worker
# rubocop: disable CodeReuse/ActiveRecord
def perform(build_id)

View File

@ -5,4 +5,4 @@ rollout_issue_url:
milestone: '13.9'
type: development
group: group::source code
default_enabled: true
default_enabled: false

View File

@ -1,8 +1,8 @@
---
name: ingress_modsecurity
introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/20194
rollout_issue_url: https://gitlab.com/gitlab-org/gitlab/-/issues/258554
milestone: '12.5'
name: load_balancing_for_build_queue_worker
introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/63212
rollout_issue_url: https://gitlab.com/gitlab-org/gitlab/-/issues/332586
milestone: '14.0'
type: development
group: group::container security
group: group::memory
default_enabled: false

View File

@ -678,9 +678,6 @@ Gitlab.ee do
Settings.cron_jobs['sync_seat_link_worker'] ||= Settingslogic.new({})
Settings.cron_jobs['sync_seat_link_worker']['cron'] ||= "#{rand(60)} 3 * * * UTC"
Settings.cron_jobs['sync_seat_link_worker']['job_class'] = 'SyncSeatLinkWorker'
Settings.cron_jobs['web_application_firewall_metrics_worker'] ||= Settingslogic.new({})
Settings.cron_jobs['web_application_firewall_metrics_worker']['cron'] ||= '0 1 * * 0'
Settings.cron_jobs['web_application_firewall_metrics_worker']['job_class'] = 'IngressModsecurityCounterMetricsWorker'
Settings.cron_jobs['users_create_statistics_worker'] ||= Settingslogic.new({})
Settings.cron_jobs['users_create_statistics_worker']['cron'] ||= '2 15 * * *'
Settings.cron_jobs['users_create_statistics_worker']['job_class'] = 'Users::CreateStatisticsWorker'

View File

@ -7,7 +7,8 @@ product_stage: protect
product_group: group::container security
product_category: web_firewall
value_type: number
status: deprecated
status: removed
milestone_removed: 14.0
time_frame: all
data_source: database
distribution:

View File

@ -7,7 +7,8 @@ product_stage: protect
product_group: group::container security
product_category: web_firewall
value_type: number
status: deprecated
status: removed
milestone_removed: 14.0
time_frame: all
data_source: database
distribution:

View File

@ -6,7 +6,8 @@ product_stage: protect
product_group: group::container security
product_category: web_firewall
value_type: number
status: deprecated
status: removed
milestone_removed: 14.0
time_frame: all
data_source: database
distribution:

View File

@ -6,7 +6,8 @@ product_stage: protect
product_group: group::container security
product_category: web_firewall
value_type: number
status: deprecated
status: removed
milestone_removed: 14.0
time_frame: all
data_source: database
distribution:

View File

@ -6,7 +6,8 @@ product_stage: protect
product_group: group::container security
product_category: web_firewall
value_type: number
status: deprecated
status: removed
milestone_removed: 14.0
time_frame: all
data_source: database
distribution:

View File

@ -6,7 +6,8 @@ product_stage: protect
product_group: group::container security
product_category: web_firewall
value_type: number
status: deprecated
status: removed
milestone_removed: 14.0
time_frame: all
data_source: database
distribution:

View File

@ -6,7 +6,8 @@ product_stage: protect
product_group: group::container security
product_category: web_firewall
value_type: boolean
status: deprecated
status: removed
milestone_removed: 14.0
time_frame: none
data_source: system
distribution:

View File

@ -53,14 +53,22 @@ def message_for_feature_flag_with_group!(feature_flag:, mr_group_label:)
end
end
def feature_flag_file_added?
feature_flag.feature_flag_files(change_type: :added).any?
end
def feature_flag_file_added_or_removed?
feature_flag.feature_flag_files(change_type: :added).any? || feature_flag.feature_flag_files(change_type: :deleted).any?
feature_flag_file_added? || feature_flag.feature_flag_files(change_type: :deleted).any?
end
feature_flag.feature_flag_files(change_type: :added).each do |feature_flag|
check_feature_flag_yaml(feature_flag)
end
if helper.security_mr? && feature_flag_file_added?
fail "Feature flags are discouraged from security merge requests. Read the [security documentation](https://gitlab.com/gitlab-org/release/docs/-/blob/master/general/security/utilities/feature_flags.md) for details."
end
if feature_flag_file_added_or_removed?
new_mr_title = helper.mr_title.dup
new_mr_title << ' [RUN ALL RSPEC]' unless helper.run_all_rspec_mr?

View File

@ -80,6 +80,15 @@ within each node. The command will return an empty array if the cluster is healt
curl "http://127.0.0.1:8500/v1/health/state/critical"
```
If the Consul version has changed, you'll see a notice at the end of `gitlab-ctl reconfigure`
informing you that Consul needs to be restarted for the new version to be used.
Restart Consul one node at a time:
```shell
sudo gitlab-ctl restart consul
```
Consul nodes communicate using the raft protocol. If the current leader goes
offline, there needs to be a leader election. A leader node must exist to facilitate
synchronization across the cluster. If too many nodes go offline at the same time,

View File

@ -17,13 +17,13 @@ On each **secondary** site, there is a read-only replicated copy of the GitLab d
A **secondary** site also has a tracking database where it stores which projects have been synced.
Geo compares the two databases to find projects that are not yet tracked.
At the start, this tracking database is empty, so Geo will start trying to update from every project that it can see in the GitLab database.
At the start, this tracking database is empty, so Geo tries to update from every project that it can see in the GitLab database.
For each project to sync:
1. Geo will issue a `git fetch geo --mirror` to get the latest information from the **primary** site.
If there are no changes, the sync will be fast and end quickly. Otherwise, it will pull the latest commits.
1. The **secondary** site will update the tracking database to store the fact that it has synced projects A, B, C, etc.
1. Geo issues a `git fetch geo --mirror` to get the latest information from the **primary** site.
If there are no changes, the sync is fast. Otherwise, it has to pull the latest commits.
1. The **secondary** site updates the tracking database to store the fact that it has synced projects A, B, C, etc.
1. Repeat until all projects are synced.
When someone pushes a commit to the **primary** site, it generates an event in the GitLab database that the repository has changed.
@ -70,4 +70,4 @@ Yes. See [Docker Registry for a **secondary** site](docker_registry.md).
## Can I login to a secondary site?
Yes, but secondary sites receive all authentication data (like user accounts and logins) from the primary instance. This means you will be re-directed to the primary for authentication and routed back afterwards.
Yes, but secondary sites receive all authentication data (like user accounts and logins) from the primary instance. This means you are re-directed to the primary for authentication and then routed back.

View File

@ -864,10 +864,10 @@ Most tests for Elasticsearch logic relate to:
- Searching for that data.
- Ensuring that the test gives the expected result.
There are some exceptions, such as checking for structural changes rather than individual records in an index.
There are some exceptions, such as checking for structural changes rather than individual records in an index.
The `:elastic_with_delete_by_query` trait was added to reduce run time for pipelines by creating and deleting indices
at the start and end of each context only. The [Elasticsearch DeleteByQuery API](https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-delete-by-query.html)
at the start and end of each context only. The [Elasticsearch DeleteByQuery API](https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-delete-by-query.html)
is used to delete data in all indices in between examples to ensure a clean index.
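
A minimal sketch of how a spec might opt into that behavior, assuming the trait is applied as RSpec metadata on an example group (the metadata name is taken from the paragraph above; everything else is illustrative):

```ruby
# Hypothetical layout: indices are created and deleted once per context,
# and documents are cleaned between examples via the DeleteByQuery API.
RSpec.describe 'global issue search', :elastic_with_delete_by_query do
  it 'returns the indexed issue' do
    # index data, run the search, assert on the result
  end
end
```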
Note that Elasticsearch indexing uses [`Gitlab::Redis::SharedState`](../../../ee/development/redis.md#gitlabrediscachesharedstatequeues).
@ -994,6 +994,7 @@ Only use simple values as input in the `where` block. Using
objects, FactoryBot-created objects, and similar items can lead to
[unexpected results](https://github.com/tomykaira/rspec-parameterized/issues/8).
<!-- vale gitlab.Spelling = YES -->
### Prometheus tests
Prometheus metrics may be preserved from one test run to another. To ensure that metrics are

View File

@ -7134,7 +7134,7 @@ Whether or not ModSecurity is enabled within Ingress
Group: `group::container security`
Status: `deprecated`
Status: `removed`
Tiers: `free`, `premium`, `ultimate`

View File

@ -1354,7 +1354,6 @@ The following is example content of the Usage Ping payload.
"reply_by_email_enabled": "incoming+%{key}@incoming.gitlab.com",
"signup_enabled": true,
"web_ide_clientside_preview_enabled": true,
"ingress_modsecurity_enabled": true,
"projects_with_expiration_policy_disabled": 999,
"projects_with_expiration_policy_enabled": 999,
...

View File

@ -8,6 +8,7 @@ info: To determine the technical writer assigned to the Stage/Group associated w
> - [Introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/259669) in [GitLab Premium](https://about.gitlab.com/pricing/) 13.7.
> - [Introduced](https://gitlab.com/groups/gitlab-org/-/epics/3834) in GitLab 13.11, the Kubernetes Agent became available on GitLab.com.
> - [Introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/332227) in GitLab 14.0, the `resource_inclusions` and `resource_exclusions` attributes were removed.
WARNING:
This feature might not be available to you. Check the **version history** note above for details.
@ -38,16 +39,7 @@ with Kubernetes resource definitions in YAML or JSON format. The Agent monitors
each project you declare, and when the project changes, GitLab deploys the changes
using the Agent.
To use multiple YAML files, specify a `paths` attribute in the `gitops` section.
By default, the Agent monitors all
[Kubernetes object types](https://kubernetes.io/docs/concepts/overview/working-with-objects/kubernetes-objects/#required-fields).
You can exclude some types of resources from monitoring. This enables you to reduce
the permissions needed by the GitOps feature, through `resource_exclusions`.
To enable a specific named resource, first use `resource_inclusions` to enable desired resources.
The following file excerpt includes specific `api_groups` and `kinds`. The `resource_exclusions`
which follow excludes all other `api_groups` and `kinds`:
To use multiple YAML files, specify a `paths` attribute in the `gitops.manifest_projects` section.
```yaml
gitops:
@ -58,28 +50,6 @@ gitops:
# The `id` is a path to a Git repository with Kubernetes resource definitions
# in YAML or JSON format.
- id: gitlab-org/cluster-integration/gitlab-agent
# Holds the only API groups and kinds of resources that gitops will monitor.
# Inclusion rules are evaluated first, then exclusion rules.
# If there is still no match, resource is monitored.
# Resources: https://kubernetes.io/docs/concepts/overview/working-with-objects/kubernetes-objects/#required-fields
# Groups: https://kubernetes.io/docs/concepts/overview/kubernetes-api/#api-groups-and-versioning
resource_inclusions:
- api_groups:
- apps
kinds:
- '*'
- api_groups:
- ''
kinds:
- 'ConfigMap'
# Holds the API groups and kinds of resources to exclude from gitops watch.
# Inclusion rules are evaluated first, then exclusion rules.
# If there is still no match, resource is monitored.
resource_exclusions:
- api_groups:
- '*'
kinds:
- '*'
# Namespace to use if not set explicitly in object manifest.
default_namespace: my-ns
# Paths inside of the repository to scan for manifest files.
@ -93,6 +63,37 @@ gitops:
- glob: '/team2/apps/**/*.yaml'
# If 'paths' is not specified or is an empty list, the configuration below is used
- glob: '/**/*.{yaml,yml,json}'
# Reconcile timeout defines whether the applier should wait
# until all applied resources have been reconciled, and if so,
# how long to wait.
reconcile_timeout: 3600s # 1 hour by default
# Dry run strategy defines whether changes should actually be performed,
# or if it is just talk and no action.
# https://github.com/kubernetes-sigs/cli-utils/blob/d6968048dcd80b1c7b55d9e4f31fc25f71c9b490/pkg/common/common.go#L68-L89
# Can be: none, client, server
dry_run_strategy: none # 'none' by default
# Prune defines whether pruning of previously applied
# objects should happen after apply.
prune: true # enabled by default
# Prune timeout defines whether we should wait for all resources
# to be fully deleted after pruning, and if so, how long we should
# wait.
prune_timeout: 3600s # 1 hour by default
# Prune propagation policy defines the deletion propagation policy
# that should be used for pruning.
# https://github.com/kubernetes/apimachinery/blob/44113beed5d39f1b261a12ec398a356e02358307/pkg/apis/meta/v1/types.go#L456-L470
# Can be: orphan, background, foreground
prune_propagation_policy: foreground # 'foreground' by default
# InventoryPolicy defines if an inventory object can take over
# objects that belong to another inventory object or don't
# belong to any inventory object.
# This is done by determining if the apply/prune operation
# can go through for a resource based on the comparison
# the inventory-id value in the package and the owning-inventory
# annotation in the live object.
# https://github.com/kubernetes-sigs/cli-utils/blob/d6968048dcd80b1c7b55d9e4f31fc25f71c9b490/pkg/inventory/policy.go#L12-L66
# Can be: must_match, adopt_if_no_inventory, adopt_all
inventory_policy: must_match # 'must_match' by default
```
### Using multiple manifest projects

View File

@ -188,7 +188,6 @@ module Gitlab
services_usage,
usage_counters,
user_preferences_usage,
ingress_modsecurity_usage,
container_expiration_policies_usage,
service_desk_counts,
email_campaign_counts
@ -295,7 +294,6 @@ module Gitlab
reply_by_email_enabled: alt_usage_data(fallback: nil) { Gitlab::IncomingEmail.enabled? },
signup_enabled: alt_usage_data(fallback: nil) { Gitlab::CurrentSettings.allow_signup? },
web_ide_clientside_preview_enabled: alt_usage_data(fallback: nil) { Gitlab::CurrentSettings.web_ide_clientside_preview_enabled? },
ingress_modsecurity_enabled: Feature.enabled?(:ingress_modsecurity),
grafana_link_enabled: alt_usage_data(fallback: nil) { Gitlab::CurrentSettings.grafana_enabled? },
gitpod_enabled: alt_usage_data(fallback: nil) { Gitlab::CurrentSettings.gitpod_enabled? }
}
@ -377,29 +375,6 @@ module Gitlab
Gitlab::UsageData::Topology.new.topology_usage_data
end
# rubocop: disable UsageData/DistinctCountByLargeForeignKey
def ingress_modsecurity_usage
##
# This method measures usage of the Modsecurity Web Application Firewall across the entire
# instance's deployed environments.
#
# NOTE: this service is an approximation as it does not yet take into account if environment
# is enabled and only measures applications installed using GitLab Managed Apps (disregards
# CI-based managed apps).
#
# More details: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/28331#note_318621786
##
column = ::Deployment.arel_table[:environment_id]
{
ingress_modsecurity_logging: distinct_count(successful_deployments_with_cluster(::Clusters::Applications::Ingress.modsecurity_enabled.logging), column),
ingress_modsecurity_blocking: distinct_count(successful_deployments_with_cluster(::Clusters::Applications::Ingress.modsecurity_enabled.blocking), column),
ingress_modsecurity_disabled: distinct_count(successful_deployments_with_cluster(::Clusters::Applications::Ingress.modsecurity_disabled), column),
ingress_modsecurity_not_installed: distinct_count(successful_deployments_with_cluster(::Clusters::Applications::Ingress.modsecurity_not_installed), column)
}
end
# rubocop: enable UsageData/DistinctCountByLargeForeignKey
# rubocop: disable CodeReuse/ActiveRecord
def container_expiration_policies_usage
results = {}

View File

@ -19,9 +19,6 @@ msgstr ""
msgid " %{name}, confirm your email address now! "
msgstr ""
msgid " %{project_name}#%{issuable_iid} &middot; created %{issuable_created} by %{author} &middot; updated %{issuable_updated}"
msgstr ""
msgid " %{start} to %{end}"
msgstr ""
@ -21642,9 +21639,6 @@ msgstr ""
msgid "Multiple uploaders found: %{uploader_types}"
msgstr ""
msgid "Must have a unique policy, status, and elapsed time"
msgstr ""
msgid "Must match with the %{codeStart}external_url%{codeEnd} in %{codeStart}/etc/gitlab/gitlab.rb%{codeEnd}."
msgstr ""
@ -29153,6 +29147,9 @@ msgstr ""
msgid "SecurityReports|Error fetching the vulnerability list. Please check your network connection and try again."
msgstr ""
msgid "SecurityReports|Error parsing security reports"
msgstr ""
msgid "SecurityReports|Failed to get security report information. Please reload the page or try again later."
msgstr ""
@ -29258,6 +29255,9 @@ msgstr ""
msgid "SecurityReports|Take survey"
msgstr ""
msgid "SecurityReports|The security reports below contain one or more vulnerability findings that could not be parsed and were not recorded. Download the artifacts in the job output to investigate. Ensure any security report created conforms to the relevant %{helpPageLinkStart}JSON schema%{helpPageLinkEnd}."
msgstr ""
msgid "SecurityReports|There was an error adding the comment."
msgstr ""
@ -38521,6 +38521,9 @@ msgstr ""
msgid "created"
msgstr ""
msgid "created %{issuable_created} by %{author}"
msgstr ""
msgid "created %{timeAgoString} by %{email} via %{user}"
msgstr ""
@ -39249,6 +39252,9 @@ msgstr ""
msgid "must be greater than start date"
msgstr ""
msgid "must have a unique schedule, status, and elapsed time"
msgstr ""
msgid "my-awesome-group"
msgstr ""

View File

@ -29,6 +29,11 @@ RSpec.describe 'User searches for merge requests', :js do
page.within('.results') do
expect(page).to have_link(merge_request1.title)
expect(page).not_to have_link(merge_request2.title)
# Each result should have MR refs like `gitlab-org/gitlab!1`
page.all('.search-result-row').each do |e|
expect(e.text).to match(/!\d+/)
end
end
end

View File

@ -15,6 +15,7 @@ import {
formatIssueInput,
formatIssue,
getMoveData,
updateListPosition,
} from '~/boards/boards_util';
import destroyBoardListMutation from '~/boards/graphql/board_list_destroy.mutation.graphql';
import issueCreateMutation from '~/boards/graphql/issue_create.mutation.graphql';
@ -36,6 +37,7 @@ import {
mockMoveIssueParams,
mockMoveState,
mockMoveData,
mockList,
} from '../mock_data';
jest.mock('~/flash');
@ -374,6 +376,24 @@ describe('createIssueList', () => {
});
});
describe('addList', () => {
const getters = {
getListByTitle: jest.fn().mockReturnValue(mockList),
};
it('should commit RECEIVE_ADD_LIST_SUCCESS mutation and dispatch fetchItemsForList action', () => {
testAction({
action: actions.addList,
payload: mockLists[1],
state: { ...getters },
expectedMutations: [
{ type: types.RECEIVE_ADD_LIST_SUCCESS, payload: updateListPosition(mockLists[1]) },
],
expectedActions: [{ type: 'fetchItemsForList', payload: { listId: mockList.id } }],
});
});
});
describe('fetchLabels', () => {
it('should commit mutation RECEIVE_LABELS_SUCCESS on success', async () => {
const queryResponse = {
@ -521,7 +541,8 @@ describe('toggleListCollapsed', () => {
describe('removeList', () => {
let state;
const list = mockLists[0];
let getters;
const list = mockLists[1];
const listId = list.id;
const mutationVariables = {
mutation: destroyBoardListMutation,
@ -535,6 +556,9 @@ describe('removeList', () => {
boardLists: mockListsById,
issuableType: issuableTypes.issue,
};
getters = {
getListByTitle: jest.fn().mockReturnValue(mockList),
};
});
afterEach(() => {
@ -544,13 +568,15 @@ describe('removeList', () => {
it('optimistically deletes the list', () => {
const commit = jest.fn();
actions.removeList({ commit, state }, listId);
actions.removeList({ commit, state, getters, dispatch: () => {} }, listId);
expect(commit.mock.calls).toEqual([[types.REMOVE_LIST, listId]]);
});
it('keeps the updated list if remove succeeds', async () => {
const commit = jest.fn();
const dispatch = jest.fn();
jest.spyOn(gqlClient, 'mutate').mockResolvedValue({
data: {
destroyBoardList: {
@ -559,17 +585,18 @@ describe('removeList', () => {
},
});
await actions.removeList({ commit, state }, listId);
await actions.removeList({ commit, state, getters, dispatch }, listId);
expect(gqlClient.mutate).toHaveBeenCalledWith(mutationVariables);
expect(commit.mock.calls).toEqual([[types.REMOVE_LIST, listId]]);
expect(dispatch.mock.calls).toEqual([['fetchItemsForList', { listId: mockList.id }]]);
});
it('restores the list if update fails', async () => {
const commit = jest.fn();
jest.spyOn(gqlClient, 'mutate').mockResolvedValue(Promise.reject());
await actions.removeList({ commit, state }, listId);
await actions.removeList({ commit, state, getters, dispatch: () => {} }, listId);
expect(gqlClient.mutate).toHaveBeenCalledWith(mutationVariables);
expect(commit.mock.calls).toEqual([
@ -588,7 +615,7 @@ describe('removeList', () => {
},
});
await actions.removeList({ commit, state }, listId);
await actions.removeList({ commit, state, getters, dispatch: () => {} }, listId);
expect(gqlClient.mutate).toHaveBeenCalledWith(mutationVariables);
expect(commit.mock.calls).toEqual([
@ -649,6 +676,10 @@ describe('fetchItemsForList', () => {
{ listId },
state,
[
{
type: types.RESET_ITEMS_FOR_LIST,
payload: listId,
},
{
type: types.REQUEST_ITEMS_FOR_LIST,
payload: { listId, fetchNext: false },
@ -671,6 +702,10 @@ describe('fetchItemsForList', () => {
{ listId },
state,
[
{
type: types.RESET_ITEMS_FOR_LIST,
payload: listId,
},
{
type: types.REQUEST_ITEMS_FOR_LIST,
payload: { listId, fetchNext: false },

View File

@ -273,6 +273,53 @@ describe('Board Store Mutations', () => {
});
});
describe('RESET_ITEMS_FOR_LIST', () => {
it('should remove issues from boardItemsByListId state', () => {
const listId = 'gid://gitlab/List/1';
const boardItemsByListId = {
[listId]: [mockIssue.id],
};
state = {
...state,
boardItemsByListId,
};
mutations[types.RESET_ITEMS_FOR_LIST](state, listId);
expect(state.boardItemsByListId[listId]).toEqual([]);
});
});
describe('REQUEST_ITEMS_FOR_LIST', () => {
const listId = 'gid://gitlab/List/1';
const boardItemsByListId = {
[listId]: [mockIssue.id],
};
it.each`
fetchNext | isLoading | isLoadingMore
${true} | ${undefined} | ${true}
${false} | ${true} | ${undefined}
`(
'sets isLoading to $isLoading and isLoadingMore to $isLoadingMore when fetchNext is $fetchNext',
({ fetchNext, isLoading, isLoadingMore }) => {
state = {
...state,
boardItemsByListId,
listsFlags: {
[listId]: {},
},
};
mutations[types.REQUEST_ITEMS_FOR_LIST](state, { listId, fetchNext });
expect(state.listsFlags[listId].isLoading).toBe(isLoading);
expect(state.listsFlags[listId].isLoadingMore).toBe(isLoadingMore);
},
);
});
describe('RECEIVE_ITEMS_FOR_LIST_SUCCESS', () => {
it('updates boardItemsByListId and issues on state', () => {
const listIssues = {

View File

@ -202,6 +202,20 @@ RSpec.describe IssuablesHelper do
end
end
describe '#issuable_project_reference' do
it 'display project name and simple reference with `#` to an issue' do
issue = build_stubbed(:issue)
expect(helper.issuable_project_reference(issue)).to eq("#{issue.project.full_name} ##{issue.iid}")
end
it 'display project name and simple reference with `!` to an MR' do
merge_request = build_stubbed(:merge_request)
expect(helper.issuable_project_reference(merge_request)).to eq("#{merge_request.project.full_name} !#{merge_request.iid}")
end
end
describe '#updated_at_by' do
let(:user) { create(:user) }
let(:unedited_issuable) { create(:issue) }

View File

@ -33,24 +33,6 @@ RSpec.describe Gitlab::Usage::Metrics::NamesSuggestions::Generator do
end
context 'joined relations' do
context 'counted attribute comes from joined relation' do
it_behaves_like 'name suggestion' do
# corresponding metric is collected with:
# distinct_count(
# ::Clusters::Applications::Ingress.modsecurity_enabled.logging
# .joins(cluster: :deployments)
# .merge(::Clusters::Cluster.enabled)
# .merge(Deployment.success),
# ::Deployment.arel_table[:environment_id]
# )
let(:key_path) { 'counts.ingress_modsecurity_logging' }
let(:name_suggestion) do
constrains = /'\(clusters_applications_ingress\.modsecurity_enabled = TRUE AND clusters_applications_ingress\.modsecurity_mode = \d+ AND clusters.enabled = TRUE AND deployments.status = \d+\)'/
/count_distinct_environment_id_from_<adjective describing\: #{constrains}>_deployments_<with>_<adjective describing\: #{constrains}>_clusters_<having>_<adjective describing\: #{constrains}>_clusters_applications_ingress/
end
end
end
context 'counted attribute comes from source relation' do
it_behaves_like 'name suggestion' do
# corresponding metric is collected with count(Issue.with_alert_management_alerts.not_authored_by(::User.alert_bot), start: issue_minimum_id, finish: issue_maximum_id)

View File

@ -966,138 +966,6 @@ RSpec.describe Gitlab::UsageData, :aggregate_failures do
end
end
describe '.ingress_modsecurity_usage' do
subject { described_class.ingress_modsecurity_usage }
let(:environment) { create(:environment) }
let(:project) { environment.project }
let(:environment_scope) { '*' }
let(:deployment) { create(:deployment, :success, environment: environment, project: project, cluster: cluster) }
let(:cluster) { create(:cluster, environment_scope: environment_scope, projects: [project]) }
let(:ingress_mode) { :modsecurity_blocking }
let!(:ingress) { create(:clusters_applications_ingress, ingress_mode, cluster: cluster) }
context 'when cluster is disabled' do
let(:cluster) { create(:cluster, :disabled, projects: [project]) }
it 'gathers ingress data' do
expect(subject[:ingress_modsecurity_logging]).to eq(0)
expect(subject[:ingress_modsecurity_blocking]).to eq(0)
expect(subject[:ingress_modsecurity_disabled]).to eq(0)
expect(subject[:ingress_modsecurity_not_installed]).to eq(0)
end
end
context 'when deployment is unsuccessful' do
let!(:deployment) { create(:deployment, :failed, environment: environment, project: project, cluster: cluster) }
it 'gathers ingress data' do
expect(subject[:ingress_modsecurity_logging]).to eq(0)
expect(subject[:ingress_modsecurity_blocking]).to eq(0)
expect(subject[:ingress_modsecurity_disabled]).to eq(0)
expect(subject[:ingress_modsecurity_not_installed]).to eq(0)
end
end
context 'when deployment is successful' do
let!(:deployment) { create(:deployment, :success, environment: environment, project: project, cluster: cluster) }
context 'when modsecurity is in blocking mode' do
it 'gathers ingress data' do
expect(subject[:ingress_modsecurity_logging]).to eq(0)
expect(subject[:ingress_modsecurity_blocking]).to eq(1)
expect(subject[:ingress_modsecurity_disabled]).to eq(0)
expect(subject[:ingress_modsecurity_not_installed]).to eq(0)
end
end
context 'when modsecurity is in logging mode' do
let(:ingress_mode) { :modsecurity_logging }
it 'gathers ingress data' do
expect(subject[:ingress_modsecurity_logging]).to eq(1)
expect(subject[:ingress_modsecurity_blocking]).to eq(0)
expect(subject[:ingress_modsecurity_disabled]).to eq(0)
expect(subject[:ingress_modsecurity_not_installed]).to eq(0)
end
end
context 'when modsecurity is disabled' do
let(:ingress_mode) { :modsecurity_disabled }
it 'gathers ingress data' do
expect(subject[:ingress_modsecurity_logging]).to eq(0)
expect(subject[:ingress_modsecurity_blocking]).to eq(0)
expect(subject[:ingress_modsecurity_disabled]).to eq(1)
expect(subject[:ingress_modsecurity_not_installed]).to eq(0)
end
end
context 'when modsecurity is not installed' do
let(:ingress_mode) { :modsecurity_not_installed }
it 'gathers ingress data' do
expect(subject[:ingress_modsecurity_logging]).to eq(0)
expect(subject[:ingress_modsecurity_blocking]).to eq(0)
expect(subject[:ingress_modsecurity_disabled]).to eq(0)
expect(subject[:ingress_modsecurity_not_installed]).to eq(1)
end
end
context 'with multiple projects' do
let(:environment_2) { create(:environment) }
let(:project_2) { environment_2.project }
let(:cluster_2) { create(:cluster, environment_scope: environment_scope, projects: [project_2]) }
let!(:ingress_2) { create(:clusters_applications_ingress, :modsecurity_logging, cluster: cluster_2) }
let!(:deployment_2) { create(:deployment, :success, environment: environment_2, project: project_2, cluster: cluster_2) }
it 'gathers non-duplicated ingress data' do
expect(subject[:ingress_modsecurity_logging]).to eq(1)
expect(subject[:ingress_modsecurity_blocking]).to eq(1)
expect(subject[:ingress_modsecurity_disabled]).to eq(0)
expect(subject[:ingress_modsecurity_not_installed]).to eq(0)
end
end
context 'with multiple deployments' do
let!(:deployment_2) { create(:deployment, :success, environment: environment, project: project, cluster: cluster) }
it 'gathers non-duplicated ingress data' do
expect(subject[:ingress_modsecurity_logging]).to eq(0)
expect(subject[:ingress_modsecurity_blocking]).to eq(1)
expect(subject[:ingress_modsecurity_disabled]).to eq(0)
expect(subject[:ingress_modsecurity_not_installed]).to eq(0)
end
end
context 'with multiple projects' do
let(:environment_2) { create(:environment) }
let(:project_2) { environment_2.project }
let!(:deployment_2) { create(:deployment, :success, environment: environment_2, project: project_2, cluster: cluster) }
let(:cluster) { create(:cluster, environment_scope: environment_scope, projects: [project, project_2]) }
it 'gathers ingress data' do
expect(subject[:ingress_modsecurity_logging]).to eq(0)
expect(subject[:ingress_modsecurity_blocking]).to eq(2)
expect(subject[:ingress_modsecurity_disabled]).to eq(0)
expect(subject[:ingress_modsecurity_not_installed]).to eq(0)
end
end
context 'with multiple environments' do
let!(:environment_2) { create(:environment, project: project) }
let!(:deployment_2) { create(:deployment, :success, environment: environment_2, project: project, cluster: cluster) }
it 'gathers ingress data' do
expect(subject[:ingress_modsecurity_logging]).to eq(0)
expect(subject[:ingress_modsecurity_blocking]).to eq(2)
expect(subject[:ingress_modsecurity_disabled]).to eq(0)
expect(subject[:ingress_modsecurity_not_installed]).to eq(0)
end
end
end
end
describe '.grafana_embed_usage_data' do
subject { described_class.grafana_embed_usage_data }

View File

@ -40,6 +40,13 @@ RSpec.describe 'Milestones through GroupQuery' do
expect_array_response(milestone_2.to_global_id.to_s, milestone_3.to_global_id.to_s)
end
it 'fetches milestones between timeframe start and end arguments' do
today = Date.today
fetch_milestones(user, { timeframe: { start: today.to_s, end: (today + 2.days).to_s } })
expect_array_response(milestone_2.to_global_id.to_s, milestone_3.to_global_id.to_s)
end
end
context 'when filtering by state' do

View File

@ -1,6 +1,8 @@
# frozen_string_literal: true
module AccessMatchersHelpers
include Gitlab::Utils::StrongMemoize
USER_ACCESSOR_METHOD_NAME = 'user'
def provide_user(role, membership = nil)
@ -61,11 +63,6 @@ module AccessMatchersHelpers
# (or defined by `method_name`) method generated by `let` definition in example group before it's used by `subject`.
# This override is per concrete example only because the example group class gets re-created for each example.
instance_eval(<<~CODE, __FILE__, __LINE__ + 1)
if instance_variable_get(:@__#{USER_ACCESSOR_METHOD_NAME}_patched)
raise ArgumentError, 'An access matcher be_allowed_for/be_denied_for can be used only once per example (`it` block)'
end
instance_variable_set(:@__#{USER_ACCESSOR_METHOD_NAME}_patched, true)
def #{USER_ACCESSOR_METHOD_NAME}
@#{USER_ACCESSOR_METHOD_NAME} ||= User.find(#{user.id})
end
@ -81,6 +78,13 @@ module AccessMatchersHelpers
end
end
def reset_matcher_environment
instance_eval(<<~CODE, __FILE__, __LINE__ + 1)
clear_memoization(:#{USER_ACCESSOR_METHOD_NAME})
undef #{USER_ACCESSOR_METHOD_NAME} if defined? user
CODE
end
def run_matcher(action, role, membership, owned_objects)
raise_if_non_block_expectation!(action)
@ -91,5 +95,7 @@ module AccessMatchersHelpers
else
action.call
end
reset_matcher_environment
end
end

View File

@ -162,7 +162,6 @@ module UsageDataHelpers
database
prometheus_metrics_enabled
web_ide_clientside_preview_enabled
ingress_modsecurity_enabled
object_store
topology
).freeze

View File

@ -0,0 +1,31 @@
# frozen_string_literal: true
require 'spec_helper'
RSpec.describe BuildQueueWorker do
describe '#perform' do
context 'when build exists' do
let!(:build) { create(:ci_build) }
it 'ticks runner queue value' do
expect_next_instance_of(Ci::UpdateBuildQueueService) do |instance|
expect(instance).to receive(:tick).with(build)
end
described_class.new.perform(build.id)
end
end
context 'when build does not exist' do
it 'does not raise exception' do
expect { described_class.new.perform(123) }
.not_to raise_error
end
end
end
it_behaves_like 'worker with data consistency',
described_class,
feature_flag: :load_balancing_for_build_queue_worker,
data_consistency: :sticky
end

View File

@ -11,7 +11,6 @@ import (
"net/http"
"os"
"gitlab.com/gitlab-org/labkit/log"
"gitlab.com/gitlab-org/labkit/mask"
)
@ -98,11 +97,11 @@ func (m *Multipart) readAndUploadOnePart(ctx context.Context, partURL string, pu
if err != nil {
return nil, fmt.Errorf("create temporary buffer file: %v", err)
}
defer func(path string) {
if err := os.Remove(path); err != nil {
log.WithError(err).WithField("file", path).Warning("Unable to delete temporary file")
}
}(file.Name())
defer file.Close()
if err := os.Remove(file.Name()); err != nil {
return nil, err
}
n, err := io.Copy(file, src)
if err != nil {