Add latest changes from gitlab-org/gitlab@master

This commit is contained in:
GitLab Bot 2021-05-20 15:10:13 +00:00
parent a32e60a7ea
commit e45c8a7e74
64 changed files with 526 additions and 228 deletions

View File

@ -2,16 +2,13 @@
import { GlSkeletonLoader, GlTable } from '@gitlab/ui';
import createFlash from '~/flash';
import { convertNodeIdsFromGraphQLIds } from '~/graphql_shared/utils';
import { thWidthClass } from '~/lib/utils/table_utility';
import { s__, __ } from '~/locale';
import UserDate from '~/vue_shared/components/user_date.vue';
import getUsersGroupCountsQuery from '../graphql/queries/get_users_group_counts.query.graphql';
import UserActions from './user_actions.vue';
import UserAvatar from './user_avatar.vue';
const DEFAULT_TH_CLASSES =
'gl-bg-transparent! gl-border-b-solid! gl-border-b-gray-100! gl-p-5! gl-border-b-1!';
const thWidthClass = (width) => `gl-w-${width}p ${DEFAULT_TH_CLASSES}`;
export default {
components: {
GlSkeletonLoader,

View File

@ -1,5 +1,6 @@
<script>
import { GlTable } from '@gitlab/ui';
import { DEFAULT_TH_CLASSES } from '~/lib/utils/constants';
import { s__, __ } from '~/locale';
import CiBadge from '~/vue_shared/components/ci_badge_link.vue';
import ActionsCell from './cells/actions_cell.vue';
@ -9,7 +10,7 @@ import PipelineCell from './cells/pipeline_cell.vue';
const defaultTableClasses = {
tdClass: 'gl-p-5!',
thClass: 'gl-bg-transparent! gl-border-b-solid! gl-border-b-gray-100! gl-p-5! gl-border-b-1!',
thClass: DEFAULT_TH_CLASSES,
};
// eslint-disable-next-line @gitlab/require-i18n-strings
const coverageTdClasses = `${defaultTableClasses.tdClass} gl-display-none! gl-lg-display-table-cell!`;

View File

@ -16,3 +16,6 @@ export const BV_HIDE_MODAL = 'bv::hide::modal';
export const BV_HIDE_TOOLTIP = 'bv::hide::tooltip';
export const BV_DROPDOWN_SHOW = 'bv::dropdown::show';
export const BV_DROPDOWN_HIDE = 'bv::dropdown::hide';
export const DEFAULT_TH_CLASSES =
'gl-bg-transparent! gl-border-b-solid! gl-border-b-gray-100! gl-p-5! gl-border-b-1!';

View File

@ -0,0 +1,9 @@
import { DEFAULT_TH_CLASSES } from './constants';
/**
* Generates the table header classes to be used for GlTable fields.
*
* @param {Number} width - The column width as a percentage.
* @returns {String} The classes to be used in GlTable fields object.
*/
export const thWidthClass = (width) => `gl-w-${width}p ${DEFAULT_TH_CLASSES}`;

View File

@ -48,9 +48,12 @@ module IntegrationsActions
private
# rubocop:disable Gitlab/ModuleWithInstanceVariables
def integration
@integration ||= find_or_initialize_non_project_specific_integration(params[:id])
@service ||= @integration # TODO: remove references to @service https://gitlab.com/gitlab-org/gitlab/-/issues/329759
end
# rubocop:enable Gitlab/ModuleWithInstanceVariables
def success_message
if integration.active?

View File

@ -85,7 +85,7 @@ class Projects::ServicesController < Projects::ApplicationController
def integration
@integration ||= @project.find_or_initialize_service(params[:id])
@service ||= @integration # TODO: remove references to @service
@service ||= @integration # TODO: remove references to @service https://gitlab.com/gitlab-org/gitlab/-/issues/329759
end
alias_method :service, :integration

View File

@ -33,6 +33,7 @@ class ContainerRepository < ApplicationRecord
scope :search_by_name, ->(query) { fuzzy_search(query, [:name], use_minimum_char_limit: false) }
scope :waiting_for_cleanup, -> { where(expiration_policy_cleanup_status: WAITING_CLEANUP_STATUSES) }
scope :expiration_policy_started_at_nil_or_before, ->(timestamp) { where('expiration_policy_started_at < ? OR expiration_policy_started_at IS NULL', timestamp) }
scope :with_stale_ongoing_cleanup, ->(threshold) { cleanup_ongoing.where('expiration_policy_started_at < ?', threshold) }
def self.exists_by_path?(path)
where(

View File

@ -8,7 +8,7 @@ module MergeRequests
merge_request.id,
current_user.id,
old_assignees.map(&:id),
options
options.stringify_keys # see: https://gitlab.com/gitlab-com/gl-infra/scalability/-/issues/1090
)
end

View File

@ -9,9 +9,11 @@ module MergeRequests
def execute(merge_request)
return merge_request unless current_user&.can?(:update_merge_request, merge_request)
old_assignees = merge_request.assignees
old_assignees = merge_request.assignees.to_a
old_ids = old_assignees.map(&:id)
new_ids = new_assignee_ids(merge_request)
return merge_request if merge_request.errors.any?
return merge_request if new_ids.size != update_attrs[:assignee_ids].size
return merge_request if old_ids.to_set == new_ids.to_set # no-change
@ -30,8 +32,11 @@ module MergeRequests
def new_assignee_ids(merge_request)
# prime the cache - prevent N+1 lookup during authorization loop.
merge_request.project.team.max_member_access_for_user_ids(update_attrs[:assignee_ids])
User.id_in(update_attrs[:assignee_ids]).map do |user|
user_ids = update_attrs[:assignee_ids]
return [] if user_ids.empty?
merge_request.project.team.max_member_access_for_user_ids(user_ids)
User.id_in(user_ids).map do |user|
if user.can?(:read_merge_request, merge_request)
user.id
else

View File

@ -293,7 +293,7 @@ module MergeRequests
def attempt_specialized_update_services(merge_request, attribute)
case attribute
when :assignee_ids
when :assignee_ids, :assignee_id
assignees_service.execute(merge_request)
when :spend_time
add_time_spent_service.execute(merge_request)

View File

@ -2283,15 +2283,6 @@
:weight: 1
:idempotent: true
:tags: []
- :name: merge_requests_assignees_change
:worker_name: MergeRequests::AssigneesChangeWorker
:feature_category: :source_code_management
:has_external_dependencies:
:urgency: :high
:resource_boundary: :unknown
:weight: 1
:idempotent: true
:tags: []
- :name: merge_requests_delete_source_branch
:worker_name: MergeRequests::DeleteSourceBranchWorker
:feature_category: :source_code_management

View File

@ -14,11 +14,18 @@ class ContainerExpirationPolicyWorker # rubocop:disable Scalability/IdempotentWo
BATCH_SIZE = 1000
def perform
process_stale_ongoing_cleanups
throttling_enabled? ? perform_throttled : perform_unthrottled
end
private
def process_stale_ongoing_cleanups
threshold = delete_tags_service_timeout.seconds + 30.minutes
ContainerRepository.with_stale_ongoing_cleanup(threshold.ago)
.update_all(expiration_policy_cleanup_status: :cleanup_unfinished)
end
def perform_unthrottled
with_runnable_policy(preloaded: true) do |policy|
with_context(project: policy.project,
@ -86,4 +93,8 @@ class ContainerExpirationPolicyWorker # rubocop:disable Scalability/IdempotentWo
def lease_timeout
5.hours
end
def delete_tags_service_timeout
::Gitlab::CurrentSettings.current_application_settings.container_registry_delete_tags_service_timeout || 0
end
end

View File

@ -1,28 +0,0 @@
# frozen_string_literal: true
class MergeRequests::AssigneesChangeWorker
include ApplicationWorker
sidekiq_options retry: 3
feature_category :source_code_management
urgency :high
deduplicate :until_executed
idempotent!
def perform(merge_request_id, user_id, old_assignee_ids)
merge_request = MergeRequest.find(merge_request_id)
current_user = User.find(user_id)
# if a user was added and then removed, or removed and then added
# while waiting for this job to run, assume that nothing happened.
users = User.id_in(old_assignee_ids - merge_request.assignee_ids)
return if users.blank?
::MergeRequests::HandleAssigneesChangeService
.new(project: merge_request.target_project, current_user: current_user)
.execute(merge_request, users, execute_hooks: true)
rescue ActiveRecord::RecordNotFound
end
end

View File

@ -8,6 +8,7 @@ class WebHookWorker
feature_category :integrations
worker_has_external_dependencies!
loggable_arguments 2
data_consistency :delayed, feature_flag: :load_balancing_for_web_hook_worker
sidekiq_options retry: 4, dead: false

View File

@ -0,0 +1,5 @@
---
title: Properly process stale ongoing container repository cleanups
merge_request: 62005
author:
type: fixed

View File

@ -0,0 +1,5 @@
---
title: Ensure post-update actions are applied when assignees change
merge_request: 61897
author:
type: fixed

View File

@ -0,0 +1,5 @@
---
title: Fix errors in instance and group-level integration pages for some integrations
merge_request: 62054
author:
type: fixed

View File

@ -0,0 +1,8 @@
---
name: load_balancing_for_web_hook_worker
introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/62075
rollout_issue_url: https://gitlab.com/gitlab-org/gitlab/-/issues/331365
milestone: '14.0'
type: development
group: group::memory
default_enabled: false

View File

@ -218,8 +218,6 @@
- 1
- - merge_request_reset_approvals
- 1
- - merge_requests_assignees_change
- 1
- - merge_requests_delete_source_branch
- 1
- - merge_requests_handle_assignees_change

View File

@ -13,6 +13,7 @@ anonymization
anonymized
Ansible
Anthos
Apdex
approvers
architected
architecting
@ -281,6 +282,7 @@ jQuery
jsdom
Jsonnet
JupyterHub
Kaminari
kanban
kanbans
kaniko
@ -546,6 +548,7 @@ serializer
serializers
serializing
serverless
severities
sharded
sharding
shfmt

View File

@ -127,5 +127,5 @@ time="2020-10-29T04:44:14Z" level=warning msg="Config: failed to fetch" agent_id
It means that the path to the configuration project is incorrect,
or the path to `config.yaml` inside the project is not valid.
To fix this, ensure that the paths to the configuration repo and to the `config.yaml` file
To fix this, ensure that the paths to the configuration repository and to the `config.yaml` file
are correct.

View File

@ -537,13 +537,13 @@ Data that was created on the primary while the secondary was paused will be lost
1. Update the existing cluster configuration.
You can retrieve the existing config with Helm:
You can retrieve the existing configuration with Helm:
```shell
helm --namespace gitlab get values gitlab-geo > gitlab.yaml
```
The existing config will contain a section for Geo that should resemble:
The existing configuration will contain a section for Geo that should resemble:
```yaml
geo:
@ -562,7 +562,7 @@ Data that was created on the primary while the secondary was paused will be lost
You can remove the entire `psql` section if the cluster will remain as a primary site, this refers to the tracking database and will be ignored whilst the cluster is acting as a primary site.
Update the cluster with the new config:
Update the cluster with the new configuration:
```shell
helm upgrade --install --version <current Chart version> gitlab-geo gitlab/gitlab --namespace gitlab -f gitlab.yaml

View File

@ -196,7 +196,7 @@ This list of limitations only reflects the latest version of GitLab. If you are
- Object pools for forked project deduplication work only on the **primary** site, and are duplicated on the **secondary** site.
- GitLab Runners cannot register with a **secondary** site. Support for this is [planned for the future](https://gitlab.com/gitlab-org/gitlab/-/issues/3294).
- Configuring Geo **secondary** sites to [use high-availability configurations of PostgreSQL](https://gitlab.com/groups/gitlab-org/-/epics/2536) is currently in **alpha** support.
- [Selective synchronization](replication/configuration.md#selective-synchronization) only limits what repositories are replicated. The entire PostgreSQL data is still replicated. Selective synchronization is not built to accomodate compliance / export control use cases.
- [Selective synchronization](replication/configuration.md#selective-synchronization) only limits what repositories are replicated. The entire PostgreSQL data is still replicated. Selective synchronization is not built to accommodate compliance / export control use cases.
### Limitations on replication/verification

View File

@ -854,6 +854,11 @@ To resolve this issue:
the **primary** node using IPv4 in the `/etc/hosts` file. Alternatively, you should
[enable IPv6 on the **primary** node](https://docs.gitlab.com/omnibus/settings/nginx.html#setting-the-nginx-listen-address-or-addresses).
### Geo Admin Area shows 'Unknown' for health status and 'Request failed with status code 401'
If using a load balancer, ensure that the load balancer's URL is set as the `external_url` in the
`/etc/gitlab/gitlab.rb` of the nodes behind the load balancer.
### GitLab Pages return 404 errors after promoting
This is due to [Pages data not being managed by Geo](datatypes.md#limitations-on-replicationverification).

View File

@ -147,7 +147,7 @@ attributes](https://gitlab.com/gitlab-org/gitlab/-/blob/master/app/workers/all_q
- `worker_name` - the worker name. The other attributes are typically more useful as
they are more general, but this is available in case a particular worker needs
to be selected.
- `name` - the queue name. Similiarly, this is available in case a particular queue needs
- `name` - the queue name. Similarly, this is available in case a particular queue needs
to be selected.
- `resource_boundary` - if the queue is bound by `cpu`, `memory`, or
`unknown`. For example, the `project_export` queue is memory bound as it has

View File

@ -831,8 +831,8 @@ or persistent errors, or the Pages Daemon serving old content.
NOTE:
Expiry, interval and timeout flags use [Golang's duration formatting](https://golang.org/pkg/time/#ParseDuration).
A duration string is a possibly signed sequence of decimal numbers,
each with optional fraction and a unit suffix, such as "300ms", "1.5h" or "2h45m".
Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h".
each with optional fraction and a unit suffix, such as `300ms`, `1.5h` or `2h45m`.
Valid time units are `ns`, `us` (or `µs`), `ms`, `s`, `m`, `h`.
Examples:
@ -938,7 +938,7 @@ In installations from source:
## ZIP storage
In GitLab 14.0 the underlaying storage format of GitLab Pages is changing from
In GitLab 14.0 the underlying storage format of GitLab Pages is changing from
files stored directly in disk to a single ZIP archive per project.
These ZIP archives can be stored either locally on disk storage or on the [object storage](#using-object-storage) if it is configured.
@ -1210,7 +1210,7 @@ These are due to the Pages files not being among the
It is possible to copy the subfolders and files in the [Pages path](#change-storage-path)
to the new primary node to resolve this.
For example, you can adapt the `rsync` strategy from the
[moving repositories documenation](../operations/moving_repositories.md).
[moving repositories documentation](../operations/moving_repositories.md).
Alternatively, run the CI pipelines of those projects that contain a `pages` job again.
### Failed to connect to the internal GitLab API

View File

@ -39,7 +39,7 @@ NOTE:
Components marked with * can be optionally run on reputable
third party external PaaS PostgreSQL solutions. Google Cloud SQL and AWS RDS are known to work.
Components marked with ** can be optionally run on reputable
third party external PaaS Redis solutions. Google Memorystore and AWS Elasticache are known to work.
third party external PaaS Redis solutions. Google Memorystore and AWS ElastiCache are known to work.
```plantuml
@startuml 10k
@ -2423,7 +2423,7 @@ NOTE:
Components marked with * can be optionally run on reputable
third party external PaaS PostgreSQL solutions. Google Cloud SQL and AWS RDS are known to work.
Components marked with ** can be optionally run on reputable
third party external PaaS Redis solutions. Google Memorystore and AWS Elasticache are known to work.
third party external PaaS Redis solutions. Google Memorystore and AWS ElastiCache are known to work.
```plantuml
@startuml 10k

View File

@ -39,7 +39,7 @@ NOTE:
Components marked with * can be optionally run on reputable
third party external PaaS PostgreSQL solutions. Google Cloud SQL and AWS RDS are known to work.
Components marked with ** can be optionally run on reputable
third party external PaaS Redis solutions. Google Memorystore and AWS Elasticache are known to work.
third party external PaaS Redis solutions. Google Memorystore and AWS ElastiCache are known to work.
```plantuml
@startuml 25k

View File

@ -31,7 +31,7 @@ NOTE:
Components marked with * can be optionally run on reputable
third party external PaaS PostgreSQL solutions. Google Cloud SQL and AWS RDS are known to work.
Components marked with ** can be optionally run on reputable
third party external PaaS Redis solutions. Google Memorystore and AWS Elasticache are known to work.
third party external PaaS Redis solutions. Google Memorystore and AWS ElastiCache are known to work.
```plantuml
@startuml 2k

View File

@ -46,7 +46,7 @@ NOTE:
Components marked with * can be optionally run on reputable
third party external PaaS PostgreSQL solutions. Google Cloud SQL and AWS RDS are known to work.
Components marked with ** can be optionally run on reputable
third party external PaaS Redis solutions. Google Memorystore and AWS Elasticache are known to work.
third party external PaaS Redis solutions. Google Memorystore and AWS ElastiCache are known to work.
```plantuml
@startuml 3k
@ -1213,7 +1213,7 @@ Praefect requires several secret tokens to secure communications across the Clus
Gitaly Cluster nodes are configured in Praefect via a `virtual storage`. Each storage contains
the details of each Gitaly node that makes up the cluster. Each storage is also given a name
and this name is used in several areas of the config. In this guide, the name of the storage will be
and this name is used in several areas of the configuration. In this guide, the name of the storage will be
`default`. Also, this guide is geared towards new installs, if upgrading an existing environment
to use Gitaly Cluster, you may need to use a different name.
Refer to the [Praefect documentation](../gitaly/praefect.md#praefect) for more info.
@ -2074,7 +2074,7 @@ but with smaller performance requirements, several modifications can be consider
- PostgreSQL: Can be run on reputable Cloud PaaS solutions such as Google Cloud SQL or AWS RDS. In this setup, the PgBouncer and Consul nodes are no longer required:
- Consul may still be desired if [Prometheus](../monitoring/prometheus/index.md) auto discovery is a requirement, otherwise you would need to [manually add scrape configurations](../monitoring/prometheus/index.md#adding-custom-scrape-configurations) for all nodes.
- As Redis Sentinel runs on the same box as Consul in this architecture, it may need to be run on a separate box if Redis is still being run via Omnibus.
- Redis: Can be run on reputable Cloud PaaS solutions such as Google Memorystore and AWS Elasticache. In this setup, the Redis Sentinel is no longer required.
- Redis: Can be run on reputable Cloud PaaS solutions such as Google Memorystore and AWS ElastiCache. In this setup, the Redis Sentinel is no longer required.
<div align="right">
<a type="button" class="btn btn-default" href="#setup-components">

View File

@ -39,7 +39,7 @@ NOTE:
Components marked with * can be optionally run on reputable
third party external PaaS PostgreSQL solutions. Google Cloud SQL and AWS RDS are known to work.
Components marked with ** can be optionally run on reputable
third party external PaaS Redis solutions. Google Memorystore and AWS Elasticache are known to work.
third party external PaaS Redis solutions. Google Memorystore and AWS ElastiCache are known to work.
```plantuml
@startuml 50k

View File

@ -43,7 +43,7 @@ NOTE:
Components marked with * can be optionally run on reputable
third party external PaaS PostgreSQL solutions. Google Cloud SQL and AWS RDS are known to work.
Components marked with ** can be optionally run on reputable
third party external PaaS Redis solutions. Google Memorystore and AWS Elasticache are known to work.
third party external PaaS Redis solutions. Google Memorystore and AWS ElastiCache are known to work.
```plantuml
@startuml 5k

View File

@ -23,7 +23,7 @@ Use [external object storage](https://docs.gitlab.com/charts/advanced/external-o
## Disabling Terraform state
To disable terraform state site-wide, follow the steps below.
A GitLab administrator may want to disable Terraform state to reduce diskspace or if Terraform is not used in your instance.
A GitLab administrator may want to disable Terraform state to reduce disk space or if Terraform is not used in your instance.
To do so, follow the steps below according to your installation's type.
**In Omnibus installations:**

View File

@ -71,7 +71,7 @@ Example response:
## V1 packages list
Given the V1 provider sha, returns a list of packages within the repository. Using Composer V2 is
Given the V1 provider SHA, returns a list of packages in the repository. Using Composer V2 is
recommended over V1.
```plaintext
@ -81,7 +81,7 @@ GET group/:id/-/packages/composer/p/:sha
| Attribute | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `id` | string | yes | The ID or full path of the group. |
| `sha` | string | yes | The provider sha, provided by the Composer [base request](#base-repository-request). |
| `sha` | string | yes | The provider SHA, provided by the Composer [base request](#base-repository-request). |
```shell
curl --user <username>:<personal_access_token> "https://gitlab.example.com/api/v4/group/1/-/packages/composer/p/082df4a5035f8725a12i4a3d2da5e6aaa966d06843d0a5c6d499313810427bd6"
@ -115,7 +115,7 @@ the symbol `%24` (see example below).
| -------------- | ------ | -------- | ----------- |
| `id` | string | yes | The ID or full path of the group. |
| `package_name` | string | yes | The name of the package. |
| `sha` | string | yes | The sha digest of the package, provided by the [V1 packages list](#v1-packages-list). |
| `sha` | string | yes | The SHA digest of the package, provided by the [V1 packages list](#v1-packages-list). |
```shell
curl --user <username>:<personal_access_token> "https://gitlab.example.com/api/v4/group/1/-/packages/composer/my-org/my-composer-package%245c873497cdaa82eda35af5de24b789be92dfb6510baf117c42f03899c166b6e7"
@ -272,7 +272,7 @@ GET projects/:id/packages/composer/archives/:package_name
| -------------- | ------ | -------- | ----------- |
| `id` | string | yes | The ID or full path of the group. |
| `package_name` | string | yes | The name of the package. |
| `sha` | string | yes | The target sha of the requested package version. |
| `sha` | string | yes | The target SHA of the requested package version. |
```shell
curl --user <username>:<personal_access_token> "https://gitlab.example.com/api/v4/projects/1/packages/composer/archives/my-org/my-composer-package.zip?sha=673594f85a55fe3c0eb45df7bd2fa9d95a1601ab"

View File

@ -28,7 +28,7 @@ store CI/CD data.
We expect to see 20M builds created daily on GitLab.com in the first half of
2024.
![ci_builds cumulative with forecast](ci_builds_cumulative_forecast.png)
![CI builds cumulative with forecast](ci_builds_cumulative_forecast.png)
## Goals
@ -46,9 +46,9 @@ Historically, Rails used to use [integer](https://www.postgresql.org/docs/9.1/da
type when creating primary keys for a table. We did use the default when we
[created the `ci_builds` table in 2012](https://gitlab.com/gitlab-org/gitlab/-/blob/046b28312704f3131e72dcd2dbdacc5264d4aa62/db/ci/migrate/20121004165038_create_builds.rb).
[The behavior of Rails has changed](https://github.com/rails/rails/pull/26266)
since the release of Rails 5. The framework is now using bigint type that is 8
since the release of Rails 5. The framework is now using `bigint` type that is 8
bytes long, however we have not migrated primary keys for `ci_builds` table to
bigint yet.
`bigint` yet.
We will run out of the capacity of the integer type to store primary keys in
`ci_builds` table before December 2021. When it happens without a viable
@ -89,7 +89,7 @@ Prophet](https://facebook.github.io/prophet/) shows that in the first half of
to around 2M we see created today, this is 10x growth our product might need to
sustain in upcoming years.
![ci_builds daily forecast](ci_builds_daily_forecast.png)
![CI builds daily forecast](ci_builds_daily_forecast.png)
### Queuing mechanisms are using the large table
@ -101,7 +101,7 @@ want to process them.
This mechanism is very inefficient, and it has been causing problems on the
production environment frequently. This usually results in a significant drop
of the CI/CD apdex score, and sometimes even causes a significant performance
of the CI/CD Apdex score, and sometimes even causes a significant performance
degradation in the production environment.
There are multiple other strategies that can improve performance and

View File

@ -136,7 +136,7 @@ you can use tiers:
| Environment tier | Environment name examples |
|------------------|----------------------------------------------------|
| `production` | Production, Live |
| `staging` | Staging, Model, Pre, Demo |
| `staging` | Staging, Model, Demo |
| `testing` | Test, QC |
| `development` | Dev, [Review apps](../review_apps/index.md), Trunk |
| `other` | |

View File

@ -41,7 +41,7 @@ With pagination, the data is split into equal pieces (pages). On the first visit
### Pick the right approach
Let the database handle the pagination, filtering, and data retrieval. Implementing in-memory pagination on the backend (`paginate_array` from kaminari) or on the frontend (JavaScript) might work for a few hundreds of records. If application limits are not defined, things can get out of control quickly.
Let the database handle the pagination, filtering, and data retrieval. Implementing in-memory pagination on the backend (`paginate_array` from Kaminari) or on the frontend (JavaScript) might work for a few hundreds of records. If application limits are not defined, things can get out of control quickly.
### Reduce complexity
@ -78,7 +78,7 @@ Infinite scroll can use keyset pagination without affecting the user experience
### Offset pagination
The most common way to paginate lists is using offset-based pagination (UI and REST API). It's backed by the popular [kaminari](https://github.com/kaminari/kaminari) Ruby gem, which provides convenient helper methods to implement pagination on ActiveRecord queries.
The most common way to paginate lists is using offset-based pagination (UI and REST API). It's backed by the popular [Kaminari](https://github.com/kaminari/kaminari) Ruby gem, which provides convenient helper methods to implement pagination on ActiveRecord queries.
Offset-based pagination is leveraging the `LIMIT` and `OFFSET` SQL clauses to take out a specific slice from the table.
@ -97,9 +97,9 @@ Notice that the query also orders the rows by the primary key (`id`). When pagin
Example pagination bar:
![Page selector rendered by kaminari](../img/offset_pagination_ui_v13_11.jpg)
![Page selector rendered by Kaminari](../img/offset_pagination_ui_v13_11.jpg)
The kaminari gem renders a nice pagination bar on the UI with page numbers and optionally quick shortcuts the next, previous, first, and last page buttons. To render these buttons, kaminari needs to know the number of rows, and for that, a count query is executed.
The Kaminari gem renders a nice pagination bar on the UI with page numbers and optionally quick shortcuts the next, previous, first, and last page buttons. To render these buttons, Kaminari needs to know the number of rows, and for that, a count query is executed.
```sql
SELECT COUNT(*) FROM issues WHERE project_id = 1
@ -158,7 +158,7 @@ Here we're leveraging the ordered property of the b-tree database index. Values
Kaminari by default executes a count query to determine the number of pages for rendering the page links. Count queries can be quite expensive for a large table, in an unfortunate scenario the queries will simply time out.
To work around this, we can run kaminari without invoking the count SQL query.
To work around this, we can run Kaminari without invoking the count SQL query.
```ruby
Issue.where(project_id: 1).page(1).per(20).without_count
@ -311,5 +311,5 @@ Using keyset pagination outside of GraphQL is not straightforward. We have the l
Keyset pagination provides stable performance regardless of the number of pages we moved forward. To achieve this performance, the paginated query needs an index that covers all the columns in the `ORDER BY` clause, similarly to the offset pagination.
### General performance guidelines
See the [pagination general performance guidelines page](pagination_performance_guidelines.md).

View File

@ -23,11 +23,11 @@ You may create a new account or use any of their supported sign in services.
GitLab is being translated into many languages.
1. Find the language that you want to contribute to, in our
[GitLab Crowdin project](https://crowdin.com/project/gitlab-ee).
[GitLab CrowdIn project](https://crowdin.com/project/gitlab-ee).
- If the language that you're looking for is available, proceed
to the next step.
- If the language you are looking for is not available,
[open an issue](https://gitlab.com/gitlab-org/gitlab/-/issues?scope=all&utf8=✓&state=all&label_name[]=Category%3AInternationalization). Notify our Crowdin
[open an issue](https://gitlab.com/gitlab-org/gitlab/-/issues?scope=all&utf8=✓&state=all&label_name[]=Category%3AInternationalization). Notify our CrowdIn
administrators by including `@gitlab-org/manage/import` in your issue.
- After the issue/Merge Request is complete, restart this procedure.
1. Next, you can view list of files and folders.

View File

@ -851,6 +851,46 @@ using expectations, or dependency injection along with stubs, to avoid the need
for modifications. If you have no other choice, an `around` block like the global
variables example can be used, but avoid this if at all possible.
#### Elasticsearch specs
> [Introduced](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/61171) in GitLab 14.0.
Specs that require Elasticsearch must be marked with the `:elastic` trait. This
creates and deletes indices between examples to ensure a clean index, so that there is no room
for polluting the tests with nonessential data.
Most tests for Elasticsearch logic relate to:
- Creating data in Postgres and waiting for it to be indexed in Elasticsearch.
- Searching for that data.
- Ensuring that the test gives the expected result.
There are some exceptions, such as checking for structural changes rather than individual records in an index.
The `:elastic_with_delete_by_query` trait was added to reduce run time for pipelines by creating and deleting indices
at the start and end of each context only. The [Elasticsearch DeleteByQuery API](https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-delete-by-query.html)
is used to delete data in all indices in between examples to ensure a clean index.
Note that Elasticsearch indexing uses [`Gitlab::Redis::SharedState`](../../../ee/development/redis.md#gitlabrediscachesharedstatequeues).
Therefore, it is recommended to use `:clean_gitlab_redis_shared_state` in conjunction with the Elasticsearch traits.
Specs using Elasticsearch require that you:
- Create data in Postgres and then index it into Elasticsearch.
- Enable Application Settings for Elasticsearch (which is disabled by default).
To do so, use:
```ruby
before do
stub_ee_application_setting(elasticsearch_search: true, elasticsearch_indexing: true)
end
```
Additionally, you can use the `ensure_elasticsearch_index!` method to overcome the asynchronous nature of Elasticsearch.
It uses the [Elasticsearch Refresh API](https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-refresh.html#refresh-api-desc)
to make sure all operations performed on an index since the last refresh are available for search. This method is typically
called after loading data into Postgres to ensure the data is indexed and searchable.
#### Test Snowplow events
WARNING:

View File

@ -63,6 +63,12 @@ including:
- Snippets
- Group wikis **(PREMIUM)**
Backups do not include:
- [Terraform state files](../administration/terraform_state.md)
- [Package registry files](../administration/packages/index.md)
- [Mattermost data](https://docs.mattermost.com/administration/config-settings.html#file-storage)
WARNING:
GitLab does not back up any configuration files, SSL certificates, or system
files. You are highly advised to read about [storing configuration files](#storing-configuration-files).

View File

@ -0,0 +1,40 @@
---
stage: none
group: unassigned
info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://about.gitlab.com/handbook/engineering/ux/technical-writing/#assignments
---
# Federated Learning of Cohorts (FLoC) **(FREE SELF)**
> [Introduced](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/60933) in GitLab Free 13.12.
Federated Learning of Cohorts (FLoC) is a feature that the Chrome browser has
rolled out, where users are categorized into different cohorts, so that
advertisers can use this data to uniquely target and track users. For more
information, visit the [FLoC repository](https://github.com/WICG/floc).
To avoid users being tracked and categorized in any GitLab instance, FLoC is
disabled by default by sending the following header:
```plaintext
Permissions-Policy: interest-cohort=()
```
To enable it:
1. Go to the Admin Area (**{admin}**) and select **Settings > General**.
1. Expand **Federated Learning of Cohorts**.
1. Select the checkbox to enable FLoC.
1. Click **Save changes**.
<!-- ## Troubleshooting
Include any troubleshooting steps that you can foresee. If you know beforehand what issues
one might have when setting this up, or when something is changed, or on upgrading, it's
important to describe those, too. Think of things that may go wrong and include them here.
This is important to minimize requests for support, and to avoid doc comments with
questions that you know someone might ask.
Each scenario can be a third-level heading, e.g. `### Getting error message X`.
If you have none to add when creating a doc, leave this section in place
but commented out to help encourage others to add to it in the future. -->

View File

@ -28,6 +28,7 @@ Access the default page for admin area settings by navigating to **Admin Area >
| [External Authentication](external_authorization.md#configuration) | External Classification Policy Authorization |
| [Web terminal](../../../administration/integration/terminal.md#limiting-websocket-connection-time) | Set max session time for web terminal. |
| [Web IDE](../../project/web_ide/index.md#enabling-live-preview) | Manage Web IDE Features. |
| [FLoC](floc.md) | Enable or disable [Federated Learning of Cohorts (FLoC)](https://en.wikipedia.org/wiki/Federated_Learning_of_Cohorts) tracking. |
## Integrations

View File

@ -25,6 +25,9 @@ learn how to proceed to keep your apps up and running:
- [One-click install method](#install-with-one-click-deprecated)
- [CI/CD template method](#install-using-gitlab-cicd-deprecated)
NOTE:
Despite being deprecated, the recommended way to install GitLab-integrated applications is the GitLab CI/CD method presented below. We are working on a [cluster management project template](https://gitlab.com/gitlab-org/gitlab/-/issues/327908) with a simple upgrade path from the CI/CD-based method.
## Install using GitLab CI/CD (DEPRECATED)
> - [Introduced](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/20822) in GitLab 12.6.

View File

@ -7,7 +7,7 @@ type: reference, how-to
# Wiki **(FREE)**
If you don't want to keep your documentation in your repository, but you do want
If you don't want to keep your documentation in your repository, but you want
to keep it in the same project as your code, you can use the wiki GitLab provides
in each GitLab project. Every wiki is a separate Git repository, so you can create
wiki pages in the web interface, or [locally using Git](#create-or-edit-wiki-pages-locally).
@ -34,8 +34,8 @@ with sibling pages listed in alphabetical order. To view a list of all pages, se
When a wiki is created, it is empty. On your first visit, create the landing page
users see when viewing the wiki:
1. Go to the page for your project or group.
1. In the left sidebar, select **Wiki**, then **Create your first page**.
1. Go to your project or group and select **Wiki**.
1. Select **Create your first page**.
1. Select a **Format** for styling your text.
1. Add a welcome message in the **Content** section. You can always edit it later.
1. Add a **Commit message**. Git requires a commit message, so GitLab creates one
@ -46,8 +46,7 @@ users see when viewing the wiki:
Users with Developer [permissions](../../permissions.md) can create new wiki pages:
1. Go to the page for your project or group.
1. In the left sidebar, select **Wiki**.
1. Go to your project or group and select **Wiki**.
1. Select **New page** on this page, or any other wiki page.
1. Select a content format.
1. Add a title for your new page. Page titles use
@ -111,8 +110,8 @@ may not be able to check out the wiki locally afterward.
You need Developer [permissions](../../permissions.md) or higher to edit a wiki page:
1. Go to the page for your project or group.
1. In the left sidebar, select **Wiki**, and go to the page you want to edit.
1. Go to your project or group and select **Wiki**.
1. Go to the page you want to edit.
1. Select the edit icon (**{pencil}**).
1. Edit the content.
1. Select **Save changes**.
@ -126,8 +125,8 @@ For an example, read [Table of contents](../../markdown.md#table-of-contents).
You need Maintainer [permissions](../../permissions.md) or higher to delete a wiki page:
1. Go to the page for your project or group.
1. In the left sidebar, select **Wiki**, and go to the page you want to delete.
1. Go to your project or group and select **Wiki**.
1. Go to the page you want to delete.
1. Select **Delete page**.
1. Confirm the deletion.
@ -135,8 +134,8 @@ You need Maintainer [permissions](../../permissions.md) or higher to delete a wi
You need Developer [permissions](../../permissions.md) or higher to move a wiki page:
1. Go to the page for your project or group.
1. In the left sidebar, select **Wiki**, and go to the page you want to move.
1. Go to your project or group and select **Wiki**.
1. Go to the page you want to move.
1. Select the edit icon (**{pencil}**).
1. Add the new path to the **Title** field. For example, if you have a wiki page
called `about` under `company` and you want to move it to the wiki's root,
@ -164,8 +163,8 @@ From the history page you can see:
You can see the changes made in a version of a wiki page, similar to versioned diff file views:
1. Go to the page for your project or group.
1. In the left sidebar, select **Wiki**, and go to the wiki page you're interested in.
1. Go to your project or group and select **Wiki**.
1. Go to the wiki page you're interested in.
1. Select **Page history** to see all page versions.
1. Select the commit message in the **Changes** column for the version you're interested in.
@ -192,8 +191,7 @@ You need Developer [permissions](../../permissions.md) or higher to customize th
navigation sidebar. This process creates a wiki page named `_sidebar` which fully
replaces the default sidebar navigation:
1. Go to the page for your project or group.
1. In the left sidebar, select **Wiki**.
1. Go to your project or group and select **Wiki**.
1. In the top right corner of the page, select **Edit sidebar**.
1. When complete, select **Save changes**.
@ -243,7 +241,7 @@ and above. Group wiki repositories can be moved using the
To add a link to an external wiki from a project's left sidebar:
1. In your project, go to **Settings > Integrations**.
1. Go to your project and select **Settings > Integrations**.
1. Select **External wiki**.
1. Add the URL to your external wiki.
1. (Optional) Select **Test settings** to verify the connection.
@ -258,16 +256,16 @@ To hide the internal wiki from the sidebar, [disable the project's wiki](#disabl
To hide the link to an external wiki:
1. In your project, go to **Settings > Integrations**.
1. Go to your project and select **Settings > Integrations**.
1. Select **External wiki**.
1. Unselect **Enable integration**.
1. In the **Enable integration** section, clear the **Active** checkbox.
1. Select **Save changes**.
## Disable the project's wiki
To disable a project's internal wiki:
1. In your project, go to **Settings > General**.
1. Go to your project and select **Settings > General**.
1. Expand **Visibility, project features, permissions**.
1. Scroll down to find **Wiki** and toggle it off (in gray).
1. Select **Save changes**.

View File

@ -436,14 +436,11 @@ module API
mr_params = declared_params(include_missing: false)
mr_params[:force_remove_source_branch] = mr_params.delete(:remove_source_branch) if mr_params.has_key?(:remove_source_branch)
mr_params = convert_parameters_from_legacy_format(mr_params)
mr_params[:use_specialized_service] = true
service = if mr_params.one? && (mr_params.keys & %i[assignee_id assignee_ids]).one?
::MergeRequests::UpdateAssigneesService
else
::MergeRequests::UpdateService
end
merge_request = service.new(project: user_project, current_user: current_user, params: mr_params).execute(merge_request)
merge_request = ::MergeRequests::UpdateService
.new(project: user_project, current_user: current_user, params: mr_params)
.execute(merge_request)
handle_merge_request_errors!(merge_request)

View File

@ -21619,6 +21619,9 @@ msgstr ""
msgid "Name has already been taken"
msgstr ""
msgid "Name is already taken."
msgstr ""
msgid "Name new label"
msgstr ""
@ -24633,12 +24636,18 @@ msgstr ""
msgid "Please provide a name"
msgstr ""
msgid "Please provide a name."
msgstr ""
msgid "Please provide a valid URL"
msgstr ""
msgid "Please provide a valid URL ending with .git"
msgstr ""
msgid "Please provide a valid URL."
msgstr ""
msgid "Please provide a valid YouTube URL or ID"
msgstr ""
@ -24675,6 +24684,9 @@ msgstr ""
msgid "Please select a valid target branch"
msgstr ""
msgid "Please select a valid target branch."
msgstr ""
msgid "Please select and add a member"
msgstr ""
@ -31045,12 +31057,24 @@ msgstr ""
msgid "StatusCheck|An error occurred fetching the status checks."
msgstr ""
msgid "StatusCheck|Apply this status check to any branch or a specific protected branch."
msgstr ""
msgid "StatusCheck|Check for a status response in Merge Requests. Failures do not block merges. %{link_start}Learn more%{link_end}."
msgstr ""
msgid "StatusCheck|Examples: QA, Security."
msgstr ""
msgid "StatusCheck|External API is already in use by another status check."
msgstr ""
msgid "StatusCheck|Invoke an external API as part of the approvals"
msgstr ""
msgid "StatusCheck|Invoke an external API as part of the pipeline process."
msgstr ""
msgid "StatusCheck|No status checks are defined yet."
msgstr ""
@ -31069,6 +31093,9 @@ msgstr ""
msgid "StatusCheck|Status to check"
msgstr ""
msgid "StatusCheck|Target branch"
msgstr ""
msgid "StatusCheck|You are about to remove the %{name} status check."
msgstr ""
@ -34609,6 +34636,9 @@ msgstr ""
msgid "Unable to fetch branch list for this project."
msgstr ""
msgid "Unable to fetch branches list, please close the form and try again"
msgstr ""
msgid "Unable to fetch unscanned projects"
msgstr ""

View File

@ -0,0 +1,16 @@
# frozen_string_literal: true

require 'spec_helper'

RSpec.describe 'User activates the instance-level Mattermost Slash Command integration', :js do
  include_context 'instance integration activation'

  # Settings page the shared examples expect to land on after saving.
  let(:edit_path) { edit_admin_application_settings_integration_path(:mattermost_slash_commands) }

  before do
    stub_mattermost_setting(enabled: true)
    visit_instance_integration('Mattermost slash commands')
  end

  include_examples 'user activates the Mattermost Slash Command integration'
end

View File

@ -0,0 +1,16 @@
# frozen_string_literal: true

require 'spec_helper'

RSpec.describe 'User activates the group-level Mattermost Slash Command integration', :js do
  include_context 'group integration activation'

  # Settings page the shared examples expect to land on after saving.
  let(:edit_path) { edit_group_settings_integration_path(group, :mattermost_slash_commands) }

  before do
    stub_mattermost_setting(enabled: true)
    visit_group_integration('Mattermost slash commands')
  end

  include_examples 'user activates the Mattermost Slash Command integration'
end

View File

@ -14,35 +14,10 @@ RSpec.describe 'Set up Mattermost slash commands', :js do
context 'mattermost service is enabled' do
let(:mattermost_enabled) { true }
it 'shows a help message' do
expect(page).to have_content("Use this service to perform common")
end
describe 'activation' do
let(:edit_path) { edit_project_service_path(project, :mattermost_slash_commands) }
it 'shows a token placeholder' do
token_placeholder = find_field('service_token')['placeholder']
expect(token_placeholder).to eq('XXxxXXxxXXxxXXxxXXxxXXxx')
end
it 'redirects to the integrations page after saving but not activating' do
token = ('a'..'z').to_a.join
fill_in 'service_token', with: token
click_active_checkbox
click_save_integration
expect(current_path).to eq(edit_project_service_path(project, :mattermost_slash_commands))
expect(page).to have_content('Mattermost slash commands settings saved, but not active.')
end
it 'redirects to the integrations page after activating' do
token = ('a'..'z').to_a.join
fill_in 'service_token', with: token
click_save_integration
expect(current_path).to eq(edit_project_service_path(project, :mattermost_slash_commands))
expect(page).to have_content('Mattermost slash commands settings saved and active.')
include_examples 'user activates the Mattermost Slash Command integration'
end
it 'shows the add to mattermost button' do

View File

@ -0,0 +1,11 @@
import { DEFAULT_TH_CLASSES } from '~/lib/utils/constants';
import * as tableUtils from '~/lib/utils/table_utility';
describe('table_utility', () => {
describe('thWidthClass', () => {
it('returns the width class including default table header classes', () => {
const width = 50;
expect(tableUtils.thWidthClass(width)).toBe(`gl-w-${width}p ${DEFAULT_TH_CLASSES}`);
});
});
});

View File

@ -360,6 +360,17 @@ RSpec.describe ContainerRepository do
it { is_expected.to contain_exactly(repository1, repository2, repository4) }
end
describe '.with_stale_ongoing_cleanup' do
let_it_be(:repository1) { create(:container_repository, :cleanup_ongoing, expiration_policy_started_at: 1.day.ago) }
let_it_be(:repository2) { create(:container_repository, :cleanup_ongoing, expiration_policy_started_at: 25.minutes.ago) }
let_it_be(:repository3) { create(:container_repository, :cleanup_ongoing, expiration_policy_started_at: 1.week.ago) }
let_it_be(:repository4) { create(:container_repository, :cleanup_unscheduled, expiration_policy_started_at: 25.minutes.ago) }
subject { described_class.with_stale_ongoing_cleanup(27.minutes.ago) }
it { is_expected.to contain_exactly(repository1, repository3) }
end
describe '.waiting_for_cleanup' do
let_it_be(:repository_cleanup_scheduled) { create(:container_repository, :cleanup_scheduled) }
let_it_be(:repository_cleanup_unfinished) { create(:container_repository, :cleanup_unfinished) }

View File

@ -2174,6 +2174,12 @@ RSpec.describe API::MergeRequests do
a_hash_including('name' => user2.name)
)
end
it 'creates appropriate system notes', :sidekiq_inline do
put api("/projects/#{project.id}/merge_requests/#{merge_request.iid}", user), params: params
expect(merge_request.notes.system.last.note).to include("assigned to #{user2.to_reference}")
end
end
context 'when assignee_id=user2.id' do
@ -2193,6 +2199,27 @@ RSpec.describe API::MergeRequests do
end
end
context 'when assignee_id=0' do
let(:params) do
{
assignee_id: 0
}
end
it 'clears the assignees' do
put api("/projects/#{project.id}/merge_requests/#{merge_request.iid}", user), params: params
expect(response).to have_gitlab_http_status(:ok)
expect(json_response['assignees']).to be_empty
end
it 'creates appropriate system notes', :sidekiq_inline do
put api("/projects/#{project.id}/merge_requests/#{merge_request.iid}", user), params: params
expect(merge_request.notes.system.last.note).to include("unassigned #{user.to_reference}")
end
end
context 'when only assignee_ids are provided, and the list is empty' do
let(:params) do
{

View File

@ -6,7 +6,7 @@ RSpec.describe MergeRequests::HandleAssigneesChangeService do
let_it_be(:project) { create(:project, :repository) }
let_it_be(:user) { create(:user) }
let_it_be(:assignee) { create(:user) }
let_it_be(:merge_request) { create(:merge_request, author: user, source_project: project, assignees: [assignee]) }
let_it_be_with_reload(:merge_request) { create(:merge_request, author: user, source_project: project, assignees: [assignee]) }
let_it_be(:old_assignees) { create_list(:user, 3) }
let(:options) { {} }
@ -45,13 +45,27 @@ RSpec.describe MergeRequests::HandleAssigneesChangeService do
service.execute(merge_request, old_assignees, options)
end
let(:note) { merge_request.notes.system.last }
let(:removed_note) { "unassigned #{old_assignees.map(&:to_reference).to_sentence}" }
context 'when unassigning all users' do
before do
merge_request.update!(assignee_ids: [])
end
it 'creates assignee note' do
execute
expect(note).not_to be_nil
expect(note.note).to eq removed_note
end
end
it 'creates assignee note' do
execute
note = merge_request.notes.last
expect(note).not_to be_nil
expect(note.note).to include "assigned to #{assignee.to_reference} and unassigned #{old_assignees.map(&:to_reference).to_sentence}"
expect(note.note).to include "assigned to #{assignee.to_reference} and #{removed_note}"
end
it 'sends email notifications to old and new assignees', :mailer, :sidekiq_inline do

View File

@ -17,6 +17,7 @@ RSpec.describe MergeRequests::UpdateAssigneesService do
description: "FYI #{user2.to_reference}",
assignee_ids: [user3.id],
source_project: project,
target_project: project,
author: create(:user))
end
@ -24,6 +25,7 @@ RSpec.describe MergeRequests::UpdateAssigneesService do
project.add_maintainer(user)
project.add_developer(user2)
project.add_developer(user3)
merge_request.errors.clear
end
let(:service) { described_class.new(project: project, current_user: user, params: opts) }
@ -32,35 +34,53 @@ RSpec.describe MergeRequests::UpdateAssigneesService do
describe 'execute' do
def update_merge_request
service.execute(merge_request)
merge_request.reload
end
context 'when the parameters are valid' do
context 'when using sentinel values' do
let(:opts) { { assignee_ids: [0] } }
it 'removes all assignees' do
expect { update_merge_request }.to change(merge_request, :assignees).to([])
end
shared_examples 'removing all assignees' do
it 'removes all assignees' do
expect(update_merge_request).to have_attributes(assignees: be_empty, errors: be_none)
end
context 'the assignee_ids parameter is the empty list' do
let(:opts) { { assignee_ids: [] } }
it 'removes all assignees' do
expect { update_merge_request }.to change(merge_request, :assignees).to([])
end
end
it 'updates the MR, and queues the more expensive work for later' do
it 'enqueues the correct background work' do
expect_next(MergeRequests::HandleAssigneesChangeService, project: project, current_user: user) do |service|
expect(service)
.to receive(:async_execute)
.with(merge_request, [user3], execute_hooks: true)
end
update_merge_request
end
end
context 'when the parameters are valid' do
context 'when using sentinel values' do
context 'when using assignee_ids' do
let(:opts) { { assignee_ids: [0] } }
it_behaves_like 'removing all assignees'
end
context 'when using assignee_id' do
let(:opts) { { assignee_id: 0 } }
it_behaves_like 'removing all assignees'
end
end
context 'when the assignee_ids parameter is the empty list' do
let(:opts) { { assignee_ids: [] } }
it_behaves_like 'removing all assignees'
end
it 'updates the MR, and queues the more expensive work for later' do
expect_next(MergeRequests::HandleAssigneesChangeService, project: project, current_user: user) do |service|
expect(service)
.to receive(:async_execute).with(merge_request, [user3], execute_hooks: true)
end
expect { update_merge_request }
.to change(merge_request, :assignees).to([user2])
.to change { merge_request.reload.assignees }.from([user3]).to([user2])
.and change(merge_request, :updated_at)
.and change(merge_request, :updated_by).to(user)
end
@ -68,7 +88,10 @@ RSpec.describe MergeRequests::UpdateAssigneesService do
it 'does not update the assignees if they do not have access' do
opts[:assignee_ids] = [create(:user).id]
expect { update_merge_request }.not_to change(merge_request, :assignee_ids)
expect(update_merge_request).to have_attributes(
assignees: [user3],
errors: be_any
)
end
it 'is more efficient than using the full update-service' do

View File

@ -0,0 +1,28 @@
# frozen_string_literal: true

# Shared setup for feature specs exercising a group-level integration page:
# signs in a group owner and provides navigation helpers.
RSpec.shared_context 'group integration activation' do
  include_context 'instance and group integration activation'

  let_it_be(:group) { create(:group) }
  let_it_be(:user) { create(:user) }

  before_all { group.add_owner(user) }

  before { sign_in(user) }

  # Opens the group's integrations settings index page.
  def visit_group_integrations
    visit group_settings_integrations_path(group)
  end

  # Opens the settings form for the integration whose link text is `name`.
  def visit_group_integration(name)
    visit_group_integrations

    page.within('#content-body') { click_link(name) }
  end
end

View File

@ -0,0 +1,18 @@
# frozen_string_literal: true

# Form-submission helpers shared by instance- and group-level integration
# feature specs, where saving the form pops a confirmation modal.
RSpec.shared_context 'instance and group integration activation' do
  include_context 'integration activation'

  def click_save_changes_button
    click_button('Save changes')
  end

  def click_save_settings_modal
    click_button('Save')
  end

  # Submits the form, then confirms the save through the modal dialog.
  def click_save_integration
    click_save_changes_button
    click_save_settings_modal
  end
end

View File

@ -0,0 +1,24 @@
# frozen_string_literal: true

# Shared setup for feature specs exercising an instance-level (Admin Area)
# integration page: signs in an admin with admin mode enabled and provides
# navigation helpers.
RSpec.shared_context 'instance integration activation' do
  include_context 'instance and group integration activation'

  let_it_be(:user) { create(:user, :admin) }

  before do
    sign_in(user)
    gitlab_enable_admin_mode_sign_in(user)
  end

  # Opens the instance-wide integrations settings index page.
  def visit_instance_integrations
    visit integrations_admin_application_settings_path
  end

  # Opens the settings form for the integration whose link text is `name`.
  def visit_instance_integration(name)
    visit_instance_integrations

    page.within('#content-body') { click_link(name) }
  end
end

View File

@ -70,3 +70,9 @@ Integration.available_services_names.each do |service|
end
end
end
# Base helper for integration feature specs: toggles the integration's
# "Active" state by clicking its checkbox label.
RSpec.shared_context 'integration activation' do
  def click_active_checkbox
    page.find('label', text: 'Active').click
  end
end

View File

@ -1,6 +1,8 @@
# frozen_string_literal: true
RSpec.shared_context 'project service activation' do
include_context 'integration activation'
let(:project) { create(:project) }
let(:user) { create(:user) }
@ -21,10 +23,6 @@ RSpec.shared_context 'project service activation' do
end
end
def click_active_checkbox
find('label', text: 'Active').click
end
def click_save_integration
click_button('Save changes')
end

View File

@ -0,0 +1,34 @@
# frozen_string_literal: true

# Shared examples for the Mattermost Slash Commands integration form.
# The including spec must define `edit_path` (the settings page URL),
# navigate to the form beforehand, and pull in the helpers from the
# 'integration activation' shared contexts.
RSpec.shared_examples 'user activates the Mattermost Slash Command integration' do
  it 'shows a help message' do
    expect(page).to have_content('Use this service to perform common')
  end

  it 'shows a token placeholder' do
    placeholder = find_field('service_token')['placeholder']

    expect(placeholder).to eq('XXxxXXxxXXxxXXxxXXxxXXxx')
  end

  it 'redirects to the integrations page after saving but not activating' do
    fill_in 'service_token', with: ('a'..'z').to_a.join
    click_active_checkbox
    click_save_integration

    expect(current_path).to eq(edit_path)
    expect(page).to have_content('Mattermost slash commands settings saved, but not active.')
  end

  it 'redirects to the integrations page after activating' do
    fill_in 'service_token', with: ('a'..'z').to_a.join
    click_save_integration

    expect(current_path).to eq(edit_path)
    expect(page).to have_content('Mattermost slash commands settings saved and active.')
  end
end

View File

@ -193,5 +193,18 @@ RSpec.describe ContainerExpirationPolicyWorker do
end
end
end
context 'process stale ongoing cleanups' do
let_it_be(:stuck_cleanup) { create(:container_repository, :cleanup_ongoing, expiration_policy_started_at: 1.day.ago) }
let_it_be(:container_repository) { create(:container_repository, :cleanup_scheduled) }
let_it_be(:container_repository) { create(:container_repository, :cleanup_unfinished) }
it 'set them as unfinished' do
expect { subject }
.to change { ContainerRepository.cleanup_ongoing.count }.from(1).to(0)
.and change { ContainerRepository.cleanup_unfinished.count }.from(1).to(2)
expect(stuck_cleanup.reload).to be_cleanup_unfinished
end
end
end
end

View File

@ -1,59 +0,0 @@
# frozen_string_literal: true
require 'spec_helper'
RSpec.describe MergeRequests::AssigneesChangeWorker do
include AfterNextHelpers
let_it_be(:merge_request) { create(:merge_request) }
let_it_be(:user) { create(:user) }
let_it_be(:old_assignees) { create_list(:user, 3) }
let(:user_ids) { old_assignees.map(&:id).to_a }
let(:worker) { described_class.new }
it_behaves_like 'an idempotent worker' do
let(:job_args) { [merge_request.id, user.id, user_ids] }
end
describe '#perform' do
context 'with a non-existing merge request' do
it 'does nothing' do
expect(::MergeRequests::HandleAssigneesChangeService).not_to receive(:new)
worker.perform(non_existing_record_id, user.id, user_ids)
end
end
context 'with a non-existing user' do
it 'does nothing' do
expect(::MergeRequests::HandleAssigneesChangeService).not_to receive(:new)
worker.perform(merge_request.id, non_existing_record_id, user_ids)
end
end
context 'when there are no changes' do
it 'does nothing' do
expect(::MergeRequests::HandleAssigneesChangeService).not_to receive(:new)
worker.perform(merge_request.id, user.id, merge_request.assignee_ids)
end
end
context 'when the old users cannot be found' do
it 'does nothing' do
expect(::MergeRequests::HandleAssigneesChangeService).not_to receive(:new)
worker.perform(merge_request.id, user.id, [non_existing_record_id])
end
end
it 'gets MergeRequests::UpdateAssigneesService to handle the changes' do
expect_next(::MergeRequests::HandleAssigneesChangeService)
.to receive(:execute).with(merge_request, match_array(old_assignees), execute_hooks: true)
worker.perform(merge_request.id, user.id, user_ids)
end
end
end

View File

@ -14,5 +14,10 @@ RSpec.describe WebHookWorker do
subject.perform(project_hook.id, data, hook_name)
end
it_behaves_like 'worker with data consistency',
described_class,
feature_flag: :load_balancing_for_web_hook_worker,
data_consistency: :delayed
end
end