Add latest changes from gitlab-org/gitlab@master
|
|
@ -2,3 +2,4 @@ VERSION merge=ours
|
|||
Dangerfile gitlab-language=ruby
|
||||
*.pdf filter=lfs diff=lfs merge=lfs -text
|
||||
*.rb diff=ruby
|
||||
workhorse/testdata/*.pdf -filter -diff -merge
|
||||
|
|
|
|||
|
|
@ -1,5 +1,6 @@
|
|||
<script>
|
||||
import { GlLink, GlTable } from '@gitlab/ui';
|
||||
import { last } from 'lodash';
|
||||
import { __ } from '~/locale';
|
||||
import Tracking from '~/tracking';
|
||||
import { numberToHumanSize } from '~/lib/utils/number_utils';
|
||||
|
|
@ -27,30 +28,41 @@ export default {
|
|||
return this.packageFiles.map(pf => ({
|
||||
...pf,
|
||||
size: this.formatSize(pf.size),
|
||||
pipeline: last(pf.pipelines),
|
||||
}));
|
||||
},
|
||||
showCommitColumn() {
|
||||
return this.filesTableRows.some(row => Boolean(row.pipeline?.id));
|
||||
},
|
||||
filesTableHeaderFields() {
|
||||
return [
|
||||
{
|
||||
key: 'name',
|
||||
label: __('Name'),
|
||||
tdClass: 'gl-display-flex gl-align-items-center',
|
||||
},
|
||||
{
|
||||
key: 'commit',
|
||||
label: __('Commit'),
|
||||
hide: !this.showCommitColumn,
|
||||
},
|
||||
{
|
||||
key: 'size',
|
||||
label: __('Size'),
|
||||
},
|
||||
{
|
||||
key: 'created',
|
||||
label: __('Created'),
|
||||
class: 'gl-text-right',
|
||||
},
|
||||
].filter(c => !c.hide);
|
||||
},
|
||||
},
|
||||
methods: {
|
||||
formatSize(size) {
|
||||
return numberToHumanSize(size);
|
||||
},
|
||||
},
|
||||
filesTableHeaderFields: [
|
||||
{
|
||||
key: 'name',
|
||||
label: __('Name'),
|
||||
tdClass: 'gl-display-flex gl-align-items-center',
|
||||
},
|
||||
{
|
||||
key: 'size',
|
||||
label: __('Size'),
|
||||
},
|
||||
{
|
||||
key: 'created',
|
||||
label: __('Created'),
|
||||
class: 'gl-text-right',
|
||||
},
|
||||
],
|
||||
};
|
||||
</script>
|
||||
|
||||
|
|
@ -58,14 +70,14 @@ export default {
|
|||
<div>
|
||||
<h3 class="gl-font-lg gl-mt-5">{{ __('Files') }}</h3>
|
||||
<gl-table
|
||||
:fields="$options.filesTableHeaderFields"
|
||||
:fields="filesTableHeaderFields"
|
||||
:items="filesTableRows"
|
||||
:tbody-tr-attr="{ 'data-testid': 'file-row' }"
|
||||
>
|
||||
<template #cell(name)="{ item }">
|
||||
<gl-link
|
||||
:href="item.download_path"
|
||||
class="gl-relative"
|
||||
class="gl-relative gl-text-gray-500"
|
||||
data-testid="download-link"
|
||||
@click="$emit('download-file')"
|
||||
>
|
||||
|
|
@ -78,6 +90,15 @@ export default {
|
|||
</gl-link>
|
||||
</template>
|
||||
|
||||
<template #cell(commit)="{item}">
|
||||
<gl-link
|
||||
:href="item.pipeline.project.commit_url"
|
||||
class="gl-text-gray-500"
|
||||
data-testid="commit-link"
|
||||
>{{ item.pipeline.git_commit_message }}</gl-link
|
||||
>
|
||||
</template>
|
||||
|
||||
<template #cell(created)="{ item }">
|
||||
<time-ago-tooltip :time="item.created_at" />
|
||||
</template>
|
||||
|
|
|
|||
|
|
@ -12,16 +12,18 @@
|
|||
}
|
||||
}
|
||||
|
||||
.runner-status-online {
|
||||
color: $green-600;
|
||||
}
|
||||
.runner-status {
|
||||
&.runner-status-online {
|
||||
background-color: $green-600;
|
||||
}
|
||||
|
||||
.runner-status-offline {
|
||||
color: $gray-darkest;
|
||||
}
|
||||
&.runner-status-offline {
|
||||
background-color: $gray-darkest;
|
||||
}
|
||||
|
||||
.runner-status-paused {
|
||||
color: $red-500;
|
||||
&.runner-status-paused {
|
||||
background-color: $red-500;
|
||||
}
|
||||
}
|
||||
|
||||
.runner {
|
||||
|
|
|
|||
|
|
@ -8,14 +8,14 @@ module Ci
|
|||
status = runner.status
|
||||
case status
|
||||
when :not_connected
|
||||
content_tag(:span, title: "New runner. Has not connected yet") do
|
||||
content_tag(:span, title: _("New runner. Has not connected yet")) do
|
||||
sprite_icon("warning-solid", size: 24, css_class: "gl-vertical-align-bottom!")
|
||||
end
|
||||
|
||||
when :online, :offline, :paused
|
||||
content_tag :i, nil,
|
||||
class: "fa fa-circle runner-status-#{status}",
|
||||
title: "Runner is #{status}, last contact was #{time_ago_in_words(runner.contacted_at)} ago"
|
||||
content_tag :span, nil,
|
||||
class: "gl-display-inline-block gl-avatar gl-avatar-s16 gl-avatar-circle runner-status runner-status-#{status}",
|
||||
title: _("Runner is %{status}, last contact was %{runner_contact} ago") % { status: status, runner_contact: time_ago_in_words(runner.contacted_at) }
|
||||
end
|
||||
end
|
||||
|
||||
|
|
|
|||
|
|
@ -775,12 +775,10 @@ module Ci
|
|||
variables.append(key: 'CI_MERGE_REQUEST_SOURCE_BRANCH_SHA', value: source_sha.to_s)
|
||||
variables.append(key: 'CI_MERGE_REQUEST_TARGET_BRANCH_SHA', value: target_sha.to_s)
|
||||
|
||||
if Feature.enabled?(:ci_mr_diff_variables, project)
|
||||
diff = self.merge_request_diff
|
||||
if diff.present?
|
||||
variables.append(key: 'CI_MERGE_REQUEST_DIFF_ID', value: diff.id.to_s)
|
||||
variables.append(key: 'CI_MERGE_REQUEST_DIFF_BASE_SHA', value: diff.base_commit_sha)
|
||||
end
|
||||
diff = self.merge_request_diff
|
||||
if diff.present?
|
||||
variables.append(key: 'CI_MERGE_REQUEST_DIFF_ID', value: diff.id.to_s)
|
||||
variables.append(key: 'CI_MERGE_REQUEST_DIFF_BASE_SHA', value: diff.base_commit_sha)
|
||||
end
|
||||
|
||||
variables.concat(merge_request.predefined_variables)
|
||||
|
|
|
|||
|
|
@ -18,6 +18,14 @@ class ProtectedBranch::PushAccessLevel < ApplicationRecord
|
|||
end
|
||||
end
|
||||
|
||||
def check_access(user)
|
||||
if Feature.enabled?(:deploy_keys_on_protected_branches, project) && user && deploy_key.present?
|
||||
return true if user.can?(:read_project, project) && enabled_deploy_key_for_user?(deploy_key, user)
|
||||
end
|
||||
|
||||
super
|
||||
end
|
||||
|
||||
private
|
||||
|
||||
def validate_deploy_key_membership
|
||||
|
|
@ -27,4 +35,10 @@ class ProtectedBranch::PushAccessLevel < ApplicationRecord
|
|||
self.errors.add(:deploy_key, 'is not enabled for this project')
|
||||
end
|
||||
end
|
||||
|
||||
def enabled_deploy_key_for_user?(deploy_key, user)
|
||||
return false unless deploy_key.user_id == user.id
|
||||
|
||||
DeployKey.with_write_access_for_project(protected_branch.project, deploy_key: deploy_key).any?
|
||||
end
|
||||
end
|
||||
|
|
|
|||
|
|
@ -414,6 +414,10 @@ class Service < ApplicationRecord
|
|||
!instance? && !group_id
|
||||
end
|
||||
|
||||
def parent
|
||||
project || group
|
||||
end
|
||||
|
||||
# Returns a hash of the properties that have been assigned a new value since last save,
|
||||
# indicating their original values (attr => original value).
|
||||
# ActiveRecord does not provide a mechanism to track changes in serialized keys,
|
||||
|
|
|
|||
|
|
@ -396,7 +396,7 @@
|
|||
:idempotent:
|
||||
:tags: []
|
||||
- :name: cronjob:schedule_merge_request_cleanup_refs
|
||||
:feature_category: :source_code_management
|
||||
:feature_category: :code_review
|
||||
:has_external_dependencies:
|
||||
:urgency: :low
|
||||
:resource_boundary: :unknown
|
||||
|
|
@ -404,7 +404,7 @@
|
|||
:idempotent: true
|
||||
:tags: []
|
||||
- :name: cronjob:schedule_migrate_external_diffs
|
||||
:feature_category: :source_code_management
|
||||
:feature_category: :code_review
|
||||
:has_external_dependencies:
|
||||
:urgency: :low
|
||||
:resource_boundary: :unknown
|
||||
|
|
@ -428,7 +428,7 @@
|
|||
:idempotent:
|
||||
:tags: []
|
||||
- :name: cronjob:stuck_merge_jobs
|
||||
:feature_category: :source_code_management
|
||||
:feature_category: :code_review
|
||||
:has_external_dependencies:
|
||||
:urgency: :low
|
||||
:resource_boundary: :unknown
|
||||
|
|
@ -1426,7 +1426,7 @@
|
|||
:idempotent: true
|
||||
:tags: []
|
||||
- :name: create_note_diff_file
|
||||
:feature_category: :source_code_management
|
||||
:feature_category: :code_review
|
||||
:has_external_dependencies:
|
||||
:urgency: :low
|
||||
:resource_boundary: :unknown
|
||||
|
|
@ -1442,7 +1442,7 @@
|
|||
:idempotent:
|
||||
:tags: []
|
||||
- :name: delete_diff_files
|
||||
:feature_category: :source_code_management
|
||||
:feature_category: :code_review
|
||||
:has_external_dependencies:
|
||||
:urgency: :low
|
||||
:resource_boundary: :unknown
|
||||
|
|
@ -1700,7 +1700,7 @@
|
|||
:idempotent:
|
||||
:tags: []
|
||||
- :name: merge_request_cleanup_refs
|
||||
:feature_category: :source_code_management
|
||||
:feature_category: :code_review
|
||||
:has_external_dependencies:
|
||||
:urgency: :low
|
||||
:resource_boundary: :unknown
|
||||
|
|
@ -1708,7 +1708,7 @@
|
|||
:idempotent: true
|
||||
:tags: []
|
||||
- :name: merge_request_mergeability_check
|
||||
:feature_category: :source_code_management
|
||||
:feature_category: :code_review
|
||||
:has_external_dependencies:
|
||||
:urgency: :low
|
||||
:resource_boundary: :unknown
|
||||
|
|
@ -1732,7 +1732,7 @@
|
|||
:idempotent: true
|
||||
:tags: []
|
||||
- :name: migrate_external_diffs
|
||||
:feature_category: :source_code_management
|
||||
:feature_category: :code_review
|
||||
:has_external_dependencies:
|
||||
:urgency: :low
|
||||
:resource_boundary: :unknown
|
||||
|
|
@ -1756,7 +1756,7 @@
|
|||
:idempotent:
|
||||
:tags: []
|
||||
- :name: new_merge_request
|
||||
:feature_category: :source_code_management
|
||||
:feature_category: :code_review
|
||||
:has_external_dependencies:
|
||||
:urgency: :high
|
||||
:resource_boundary: :cpu
|
||||
|
|
@ -2072,7 +2072,7 @@
|
|||
:idempotent: true
|
||||
:tags: []
|
||||
- :name: update_merge_requests
|
||||
:feature_category: :source_code_management
|
||||
:feature_category: :code_review
|
||||
:has_external_dependencies:
|
||||
:urgency: :high
|
||||
:resource_boundary: :cpu
|
||||
|
|
|
|||
|
|
@ -3,7 +3,7 @@
|
|||
class CreateNoteDiffFileWorker # rubocop:disable Scalability/IdempotentWorker
|
||||
include ApplicationWorker
|
||||
|
||||
feature_category :source_code_management
|
||||
feature_category :code_review
|
||||
|
||||
def perform(diff_note_id)
|
||||
diff_note = DiffNote.find(diff_note_id)
|
||||
|
|
|
|||
|
|
@ -3,7 +3,7 @@
|
|||
class DeleteDiffFilesWorker # rubocop:disable Scalability/IdempotentWorker
|
||||
include ApplicationWorker
|
||||
|
||||
feature_category :source_code_management
|
||||
feature_category :code_review
|
||||
|
||||
# rubocop: disable CodeReuse/ActiveRecord
|
||||
def perform(merge_request_diff_id)
|
||||
|
|
|
|||
|
|
@ -3,7 +3,7 @@
|
|||
class MergeRequestCleanupRefsWorker
|
||||
include ApplicationWorker
|
||||
|
||||
feature_category :source_code_management
|
||||
feature_category :code_review
|
||||
idempotent!
|
||||
|
||||
def perform(merge_request_id)
|
||||
|
|
|
|||
|
|
@ -3,7 +3,7 @@
|
|||
class MergeRequestMergeabilityCheckWorker
|
||||
include ApplicationWorker
|
||||
|
||||
feature_category :source_code_management
|
||||
feature_category :code_review
|
||||
idempotent!
|
||||
|
||||
def perform(merge_request_id)
|
||||
|
|
|
|||
|
|
@ -3,7 +3,7 @@
|
|||
class MigrateExternalDiffsWorker # rubocop:disable Scalability/IdempotentWorker
|
||||
include ApplicationWorker
|
||||
|
||||
feature_category :source_code_management
|
||||
feature_category :code_review
|
||||
|
||||
def perform(merge_request_diff_id)
|
||||
diff = MergeRequestDiff.find_by_id(merge_request_diff_id)
|
||||
|
|
|
|||
|
|
@ -4,7 +4,7 @@ class NewMergeRequestWorker # rubocop:disable Scalability/IdempotentWorker
|
|||
include ApplicationWorker
|
||||
include NewIssuable
|
||||
|
||||
feature_category :source_code_management
|
||||
feature_category :code_review
|
||||
urgency :high
|
||||
worker_resource_boundary :cpu
|
||||
weight 2
|
||||
|
|
|
|||
|
|
@ -4,7 +4,7 @@ class ScheduleMergeRequestCleanupRefsWorker
|
|||
include ApplicationWorker
|
||||
include CronjobQueue # rubocop:disable Scalability/CronWorkerContext
|
||||
|
||||
feature_category :source_code_management
|
||||
feature_category :code_review
|
||||
idempotent!
|
||||
|
||||
# Based on existing data, MergeRequestCleanupRefsWorker can run 3 jobs per
|
||||
|
|
|
|||
|
|
@ -10,7 +10,7 @@ class ScheduleMigrateExternalDiffsWorker # rubocop:disable Scalability/Idempoten
|
|||
|
||||
include Gitlab::ExclusiveLeaseHelpers
|
||||
|
||||
feature_category :source_code_management
|
||||
feature_category :code_review
|
||||
|
||||
def perform
|
||||
in_lock(self.class.name.underscore, ttl: 2.hours, retries: 0) do
|
||||
|
|
|
|||
|
|
@ -4,7 +4,7 @@ class StuckMergeJobsWorker # rubocop:disable Scalability/IdempotentWorker
|
|||
include ApplicationWorker
|
||||
include CronjobQueue # rubocop:disable Scalability/CronWorkerContext
|
||||
|
||||
feature_category :source_code_management
|
||||
feature_category :code_review
|
||||
|
||||
def self.logger
|
||||
Gitlab::AppLogger
|
||||
|
|
|
|||
|
|
@ -3,7 +3,7 @@
|
|||
class UpdateMergeRequestsWorker # rubocop:disable Scalability/IdempotentWorker
|
||||
include ApplicationWorker
|
||||
|
||||
feature_category :source_code_management
|
||||
feature_category :code_review
|
||||
urgency :high
|
||||
worker_resource_boundary :cpu
|
||||
weight 3
|
||||
|
|
|
|||
|
|
@ -0,0 +1,5 @@
|
|||
---
|
||||
title: Update package_file table to display commits when present
|
||||
merge_request: 48882
|
||||
author:
|
||||
type: changed
|
||||
|
|
@ -0,0 +1,5 @@
|
|||
---
|
||||
title: Add Merge Request diff CI variables
|
||||
merge_request: 48764
|
||||
author: Jonas Hahnfeld
|
||||
type: added
|
||||
|
|
@ -0,0 +1,5 @@
|
|||
---
|
||||
title: Replace fa-cirlce in runners helper
|
||||
merge_request: 48981
|
||||
author:
|
||||
type: changed
|
||||
|
|
@ -1,8 +0,0 @@
|
|||
---
|
||||
name: ci_mr_diff_variables
|
||||
introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/46621
|
||||
rollout_issue_url: https://gitlab.com/gitlab-org/gitlab/-/issues/285468
|
||||
milestone: '13.7'
|
||||
type: development
|
||||
group: group::pipeline authoring
|
||||
default_enabled: false
|
||||
|
|
@ -1,7 +1,7 @@
|
|||
---
|
||||
name: merge_request_widget_graphql
|
||||
introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/38311
|
||||
rollout_issue_url:
|
||||
rollout_issue_url: https://gitlab.com/gitlab-org/gitlab/-/issues/267560
|
||||
milestone: '13.3'
|
||||
type: development
|
||||
group: group::code review
|
||||
|
|
|
|||
|
|
@ -19,6 +19,8 @@ const IS_EE = require('./helpers/is_ee_env');
|
|||
const DEV_SERVER_HOST = process.env.DEV_SERVER_HOST || 'localhost';
|
||||
const DEV_SERVER_PORT = parseInt(process.env.DEV_SERVER_PORT, 10) || 3808;
|
||||
const DEV_SERVER_PUBLIC_ADDR = process.env.DEV_SERVER_PUBLIC_ADDR;
|
||||
const DEV_SERVER_ALLOWED_HOSTS =
|
||||
process.env.DEV_SERVER_ALLOWED_HOSTS && process.env.DEV_SERVER_ALLOWED_HOSTS.split(',');
|
||||
const DEV_SERVER_HTTPS = process.env.DEV_SERVER_HTTPS && process.env.DEV_SERVER_HTTPS !== 'false';
|
||||
const DEV_SERVER_LIVERELOAD = IS_DEV_SERVER && process.env.DEV_SERVER_LIVERELOAD !== 'false';
|
||||
const WEBPACK_REPORT = process.env.WEBPACK_REPORT && process.env.WEBPACK_REPORT !== 'false';
|
||||
|
|
@ -568,6 +570,7 @@ module.exports = {
|
|||
host: DEV_SERVER_HOST,
|
||||
port: DEV_SERVER_PORT,
|
||||
public: DEV_SERVER_PUBLIC_ADDR,
|
||||
allowedHosts: DEV_SERVER_ALLOWED_HOSTS,
|
||||
https: DEV_SERVER_HTTPS,
|
||||
contentBase: false,
|
||||
stats: 'errors-only',
|
||||
|
|
|
|||
|
|
@ -92,6 +92,8 @@ Kubernetes-specific environment variables are detailed in the
|
|||
| `CI_MERGE_REQUEST_TARGET_BRANCH_SHA` | 11.9 | all | The HEAD SHA of the target branch of the merge request if [the pipelines are for merge requests](../merge_request_pipelines/index.md). Available only if `only: [merge_requests]` or [`rules`](../yaml/README.md#rules) syntax is used, the merge request is created, and the pipeline is a [merged result pipeline](../merge_request_pipelines/pipelines_for_merged_results/index.md). **(PREMIUM)** |
|
||||
| `CI_MERGE_REQUEST_TITLE` | 11.9 | all | The title of the merge request if [the pipelines are for merge requests](../merge_request_pipelines/index.md). Available only if `only: [merge_requests]` or [`rules`](../yaml/README.md#rules) syntax is used and the merge request is created. |
|
||||
| `CI_MERGE_REQUEST_EVENT_TYPE` | 12.3 | all | The event type of the merge request, if [the pipelines are for merge requests](../merge_request_pipelines/index.md). Can be `detached`, `merged_result` or `merge_train`. |
|
||||
| `CI_MERGE_REQUEST_DIFF_ID` | 13.7 | all | The version of the merge request diff, if [the pipelines are for merge requests](../merge_request_pipelines/index.md). |
|
||||
| `CI_MERGE_REQUEST_DIFF_BASE_SHA` | 13.7 | all | The base SHA of the merge request diff, if [the pipelines are for merge requests](../merge_request_pipelines/index.md). |
|
||||
| `CI_NODE_INDEX` | 11.5 | all | Index of the job in the job set. If the job is not parallelized, this variable is not set. |
|
||||
| `CI_NODE_TOTAL` | 11.5 | all | Total number of instances of this job running in parallel. If the job is not parallelized, this variable is set to `1`. |
|
||||
| `CI_PAGES_DOMAIN` | 11.8 | all | The configured domain that hosts GitLab Pages. |
|
||||
|
|
|
|||
|
After Width: | Height: | Size: 23 KiB |
|
After Width: | Height: | Size: 22 KiB |
|
After Width: | Height: | Size: 18 KiB |
|
After Width: | Height: | Size: 20 KiB |
|
After Width: | Height: | Size: 17 KiB |
|
After Width: | Height: | Size: 19 KiB |
|
After Width: | Height: | Size: 18 KiB |
|
After Width: | Height: | Size: 20 KiB |
|
After Width: | Height: | Size: 18 KiB |
|
After Width: | Height: | Size: 17 KiB |
|
|
@ -41,3 +41,332 @@ User Load (0.7ms) SELECT "users"."id" FROM "users" WHERE ("users"."id" >= 4165
|
|||
The API of this method is similar to `in_batches`, though it doesn't support
|
||||
all of the arguments that `in_batches` supports. You should always use
|
||||
`each_batch` _unless_ you have a specific need for `in_batches`.
|
||||
|
||||
## Column definition
|
||||
|
||||
`EachBatch` uses the primary key of the model by default for the iteration. This works most of the cases, however in some cases, you might want to use a different column for the iteration.
|
||||
|
||||
```ruby
|
||||
Project.distinct.each_batch(column: :creator_id, of: 10) do |relation|
|
||||
puts User.where(id: relation.select(:creator_id)).map(&:id)
|
||||
end
|
||||
```
|
||||
|
||||
The query above iterates over the project creators and prints them out without duplications.
|
||||
|
||||
NOTE: **Note:**
|
||||
In case the column is not unique (no unique index definition), calling the `distinct` method on the relation is necessary.
|
||||
|
||||
## `EachBatch` in data migrations
|
||||
|
||||
When dealing with data migrations the preferred way to iterate over large volume of data is using `EachBatch`.
|
||||
|
||||
A special case of data migration is a background migration where the actual data modification is executed in a background job. The migration code that determines the data ranges (slices) and schedules the background jobs uses `each_batch`. More info: [background migration scheduling](background_migrations.md#scheduling)
|
||||
|
||||
## Efficient usage of `each_batch`
|
||||
|
||||
`EachBatch` helps iterating over large tables. It's important to highlight that `EachBatch` is not going to magically solve all iteration related performance problems and it might not help at all in some scenarios. From the database point of view, correctly configured database indexes are also necessary to make `EachBatch` perform well.
|
||||
|
||||
### Example 1: Simple iteration
|
||||
|
||||
Let's consider that we want to iterate over the `users` table and print the `User` records to the standard output. The `users` table contains millions of records, thus running one query to fetch the users will likely time out.
|
||||
|
||||

|
||||
|
||||
This is a simplified version of the `users` table which contains several rows. We have a few smaller gaps in the `id` column to make the example a bit more realistic (a few records were already deleted). Currently we have one index on the `id` field.
|
||||
|
||||
Loading all users into memory (avoid):
|
||||
|
||||
```ruby
|
||||
users = User.all
|
||||
|
||||
users.each { |user| puts user.inspect }
|
||||
```
|
||||
|
||||
Use `each_batch`:
|
||||
|
||||
```ruby
|
||||
# Note: for this example I picked 5 as the batch size, the default is 1_000
|
||||
User.each_batch(of: 5) do |relation|
|
||||
relation.each { |user| puts user.inspect }
|
||||
end
|
||||
```
|
||||
|
||||
#### How does `each_batch` work?
|
||||
|
||||
As the first step, it finds the lowest `id` (start `id`) in the table by executing the following database query:
|
||||
|
||||
```sql
|
||||
SELECT "users"."id" FROM "users" ORDER BY "users"."id" ASC LIMIT 1
|
||||
```
|
||||
|
||||

|
||||
|
||||
Notice that the query only reads data from the index (`INDEX ONLY SCAN`), the table is not accessed. Database indexes are sorted so taking out the first item is a very cheap operation.
|
||||
|
||||
The next step is to find the next `id` (end `id`) which should respect the batch size configuration. In this example we used batch size of 5. `EachBatch` uses the `OFFSET` clause to get a "shifted" `id` value.
|
||||
|
||||
```sql
|
||||
SELECT "users"."id" FROM "users" WHERE "users"."id" >= 1 ORDER BY "users"."id" ASC LIMIT 1 OFFSET 5
|
||||
```
|
||||
|
||||

|
||||
|
||||
Again, the query only looks into the index. The `OFFSET 5` takes out the sixth `id` value: this query reads a maximum of six items from the index regardless of the table size or the iteration count.
|
||||
|
||||
At this point we know the `id` range for the first batch. Now it's time to construct the query for the `relation` block.
|
||||
|
||||
```sql
|
||||
SELECT "users".* FROM "users" WHERE "users"."id" >= 1 AND "users"."id" < 302
|
||||
```
|
||||
|
||||

|
||||
|
||||
Notice the `<` sign. Previously six items were read from the index and in this query the last value is "excluded". The query will look at the index to get the location of the five `user` rows on the disk and read the rows from the table. The returned array is processed in Ruby.
|
||||
|
||||
The first iteration is done. For the next iteration, the last `id` value is reused from the previous iteration in order to find out the next end `id` value.
|
||||
|
||||
```sql
|
||||
SELECT "users"."id" FROM "users" WHERE "users"."id" >= 302 ORDER BY "users"."id" ASC LIMIT 1 OFFSET 5
|
||||
```
|
||||
|
||||

|
||||
|
||||
Now we can easily construct the `users` query for the second iteration.
|
||||
|
||||
```sql
|
||||
SELECT "users".* FROM "users" WHERE "users"."id" >= 302 AND "users"."id" < 353
|
||||
```
|
||||
|
||||

|
||||
|
||||
### Example 2: Iteration with filters
|
||||
|
||||
Building on top of the previous example, we want to print users with zero sign-in count. We keep track of the number of sign-ins in the `sign_in_count` column so we write the following code:
|
||||
|
||||
```ruby
|
||||
users = User.where(sign_in_count: 0)
|
||||
|
||||
users.each_batch(of: 5) do |relation|
|
||||
relation.each { |user| puts user.inspect }
|
||||
end
|
||||
```
|
||||
|
||||
`each_batch` will produce the following SQL query for the start `id` value:
|
||||
|
||||
```sql
|
||||
SELECT "users"."id" FROM "users" WHERE "users"."sign_in_count" = 0 ORDER BY "users"."id" ASC LIMIT 1
|
||||
```
|
||||
|
||||
Selecting only the `id` column and ordering by `id` is going to "force" the database to use the index on the `id` (primary key index) column, however we also have an extra condition on the `sign_in_count` column. The column is not part of the index, so the database needs to look into the actual table to find the first matching row.
|
||||
|
||||

|
||||
|
||||
NOTE: **Important:**
|
||||
The number of scanned rows depends on the data distribution in the table.
|
||||
|
||||
- Best case scenario: the first user was never logged in. The database reads only one row.
|
||||
- Worst case scenario: all users were logged in at least once. The database reads all rows.
|
||||
|
||||
In this particular example the database had to read 10 rows (regardless of our batch size setting) to determine the first `id` value. In a "real-world" application it's hard to predict whether the filtering is going to cause problems or not. In case of GitLab, verifying the data on a production replica is a good start, but keep in mind that data distribution on GitLab.com can be different from self-managed instances.
|
||||
|
||||
#### Improve filtering with `each_batch`
|
||||
|
||||
##### Specialized conditional index
|
||||
|
||||
```sql
|
||||
CREATE INDEX index_on_users_never_logged_in ON users (id) WHERE sign_in_count = 0
|
||||
```
|
||||
|
||||
This is how our table and the newly created index looks like:
|
||||
|
||||

|
||||
|
||||
This index definition covers the conditions on the `id` and `sign_in_count` columns thus makes the `each_batch` queries very effective (similar to the simple iteration example).
|
||||
|
||||
It's rare when a user was never signed in so we anticipate small index size. Including only the `id` in the index definition also helps keeping the index size small.
|
||||
|
||||
##### Index on columns
|
||||
|
||||
Later on we might want to iterate over the table filtering for different `sign_in_count` values, in those cases we cannot use the previously suggested conditional index because the `WHERE` condition does not match with our new filter (`sign_in_count > 10`).
|
||||
|
||||
To address this problem, we have two options:
|
||||
|
||||
- Create another, conditional index to cover the new query.
|
||||
- Replace the index with more generalized configuration.
|
||||
|
||||
NOTE: **Note:**
|
||||
Having multiple indexes on the same table and on the same columns could be a performance bottleneck when writing data.
|
||||
|
||||
Let's consider the following index (avoid):
|
||||
|
||||
```sql
|
||||
CREATE INDEX index_on_users_never_logged_in ON users (id, sign_in_count)
|
||||
```
|
||||
|
||||
The index definition starts with the `id` column which makes the index very inefficient from data selectivity point of view.
|
||||
|
||||
```sql
|
||||
SELECT "users"."id" FROM "users" WHERE "users"."sign_in_count" = 0 ORDER BY "users"."id" ASC LIMIT 1
|
||||
```
|
||||
|
||||
Executing the query above results in an `INDEX ONLY SCAN`. However, the query still needs to iterate over unknown number of entries in the index, and then find the first item where the `sign_in_count` is `0`.
|
||||
|
||||

|
||||
|
||||
We can improve the query significantly by swapping the columns in the index definition (prefer).
|
||||
|
||||
```sql
|
||||
CREATE INDEX index_on_users_never_logged_in ON users (sign_in_count, id)
|
||||
```
|
||||
|
||||

|
||||
|
||||
The following index definition is not going to work well with `each_batch` (avoid).
|
||||
|
||||
```sql
|
||||
CREATE INDEX index_on_users_never_logged_in ON users (sign_in_count)
|
||||
```
|
||||
|
||||
Since `each_batch` builds range queries based on the `id` column, this index cannot be used efficiently. The DB reads the rows from the table or uses a bitmap search where the primary key index is also read.
|
||||
|
||||
##### "Slow" iteration
|
||||
|
||||
Slow iteration means that we use a good index configuration to iterate over the table and apply filtering on the yielded relation.
|
||||
|
||||
```ruby
|
||||
User.each_batch(of: 5) do |relation|
|
||||
relation.where(sign_in_count: 0).each { |user| puts user.inspect }
|
||||
end
|
||||
```
|
||||
|
||||
The iteration uses the primary key index (on the `id` column) which makes it safe from statement
|
||||
timeouts. The filter (`sign_in_count: 0`) is applied on the `relation` where the `id` is already constrained (range). The number of rows are limited.
|
||||
|
||||
Slow iteration generally takes more time to finish. The iteration count is higher and
|
||||
one iteration could yield fewer records than the batch size. Iterations may even yield
|
||||
0 records. This is not an optimal solution; however, in some cases (especially when
|
||||
dealing with large tables) this is the only viable option.
|
||||
|
||||
### Using Subqueries
|
||||
|
||||
Using subqueries in your `each_batch` query does not work well in most cases. Consider the following example:
|
||||
|
||||
```ruby
|
||||
projects = Project.where(creator_id: Issue.where(confidential: true).select(:author_id))
|
||||
|
||||
projects.each_batch do |relation|
|
||||
# do something
|
||||
end
|
||||
```
|
||||
|
||||
The iteration uses the `id` column of the `projects` table. The batching does not affect the subquery.
|
||||
This means for each iteration, the subquery is executed by the database. This adds a constant "load"
|
||||
on the query which often ends up in statement timeouts. We have an unknown number of confidential
|
||||
issues, the execution time and the accessed database rows depends on the data distribution in the
|
||||
`issues` table.
|
||||
|
||||
NOTE: **Note:**
|
||||
Using subqueries works only when the subquery returns a small number of rows.
|
||||
|
||||
#### Improving Subqueries
|
||||
|
||||
When dealing with subqueries, a slow iteration approach could work: the filter on `creator_id` can be part of the generated `relation` object.
|
||||
|
||||
```ruby
|
||||
projects = Project.all
|
||||
|
||||
projects.each_batch do |relation|
|
||||
relation.where(creator_id: Issue.where(confidential: true).select(:author_id))
|
||||
end
|
||||
```
|
||||
|
||||
If the query on the `issues` table itself is not performant enough, a nested loop could be constructed. Try to avoid it when possible.
|
||||
|
||||
```ruby
|
||||
projects = Project.all
|
||||
|
||||
projects.each_batch do |relation|
|
||||
issues = Issue.where(confidential: true)
|
||||
|
||||
issues.each_batch do |issues_relation|
|
||||
relation.where(creator_id: issues_relation.select(:author_id))
|
||||
end
|
||||
end
|
||||
```
|
||||
|
||||
If we know that the `issues` table has many more rows than `projects`, it would make sense to flip the queries, where the `issues` table is batched first.
|
||||
|
||||
### Using `JOIN` and `EXISTS`
|
||||
|
||||
When to use `JOINS`:
|
||||
|
||||
- When there's a 1:1 or 1:N relationship between the tables where we know that the joined record
|
||||
(almost) always exists. This works well for "extension-like" tables:
|
||||
- `projects` - `project_settings`
|
||||
- `users` - `user_details`
|
||||
- `users` - `user_statuses`
|
||||
- `LEFT JOIN` works well in this case. Conditions on the joined table need to go to the yielded relation so the iteration is not affected by the data distribution in the joined table.
|
||||
|
||||
Example:
|
||||
|
||||
```ruby
|
||||
users = User.joins("LEFT JOIN personal_access_tokens on personal_access_tokens.user_id = users.id")
|
||||
|
||||
users.each_batch do |relation|
|
||||
relation.where("personal_access_tokens.name = 'name'")
|
||||
end
|
||||
```
|
||||
|
||||
`EXISTS` queries should be added only to the inner `relation` of the `each_batch` query:
|
||||
|
||||
```ruby
|
||||
User.each_batch do |relation|
|
||||
relation.where("EXISTS (SELECT 1 FROM ...")
|
||||
end
|
||||
```
|
||||
|
||||
### Complex queries on the relation object
|
||||
|
||||
When the `relation` object has several extra conditions, the execution plans might become "unstable".
|
||||
|
||||
Example:
|
||||
|
||||
```ruby
|
||||
Issue.each_batch do |relation|
|
||||
relation
|
||||
.joins(:metrics)
|
||||
.joins(:merge_requests_closing_issues)
|
||||
.where("id IN (SELECT ...)")
|
||||
.where(confidential: true)
|
||||
end
|
||||
```
|
||||
|
||||
Here, we expect that the `relation` query reads the `BATCH_SIZE` of user records and then
|
||||
filters down the results according to the provided queries. The planner might decide that
|
||||
using a bitmap index lookup with the index on the `confidential` column is a better way to
|
||||
execute the query. This can cause unexpectedly high amount of rows to be read and the query
|
||||
could time out.
|
||||
|
||||
Problem: we know for sure that the relation is returning maximum `BATCH_SIZE` of records, however the planner does not know this.
|
||||
|
||||
Common table expression (CTE) trick to force the range query to execute first:
|
||||
|
||||
```ruby
|
||||
Issue.each_batch(of: 1000) do |relation|
|
||||
cte = Gitlab::SQL::CTE.new(:batched_relation, relation.limit(1000))
|
||||
|
||||
scope = cte
|
||||
.apply_to(Issue.all)
|
||||
.joins(:metrics)
|
||||
.joins(:merge_requests_closing_issues)
|
||||
.where("id IN (SELECT ...)")
|
||||
.where(confidential: true)
|
||||
|
||||
puts scope.to_a
|
||||
end
|
||||
```
|
||||
|
||||
### `EachBatch` vs `BatchCount`
|
||||
|
||||
When adding new counters for usage ping, the preferred way to count records is using the `Gitlab::Database::BatchCount` class. The iteration logic implemented in `BatchCount` has similar performance characterisics like `EachBatch`. Most of the tips and suggestions for improving `BatchCount` mentioned above applies to `BatchCount` as well.
|
||||
|
|
|
|||
|
|
@ -261,7 +261,7 @@ Ensure your SAML identity provider sends an attribute statement named `Groups` o
|
|||
```
|
||||
|
||||
When SAML SSO is enabled for the top-level group, `Maintainer` and `Owner` level users
|
||||
see a new menu item in group **Settings -> SAML Group Links**. Each group can specify
|
||||
see a new menu item in group **Settings -> SAML Group Links**. Each group (parent or subgroup) can specify
|
||||
one or more group links to map a SAML identity provider group name to a GitLab access level.
|
||||
|
||||

|
||||
|
|
|
|||
|
|
@ -4,7 +4,7 @@ module API
|
|||
class MergeRequestApprovals < ::API::Base
|
||||
before { authenticate_non_get! }
|
||||
|
||||
feature_category :code_review
|
||||
feature_category :source_code_management
|
||||
|
||||
helpers do
|
||||
params :ee_approval_params do
|
||||
|
|
|
|||
|
|
@ -0,0 +1,33 @@
|
|||
# frozen_string_literal: true
|
||||
|
||||
module Gitlab
|
||||
class DeployKeyAccess < UserAccess
|
||||
def initialize(deploy_key, container: nil)
|
||||
@deploy_key = deploy_key
|
||||
@user = deploy_key.user
|
||||
@container = container
|
||||
end
|
||||
|
||||
private
|
||||
|
||||
attr_reader :deploy_key
|
||||
|
||||
def protected_tag_accessible_to?(ref, action:)
|
||||
assert_project!
|
||||
|
||||
# a deploy key can always push a protected tag
|
||||
# (which is not always the case when pushing to a protected branch)
|
||||
true
|
||||
end
|
||||
|
||||
def can_collaborate?(_ref)
|
||||
assert_project!
|
||||
|
||||
project_has_active_user_keys?
|
||||
end
|
||||
|
||||
def project_has_active_user_keys?
|
||||
user.can?(:read_project, project) && DeployKey.with_write_access_for_project(project).id_in(deploy_key.id).exists?
|
||||
end
|
||||
end
|
||||
end
|
||||
|
|
@ -18424,6 +18424,9 @@ msgstr ""
|
|||
msgid "New response for issue #%{issue_iid}:"
|
||||
msgstr ""
|
||||
|
||||
msgid "New runner. Has not connected yet"
|
||||
msgstr ""
|
||||
|
||||
msgid "New runners registration token has been generated!"
|
||||
msgstr ""
|
||||
|
||||
|
|
@ -23541,6 +23544,9 @@ msgstr ""
|
|||
msgid "Runner cannot be assigned to other projects"
|
||||
msgstr ""
|
||||
|
||||
msgid "Runner is %{status}, last contact was %{runner_contact} ago"
|
||||
msgstr ""
|
||||
|
||||
msgid "Runner runs jobs from all unassigned projects"
|
||||
msgstr ""
|
||||
|
||||
|
|
|
|||
|
|
@ -15,16 +15,17 @@ module QA
|
|||
sign_up.fill_new_user_username_field(user.username)
|
||||
sign_up.fill_new_user_email_field(user.email)
|
||||
sign_up.fill_new_user_password_field(user.password)
|
||||
|
||||
# Because invisible_captcha would prevent submitting this form
|
||||
# within 4 seconds, sleep here. This can be removed once we
|
||||
# implement invisible_captcha as an application setting instead
|
||||
# of a feature flag, so we can turn it off while testing.
|
||||
# Issue: https://gitlab.com/gitlab-org/gitlab/-/issues/284113
|
||||
sleep 5
|
||||
|
||||
sign_up.click_new_user_register_button
|
||||
end
|
||||
|
||||
# Because invisible_captcha would prevent submitting this form
|
||||
# within 4 seconds, sleep here. This can be removed once we
|
||||
# implement invisible_captcha as an application setting instead
|
||||
# of a feature flag, so we can turn it off while testing.
|
||||
# Issue: https://gitlab.com/gitlab-org/gitlab/-/issues/284113
|
||||
sleep 4
|
||||
|
||||
Page::Registration::Welcome.perform(&:click_get_started_button_if_available)
|
||||
|
||||
if user.expect_fabrication_success
|
||||
|
|
|
|||
|
|
@ -9,7 +9,12 @@ module QA
|
|||
end
|
||||
|
||||
def click_get_started_button_if_available
|
||||
click_element :get_started_button if has_element?(:get_started_button)
|
||||
if has_element?(:get_started_button)
|
||||
Support::Retrier.retry_until do
|
||||
click_element :get_started_button
|
||||
has_no_element?(:get_started_button)
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
||||
|
|
|
|||
|
|
@ -122,6 +122,7 @@ module QA
|
|||
end
|
||||
|
||||
after do
|
||||
set_require_admin_approval_after_user_signup_via_api(false)
|
||||
@user.remove_via_api! if @user
|
||||
end
|
||||
end
|
||||
|
|
|
|||
|
|
@ -12,6 +12,7 @@ describe('Package Files', () => {
|
|||
const findAllRows = () => wrapper.findAll('[data-testid="file-row"');
|
||||
const findFirstRow = () => findAllRows().at(0);
|
||||
const findFirstRowDownloadLink = () => findFirstRow().find('[data-testid="download-link"');
|
||||
const findFirstRowCommitLink = () => findFirstRow().find('[data-testid="commit-link"');
|
||||
const findFirstRowFileIcon = () => findFirstRow().find(FileIcon);
|
||||
const findFirstRowCreatedAt = () => findFirstRow().find(TimeAgoTooltip);
|
||||
|
||||
|
|
@ -96,4 +97,35 @@ describe('Package Files', () => {
|
|||
expect(findFirstRowCreatedAt().props('time')).toBe(npmFiles[0].created_at);
|
||||
});
|
||||
});
|
||||
|
||||
describe('commit', () => {
|
||||
describe('when package file has a pipeline associated', () => {
|
||||
it('exists', () => {
|
||||
createComponent();
|
||||
|
||||
expect(findFirstRowCommitLink().exists()).toBe(true);
|
||||
});
|
||||
|
||||
it('the link points to the commit url', () => {
|
||||
createComponent();
|
||||
|
||||
expect(findFirstRowCommitLink().attributes('href')).toBe(
|
||||
npmFiles[0].pipelines[0].project.commit_url,
|
||||
);
|
||||
});
|
||||
|
||||
it('the text is git_commit_message', () => {
|
||||
createComponent();
|
||||
|
||||
expect(findFirstRowCommitLink().text()).toBe(npmFiles[0].pipelines[0].git_commit_message);
|
||||
});
|
||||
});
|
||||
describe('when package file has no pipeline associated', () => {
|
||||
it('does not exist', () => {
|
||||
createComponent(mavenFiles);
|
||||
|
||||
expect(findFirstRowCommitLink().exists()).toBe(false);
|
||||
});
|
||||
});
|
||||
});
|
||||
});
|
||||
|
|
|
|||
|
|
@ -76,6 +76,9 @@ export const npmFiles = [
|
|||
id: 2,
|
||||
size: 200,
|
||||
download_path: '/-/package_files/2/download',
|
||||
pipelines: [
|
||||
{ id: 1, project: { commit_url: 'http://foo.bar' }, git_commit_message: 'foo bar baz?' },
|
||||
],
|
||||
},
|
||||
];
|
||||
|
||||
|
|
|
|||
|
|
@ -0,0 +1,66 @@
|
|||
# frozen_string_literal: true
|
||||
|
||||
require 'spec_helper'
|
||||
|
||||
RSpec.describe Gitlab::DeployKeyAccess do
|
||||
let_it_be(:user) { create(:user) }
|
||||
let_it_be(:deploy_key) { create(:deploy_key, user: user) }
|
||||
let(:project) { create(:project, :repository) }
|
||||
let(:protected_branch) { create(:protected_branch, :no_one_can_push, project: project) }
|
||||
|
||||
subject(:access) { described_class.new(deploy_key, container: project) }
|
||||
|
||||
before do
|
||||
project.add_guest(user)
|
||||
create(:deploy_keys_project, :write_access, project: project, deploy_key: deploy_key)
|
||||
end
|
||||
|
||||
describe '#can_create_tag?' do
|
||||
context 'push tag that matches a protected tag pattern via a deploy key' do
|
||||
it 'still pushes that tag' do
|
||||
create(:protected_tag, project: project, name: 'v*')
|
||||
|
||||
expect(access.can_create_tag?('v0.1.2')).to be_truthy
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
describe '#can_push_to_branch?' do
|
||||
context 'push to a protected branch of this project via a deploy key' do
|
||||
before do
|
||||
create(:protected_branch_push_access_level, protected_branch: protected_branch, deploy_key: deploy_key)
|
||||
end
|
||||
|
||||
context 'when the project has active deploy key owned by this user' do
|
||||
it 'returns true' do
|
||||
expect(access.can_push_to_branch?(protected_branch.name)).to be_truthy
|
||||
end
|
||||
end
|
||||
|
||||
context 'when the project has active deploy keys, but not by this user' do
|
||||
let(:deploy_key) { create(:deploy_key, user: create(:user)) }
|
||||
|
||||
it 'returns false' do
|
||||
expect(access.can_push_to_branch?(protected_branch.name)).to be_falsey
|
||||
end
|
||||
end
|
||||
|
||||
context 'when there is another branch no one can push to' do
|
||||
let(:another_branch) { create(:protected_branch, :no_one_can_push, name: 'another_branch', project: project) }
|
||||
|
||||
it 'returns false when trying to push to that other branch' do
|
||||
expect(access.can_push_to_branch?(another_branch.name)).to be_falsey
|
||||
end
|
||||
|
||||
context 'and the deploy key added for the first protected branch is also added for this other branch' do
|
||||
it 'returns true for both protected branches' do
|
||||
create(:protected_branch_push_access_level, protected_branch: another_branch, deploy_key: deploy_key)
|
||||
|
||||
expect(access.can_push_to_branch?(protected_branch.name)).to be_truthy
|
||||
expect(access.can_push_to_branch?(another_branch.name)).to be_truthy
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
||||
|
|
@ -755,10 +755,6 @@ RSpec.describe Ci::Pipeline, :mailer, factory_default: :keep do
|
|||
end
|
||||
|
||||
context 'when pipeline is merge request' do
|
||||
before do
|
||||
stub_feature_flags(ci_mr_diff_variables: false)
|
||||
end
|
||||
|
||||
let(:pipeline) do
|
||||
create(:ci_pipeline, :detached_merge_request_pipeline, merge_request: merge_request)
|
||||
end
|
||||
|
|
@ -799,22 +795,13 @@ RSpec.describe Ci::Pipeline, :mailer, factory_default: :keep do
|
|||
'CI_MERGE_REQUEST_MILESTONE' => milestone.title,
|
||||
'CI_MERGE_REQUEST_LABELS' => labels.map(&:title).sort.join(','),
|
||||
'CI_MERGE_REQUEST_EVENT_TYPE' => 'detached')
|
||||
expect(subject.to_hash.keys).not_to include(
|
||||
%w[CI_MERGE_REQUEST_DIFF_ID
|
||||
CI_MERGE_REQUEST_DIFF_BASE_SHA])
|
||||
end
|
||||
|
||||
context 'when feature flag ci_mr_diff_variables is enabled' do
|
||||
before do
|
||||
stub_feature_flags(ci_mr_diff_variables: true)
|
||||
end
|
||||
|
||||
it 'exposes diff variables' do
|
||||
expect(subject.to_hash)
|
||||
.to include(
|
||||
'CI_MERGE_REQUEST_DIFF_ID' => merge_request.merge_request_diff.id.to_s,
|
||||
'CI_MERGE_REQUEST_DIFF_BASE_SHA' => merge_request.merge_request_diff.base_commit_sha)
|
||||
end
|
||||
it 'exposes diff variables' do
|
||||
expect(subject.to_hash)
|
||||
.to include(
|
||||
'CI_MERGE_REQUEST_DIFF_ID' => merge_request.merge_request_diff.id.to_s,
|
||||
'CI_MERGE_REQUEST_DIFF_BASE_SHA' => merge_request.merge_request_diff.base_commit_sha)
|
||||
end
|
||||
|
||||
context 'without assignee' do
|
||||
|
|
@ -867,22 +854,13 @@ RSpec.describe Ci::Pipeline, :mailer, factory_default: :keep do
|
|||
'CI_MERGE_REQUEST_MILESTONE' => milestone.title,
|
||||
'CI_MERGE_REQUEST_LABELS' => labels.map(&:title).sort.join(','),
|
||||
'CI_MERGE_REQUEST_EVENT_TYPE' => 'merged_result')
|
||||
expect(subject.to_hash.keys).not_to include(
|
||||
%w[CI_MERGE_REQUEST_DIFF_ID
|
||||
CI_MERGE_REQUEST_DIFF_BASE_SHA])
|
||||
end
|
||||
|
||||
context 'when feature flag ci_mr_diff_variables is enabled' do
|
||||
before do
|
||||
stub_feature_flags(ci_mr_diff_variables: true)
|
||||
end
|
||||
|
||||
it 'exposes diff variables' do
|
||||
expect(subject.to_hash)
|
||||
.to include(
|
||||
'CI_MERGE_REQUEST_DIFF_ID' => merge_request.merge_request_diff.id.to_s,
|
||||
'CI_MERGE_REQUEST_DIFF_BASE_SHA' => merge_request.merge_request_diff.base_commit_sha)
|
||||
end
|
||||
it 'exposes diff variables' do
|
||||
expect(subject.to_hash)
|
||||
.to include(
|
||||
'CI_MERGE_REQUEST_DIFF_ID' => merge_request.merge_request_diff.id.to_s,
|
||||
'CI_MERGE_REQUEST_DIFF_BASE_SHA' => merge_request.merge_request_diff.base_commit_sha)
|
||||
end
|
||||
end
|
||||
end
|
||||
|
|
|
|||
|
|
@ -34,4 +34,45 @@ RSpec.describe ProtectedBranch::PushAccessLevel do
|
|||
expect(level.errors.full_messages).to contain_exactly('Deploy key is not enabled for this project')
|
||||
end
|
||||
end
|
||||
|
||||
describe '#check_access' do
|
||||
let_it_be(:project) { create(:project) }
|
||||
let_it_be(:protected_branch) { create(:protected_branch, :no_one_can_push, project: project) }
|
||||
let_it_be(:user) { create(:user) }
|
||||
let_it_be(:deploy_key) { create(:deploy_key, user: user) }
|
||||
let!(:deploy_keys_project) { create(:deploy_keys_project, project: project, deploy_key: deploy_key, can_push: can_push) }
|
||||
let(:can_push) { true }
|
||||
|
||||
before_all do
|
||||
project.add_guest(user)
|
||||
end
|
||||
|
||||
context 'when this push_access_level is tied to a deploy key' do
|
||||
let(:push_access_level) { create(:protected_branch_push_access_level, protected_branch: protected_branch, deploy_key: deploy_key) }
|
||||
|
||||
context 'when the deploy key is among the active keys for this project' do
|
||||
specify do
|
||||
expect(push_access_level.check_access(user)).to be_truthy
|
||||
end
|
||||
|
||||
context 'when the deploy_keys_on_protected_branches FF is false' do
|
||||
before do
|
||||
stub_feature_flags(deploy_keys_on_protected_branches: false)
|
||||
end
|
||||
|
||||
it 'is false' do
|
||||
expect(push_access_level.check_access(user)).to be_falsey
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
context 'when the deploy key is not among the active keys of this project' do
|
||||
let(:can_push) { false }
|
||||
|
||||
it 'is false' do
|
||||
expect(push_access_level.check_access(user)).to be_falsey
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
||||
|
|
|
|||
|
|
@ -1,3 +1,13 @@
|
|||
version https://git-lfs.github.com/spec/v1
|
||||
oid sha256:6895f005b1998a884f9c5cc1831b78f5d45e802dc498402f07a33bacf6b3e2f3
|
||||
size 420
|
||||
%PDF-1.3
|
||||
%<25><><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD>
|
||||
4 0 obj
|
||||
<< /Length 5 0 R /Filter /FlateDecode >>
|
||||
stream
|
||||
xe<><65><0E>0<0C><>>ō<>@<1A><><EFBFBD>
|
||||
ba<EFBFBD>d<EFBFBD>1U<31>V<EFBFBD><10>_8<5F><38>n<EFBFBD>e}<7D><>fXU<58>`\F<>d2<64><32><EFBFBD><EFBFBD><07>S%,<15>Q]<5D>;XC<58>9<07>+Qy<51><79><EFBFBD>k>a2>31B4<42>;<3B><><EFBFBD>d)!Md<4D>M<4D>-<2D>B<EFBFBD><42>F<EFBFBD><46><10>N<EFBFBD>[v<05><>~<7E><>E<>5<EFBFBD><35><EFBFBD>^<08>Z_<03><><1F>o<EFBFBD>l.<2E>
|
||||
endstream
|
||||
endobj
|
||||
5 0 obj
|
||||
155
|
||||
endobj
|
||||
2 0 obj
|
||||
|
|
|
|||
|
|
@ -1,3 +1,13 @@
|
|||
version https://git-lfs.github.com/spec/v1
|
||||
oid sha256:6895f005b1998a884f9c5cc1831b78f5d45e802dc498402f07a33bacf6b3e2f3
|
||||
size 420
|
||||
%PDF-1.3
|
||||
%<25><><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD>
|
||||
4 0 obj
|
||||
<< /Length 5 0 R /Filter /FlateDecode >>
|
||||
stream
|
||||
xe<><65><0E>0<0C><>>ō<>@<1A><><EFBFBD>
|
||||
ba<EFBFBD>d<EFBFBD>1U<31>V<EFBFBD><10>_8<5F><38>n<EFBFBD>e}<7D><>fXU<58>`\F<>d2<64><32><EFBFBD><EFBFBD><07>S%,<15>Q]<5D>;XC<58>9<07>+Qy<51><79><EFBFBD>k>a2>31B4<42>;<3B><><EFBFBD>d)!Md<4D>M<4D>-<2D>B<EFBFBD><42>F<EFBFBD><46><10>N<EFBFBD>[v<05><>~<7E><>E<>5<EFBFBD><35><EFBFBD>^<08>Z_<03><><1F>o<EFBFBD>l.<2E>
|
||||
endstream
|
||||
endobj
|
||||
5 0 obj
|
||||
155
|
||||
endobj
|
||||
2 0 obj
|
||||
|
|
|
|||