Add latest changes from gitlab-org/gitlab@master

This commit is contained in:
GitLab Bot 2022-11-09 18:07:50 +00:00
parent e38a99eb07
commit 20f6a17ba2
107 changed files with 1854 additions and 1246 deletions

View File

@ -45,8 +45,8 @@ workflow:
RUBY_VERSION: "3.0"
# For (detached) merge request pipelines.
- if: '$CI_MERGE_REQUEST_IID'
# For the maintenance scheduled pipelines, we set specific variables.
- if: '$CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH && $CI_PIPELINE_SOURCE == "schedule" && $SCHEDULE_TYPE == "maintenance"'
# For the scheduled pipelines, we set specific variables.
- if: '$CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH && $CI_PIPELINE_SOURCE == "schedule"'
variables:
CRYSTALBALL: "true"
CREATE_INCIDENT_FOR_PIPELINE_FAILURE: "true"

View File

@ -10,20 +10,18 @@ pages:
environment: pages
resource_group: pages
needs:
- job: "rspec:coverage"
- job: "coverage-frontend"
- job: "compile-production-assets"
- job: "compile-storybook"
# `update-tests-metadata` only runs on GitLab.com's EE schedules pipelines
# while `pages` runs for all the maintenance scheduled pipelines.
- job: "update-tests-metadata"
optional: true
- "rspec:coverage"
- "coverage-frontend"
- "compile-production-assets"
- "compile-storybook"
- "update-tests-metadata"
- "generate-frontend-fixtures-mapping"
before_script:
- apt-get update && apt-get -y install brotli gzip
script:
- mv public/ .public/
- mkdir public/
- mkdir -p public/$(dirname "$KNAPSACK_RSPEC_SUITE_REPORT_PATH") public/$(dirname "$FLAKY_RSPEC_SUITE_REPORT_PATH") public/$(dirname "$RSPEC_PACKED_TESTS_MAPPING_PATH")
- mkdir -p public/$(dirname "$KNAPSACK_RSPEC_SUITE_REPORT_PATH") public/$(dirname "$FLAKY_RSPEC_SUITE_REPORT_PATH") public/$(dirname "$RSPEC_PACKED_TESTS_MAPPING_PATH") public/$(dirname "$FRONTEND_FIXTURES_MAPPING_PATH")
- mv coverage/ public/coverage-ruby/ || true
- mv coverage-frontend/ public/coverage-frontend/ || true
- mv storybook/public public/storybook || true
@ -31,6 +29,7 @@ pages:
- mv $KNAPSACK_RSPEC_SUITE_REPORT_PATH public/$KNAPSACK_RSPEC_SUITE_REPORT_PATH || true
- mv $FLAKY_RSPEC_SUITE_REPORT_PATH public/$FLAKY_RSPEC_SUITE_REPORT_PATH || true
- mv $RSPEC_PACKED_TESTS_MAPPING_PATH.gz public/$RSPEC_PACKED_TESTS_MAPPING_PATH.gz || true
- mv $FRONTEND_FIXTURES_MAPPING_PATH public/$FRONTEND_FIXTURES_MAPPING_PATH || true
- *compress-public
artifacts:
paths:

View File

@ -21,7 +21,7 @@
if: '$FORCE_GITLAB_CI'
.if-default-refs: &if-default-refs
if: '$CI_COMMIT_REF_NAME == $CI_DEFAULT_BRANCH || $CI_COMMIT_REF_NAME =~ /^[\d-]+-stable(-ee)?$/ || $CI_COMMIT_REF_NAME =~ /^\d+-\d+-auto-deploy-\d+$/ || $CI_COMMIT_REF_NAME =~ /^security\// || $CI_MERGE_REQUEST_IID || $CI_COMMIT_TAG || $FORCE_GITLAB_CI'
if: '$CI_COMMIT_REF_NAME == $CI_DEFAULT_BRANCH || $CI_COMMIT_REF_NAME =~ /^[\d-]+-stable(-ee)?$/ || $CI_COMMIT_REF_NAME =~ /^\d+-\d+-auto-deploy-\d+$/ || $CI_COMMIT_REF_NAME =~ /^security\// || $CI_COMMIT_REF_NAME == "ruby3" || $CI_MERGE_REQUEST_IID || $CI_COMMIT_TAG || $FORCE_GITLAB_CI'
.if-default-branch-refs: &if-default-branch-refs
if: '$CI_COMMIT_REF_NAME == $CI_DEFAULT_BRANCH && $CI_MERGE_REQUEST_IID == null'
@ -868,6 +868,7 @@
- <<: *if-merge-request-targeting-stable-branch
- <<: *if-merge-request-labels-run-review-app
- <<: *if-auto-deploy-branches
- <<: *if-ruby3-branch
- <<: *if-default-refs
changes: *ci-build-images-patterns
- <<: *if-default-refs

View File

@ -1 +1 @@
7a8f7c377bd013483aba14ced8eafd073c631d4a
1ba70888404fcb9719d4eb33481f57138bce7447

View File

@ -12,6 +12,10 @@ export default {
type: String,
required: true,
},
userId: {
type: Number,
required: true,
},
paths: {
type: Object,
required: true,

View File

@ -1,17 +1,26 @@
<script>
import { GlDropdownItem } from '@gitlab/ui';
import { s__ } from '~/locale';
import { GlDropdownItem, GlLoadingIcon } from '@gitlab/ui';
import { s__, __ } from '~/locale';
import { associationsCount } from '~/api/user_api';
import eventHub, { EVENT_OPEN_DELETE_USER_MODAL } from '../modals/delete_user_modal_event_hub';
export default {
i18n: {
loading: __('Loading'),
},
components: {
GlDropdownItem,
GlLoadingIcon,
},
props: {
username: {
type: String,
required: true,
},
userId: {
type: Number,
required: true,
},
paths: {
type: Object,
required: true,
@ -22,21 +31,38 @@ export default {
default: () => [],
},
},
data() {
return {
loading: false,
};
},
methods: {
onClick() {
async onClick() {
this.loading = true;
try {
const { data: associationsCountData } = await associationsCount(this.userId);
this.openModal(associationsCountData);
} catch (error) {
this.openModal(new Error());
} finally {
this.loading = false;
}
},
openModal(associationsCountData) {
const { username, paths, userDeletionObstacles } = this;
eventHub.$emit(EVENT_OPEN_DELETE_USER_MODAL, {
username,
blockPath: paths.block,
deletePath: paths.deleteWithContributions,
userDeletionObstacles,
associationsCount: associationsCountData,
i18n: {
title: s__('AdminUsers|Delete User %{username} and contributions?'),
primaryButtonLabel: s__('AdminUsers|Delete user and contributions'),
messageBody: s__(`AdminUsers|You are about to permanently delete the user %{username}. This will delete all of the issues,
merge requests, and groups linked to them. To avoid data loss,
consider using the %{strongStart}block user%{strongEnd} feature instead. Once you %{strongStart}Delete user%{strongEnd},
it cannot be undone or recovered.`),
messageBody: s__(`AdminUsers|You are about to permanently delete the user %{username}. This will delete all issues,
merge requests, groups, and projects linked to them. To avoid data loss,
consider using the %{strongStart}Block user%{strongEnd} feature instead. After you %{strongStart}Delete user%{strongEnd},
you cannot undo this action or recover the data.`),
},
});
},
@ -45,8 +71,12 @@ export default {
</script>
<template>
<gl-dropdown-item @click="onClick">
<span class="gl-text-red-500">
<gl-dropdown-item :disabled="loading" :aria-busy="loading" @click.capture.native.stop="onClick">
<div v-if="loading" class="gl-display-flex gl-align-items-center">
<gl-loading-icon class="gl-mr-3" />
{{ $options.i18n.loading }}
</div>
<span v-else class="gl-text-red-500">
<slot></slot>
</span>
</gl-dropdown-item>

View File

@ -0,0 +1,65 @@
<script>
import { GlAlert } from '@gitlab/ui';
import { s__ } from '~/locale';
import AssociationsListItem from './associations_list_item.vue';
export default {
i18n: {
errorMessage: s__(
"AdminUsers|An error occurred while fetching this user's contributions, and the request cannot return the number of issues, merge requests, groups, and projects linked to this user. If you proceed with deleting the user, all their contributions will still be deleted.",
),
},
components: {
AssociationsListItem,
GlAlert,
},
props: {
associationsCount: {
type: [Object, Error],
required: true,
},
},
computed: {
hasError() {
return this.associationsCount instanceof Error;
},
hasAssociations() {
return Object.values(this.associationsCount).some((count) => count > 0);
},
},
};
</script>
<template>
<gl-alert v-if="hasError" class="gl-mb-5" variant="danger" :dismissible="false">{{
$options.i18n.errorMessage
}}</gl-alert>
<ul v-else-if="hasAssociations" class="gl-mb-5">
<associations-list-item
v-if="associationsCount.groups_count"
:message="n__('%{count} group', '%{count} groups', associationsCount.groups_count)"
:count="associationsCount.groups_count"
/>
<associations-list-item
v-if="associationsCount.projects_count"
:message="n__('%{count} project', '%{count} projects', associationsCount.projects_count)"
:count="associationsCount.projects_count"
/>
<associations-list-item
v-if="associationsCount.issues_count"
:message="n__('%{count} issue', '%{count} issues', associationsCount.issues_count)"
:count="associationsCount.issues_count"
/>
<associations-list-item
v-if="associationsCount.merge_requests_count"
:message="
n__(
'%{count} merge request',
'%{count} merge requests',
associationsCount.merge_requests_count,
)
"
:count="associationsCount.merge_requests_count"
/>
</ul>
</template>

View File

@ -0,0 +1,27 @@
<script>
import { GlSprintf } from '@gitlab/ui';
export default {
components: { GlSprintf },
props: {
message: {
type: String,
required: true,
},
count: {
type: Number,
required: true,
},
},
};
</script>
<template>
<li>
<gl-sprintf :message="message">
<template #count>
<strong>{{ count }}</strong>
</template>
</gl-sprintf>
</li>
</template>

View File

@ -2,6 +2,7 @@
import { GlModal, GlButton, GlFormInput, GlSprintf } from '@gitlab/ui';
import { s__, sprintf } from '~/locale';
import UserDeletionObstaclesList from '~/vue_shared/components/user_deletion_obstacles/user_deletion_obstacles_list.vue';
import AssociationsList from '../associations/associations_list.vue';
import eventHub, { EVENT_OPEN_DELETE_USER_MODAL } from './delete_user_modal_event_hub';
export default {
@ -11,6 +12,7 @@ export default {
GlFormInput,
GlSprintf,
UserDeletionObstaclesList,
AssociationsList,
},
props: {
csrfToken: {
@ -25,6 +27,7 @@ export default {
blockPath: '',
deletePath: '',
userDeletionObstacles: [],
associationsCount: {},
i18n: {
title: '',
primaryButtonLabel: '',
@ -53,11 +56,19 @@ export default {
eventHub.$off(EVENT_OPEN_DELETE_USER_MODAL, this.onOpenEvent);
},
methods: {
onOpenEvent({ username, blockPath, deletePath, userDeletionObstacles, i18n }) {
onOpenEvent({
username,
blockPath,
deletePath,
userDeletionObstacles,
associationsCount = {},
i18n,
}) {
this.username = username;
this.blockPath = blockPath;
this.deletePath = deletePath;
this.userDeletionObstacles = userDeletionObstacles;
this.associationsCount = associationsCount;
this.i18n = i18n;
this.openModal();
},
@ -100,8 +111,10 @@ export default {
:user-name="trimmedUsername"
/>
<associations-list :associations-count="associationsCount" />
<p>
<gl-sprintf :message="s__('AdminUsers|To confirm, type %{username}')">
<gl-sprintf :message="s__('AdminUsers|To confirm, type %{username}.')">
<template #username>
<code data-testid="confirm-username" class="gl-white-space-pre-wrap">{{
trimmedUsername

View File

@ -139,6 +139,7 @@ export default {
:key="action"
:paths="userPaths"
:username="user.name"
:user-id="user.id"
:user-deletion-obstacles="obstaclesForUserDeletion"
:data-testid="`delete-${action}`"
>

View File

@ -12,6 +12,7 @@ const USER_PROJECTS_PATH = '/api/:version/users/:id/projects';
const USER_POST_STATUS_PATH = '/api/:version/user/status';
const USER_FOLLOW_PATH = '/api/:version/users/:id/follow';
const USER_UNFOLLOW_PATH = '/api/:version/users/:id/unfollow';
const USER_ASSOCIATIONS_COUNT_PATH = '/api/:version/users/:id/associations_count';
export function getUsers(query, options) {
const url = buildApiUrl(USERS_PATH);
@ -81,3 +82,8 @@ export function unfollowUser(userId) {
const url = buildApiUrl(USER_UNFOLLOW_PATH).replace(':id', encodeURIComponent(userId));
return axios.post(url);
}
export function associationsCount(userId) {
const url = buildApiUrl(USER_ASSOCIATIONS_COUNT_PATH).replace(':id', encodeURIComponent(userId));
return axios.get(url);
}

View File

@ -5,7 +5,7 @@ const createSandbox = () => {
const iframeEl = document.createElement('iframe');
setAttributes(iframeEl, {
src: '/-/sandbox/swagger',
sandbox: 'allow-scripts',
sandbox: 'allow-scripts allow-popups',
frameBorder: 0,
width: '100%',
// The height will be adjusted dynamically.

View File

@ -289,7 +289,9 @@ export default class MergeRequestTabs {
}
if (action === 'commits') {
this.loadCommits(href);
if (!this.commitsLoaded) {
this.loadCommits(href);
}
// this.hideSidebar();
this.resetViewContainer();
this.commitPipelinesTable = destroyPipelines(this.commitPipelinesTable);
@ -423,28 +425,39 @@ export default class MergeRequestTabs {
return this.currentAction;
}
loadCommits(source) {
if (this.commitsLoaded) {
return;
}
loadCommits(source, page = 1) {
toggleLoader(true);
axios
.get(`${source}.json`)
.get(`${source}.json`, { params: { page, per_page: 100 } })
.then(({ data }) => {
toggleLoader(false);
const commitsDiv = document.querySelector('div#commits');
// eslint-disable-next-line no-unsanitized/property
commitsDiv.innerHTML = data.html;
commitsDiv.innerHTML += data.html;
localTimeAgo(commitsDiv.querySelectorAll('.js-timeago'));
this.commitsLoaded = true;
scrollToContainer('#commits');
toggleLoader(false);
const loadMoreButton = document.querySelector('.js-load-more-commits');
return import('./add_context_commits_modal');
if (loadMoreButton) {
loadMoreButton.addEventListener('click', (e) => {
e.preventDefault();
loadMoreButton.remove();
this.loadCommits(source, loadMoreButton.dataset.nextPage);
});
}
if (!data.next_page) {
return import('./add_context_commits_modal');
}
return null;
})
.then((m) => m.default())
.then((m) => m?.default())
.catch(() => {
toggleLoader(false);
createAlert({

View File

@ -3,7 +3,6 @@ import ShortcutsNavigation from '~/behaviors/shortcuts/shortcuts_navigation';
import BlobForkSuggestion from '~/blob/blob_fork_suggestion';
import BlobLinePermalinkUpdater from '~/blob/blob_line_permalink_updater';
import LineHighlighter from '~/blob/line_highlighter';
import initBlobBundle from '~/blob_edit/blob_bundle';
export default () => {
new LineHighlighter(); // eslint-disable-line no-new
@ -35,6 +34,4 @@ export default () => {
suggestionSections: document.querySelectorAll('.js-file-fork-suggestion-section'),
actionTextPieces: document.querySelectorAll('.js-file-fork-suggestion-section-action'),
}).init();
initBlobBundle();
};

View File

@ -1,10 +1,8 @@
import $ from 'jquery';
import initTree from 'ee_else_ce/repository';
import initBlob from '~/blob_edit/blob_bundle';
import ShortcutsNavigation from '~/behaviors/shortcuts/shortcuts_navigation';
import NewCommitForm from '~/new_commit_form';
new NewCommitForm($('.js-create-dir-form')); // eslint-disable-line no-new
initBlob();
initTree();
new ShortcutsNavigation(); // eslint-disable-line no-new

View File

@ -1205,3 +1205,7 @@ $tabs-holder-z-index: 250;
margin-bottom: 0;
}
}
.commits ol:not(:last-of-type) {
margin-bottom: 0;
}

View File

@ -9,7 +9,7 @@ module Groups
default_frame_src = p.directives['frame-src'] || p.directives['default-src']
# When ObservabilityUI is not authenticated, it needs to be able to redirect to the GL sign-in page, hence 'self'
frame_src_values = Array.wrap(default_frame_src) | [ObservabilityController.observability_url, "'self'"]
frame_src_values = Array.wrap(default_frame_src) | [observability_url, "'self'"]
p.frame_src(*frame_src_values)
end
@ -18,10 +18,7 @@ module Groups
def index
# Format: https://observe.gitlab.com/-/GROUP_ID
@observability_iframe_src = "#{ObservabilityController.observability_url}/-/#{@group.id}"
# Uncomment below for testing with local GDK
# @observability_iframe_src = "#{ObservabilityController.observability_url}/9970?groupId=14485840"
@observability_iframe_src = "#{observability_url}/-/#{@group.id}"
render layout: 'group', locals: { base_layout: 'layouts/fullscreen' }
end
@ -29,15 +26,15 @@ module Groups
private
def self.observability_url
return ENV['OVERRIDE_OBSERVABILITY_URL'] if ENV['OVERRIDE_OBSERVABILITY_URL']
# TODO Make observability URL configurable https://gitlab.com/gitlab-org/opstrace/opstrace-ui/-/issues/80
return "https://staging.observe.gitlab.com" if Gitlab.staging?
Gitlab::Observability.observability_url
end
"https://observe.gitlab.com"
def observability_url
self.class.observability_url
end
def check_observability_allowed
return render_404 unless self.class.observability_url.present?
return render_404 unless observability_url.present?
render_404 unless can?(current_user, :read_observability, @group)
end

View File

@ -178,15 +178,15 @@ class Projects::MergeRequestsController < Projects::MergeRequests::ApplicationCo
@merge_request.recent_context_commits
)
# Get commits from repository
# or from cache if already merged
@commits =
set_commits_for_rendering(
@merge_request.recent_commits(load_from_gitaly: true).with_latest_pipeline(@merge_request.source_branch).with_markdown_cache,
commits_count: @merge_request.commits_count
)
per_page = [(params[:per_page] || MergeRequestDiff::COMMITS_SAFE_SIZE).to_i, MergeRequestDiff::COMMITS_SAFE_SIZE].min
recent_commits = @merge_request.recent_commits(load_from_gitaly: true, limit: per_page, page: params[:page]).with_latest_pipeline(@merge_request.source_branch).with_markdown_cache
@next_page = recent_commits.next_page
@commits = set_commits_for_rendering(
recent_commits,
commits_count: @merge_request.commits_count
)
render json: { html: view_to_html_string('projects/merge_requests/_commits') }
render json: { html: view_to_html_string('projects/merge_requests/_commits'), next_page: @next_page }
end
def pipelines

View File

@ -25,13 +25,12 @@ module Mutations
'Only present if operation was performed synchronously.'
def resolve(**runner_attrs)
raise_resource_not_available_error! unless Ability.allowed?(current_user, :delete_runners)
if ids = runner_attrs[:ids]
runners = find_all_runners_by_ids(model_ids_of(ids))
runner_ids = model_ids_of(ids)
runners = find_all_runners_by_ids(runner_ids)
result = ::Ci::Runners::BulkDeleteRunnersService.new(runners: runners).execute
result.payload.slice(:deleted_count, :deleted_ids).merge(errors: [])
result = ::Ci::Runners::BulkDeleteRunnersService.new(runners: runners, current_user: current_user).execute
result.payload.slice(:deleted_count, :deleted_ids, :errors)
else
{ errors: [] }
end
@ -39,14 +38,15 @@ module Mutations
private
def model_ids_of(ids)
ids.filter_map { |gid| gid.model_id.to_i }
def model_ids_of(global_ids)
global_ids.filter_map { |gid| gid.model_id.to_i }
end
def find_all_runners_by_ids(ids)
return ::Ci::Runner.none if ids.blank?
::Ci::Runner.id_in(ids)
limit = ::Ci::Runners::BulkDeleteRunnersService::RUNNER_LIMIT
::Ci::Runner.id_in(ids).limit(limit + 1)
end
end
end

View File

@ -20,7 +20,7 @@ module Types
description: 'Timestamp of when the merge request was created.'
field :description, GraphQL::Types::String, null: true,
description: 'Description of the merge request (Markdown rendered as HTML for caching).'
field :diff_head_sha, GraphQL::Types::String, null: true,
field :diff_head_sha, GraphQL::Types::String, null: true, calls_gitaly: true,
description: 'Diff head SHA of the merge request.'
field :diff_refs, Types::DiffRefsType, null: true,
description: 'References of the base SHA, the head SHA, and the start SHA for this merge request.'

View File

@ -532,7 +532,7 @@ class ApplicationSetting < ApplicationRecord
validates :jira_connect_proxy_url,
length: { maximum: 255, message: N_('is too long (maximum is %{count} characters)') },
allow_blank: true,
addressable_url: true
public_url: true
with_options(presence: true, numericality: { only_integer: true, greater_than: 0 }) do
validates :throttle_unauthenticated_api_requests_per_period

View File

@ -284,7 +284,11 @@ module Ci
return [] unless forward_yaml_variables?
yaml_variables.to_a.map do |hash|
{ key: hash[:key], value: ::ExpandVariables.expand(hash[:value], expand_variables) }
if hash[:raw] && ci_raw_variables_in_yaml_config_enabled?
{ key: hash[:key], value: hash[:value], raw: true }
else
{ key: hash[:key], value: ::ExpandVariables.expand(hash[:value], expand_variables) }
end
end
end
@ -292,7 +296,11 @@ module Ci
return [] unless forward_pipeline_variables?
pipeline.variables.to_a.map do |variable|
{ key: variable.key, value: ::ExpandVariables.expand(variable.value, expand_variables) }
if variable.raw? && ci_raw_variables_in_yaml_config_enabled?
{ key: variable.key, value: variable.value, raw: true }
else
{ key: variable.key, value: ::ExpandVariables.expand(variable.value, expand_variables) }
end
end
end
@ -301,7 +309,11 @@ module Ci
return [] unless pipeline.pipeline_schedule
pipeline.pipeline_schedule.variables.to_a.map do |variable|
{ key: variable.key, value: ::ExpandVariables.expand(variable.value, expand_variables) }
if variable.raw? && ci_raw_variables_in_yaml_config_enabled?
{ key: variable.key, value: variable.value, raw: true }
else
{ key: variable.key, value: ::ExpandVariables.expand(variable.value, expand_variables) }
end
end
end
@ -320,6 +332,12 @@ module Ci
result.nil? ? FORWARD_DEFAULTS[:pipeline_variables] : result
end
end
def ci_raw_variables_in_yaml_config_enabled?
strong_memoize(:ci_raw_variables_in_yaml_config_enabled) do
::Feature.enabled?(:ci_raw_variables_in_yaml_config, project)
end
end
end
end

View File

@ -13,10 +13,11 @@ class CommitCollection
# container - The object the commits belong to.
# commits - The Commit instances to store.
# ref - The name of the ref (e.g. "master").
def initialize(container, commits, ref = nil)
def initialize(container, commits, ref = nil, page: nil, per_page: nil, count: nil)
@container = container
@commits = commits
@ref = ref
@pagination = Gitlab::PaginationDelegate.new(page: page, per_page: per_page, count: count)
end
def each(&block)
@ -113,4 +114,8 @@ class CommitCollection
def method_missing(message, *args, &block)
commits.public_send(message, *args, &block)
end
def next_page
@pagination.next_page
end
end

View File

@ -26,7 +26,7 @@ module RedisCacheable
end
def cache_attributes(values)
Gitlab::Redis::Cache.with do |redis|
with_redis do |redis|
redis.set(cache_attribute_key, Gitlab::Json.dump(values), ex: CACHED_ATTRIBUTES_EXPIRY_TIME)
end
@ -41,13 +41,17 @@ module RedisCacheable
def cached_attributes
strong_memoize(:cached_attributes) do
Gitlab::Redis::Cache.with do |redis|
with_redis do |redis|
data = redis.get(cache_attribute_key)
Gitlab::Json.parse(data, symbolize_names: true) if data
end
end
end
def with_redis(&block)
Gitlab::Redis::Cache.with(&block) # rubocop:disable CodeReuse/ActiveRecord
end
def cast_value_from_cache(attribute, value)
self.class.type_for_attribute(attribute.to_s).cast(value)
end

View File

@ -653,8 +653,8 @@ class MergeRequest < ApplicationRecord
context_commits.count
end
def commits(limit: nil, load_from_gitaly: false)
return merge_request_diff.commits(limit: limit, load_from_gitaly: load_from_gitaly) if merge_request_diff.persisted?
def commits(limit: nil, load_from_gitaly: false, page: nil)
return merge_request_diff.commits(limit: limit, load_from_gitaly: load_from_gitaly, page: page) if merge_request_diff.persisted?
commits_arr = if compare_commits
reversed_commits = compare_commits.reverse
@ -666,8 +666,8 @@ class MergeRequest < ApplicationRecord
CommitCollection.new(source_project, commits_arr, source_branch)
end
def recent_commits(load_from_gitaly: false)
commits(limit: MergeRequestDiff::COMMITS_SAFE_SIZE, load_from_gitaly: load_from_gitaly)
def recent_commits(limit: MergeRequestDiff::COMMITS_SAFE_SIZE, load_from_gitaly: false, page: nil)
commits(limit: limit, load_from_gitaly: load_from_gitaly, page: page)
end
def commits_count

View File

@ -292,9 +292,9 @@ class MergeRequestDiff < ApplicationRecord
end
end
def commits(limit: nil, load_from_gitaly: false)
strong_memoize(:"commits_#{limit || 'all'}_#{load_from_gitaly}") do
load_commits(limit: limit, load_from_gitaly: load_from_gitaly)
def commits(limit: nil, load_from_gitaly: false, page: nil)
strong_memoize(:"commits_#{limit || 'all'}_#{load_from_gitaly}_page_#{page}") do
load_commits(limit: limit, load_from_gitaly: load_from_gitaly, page: page)
end
end
@ -725,17 +725,19 @@ class MergeRequestDiff < ApplicationRecord
end
end
def load_commits(limit: nil, load_from_gitaly: false)
def load_commits(limit: nil, load_from_gitaly: false, page: nil)
diff_commits = page.present? ? merge_request_diff_commits.page(page).per(limit) : merge_request_diff_commits.limit(limit)
if load_from_gitaly
commits = Gitlab::Git::Commit.batch_by_oid(repository, merge_request_diff_commits.limit(limit).map(&:sha))
commits = Gitlab::Git::Commit.batch_by_oid(repository, diff_commits.map(&:sha))
commits = Commit.decorate(commits, project)
else
commits = merge_request_diff_commits.with_users.limit(limit)
commits = diff_commits.with_users
.map { |commit| Commit.from_hash(commit.to_hash, project) }
end
CommitCollection
.new(merge_request.target_project, commits, merge_request.target_branch)
.new(merge_request.target_project, commits, merge_request.target_branch, page: page.to_i, per_page: limit, count: commits_count)
end
def save_diffs

View File

@ -9,7 +9,7 @@ module Preloaders
end
def execute
return if @projects.is_a?(ActiveRecord::NullRelation)
return unless @projects.is_a?(ActiveRecord::Relation)
return unless ::Feature.enabled?(:use_traversal_ids)
root_query = Namespace.joins("INNER JOIN (#{join_sql}) as root_query ON root_query.root_id = namespaces.id")

View File

@ -19,6 +19,8 @@ module Preloaders
end
def execute
return unless @user
project_authorizations = ProjectAuthorization.arel_table
auths = @projects

View File

@ -1768,7 +1768,7 @@ class User < ApplicationRecord
end
def owns_runner?(runner)
ci_owned_runners.exists?(runner.id)
ci_owned_runners.include?(runner)
end
def notification_email_for(notification_group)

View File

@ -120,8 +120,6 @@ class GlobalPolicy < BasePolicy
# We can't use `read_statistics` because the user may have different permissions for different projects
rule { admin }.enable :use_project_statistics_filters
rule { admin }.enable :delete_runners
rule { external_user }.prevent :create_snippet
end

View File

@ -632,7 +632,6 @@ class ProjectPolicy < BasePolicy
prevent :read_commit_status
prevent :read_pipeline
prevent :read_pipeline_schedule
prevent(*create_read_update_admin_destroy(:release))
prevent(*create_read_update_admin_destroy(:feature_flag))
prevent(:admin_feature_flags_user_lists)
end

View File

@ -7,29 +7,69 @@ module Ci
RUNNER_LIMIT = 50
# @param runners [Array<Ci::Runner, Integer>] the runners to unregister/destroy
def initialize(runners:)
# @param runners [Array<Ci::Runner>] the runners to unregister/destroy
# @param current_user [User] the user performing the operation
def initialize(runners:, current_user:)
@runners = runners
@current_user = current_user
end
def execute
if @runners
# Delete a few runners immediately
return ServiceResponse.success(payload: delete_runners)
return delete_runners
end
ServiceResponse.success(payload: { deleted_count: 0, deleted_ids: [] })
ServiceResponse.success(payload: { deleted_count: 0, deleted_ids: [], errors: [] })
end
private
def delete_runners
runner_count = @runners.count
authorized_runners_ids, unauthorized_runners_ids = compute_authorized_runners
# rubocop:disable CodeReuse/ActiveRecord
runners_to_be_deleted = Ci::Runner.where(id: @runners).limit(RUNNER_LIMIT)
runners_to_be_deleted =
Ci::Runner
.where(id: authorized_runners_ids)
.preload([:taggings, :runner_namespaces, :runner_projects])
# rubocop:enable CodeReuse/ActiveRecord
deleted_ids = runners_to_be_deleted.destroy_all.map(&:id) # rubocop: disable Cop/DestroyAll
deleted_ids = runners_to_be_deleted.destroy_all.map(&:id) # rubocop:disable Cop/DestroyAll
{ deleted_count: deleted_ids.count, deleted_ids: deleted_ids }
ServiceResponse.success(
payload: {
deleted_count: deleted_ids.count,
deleted_ids: deleted_ids,
errors: error_messages(runner_count, authorized_runners_ids, unauthorized_runners_ids)
})
end
def compute_authorized_runners
# rubocop:disable CodeReuse/ActiveRecord
@current_user.ci_owned_runners.load # preload the owned runners to avoid an N+1
authorized_runners, unauthorized_runners =
@runners.limit(RUNNER_LIMIT)
.partition { |runner| Ability.allowed?(@current_user, :delete_runner, runner) }
# rubocop:enable CodeReuse/ActiveRecord
[authorized_runners.map(&:id), unauthorized_runners.map(&:id)]
end
def error_messages(runner_count, authorized_runners_ids, unauthorized_runners_ids)
errors = []
if runner_count > RUNNER_LIMIT
errors << "Can only delete up to #{RUNNER_LIMIT} runners per call. Ignored the remaining runner(s)."
end
if authorized_runners_ids.empty?
errors << "User does not have permission to delete any of the runners"
elsif unauthorized_runners_ids.any?
failed_ids = unauthorized_runners_ids.map { |runner_id| "##{runner_id}" }.join(', ')
errors << "User does not have permission to delete runner(s) #{failed_ids}"
end
errors
end
end
end

View File

@ -25,18 +25,22 @@ class EventCreateService
def open_mr(merge_request, current_user)
create_record_event(merge_request, current_user, :created).tap do
track_event(event_action: :created, event_target: MergeRequest, author_id: current_user.id)
track_snowplow_event(merge_request, current_user,
Gitlab::UsageDataCounters::TrackUniqueEvents::MERGE_REQUEST_ACTION,
:create, 'merge_requests_users')
track_snowplow_event(
:created,
merge_request,
current_user
)
end
end
def close_mr(merge_request, current_user)
create_record_event(merge_request, current_user, :closed).tap do
track_event(event_action: :closed, event_target: MergeRequest, author_id: current_user.id)
track_snowplow_event(merge_request, current_user,
Gitlab::UsageDataCounters::TrackUniqueEvents::MERGE_REQUEST_ACTION,
:close, 'merge_requests_users')
track_snowplow_event(
:closed,
merge_request,
current_user
)
end
end
@ -47,9 +51,11 @@ class EventCreateService
def merge_mr(merge_request, current_user)
create_record_event(merge_request, current_user, :merged).tap do
track_event(event_action: :merged, event_target: MergeRequest, author_id: current_user.id)
track_snowplow_event(merge_request, current_user,
Gitlab::UsageDataCounters::TrackUniqueEvents::MERGE_REQUEST_ACTION,
:merge, 'merge_requests_users')
track_snowplow_event(
:merged,
merge_request,
current_user
)
end
end
@ -73,9 +79,12 @@ class EventCreateService
create_record_event(note, current_user, :commented).tap do
if note.is_a?(DiffNote) && note.for_merge_request?
track_event(event_action: :commented, event_target: MergeRequest, author_id: current_user.id)
track_snowplow_event(note, current_user,
Gitlab::UsageDataCounters::TrackUniqueEvents::MERGE_REQUEST_ACTION,
:comment, 'merge_requests_users')
track_snowplow_event(
:commented,
note,
current_user
)
end
end
end
@ -109,13 +118,13 @@ class EventCreateService
return [] if records.empty?
if create.any?
track_snowplow_event(create.first, current_user,
old_track_snowplow_event(create.first, current_user,
Gitlab::UsageDataCounters::TrackUniqueEvents::DESIGN_ACTION,
:create, 'design_users')
end
if update.any?
track_snowplow_event(update.first, current_user,
old_track_snowplow_event(update.first, current_user,
Gitlab::UsageDataCounters::TrackUniqueEvents::DESIGN_ACTION,
:update, 'design_users')
end
@ -126,7 +135,7 @@ class EventCreateService
def destroy_designs(designs, current_user)
return [] unless designs.present?
track_snowplow_event(designs.first, current_user,
old_track_snowplow_event(designs.first, current_user,
Gitlab::UsageDataCounters::TrackUniqueEvents::DESIGN_ACTION,
:destroy, 'design_users')
create_record_events(designs.zip([:destroyed].cycle), current_user)
@ -261,7 +270,10 @@ class EventCreateService
Gitlab::UsageDataCounters::TrackUniqueEvents.track_event(**params)
end
def track_snowplow_event(record, current_user, category, action, label)
# This will be deleted as a part of
# https://gitlab.com/groups/gitlab-org/-/epics/8641
# once all the events are fixed
def old_track_snowplow_event(record, current_user, category, action, label)
return unless Feature.enabled?(:route_hll_to_snowplow_phase2)
project = record.project
@ -274,6 +286,19 @@ class EventCreateService
user: current_user
)
end
def track_snowplow_event(action, record, user)
project = record.project
Gitlab::Tracking.event(
self.class.to_s,
action.to_s,
label: 'usage_activity_by_stage_monthly.create.merge_requests_users',
namespace: project.namespace,
user: user,
project: project,
context: [Gitlab::Tracking::ServicePingContext.new(data_source: :redis_hll, event: 'merge_requests_users').to_context]
)
end
end
EventCreateService.prepend_mod_with('EventCreateService')

View File

@ -20,7 +20,7 @@ module MergeRequests
end
def execute_hooks(merge_request, action = 'open', old_rev: nil, old_associations: {})
merge_data = hook_data(merge_request, action, old_rev: old_rev, old_associations: old_associations)
merge_data = Gitlab::Lazy.new { hook_data(merge_request, action, old_rev: old_rev, old_associations: old_associations) }
merge_request.project.execute_hooks(merge_data, :merge_request_hooks)
merge_request.project.execute_integrations(merge_data, :merge_request_hooks)

View File

@ -10,7 +10,7 @@ module ProtectedBranches
def fetch(ref_name, dry_run: false, &block)
record = OpenSSL::Digest::SHA256.hexdigest(ref_name)
Gitlab::Redis::Cache.with do |redis|
with_redis do |redis|
cached_result = redis.hget(redis_key, record)
if cached_result.nil?
@ -48,11 +48,15 @@ module ProtectedBranches
end
def refresh
Gitlab::Redis::Cache.with { |redis| redis.unlink(redis_key) }
with_redis { |redis| redis.unlink(redis_key) }
end
private
# Yields a connection from the shared Redis cache pool to the block.
# Centralizes the pool access so the rubocop disable lives in one place.
def with_redis(&block)
  Gitlab::Redis::Cache.with(&block) # rubocop:disable CodeReuse/ActiveRecord
end
def check_and_log_discrepancy(cached_value, real_value, ref_name)
return if cached_value.nil?
return if cached_value == real_value

View File

@ -33,16 +33,15 @@
- else
= render partial: 'projects/commits/commit', collection: context_commits, locals: { project: project, ref: ref, merge_request: merge_request }
- if hidden > 0
- if hidden > 0 && !@merge_request
%li
= render Pajamas::AlertComponent.new(variant: :warning,
dismissible: false) do |c|
= c.body do
= n_('%s additional commit has been omitted to prevent performance issues.', '%s additional commits have been omitted to prevent performance issues.', hidden) % number_with_delimiter(hidden)
- if can_update_merge_request && context_commits&.empty?
= render Pajamas::ButtonComponent.new(button_options: { class: 'gl-mt-5 add-review-item-modal-trigger', data: { context_commits_empty: 'true' } }) do
= _('Add previously merged commits')
- if can_update_merge_request && context_commits&.empty? && !(defined?(@next_page) && @next_page)
.add-review-item-modal-trigger{ data: { context_commits_empty: 'true' } }
- if commits.size == 0 && context_commits.nil?
.commits-empty.gl-mt-6

View File

@ -13,6 +13,9 @@
- else
%ol#commits-list.list-unstyled
= render "projects/commits/commits", merge_request: @merge_request
- if @next_page && @merge_request
= render Pajamas::ButtonComponent.new(button_options: { class: 'js-load-more-commits', data: { next_page: @next_page } }) do
= _('Load more')
- if can_update_merge_request && @merge_request.iid
- if can_update_merge_request && @merge_request.iid && !@next_page
.add-review-item-modal-wrapper{ data: { context_commits_path: context_commits_project_json_merge_request_url(@merge_request&.project, @merge_request, :json), target_branch: @merge_request.target_branch, merge_request_iid: @merge_request.iid, project_id: @merge_request.project.id } }

View File

@ -18,7 +18,7 @@ class GitlabPerformanceBarStatsWorker
idempotent!
def perform(lease_uuid)
Gitlab::Redis::Cache.with do |redis|
with_redis do |redis|
request_ids = fetch_request_ids(redis, lease_uuid)
stats = Gitlab::PerformanceBar::Stats.new(redis)
@ -30,6 +30,10 @@ class GitlabPerformanceBarStatsWorker
private
# Yields a connection from the shared Redis cache pool to the block.
# Wrapper kept private so callers in this worker go through one entry point.
def with_redis(&block)
  Gitlab::Redis::Cache.with(&block) # rubocop:disable CodeReuse/ActiveRecord
end
def fetch_request_ids(redis, lease_uuid)
ids = redis.smembers(STATS_KEY)
redis.del(STATS_KEY)

View File

@ -90,22 +90,26 @@ module Projects
end
def save_last_processed_project_id(project_id)
Gitlab::Redis::Cache.with do |redis|
with_redis do |redis|
redis.set(LAST_PROCESSED_INACTIVE_PROJECT_REDIS_KEY, project_id)
end
end
def last_processed_project_id
Gitlab::Redis::Cache.with do |redis|
with_redis do |redis|
redis.get(LAST_PROCESSED_INACTIVE_PROJECT_REDIS_KEY).to_i
end
end
def reset_last_processed_project_id
Gitlab::Redis::Cache.with do |redis|
with_redis do |redis|
redis.del(LAST_PROCESSED_INACTIVE_PROJECT_REDIS_KEY)
end
end
# Yields a connection from the shared Redis cache pool to the block.
# Used by the last-processed-project-id helpers above.
def with_redis(&block)
  Gitlab::Redis::Cache.with(&block) # rubocop:disable CodeReuse/ActiveRecord
end
end
end

View File

@ -1,8 +0,0 @@
---
name: duplicate_jobs_cookie
introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/100851
rollout_issue_url: https://gitlab.com/gitlab-com/gl-infra/scalability/-/issues/1954
milestone: '15.5'
type: development
group: group::scalability
default_enabled: false

View File

@ -1,7 +1,7 @@
---
name: observability_group_tab
introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/96374
rollout_issue_url:
rollout_issue_url: https://gitlab.com/gitlab-org/gitlab/-/issues/381740
milestone: '15.3'
type: development
group: group::observability

View File

@ -513,7 +513,7 @@ internet connectivity is gated by a proxy. To use a proxy for GitLab Pages:
### Using a custom Certificate Authority (CA)
When using certificates issued by a custom CA, [Access Control](../../user/project/pages/pages_access_control.md#gitlab-pages-access-control) and
When using certificates issued by a custom CA, [Access Control](../../user/project/pages/pages_access_control.md) and
the [online view of HTML job artifacts](../../ci/pipelines/job_artifacts.md#download-job-artifacts)
fails to work if the custom CA is not recognized.

View File

@ -459,7 +459,7 @@ Pages access control is disabled by default. To enable it:
auth-server=<URL of the GitLab instance>
```
1. Users can now configure it in their [projects' settings](../../user/project/pages/introduction.md#gitlab-pages-access-control).
1. Users can now configure it in their [projects' settings](../../user/project/pages/pages_access_control.md).
## Change storage path

View File

@ -13627,7 +13627,7 @@ four standard [pagination arguments](#connection-pagination-arguments):
| Name | Type | Description |
| ---- | ---- | ----------- |
| <a id="groupscanexecutionpoliciesactionscantypes"></a>`actionScanTypes` | [`[SecurityReportTypeEnum!]`](#securityreporttypeenum) | Filters policies by the action scan type. Only these scan types are supported: `dast`, `secret_detection`, `cluster_image_scanning`, `container_scanning`, `sast`. |
| <a id="groupscanexecutionpoliciesactionscantypes"></a>`actionScanTypes` | [`[SecurityReportTypeEnum!]`](#securityreporttypeenum) | Filters policies by the action scan type. Only these scan types are supported: `dast`, `secret_detection`, `cluster_image_scanning`, `container_scanning`, `sast`, `dependency_scanning`. |
| <a id="groupscanexecutionpoliciesrelationship"></a>`relationship` | [`SecurityPolicyRelationType`](#securitypolicyrelationtype) | Filter policies by the given policy relationship. |
##### `Group.scanResultPolicies`
@ -15760,7 +15760,7 @@ four standard [pagination arguments](#connection-pagination-arguments):
| Name | Type | Description |
| ---- | ---- | ----------- |
| <a id="namespacescanexecutionpoliciesactionscantypes"></a>`actionScanTypes` | [`[SecurityReportTypeEnum!]`](#securityreporttypeenum) | Filters policies by the action scan type. Only these scan types are supported: `dast`, `secret_detection`, `cluster_image_scanning`, `container_scanning`, `sast`. |
| <a id="namespacescanexecutionpoliciesactionscantypes"></a>`actionScanTypes` | [`[SecurityReportTypeEnum!]`](#securityreporttypeenum) | Filters policies by the action scan type. Only these scan types are supported: `dast`, `secret_detection`, `cluster_image_scanning`, `container_scanning`, `sast`, `dependency_scanning`. |
| <a id="namespacescanexecutionpoliciesrelationship"></a>`relationship` | [`SecurityPolicyRelationType`](#securitypolicyrelationtype) | Filter policies by the given policy relationship. |
##### `Namespace.scanResultPolicies`
@ -17501,7 +17501,7 @@ four standard [pagination arguments](#connection-pagination-arguments):
| Name | Type | Description |
| ---- | ---- | ----------- |
| <a id="projectscanexecutionpoliciesactionscantypes"></a>`actionScanTypes` | [`[SecurityReportTypeEnum!]`](#securityreporttypeenum) | Filters policies by the action scan type. Only these scan types are supported: `dast`, `secret_detection`, `cluster_image_scanning`, `container_scanning`, `sast`. |
| <a id="projectscanexecutionpoliciesactionscantypes"></a>`actionScanTypes` | [`[SecurityReportTypeEnum!]`](#securityreporttypeenum) | Filters policies by the action scan type. Only these scan types are supported: `dast`, `secret_detection`, `cluster_image_scanning`, `container_scanning`, `sast`, `dependency_scanning`. |
| <a id="projectscanexecutionpoliciesrelationship"></a>`relationship` | [`SecurityPolicyRelationType`](#securitypolicyrelationtype) | Filter policies by the given policy relationship. |
##### `Project.scanResultPolicies`

View File

@ -623,7 +623,7 @@ Supported attributes:
| Attribute | Type | Description |
|----------------------------------|------|-------------|
| `approvals_before_merge` | integer | **(PREMIUM)** Number of approvals required before this can be merged. |
| `approvals_before_merge` | integer | **(PREMIUM)** Number of approvals required before this merge request can merge. |
| `assignee` | object | First assignee of the merge request. |
| `assignees` | array | Assignees of the merge request. |
| `author` | object | User who created this merge request. |
@ -632,41 +632,42 @@ Supported attributes:
| `closed_at` | datetime | Timestamp of when the merge request was closed. |
| `closed_by` | object | User who closed this merge request. |
| `created_at` | datetime | Timestamp of when the merge request was created. |
| `description` | string | Description of the merge request (Markdown rendered as HTML for caching). |
| `description` | string | Description of the merge request. Contains Markdown rendered as HTML for caching. |
| `detailed_merge_status` | string | Detailed merge status of the merge request. |
| `diff_refs` | object | References of the base SHA, the head SHA, and the start SHA for this merge request. |
| `diff_refs` | object | References of the base SHA, the head SHA, and the start SHA for this merge request. Corresponds to the latest diff version of the merge request. |
| `discussion_locked` | boolean | Indicates if comments on the merge request are locked to members only. |
| `downvotes` | integer | Number of downvotes for the merge request. |
| `draft` | boolean | Indicates if the merge request is a draft. |
| `first_contribution` | boolean | Indicates if the merge request is the first contribution of the author. |
| `first_deployed_to_production_at` | datetime | Timestamp of when the first deployment finished. |
| `force_remove_source_branch` | boolean | Indicates if the project settings will lead to source branch deletion after merge. |
| `has_conflicts` | boolean | Indicates if merge request has conflicts and cannot be merged. |
| `head_pipeline` | object | Pipeline running on the branch HEAD of the merge request. |
| `has_conflicts` | boolean | Indicates if merge request has conflicts and cannot be merged. Dependent on the `merge_status` property. Returns `false` unless `merge_status` is `cannot_be_merged`. |
| `head_pipeline` | object | Pipeline running on the branch HEAD of the merge request. Contains more complete information than `pipeline` and should be used instead of it. |
| `id` | integer | ID of the merge request. |
| `iid` | integer | Internal ID of the merge request. |
| `labels` | array | Labels of the merge request. |
| `latest_build_finished_at` | datetime | Timestamp of when the latest build for the merge request finished. |
| `latest_build_started_at` | datetime | Timestamp of when the latest build for the merge request started. |
| `merge_commit_sha` | string | SHA of the merge request commit (set once merged). |
| `merge_commit_sha` | string | SHA of the merge request commit. Returns `null` until merged. |
| `merge_error` | string | Error message due to a merge error. |
| `merge_user` | object | User who merged this merge request or set it to merge when pipeline succeeds. |
| `merge_status` | string | Status of the merge request. Can be `unchecked`, `checking`, `can_be_merged`, `cannot_be_merged` or `cannot_be_merged_recheck`. [Deprecated](https://gitlab.com/gitlab-org/gitlab/-/issues/3169#note_1162532204) in GitLab 15.6. Use `detailed_merge_status` instead. |
| `merge_user` | object | The user who merged this merge request, the user who set it to merge when pipeline succeeds, or `null`. [Introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/349031) in GitLab 14.7. |
| `merge_status` | string | Status of the merge request. Can be `unchecked`, `checking`, `can_be_merged`, `cannot_be_merged`, or `cannot_be_merged_recheck`. Affects the `has_conflicts` property. [Deprecated](https://gitlab.com/gitlab-org/gitlab/-/issues/3169#note_1162532204) in GitLab 15.6. Use `detailed_merge_status` instead. |
| `merge_when_pipeline_succeeds` | boolean | Indicates if the merge has been set to be merged when its pipeline succeeds. |
| `merged_at` | datetime | Timestamp of when the merge request was merged. |
| `merged_by` | object | Deprecated: Use `merge_user` instead. User who merged this merge request or set it to merge when pipeline succeeds. |
| `merged_by` | object | User who merged this merge request or set it to merge when pipeline succeeds. [Deprecated](https://gitlab.com/gitlab-org/gitlab/-/issues/350534) in GitLab 14.7, and scheduled for removal in [API version 5](https://gitlab.com/groups/gitlab-org/-/epics/8115). Use `merge_user` instead. |
| `milestone` | object | Milestone of the merge request. |
| `pipeline` | object | Pipeline running on the branch HEAD of the merge request. |
| `pipeline` | object | Pipeline running on the branch HEAD of the merge request. Consider using `head_pipeline` instead, as it contains more information. |
| `project_id` | integer | ID of the merge request project. |
| `reference` | string | Deprecated: Use `references` instead. Internal reference of the merge request. Returned in shortened format by default. |
| `references` | object | Internal references of the merge request. Includes `short`, `relative` and `full` references. |
| `reference` | string | Internal reference of the merge request. Returned in shortened format by default. [Deprecated](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/20354) in GitLab 12.7, and scheduled for removal in [API version 5](https://gitlab.com/groups/gitlab-org/-/epics/8115). Use `references` instead. |
| `references` | object | Internal references of the merge request. Includes `short`, `relative`, and `full` references. `references.relative` is relative to the merge request's group or project. When fetched from the merge request's project, `relative` and `short` formats are identical. When requested across groups or projects, `relative` and `full` formats are identical.|
| `reviewers` | array | Reviewers of the merge request. |
| `sha` | string | Diff head SHA of the merge request. |
| `should_remove_source_branch` | boolean | Indicates if the source branch of the merge request will be deleted after merge. |
| `source_branch` | string | Source branch of the merge request. |
| `source_project_id` | integer | ID of the merge request source project. |
| `squash` | boolean | Indicates if squash on merge is enabled. |
| `squash_commit_sha` | string | SHA of the squash commit (set once merged). |
| `squash_commit_sha` | string | SHA of the squash commit. Empty until merged. |
| `state` | string | State of the merge request. Can be `opened`, `closed`, `merged` or `locked`. |
| `subscribed` | boolean | Indicates if the currently logged in user is subscribed to this merge request. |
| `target_branch` | string | Target branch of the merge request. |
@ -690,7 +691,7 @@ Supported attributes:
"state": "opened",
"created_at": "2022-05-13T07:26:38.402Z",
"updated_at": "2022-05-14T03:38:31.354Z",
"merged_by": null, // Deprecated and will be removed in API v5, use `merge_user` instead
"merged_by": null, // Deprecated and will be removed in API v5. Use `merge_user` instead.
"merge_user": null,
"merged_at": null,
"closed_by": null,
@ -726,7 +727,7 @@ Supported attributes:
"discussion_locked": null,
"should_remove_source_branch": null,
"force_remove_source_branch": true,
"reference": "!133",
"reference": "!133", // Deprecated. Use `references` instead.
"references": {
"short": "!133",
"relative": "!133",
@ -752,7 +753,7 @@ Supported attributes:
"latest_build_started_at": "2022-05-13T09:46:50.032Z",
"latest_build_finished_at": null,
"first_deployed_to_production_at": null,
"pipeline": { // Old parameter, use `head_pipeline` instead.
"pipeline": { // Use `head_pipeline` instead.
"id": 538317940,
"iid": 1877,
"project_id": 15513260,
@ -813,38 +814,21 @@ Supported attributes:
"first_contribution": false,
"user": {
"can_merge": true
}
}
```
Users on [GitLab Premium or higher](https://about.gitlab.com/pricing/) also see
the `approvals_before_merge` parameter:
```json
{
"id": 1,
"title": "test1",
"approvals_before_merge": null
...
},
"approvals_before_merge": { // Available for GitLab Premium and higher tiers only
"id": 1,
"title": "test1",
"approvals_before_merge": null
},
}
```
### Single merge request response notes
- The `diff_refs` in the response correspond to the latest diff version of the merge request.
- [Introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/29984) in GitLab 12.8, the mergeability (`merge_status`)
of each merge request is checked asynchronously when a request is made to this endpoint. Poll this API endpoint
to get updated status. This affects the `has_conflicts` property as it is dependent on the `merge_status`. It returns
`false` unless `merge_status` is `cannot_be_merged`.
- `references.relative` is relative to the group or project that the merge request is being requested. When the merge
request is fetched from its project, `relative` format would be the same as `short` format, and when requested across
groups or projects, it is expected to be the same as `full` format.
- [Introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/349031) in GitLab 14.7,
field `merge_user` can be either user who merged this merge request,
user who set it to merge when pipeline succeeds or `null`.
Field `merged_by` (user who merged this merge request or `null`) has been deprecated.
- `pipeline` is an old parameter and should not be used. Use `head_pipeline` instead,
as it is faster and returns more information.
### Merge status

View File

@ -129,7 +129,7 @@ rule in the defined policy are met.
| Field | Type | Possible values | Description |
|-------|------|-----------------|-------------|
| `scan` | `string` | `dast`, `secret_detection`, `sast`, `container_scanning` | The action's type. |
| `scan` | `string` | `dast`, `secret_detection`, `sast`, `container_scanning`, `dependency_scanning` | The action's type. |
| `site_profile` | `string` | Name of the selected [DAST site profile](../dast/index.md#site-profile). | The DAST site profile to execute the DAST scan. This field should only be set if `scan` type is `dast`. |
| `scanner_profile` | `string` or `null` | Name of the selected [DAST scanner profile](../dast/index.md#scanner-profile). | The DAST scanner profile to execute the DAST scan. This field should only be set if `scan` type is `dast`.|
| `variables` | `object` | | A set of CI variables, supplied as an array of `key: value` pairs, to apply and enforce for the selected scan. The `key` is the variable name, with its `value` provided as a string. This parameter supports any variable that the GitLab CI job supports for the specified scan. |
@ -152,7 +152,7 @@ Note the following:
mode when executed as part of a scheduled scan.
- A container scanning scan that is configured for the `pipeline` rule type ignores the agent defined in the `agents` object. The `agents` object is only considered for `schedule` rule types.
An agent with a name provided in the `agents` object must be created and configured for the project.
- The SAST scan uses the default template and runs in a [child pipeline](../../../ci/pipelines/downstream_pipelines.md#parent-child-pipelines).
- The Dependency Scanning and SAST scans use the default templates and run in a [child pipeline](../../../ci/pipelines/downstream_pipelines.md#parent-child-pipelines).
## Example security policies project

View File

@ -121,11 +121,18 @@ It can also help to compare the XML response from your provider with our [exampl
> - [Improved](https://gitlab.com/gitlab-org/gitlab/-/issues/211962) in GitLab 13.8 with allowing group owners to not go through SSO.
> - [Improved](https://gitlab.com/gitlab-org/gitlab/-/issues/9152) in GitLab 13.11 with enforcing open SSO session to use Git if this setting is switched on.
> - [Improved](https://gitlab.com/gitlab-org/gitlab/-/issues/339888) in GitLab 14.7 to not enforce SSO checks for Git activity originating from CI/CD jobs.
> - [Improved](https://gitlab.com/gitlab-org/gitlab/-/issues/215155) in GitLab 15.5 [with a flag](../../../administration/feature_flags.md) named `transparent_sso_enforcement` to include transparent enforcement even when SSO enforcement is not enabled. Enabled on GitLab.com.
> - [Improved](https://gitlab.com/gitlab-org/gitlab/-/issues/215155) in GitLab 15.5 [with a flag](../../../administration/feature_flags.md) named `transparent_sso_enforcement` to include transparent enforcement even when SSO enforcement is not enabled. Disabled on GitLab.com.
FLAG:
On self-managed GitLab, transparent SSO enforcement is unavailable. On GitLab.com, transparent SSO enforcement is unavailable and can be configured by GitLab.com administrators only.
SSO is enforced when users access groups and projects in the organization's group hierarchy. Users can view other groups and projects without SSO sign in.
When SAML SSO is enabled, SSO is enforced for each user with an existing SAML identity.
SSO is enforced for each user with an existing SAML identity when the following is enabled:
- SAML SSO.
- The `:transparent_sso_enforcement` feature flag.
A user has a SAML identity if one or both of the following are true:
- They have signed in to GitLab by using their GitLab group's single sign-on URL.
@ -142,6 +149,15 @@ However, users are not prompted to sign in through SSO on each visit. GitLab che
has authenticated through SSO. If it's been more than 1 day since the last sign-in, GitLab
prompts the user to sign in again through SSO.
When the transparent SSO enforcement feature flag is enabled, SSO is enforced as follows:
| Project/Group visibility | Enforce SSO setting | Member with identity | Member without identity | Non-member or not signed in |
|--------------------------|---------------------|--------------------| ------ |------------------------------|
| Private | Off | Enforced | Not enforced | No access |
| Private | On | Enforced | Enforced | No access |
| Public | Off | Enforced | Not enforced | Not enforced |
| Public | On | Enforced | Enforced | Not enforced |
An [issue exists](https://gitlab.com/gitlab-org/gitlab/-/issues/297389) to add a similar SSO requirement for API activity.
SSO enforcement has the following effects when enabled:

View File

@ -75,7 +75,7 @@ The following table lists project permissions available for each role:
| [Container Registry](packages/container_registry/index.md):<br>Push an image to the Container Registry | | | ✓ | ✓ | ✓ |
| [Container Registry](packages/container_registry/index.md):<br>Pull an image from the Container Registry | ✓ (*19*) | ✓ (*19*) | ✓ | ✓ | ✓ |
| [Container Registry](packages/container_registry/index.md):<br>Remove a Container Registry image | | | ✓ | ✓ | ✓ |
| [GitLab Pages](project/pages/index.md):<br>View Pages protected by [access control](project/pages/introduction.md#gitlab-pages-access-control) | ✓ | ✓ | ✓ | ✓ | ✓ |
| [GitLab Pages](project/pages/index.md):<br>View Pages protected by [access control](project/pages/pages_access_control.md) | ✓ | ✓ | ✓ | ✓ | ✓ |
| [GitLab Pages](project/pages/index.md):<br>Manage | | | | ✓ | ✓ |
| [GitLab Pages](project/pages/index.md):<br>Manage GitLab Pages domains and certificates | | | | ✓ | ✓ |
| [GitLab Pages](project/pages/index.md):<br>Remove GitLab Pages | | | | ✓ | ✓ |

Binary file not shown.

Before

Width:  |  Height:  |  Size: 4.3 KiB

View File

@ -40,20 +40,19 @@ If you are using [GitLab Pages on GitLab.com](#gitlab-pages-on-gitlabcom) to hos
Visit the [GitLab Pages group](https://gitlab.com/groups/pages) for a complete list of example projects. Contributions are very welcome.
## Custom error codes Pages
## Custom error codes pages
You can provide your own 403 and 404 error pages by creating the `403.html` and
`404.html` files respectively in the root directory of the `public/` directory
that are included in the artifacts. Usually this is the root directory of
your project, but that may differ depending on your static generator
configuration.
You can provide your own `403` and `404` error pages by creating `403.html` and
`404.html` files in the root of the `public/` directory. Usually this is
the root directory of your project, but that may differ
depending on your static generator configuration.
In the case of `404.html`, there are different scenarios. For example:
- If you use project Pages (served under `/projectname/`) and try to access
`/projectname/non/existing_file`, GitLab Pages tries to serve first
`/projectname/404.html`, and then `/404.html`.
- If you use user/group Pages (served under `/`) and try to access
- If you use user or group Pages (served under `/`) and try to access
`/non/existing_file` GitLab Pages tries to serve `/404.html`.
- If you use a custom domain and try to access `/non/existing_file`, GitLab
Pages tries to serve only `/404.html`.
@ -63,34 +62,34 @@ If the case of `404.html`, there are different scenarios. For example:
You can configure redirects for your site using a `_redirects` file. To learn more, read
the [redirects documentation](redirects.md).
## GitLab Pages Access Control
## Remove your pages
To restrict access to your website, enable [GitLab Pages Access Control](pages_access_control.md).
To remove your pages:
## Unpublishing your Pages
If you ever feel the need to purge your Pages content, you can do so by going
to your project's settings through the gear icon in the top right, and then
navigating to **Pages**. Select the **Remove pages** button to delete your Pages
website.
![Remove pages](img/remove_pages_v15_3.png)
1. On the top bar, select **Main menu > Projects** and find your project.
1. On the left sidebar, select **Settings > Pages**.
1. Select **Remove pages**.
## Subdomains of subdomains
When using Pages under the top-level domain of a GitLab instance (`*.example.io`), you can't use HTTPS with subdomains
of subdomains. If your namespace or group name contains a dot (for example, `foo.bar`) the domain
`https://foo.bar.example.io` does _not_ work.
`https://foo.bar.example.io` does **not** work.
This limitation is because of the [HTTP Over TLS protocol](https://www.rfc-editor.org/rfc/rfc2818#section-3.1). HTTP pages
work as long as you don't redirect HTTP to HTTPS.
## GitLab Pages and subgroups
## GitLab Pages in projects and groups
You must host your GitLab Pages website in a project. This project can belong to a [group](../../group/index.md) or
[subgroup](../../group/subgroups/index.md). For
[group websites](../../project/pages/getting_started_part_one.md#gitlab-pages-default-domain-names), the group must be
at the top level and not a subgroup.
You must host your GitLab Pages website in a project. This project can be
[private, internal, or public](../../../user/public_access.md) and belong
to a [group](../../group/index.md) or [subgroup](../../group/subgroups/index.md).
For [group websites](../../project/pages/getting_started_part_one.md#user-and-group-website-examples),
the group must be at the top level and not a subgroup.
For [project websites](../../project/pages/getting_started_part_one.md#project-website-examples),
you can create your project first and access it under `http(s)://namespace.example.io/projectname`.
## Specific configuration options for Pages
@ -129,7 +128,7 @@ pages:
See this document for a [step-by-step guide](getting_started/pages_from_scratch.md).
### `.gitlab-ci.yml` for a repository where there's also actual code
### `.gitlab-ci.yml` for a repository with code
Remember that GitLab Pages are by default branch/tag agnostic and their
deployment relies solely on what you specify in `.gitlab-ci.yml`. You can limit
@ -257,26 +256,6 @@ instead. Here are some examples of what happens given the above Pages site:
Note that when `public/data/index.html` exists, it takes priority over the `public/data.html` file
for both the `/data` and `/data/` URL paths.
## Frequently Asked Questions
### Can you download your generated pages?
Sure. All you need to do is download the artifacts archive from the job page.
### Can you use GitLab Pages if your project is private?
Yes. GitLab Pages doesn't care whether you set your project's visibility level
to private, internal or public.
### Can you create a personal or a group website?
Yes. See the documentation about [GitLab Pages domain names, URLs, and base URLs](getting_started_part_one.md).
### Do you need to create a user/group website before creating a project website?
No, you don't. You can create your project first and access it under
`http(s)://namespace.example.io/projectname`.
## Known issues
For a list of known issues, visit the GitLab [public issue tracker](https://gitlab.com/gitlab-org/gitlab/-/issues?label_name[]=Category%3APages).

View File

@ -66,6 +66,8 @@ module Banzai
projects = lazy { projects_for_nodes(nodes) }
project_attr = 'data-project'
preload_associations(projects, user)
nodes.select do |node|
if node.has_attribute?(project_attr)
can_read_reference?(user, projects[node], node)
@ -261,6 +263,14 @@ module Banzai
hash[key] = {}
end
end
# Preloads project policy associations up front to avoid N+1 queries
# when permissions are checked per node.
# Note: the `projects` param is a hash of { node => project };
# see #projects_for_nodes for how it is built.
def preload_associations(projects, user)
  ::Preloaders::ProjectPolicyPreloader.new(projects.values, user).execute
end
end
end
end

View File

@ -85,7 +85,7 @@ module Gitlab
end
def load_from_cache
Gitlab::Redis::Cache.with do |redis|
with_redis do |redis|
self.sha, self.status, self.ref = redis.hmget(cache_key, :sha, :status, :ref)
self.status = nil if self.status.empty?
@ -93,13 +93,13 @@ module Gitlab
end
def store_in_cache
Gitlab::Redis::Cache.with do |redis|
with_redis do |redis|
redis.mapped_hmset(cache_key, { sha: sha, status: status, ref: ref })
end
end
def delete_from_cache
Gitlab::Redis::Cache.with do |redis|
with_redis do |redis|
redis.del(cache_key)
end
end
@ -107,7 +107,7 @@ module Gitlab
def has_cache?
return self.loaded unless self.loaded.nil?
Gitlab::Redis::Cache.with do |redis|
with_redis do |redis|
redis.exists?(cache_key) # rubocop:disable CodeReuse/ActiveRecord
end
end
@ -125,6 +125,10 @@ module Gitlab
project.commit
end
end
# Yields a connection from the shared Redis cache pool to the block.
# Single access point for the cache reads/writes in this class.
def with_redis(&block)
  Gitlab::Redis::Cache.with(&block) # rubocop:disable CodeReuse/ActiveRecord
end
end
end
end

View File

@ -33,7 +33,7 @@ module Gitlab
# timeout - The new timeout of the key if the key is to be refreshed.
def self.read(raw_key, timeout: TIMEOUT)
key = cache_key_for(raw_key)
value = Redis::Cache.with { |redis| redis.get(key) }
value = with_redis { |redis| redis.get(key) }
if value.present?
# We refresh the expiration time so frequently used keys stick
@ -44,7 +44,7 @@ module Gitlab
# did not find a matching GitLab user. In that case we _don't_ want to
# refresh the TTL so we automatically pick up the right data when said
# user were to register themselves on the GitLab instance.
Redis::Cache.with { |redis| redis.expire(key, timeout) }
with_redis { |redis| redis.expire(key, timeout) }
end
value
@ -69,7 +69,7 @@ module Gitlab
key = cache_key_for(raw_key)
Redis::Cache.with do |redis|
with_redis do |redis|
redis.set(key, value, ex: timeout)
end
@ -85,7 +85,7 @@ module Gitlab
def self.increment(raw_key, timeout: TIMEOUT)
key = cache_key_for(raw_key)
Redis::Cache.with do |redis|
with_redis do |redis|
value = redis.incr(key)
redis.expire(key, timeout)
@ -105,7 +105,7 @@ module Gitlab
key = cache_key_for(raw_key)
Redis::Cache.with do |redis|
with_redis do |redis|
redis.incrby(key, value)
redis.expire(key, timeout)
end
@ -121,7 +121,7 @@ module Gitlab
key = cache_key_for(raw_key)
Redis::Cache.with do |redis|
with_redis do |redis|
redis.multi do |m|
m.sadd(key, value)
m.expire(key, timeout)
@ -149,7 +149,7 @@ module Gitlab
def self.values_from_set(raw_key)
key = cache_key_for(raw_key)
Redis::Cache.with do |redis|
with_redis do |redis|
redis.smembers(key)
end
end
@ -160,7 +160,7 @@ module Gitlab
# key_prefix - prefix inserted before each key
# timeout - The time after which the cache key should expire.
def self.write_multiple(mapping, key_prefix: nil, timeout: TIMEOUT)
Redis::Cache.with do |redis|
with_redis do |redis|
redis.pipelined do |multi|
mapping.each do |raw_key, value|
key = cache_key_for("#{key_prefix}#{raw_key}")
@ -180,7 +180,7 @@ module Gitlab
def self.expire(raw_key, timeout)
key = cache_key_for(raw_key)
Redis::Cache.with do |redis|
with_redis do |redis|
redis.expire(key, timeout)
end
end
@ -199,7 +199,7 @@ module Gitlab
validate_redis_value!(value)
key = cache_key_for(raw_key)
val = Redis::Cache.with do |redis|
val = with_redis do |redis|
redis
.eval(WRITE_IF_GREATER_SCRIPT, keys: [key], argv: [value, timeout])
end
@ -218,7 +218,7 @@ module Gitlab
key = cache_key_for(raw_key)
Redis::Cache.with do |redis|
with_redis do |redis|
redis.multi do |m|
m.hset(key, field, value)
m.expire(key, timeout)
@ -232,7 +232,7 @@ module Gitlab
def self.values_from_hash(raw_key)
key = cache_key_for(raw_key)
Redis::Cache.with do |redis|
with_redis do |redis|
redis.hgetall(key)
end
end
@ -241,6 +241,10 @@ module Gitlab
"#{Redis::Cache::CACHE_NAMESPACE}:#{raw_key}"
end
def self.with_redis(&block)
Redis::Cache.with(&block) # rubocop:disable CodeReuse/ActiveRecord
end
def self.validate_redis_value!(value)
value_as_string = value.to_s
return if value_as_string.is_a?(String)

View File

@ -18,7 +18,7 @@ module Gitlab
keys = tags.map(&method(:cache_key))
cached_tags_count = 0
::Gitlab::Redis::Cache.with do |redis|
with_redis do |redis|
tags.zip(redis.mget(keys)).each do |tag, created_at|
next unless created_at
@ -45,7 +45,7 @@ module Gitlab
now = Time.zone.now
::Gitlab::Redis::Cache.with do |redis|
with_redis do |redis|
# we use a pipeline instead of a MSET because each tag has
# a specific ttl
redis.pipelined do |pipeline|
@ -66,6 +66,10 @@ module Gitlab
def cache_key(tag)
"container_repository:{#{@container_repository.id}}:tag:#{tag.name}:created_at"
end
def with_redis(&block)
::Gitlab::Redis::Cache.with(&block) # rubocop:disable CodeReuse/ActiveRecord
end
end
end
end

View File

@ -62,7 +62,7 @@ module Gitlab
end
def clear
Gitlab::Redis::Cache.with do |redis|
with_redis do |redis|
redis.del(key)
end
end
@ -124,7 +124,7 @@ module Gitlab
# ...it will write/update a Gitlab::Redis hash (HSET)
#
def write_to_redis_hash(hash)
Gitlab::Redis::Cache.with do |redis|
with_redis do |redis|
redis.pipelined do |pipeline|
hash.each do |diff_file_id, highlighted_diff_lines_hash|
pipeline.hset(
@ -189,7 +189,7 @@ module Gitlab
results = []
cache_key = key # Moving out redis calls for feature flags out of redis.pipelined
Gitlab::Redis::Cache.with do |redis|
with_redis do |redis|
redis.pipelined do |pipeline|
results = pipeline.hmget(cache_key, file_paths)
pipeline.expire(key, EXPIRATION)
@ -223,6 +223,10 @@ module Gitlab
::Gitlab::Metrics::WebTransaction.current
end
def with_redis(&block)
Gitlab::Redis::Cache.with(&block) # rubocop:disable CodeReuse/ActiveRecord
end
def record_hit_ratio(results)
current_transaction&.increment(:gitlab_redis_diff_caching_requests_total)
current_transaction&.increment(:gitlab_redis_diff_caching_hits_total) if results.any?(&:present?)

View File

@ -14,7 +14,7 @@ module Gitlab
#
# mapping - Write multiple cache values at once
def write_multiple(mapping)
Redis::Cache.with do |redis|
with_redis do |redis|
redis.multi do |multi|
mapping.each do |raw_key, value|
key = cache_key_for(raw_key)
@ -37,7 +37,7 @@ module Gitlab
keys = raw_keys.map { |id| cache_key_for(id) }
content =
Redis::Cache.with do |redis|
with_redis do |redis|
Gitlab::Instrumentation::RedisClusterValidator.allow_cross_slot_commands do
redis.mget(keys)
end
@ -62,7 +62,7 @@ module Gitlab
keys = raw_keys.map { |id| cache_key_for(id) }
Redis::Cache.with do |redis|
with_redis do |redis|
Gitlab::Instrumentation::RedisClusterValidator.allow_cross_slot_commands do
redis.del(keys)
end
@ -78,6 +78,10 @@ module Gitlab
def cache_key_prefix
"#{Redis::Cache::CACHE_NAMESPACE}:#{VERSION}:discussion-highlight"
end
def with_redis(&block)
Redis::Cache.with(&block) # rubocop:disable CodeReuse/ActiveRecord
end
end
end
end

View File

@ -11,7 +11,7 @@ module Gitlab
end
def load
@access, @reason, @refreshed_at = ::Gitlab::Redis::Cache.with do |redis|
@access, @reason, @refreshed_at = with_redis do |redis|
redis.hmget(cache_key, :access, :reason, :refreshed_at)
end
@ -19,7 +19,7 @@ module Gitlab
end
def store(new_access, new_reason, new_refreshed_at)
::Gitlab::Redis::Cache.with do |redis|
with_redis do |redis|
redis.pipelined do |pipeline|
pipeline.mapped_hmset(
cache_key,
@ -58,6 +58,10 @@ module Gitlab
def cache_key
"external_authorization:user-#{@user.id}:label-#{@label}"
end
def with_redis(&block)
::Gitlab::Redis::Cache.with(&block) # rubocop:disable CodeReuse/ActiveRecord
end
end
end
end

View File

@ -28,7 +28,7 @@ module Gitlab
def save(updates)
@loaded = false
Gitlab::Redis::Cache.with do |r|
with_redis do |r|
r.mapped_hmset(markdown_cache_key, updates)
r.expire(markdown_cache_key, EXPIRES_IN)
end
@ -40,7 +40,7 @@ module Gitlab
if pipeline
pipeline.mapped_hmget(markdown_cache_key, *fields)
else
Gitlab::Redis::Cache.with do |r|
with_redis do |r|
r.mapped_hmget(markdown_cache_key, *fields)
end
end
@ -64,6 +64,10 @@ module Gitlab
"markdown_cache:#{@subject.cache_key}"
end
def with_redis(&block)
Gitlab::Redis::Cache.with(&block) # rubocop:disable CodeReuse/ActiveRecord
end
end
end
end

View File

@ -7,16 +7,20 @@ module Gitlab
VERSION = 1
def save_check(merge_check:, result_hash:)
Gitlab::Redis::Cache.with do |redis|
with_redis do |redis|
redis.set(merge_check.cache_key + ":#{VERSION}", result_hash.to_json, ex: EXPIRATION)
end
end
def retrieve_check(merge_check:)
Gitlab::Redis::Cache.with do |redis|
with_redis do |redis|
Gitlab::Json.parse(redis.get(merge_check.cache_key + ":#{VERSION}"), symbolize_keys: true)
end
end
def with_redis(&block)
Gitlab::Redis::Cache.with(&block) # rubocop:disable CodeReuse/ActiveRecord
end
end
end
end

View File

@ -0,0 +1,15 @@
# frozen_string_literal: true

module Gitlab
  # Resolves the base URL of the GitLab Observability (opstrace) backend.
  module Observability
    module_function

    # Returns the observability backend URL as a String.
    #
    # Precedence: the OVERRIDE_OBSERVABILITY_URL environment variable wins
    # when present; otherwise the staging or production endpoint is picked
    # based on the running instance.
    def observability_url
      override = ENV['OVERRIDE_OBSERVABILITY_URL']
      return override if override

      # TODO Make observability URL configurable https://gitlab.com/gitlab-org/opstrace/opstrace-ui/-/issues/80
      Gitlab.staging? ? 'https://observe.staging.gitlab.com' : 'https://observe.gitlab.com'
    end
  end
end

View File

@ -0,0 +1,67 @@
# frozen_string_literal: true

module Gitlab
  # Kaminari-compatible pagination helper for collections whose total size
  # is already known (e.g. results from an external API), without requiring
  # an ActiveRecord relation.
  class PaginationDelegate # rubocop:disable Gitlab/NamespacedClass
    DEFAULT_PER_PAGE = Kaminari.config.default_per_page
    MAX_PER_PAGE = Kaminari.config.max_per_page

    # page     - requested page number (may be nil or out of range; sanitized)
    # per_page - requested page size (may be nil or non-positive; sanitized)
    # count    - total number of items in the collection
    # options  - optional overrides for :default_per_page and :max_per_page
    def initialize(page:, per_page:, count:, options: {})
      @count = count
      @options = { default_per_page: DEFAULT_PER_PAGE,
                   max_per_page: MAX_PER_PAGE }.merge(options)

      # Order matters: sanitize_page uses total_pages, which depends on @per_page.
      @per_page = sanitize_per_page(per_page)
      @page = sanitize_page(page)
    end

    def total_count
      @count
    end

    def total_pages
      (total_count.to_f / @per_page).ceil
    end

    # Returns nil when already on the last page.
    def next_page
      current_page + 1 unless last_page?
    end

    # Returns nil when already on the first page.
    def prev_page
      current_page - 1 unless first_page?
    end

    def current_page
      @page
    end

    def limit_value
      @per_page
    end

    def first_page?
      current_page == 1
    end

    def last_page?
      current_page >= total_pages
    end

    # Zero-based offset of the first item on the current page.
    def offset
      (current_page - 1) * limit_value
    end

    private

    # Falls back to the default when per_page is nil or non-positive,
    # and caps it at the configured maximum.
    def sanitize_per_page(per_page)
      return @options[:default_per_page] unless per_page && per_page > 0

      [@options[:max_per_page], per_page].min
    end

    # Clamps the requested page into [1, total_pages]. The floor of 1 is
    # important for empty collections (total_pages == 0): without it the
    # page would resolve to 0, making `offset` negative and `first_page?`
    # false.
    def sanitize_page(page)
      return 1 unless page && page > 1

      [[total_pages, 1].max, page].min
    end
  end
end

View File

@ -7,14 +7,14 @@ module Gitlab
# Clears the Redis set storing the list of healthy shards
def self.clear
Gitlab::Redis::Cache.with { |redis| redis.del(HEALTHY_SHARDS_KEY) }
with_redis { |redis| redis.del(HEALTHY_SHARDS_KEY) }
end
# Updates the list of healthy shards using a Redis set
#
# shards - An array of shard names to store
def self.update(shards)
Gitlab::Redis::Cache.with do |redis|
with_redis do |redis|
redis.multi do |m|
m.del(HEALTHY_SHARDS_KEY)
shards.each { |shard_name| m.sadd(HEALTHY_SHARDS_KEY, shard_name) }
@ -25,19 +25,23 @@ module Gitlab
# Returns an array of strings of healthy shards
def self.cached_healthy_shards
Gitlab::Redis::Cache.with { |redis| redis.smembers(HEALTHY_SHARDS_KEY) }
with_redis { |redis| redis.smembers(HEALTHY_SHARDS_KEY) }
end
# Checks whether the given shard name is in the list of healthy shards.
#
# shard_name - The string to check
def self.healthy_shard?(shard_name)
Gitlab::Redis::Cache.with { |redis| redis.sismember(HEALTHY_SHARDS_KEY, shard_name) }
with_redis { |redis| redis.sismember(HEALTHY_SHARDS_KEY, shard_name) }
end
# Returns the number of healthy shards in the Redis set
def self.healthy_shard_count
Gitlab::Redis::Cache.with { |redis| redis.scard(HEALTHY_SHARDS_KEY) }
with_redis { |redis| redis.scard(HEALTHY_SHARDS_KEY) }
end
def self.with_redis(&block)
Gitlab::Redis::Cache.with(&block) # rubocop:disable CodeReuse/ActiveRecord
end
end
end

View File

@ -21,22 +21,8 @@ module Gitlab
include Gitlab::Utils::StrongMemoize
DEFAULT_DUPLICATE_KEY_TTL = 6.hours
WAL_LOCATION_TTL = 60.seconds
DEFAULT_STRATEGY = :until_executing
STRATEGY_NONE = :none
DEDUPLICATED_FLAG_VALUE = 1
LUA_SET_WAL_SCRIPT = <<~EOS
local key, wal, offset, ttl = KEYS[1], ARGV[1], tonumber(ARGV[2]), ARGV[3]
local existing_offset = redis.call("LINDEX", key, -1)
if existing_offset == false then
redis.call("RPUSH", key, wal, offset)
redis.call("EXPIRE", key, ttl)
elseif offset > tonumber(existing_offset) then
redis.call("LSET", key, 0, wal)
redis.call("LSET", key, -1, offset)
end
EOS
attr_reader :existing_jid
@ -60,129 +46,6 @@ module Gitlab
# This method will return the jid that was set in redis
def check!(expiry = duplicate_key_ttl)
if Feature.enabled?(:duplicate_jobs_cookie)
check_cookie!(expiry)
else
check_multi!(expiry)
end
end
def update_latest_wal_location!
return unless job_wal_locations.present?
if Feature.enabled?(:duplicate_jobs_cookie)
update_latest_wal_location_cookie!
else
update_latest_wal_location_multi!
end
end
def latest_wal_locations
return {} unless job_wal_locations.present?
strong_memoize(:latest_wal_locations) do
if Feature.enabled?(:duplicate_jobs_cookie)
get_cookie.fetch('wal_locations', {})
else
latest_wal_locations_multi
end
end
end
def delete!
if Feature.enabled?(:duplicate_jobs_cookie)
with_redis { |redis| redis.del(cookie_key) }
else
Gitlab::Instrumentation::RedisClusterValidator.allow_cross_slot_commands do
with_redis do |redis|
redis.multi do |multi|
multi.del(idempotency_key, deduplicated_flag_key)
delete_wal_locations!(multi)
end
end
end
end
end
def reschedule
Gitlab::SidekiqLogging::DeduplicationLogger.instance.rescheduled_log(job)
worker_klass.perform_async(*arguments)
end
def scheduled?
scheduled_at.present?
end
def duplicate?
raise "Call `#check!` first to check for existing duplicates" unless existing_jid
jid != existing_jid
end
def set_deduplicated_flag!(expiry = duplicate_key_ttl)
return unless reschedulable?
if Feature.enabled?(:duplicate_jobs_cookie)
with_redis { |redis| redis.eval(DEDUPLICATED_SCRIPT, keys: [cookie_key]) }
else
with_redis do |redis|
redis.set(deduplicated_flag_key, DEDUPLICATED_FLAG_VALUE, ex: expiry, nx: true)
end
end
end
DEDUPLICATED_SCRIPT = <<~LUA
local cookie_msgpack = redis.call("get", KEYS[1])
if not cookie_msgpack then
return
end
local cookie = cmsgpack.unpack(cookie_msgpack)
cookie.deduplicated = "1"
redis.call("set", KEYS[1], cmsgpack.pack(cookie), "ex", redis.call("ttl", KEYS[1]))
LUA
def should_reschedule?
return false unless reschedulable?
if Feature.enabled?(:duplicate_jobs_cookie)
get_cookie['deduplicated'].present?
else
with_redis do |redis|
redis.get(deduplicated_flag_key).present?
end
end
end
def scheduled_at
job['at']
end
def options
return {} unless worker_klass
return {} unless worker_klass.respond_to?(:get_deduplication_options)
worker_klass.get_deduplication_options
end
def idempotent?
return false unless worker_klass
return false unless worker_klass.respond_to?(:idempotent?)
worker_klass.idempotent?
end
def duplicate_key_ttl
options[:ttl] || DEFAULT_DUPLICATE_KEY_TTL
end
private
attr_writer :existing_wal_locations
attr_reader :queue_name, :job
attr_writer :existing_jid
def check_cookie!(expiry)
my_cookie = {
'jid' => jid,
'offsets' => {},
@ -206,26 +69,9 @@ module Gitlab
self.existing_jid = actual_cookie['jid']
end
def check_multi!(expiry)
read_jid = nil
read_wal_locations = {}
def update_latest_wal_location!
return unless job_wal_locations.present?
with_redis do |redis|
redis.multi do |multi|
multi.set(idempotency_key, jid, ex: expiry, nx: true)
read_wal_locations = check_existing_wal_locations!(multi, expiry)
read_jid = multi.get(idempotency_key)
end
end
job['idempotency_key'] = idempotency_key
# We need to fetch values since the read_wal_locations and read_jid were obtained inside transaction, under redis.multi command.
self.existing_wal_locations = read_wal_locations.transform_values(&:value)
self.existing_jid = read_jid.value
end
def update_latest_wal_location_cookie!
argv = []
job_wal_locations.each do |connection_name, location|
argv += [connection_name, pg_wal_lsn_diff(connection_name), location]
@ -260,56 +106,86 @@ module Gitlab
redis.call("set", KEYS[1], cmsgpack.pack(cookie), "ex", redis.call("ttl", KEYS[1]))
LUA
def update_latest_wal_location_multi!
with_redis do |redis|
redis.multi do |multi|
job_wal_locations.each do |connection_name, location|
multi.eval(
LUA_SET_WAL_SCRIPT,
keys: [wal_location_key(connection_name)],
argv: [location, pg_wal_lsn_diff(connection_name).to_i, WAL_LOCATION_TTL]
)
end
end
def latest_wal_locations
return {} unless job_wal_locations.present?
strong_memoize(:latest_wal_locations) do
get_cookie.fetch('wal_locations', {})
end
end
def latest_wal_locations_multi
read_wal_locations = {}
with_redis do |redis|
redis.multi do |multi|
job_wal_locations.keys.each do |connection_name|
read_wal_locations[connection_name] = multi.lindex(wal_location_key(connection_name), 0)
end
end
end
read_wal_locations.transform_values(&:value).compact
def delete!
with_redis { |redis| redis.del(cookie_key) }
end
def reschedule
Gitlab::SidekiqLogging::DeduplicationLogger.instance.rescheduled_log(job)
worker_klass.perform_async(*arguments)
end
def scheduled?
scheduled_at.present?
end
def duplicate?
raise "Call `#check!` first to check for existing duplicates" unless existing_jid
jid != existing_jid
end
def set_deduplicated_flag!(expiry = duplicate_key_ttl)
return unless reschedulable?
with_redis { |redis| redis.eval(DEDUPLICATED_SCRIPT, keys: [cookie_key]) }
end
DEDUPLICATED_SCRIPT = <<~LUA
local cookie_msgpack = redis.call("get", KEYS[1])
if not cookie_msgpack then
return
end
local cookie = cmsgpack.unpack(cookie_msgpack)
cookie.deduplicated = "1"
redis.call("set", KEYS[1], cmsgpack.pack(cookie), "ex", redis.call("ttl", KEYS[1]))
LUA
def should_reschedule?
reschedulable? && get_cookie['deduplicated'].present?
end
def scheduled_at
job['at']
end
def options
return {} unless worker_klass
return {} unless worker_klass.respond_to?(:get_deduplication_options)
worker_klass.get_deduplication_options
end
def idempotent?
return false unless worker_klass
return false unless worker_klass.respond_to?(:idempotent?)
worker_klass.idempotent?
end
def duplicate_key_ttl
options[:ttl] || DEFAULT_DUPLICATE_KEY_TTL
end
private
attr_writer :existing_wal_locations
attr_reader :queue_name, :job
attr_writer :existing_jid
def worker_klass
@worker_klass ||= worker_class_name.to_s.safe_constantize
end
def delete_wal_locations!(redis)
job_wal_locations.keys.each do |connection_name|
redis.del(wal_location_key(connection_name))
redis.del(existing_wal_location_key(connection_name))
end
end
def check_existing_wal_locations!(redis, expiry)
read_wal_locations = {}
job_wal_locations.each do |connection_name, location|
key = existing_wal_location_key(connection_name)
redis.set(key, location, ex: expiry, nx: true)
read_wal_locations[connection_name] = redis.get(key)
end
read_wal_locations
end
def job_wal_locations
job['wal_locations'] || {}
end
@ -343,14 +219,6 @@ module Gitlab
job['jid']
end
def existing_wal_location_key(connection_name)
"#{idempotency_key}:#{connection_name}:existing_wal_location"
end
def wal_location_key(connection_name)
"#{idempotency_key}:#{connection_name}:wal_location"
end
def cookie_key
"#{idempotency_key}:cookie:v2"
end
@ -363,10 +231,6 @@ module Gitlab
@idempotency_key ||= job['idempotency_key'] || "#{namespace}:#{idempotency_hash}"
end
def deduplicated_flag_key
"#{idempotency_key}:deduplicate_flag"
end
def idempotency_hash
Digest::SHA256.hexdigest(idempotency_string)
end

View File

@ -7,6 +7,7 @@ module Gitlab
FREE_TEXT_METRIC_NAME = "<please fill metric name>"
REDIS_EVENT_METRIC_NAME = "<please fill metric name, suggested format is: {subject}_{verb}{ing|ed}_{object} eg: users_creating_epics or merge_requests_viewed_in_single_file_mode>"
CONSTRAINTS_PROMPT_TEMPLATE = "<adjective describing: '%{constraints}'>"
EMPTY_CONSTRAINT = "()"
class << self
def for(operation, relation: nil, column: nil)
@ -52,7 +53,8 @@ module Gitlab
end
arel = arel_query(relation: relation, column: arel_column, distinct: distinct)
constraints = parse_constraints(relation: relation, arel: arel)
where_constraints = parse_where_constraints(relation: relation, arel: arel)
having_constraints = parse_having_constraints(relation: relation, arel: arel)
# In some cases due to performance reasons metrics are instrumented with joined relations
# where relation listed in FROM statement is not the one that includes counted attribute
@ -66,23 +68,35 @@ module Gitlab
# count_environment_id_from_clusters_with_deployments
actual_source = parse_source(relation, arel_column)
append_constraints_prompt(actual_source, [constraints], parts)
append_constraints_prompt(actual_source, [where_constraints], [having_constraints], parts)
parts << actual_source
parts += process_joined_relations(actual_source, arel, relation, constraints)
parts += process_joined_relations(actual_source, arel, relation, where_constraints)
parts.compact.join('_').delete('"')
end
def append_constraints_prompt(target, constraints, parts)
applicable_constraints = constraints.select { |constraint| constraint.include?(target) }
def append_constraints_prompt(target, where_constraints, having_constraints, parts)
where_constraints.select! do |constraint|
constraint.include?(target)
end
having_constraints.delete(EMPTY_CONSTRAINT)
applicable_constraints = where_constraints + having_constraints
return unless applicable_constraints.any?
parts << CONSTRAINTS_PROMPT_TEMPLATE % { constraints: applicable_constraints.join(' AND ') }
end
def parse_constraints(relation:, arel:)
def parse_where_constraints(relation:, arel:)
connection = relation.connection
::Gitlab::Usage::Metrics::NamesSuggestions::RelationParsers::Constraints
::Gitlab::Usage::Metrics::NamesSuggestions::RelationParsers::WhereConstraints
.new(connection)
.accept(arel, collector(connection))
.value
end
def parse_having_constraints(relation:, arel:)
connection = relation.connection
::Gitlab::Usage::Metrics::NamesSuggestions::RelationParsers::HavingConstraints
.new(connection)
.accept(arel, collector(connection))
.value
@ -152,7 +166,7 @@ module Gitlab
subtree.each do |parent, children|
parts << "<#{conjunction}>"
join_constraints = joins.find { |join| join[:source] == parent }&.dig(:constraints)
append_constraints_prompt(parent, [wheres, join_constraints].compact, parts)
append_constraints_prompt(parent, [wheres, join_constraints].compact, [], parts)
parts << parent
collect_join_parts(relations: children, joins: joins, wheres: wheres, parts: parts, conjunctions: conjunctions)
end

View File

@ -0,0 +1,31 @@
# frozen_string_literal: true

module Gitlab
  module Usage
    module Metrics
      module NamesSuggestions
        module RelationParsers
          # Arel visitor that walks a SELECT statement and collects only its
          # HAVING clause nodes, used to build human-readable metric name
          # suggestions.
          class HavingConstraints < ::Arel::Visitors::PostgreSQL
            # rubocop:disable Naming/MethodName
            def visit_Arel_Nodes_SelectCore(object, collector)
              collect_nodes_for(object.havings, collector, "") || collector
            end
            # rubocop:enable Naming/MethodName

            # Quoting is deliberately a no-op: the suggestions need plain,
            # readable identifiers and literals, not SQL-quoted output.
            %i[quote quote_table_name quote_column_name].each do |quoting_method|
              define_method(quoting_method) { |value| value.to_s }
            end
          end
        end
      end
    end
  end
end

View File

@ -5,7 +5,7 @@ module Gitlab
module Metrics
module NamesSuggestions
module RelationParsers
class Constraints < ::Arel::Visitors::PostgreSQL
class WhereConstraints < ::Arel::Visitors::PostgreSQL
# rubocop:disable Naming/MethodName
def visit_Arel_Nodes_SelectCore(object, collector)
collect_nodes_for(object.wheres, collector, "") || collector
@ -13,15 +13,15 @@ module Gitlab
# rubocop:enable Naming/MethodName
def quote(value)
"#{value}"
value.to_s
end
def quote_table_name(name)
"#{name}"
name.to_s
end
def quote_column_name(name)
"#{name}"
name.to_s
end
end
end

View File

@ -560,6 +560,16 @@ msgstr[1] ""
msgid "%{count} files touched"
msgstr ""
msgid "%{count} group"
msgid_plural "%{count} groups"
msgstr[0] ""
msgstr[1] ""
msgid "%{count} issue"
msgid_plural "%{count} issues"
msgstr[0] ""
msgstr[1] ""
msgid "%{count} item"
msgid_plural "%{count} items"
msgstr[0] ""
@ -568,6 +578,11 @@ msgstr[1] ""
msgid "%{count} items per page"
msgstr ""
msgid "%{count} merge request"
msgid_plural "%{count} merge requests"
msgstr[0] ""
msgstr[1] ""
msgid "%{count} more"
msgstr ""
@ -590,6 +605,11 @@ msgid_plural "%{count} participants"
msgstr[0] ""
msgstr[1] ""
msgid "%{count} project"
msgid_plural "%{count} projects"
msgstr[0] ""
msgstr[1] ""
msgid "%{count} related %{pluralized_subject}: %{links}"
msgstr ""
@ -3124,6 +3144,9 @@ msgstr ""
msgid "AdminUsers|Admins"
msgstr ""
msgid "AdminUsers|An error occurred while fetching this user's contributions, and the request cannot return the number of issues, merge requests, groups, and projects linked to this user. If you proceed with deleting the user, all their contributions will still be deleted."
msgstr ""
msgid "AdminUsers|Approve"
msgstr ""
@ -3361,7 +3384,7 @@ msgstr ""
msgid "AdminUsers|To confirm, type %{projectName}"
msgstr ""
msgid "AdminUsers|To confirm, type %{username}"
msgid "AdminUsers|To confirm, type %{username}."
msgstr ""
msgid "AdminUsers|Unban user"
@ -3424,7 +3447,7 @@ msgstr ""
msgid "AdminUsers|You are about to permanently delete the user %{username}. Issues, merge requests, and groups linked to them will be transferred to a system-wide \"Ghost-user\". To avoid data loss, consider using the %{strongStart}block user%{strongEnd} feature instead. Once you %{strongStart}Delete user%{strongEnd}, it cannot be undone or recovered."
msgstr ""
msgid "AdminUsers|You are about to permanently delete the user %{username}. This will delete all of the issues, merge requests, and groups linked to them. To avoid data loss, consider using the %{strongStart}block user%{strongEnd} feature instead. Once you %{strongStart}Delete user%{strongEnd}, it cannot be undone or recovered."
msgid "AdminUsers|You are about to permanently delete the user %{username}. This will delete all issues, merge requests, groups, and projects linked to them. To avoid data loss, consider using the %{strongStart}Block user%{strongEnd} feature instead. After you %{strongStart}Delete user%{strongEnd}, you cannot undo this action or recover the data."
msgstr ""
msgid "AdminUsers|You can always block their account again if needed."
@ -5239,6 +5262,9 @@ msgstr ""
msgid "Are you sure you want to revoke this %{accessTokenType}? This action cannot be undone."
msgstr ""
msgid "Are you sure you want to revoke this group access token? This action cannot be undone."
msgstr ""
msgid "Are you sure you want to revoke this personal access token? This action cannot be undone."
msgstr ""
@ -11595,7 +11621,7 @@ msgstr ""
msgid "CredentialsInventory|Personal Access Tokens"
msgstr ""
msgid "CredentialsInventory|Project Access Tokens"
msgid "CredentialsInventory|Project and Group Access Tokens"
msgstr ""
msgid "CredentialsInventory|SSH Keys"
@ -31544,6 +31570,9 @@ msgstr ""
msgid "Project navigation"
msgstr ""
msgid "Project or Group"
msgstr ""
msgid "Project order will not be saved as local storage is not available."
msgstr ""

View File

@ -48,14 +48,21 @@ module QA
end
def verify_protected_branches_import
# TODO: Add validation once https://gitlab.com/groups/gitlab-org/-/epics/8585 is closed
# At the moment both options are always set to false regardless of state in github
# allow_force_push: true,
# code_owner_approval_required: true
imported_branches = imported_project.protected_branches.map do |branch|
branch.slice(:name)
branch.slice(:name, :allow_force_push, :code_owner_approval_required)
end
actual_branches = [{ name: 'main' }, { name: 'release' }]
actual_branches = [
{
name: 'main',
allow_force_push: false,
code_owner_approval_required: true
},
{
name: 'release',
allow_force_push: true,
code_owner_approval_required: true
}
]
expect(imported_branches).to match_array(actual_branches)
end

View File

@ -43,7 +43,7 @@ RSpec.describe RendersCommits do
context 'rendering commits' do
render_views
it 'avoids N + 1' do
it 'avoids N + 1', :request_store do
stub_const("MergeRequestDiff::COMMITS_SAFE_SIZE", 5)
control_count = ActiveRecord::QueryRecorder.new do
@ -59,7 +59,7 @@ RSpec.describe RendersCommits do
end
describe '.prepare_commits_for_rendering' do
it 'avoids N+1' do
it 'avoids N+1', :request_store do
control = ActiveRecord::QueryRecorder.new do
subject.prepare_commits_for_rendering(merge_request.commits.take(1))
end

View File

@ -208,19 +208,26 @@ RSpec.describe Explore::ProjectsController do
render_views
# some N+1 queries still exist
it 'avoids N+1 queries' do
projects = create_list(:project, 3, :repository, :public)
projects.each do |project|
pipeline = create(:ci_pipeline, :success, project: project, sha: project.commit.id)
create(:commit_status, :success, pipeline: pipeline, ref: pipeline.ref)
it 'avoids N+1 queries', :request_store do
# Because we enable the request store for this spec, Gitaly may report too many invocations.
# Allow N+1s here and when creating additional objects below because we're just creating test objects.
Gitlab::GitalyClient.allow_n_plus_1_calls do
projects = create_list(:project, 3, :repository, :public)
projects.each do |project|
pipeline = create(:ci_pipeline, :success, project: project, sha: project.commit.id)
create(:commit_status, :success, pipeline: pipeline, ref: pipeline.ref)
end
end
control = ActiveRecord::QueryRecorder.new { get endpoint }
new_projects = create_list(:project, 2, :repository, :public)
new_projects.each do |project|
pipeline = create(:ci_pipeline, :success, project: project, sha: project.commit.id)
create(:commit_status, :success, pipeline: pipeline, ref: pipeline.ref)
Gitlab::GitalyClient.allow_n_plus_1_calls do
new_projects = create_list(:project, 2, :repository, :public)
new_projects.each do |project|
pipeline = create(:ci_pipeline, :success, project: project, sha: project.commit.id)
create(:commit_status, :success, pipeline: pipeline, ref: pipeline.ref)
end
end
expect { get endpoint }.not_to exceed_query_limit(control).with_threshold(8)

View File

@ -5,6 +5,7 @@ require 'spec_helper'
RSpec.describe Projects::MergeRequestsController do
include ProjectForksHelper
include Gitlab::Routing
using RSpec::Parameterized::TableSyntax
let_it_be_with_refind(:project) { create(:project, :repository) }
let_it_be_with_reload(:project_public_with_private_builds) { create(:project, :repository, :public, :builds_private) }
@ -708,12 +709,14 @@ RSpec.describe Projects::MergeRequestsController do
end
describe 'GET commits' do
def go(format: 'html')
def go(page: nil, per_page: 1, format: 'html')
get :commits,
params: {
namespace_id: project.namespace.to_param,
project_id: project,
id: merge_request.iid
id: merge_request.iid,
page: page,
per_page: per_page
},
format: format
end
@ -723,6 +726,27 @@ RSpec.describe Projects::MergeRequestsController do
expect(response).to render_template('projects/merge_requests/_commits')
expect(json_response).to have_key('html')
expect(json_response).to have_key('next_page')
expect(json_response['next_page']).to eq(2)
end
describe 'pagination' do
where(:page, :next_page) do
1 | 2
2 | 3
3 | nil
end
with_them do
it "renders the commits for page #{params[:page]}" do
go format: 'json', page: page, per_page: 10
expect(response).to render_template('projects/merge_requests/_commits')
expect(json_response).to have_key('html')
expect(json_response).to have_key('next_page')
expect(json_response['next_page']).to eq(next_page)
end
end
end
end

View File

@ -86,7 +86,7 @@ RSpec.describe 'Resolving all open threads in a merge request from an issue', :j
expect(page).to have_link 'Create issue to resolve all threads', href: new_project_issue_path(project, merge_request_to_resolve_discussions_of: merge_request.iid)
end
context 'creating an issue for threads', quarantine: 'https://gitlab.com/gitlab-org/gitlab/-/issues/381729' do
context 'creating an issue for threads' do
before do
page.within '.mr-state-widget' do
page.click_link 'Create issue to resolve all threads', href: new_project_issue_path(project, merge_request_to_resolve_discussions_of: merge_request.iid)

View File

@ -1002,7 +1002,7 @@ RSpec.describe 'File blob', :js do
end
it 'renders sandboxed iframe' do
expected = %(<iframe src="/-/sandbox/swagger" sandbox="allow-scripts" frameborder="0" width="100%" height="1000">)
expected = %(<iframe src="/-/sandbox/swagger" sandbox="allow-scripts allow-popups" frameborder="0" width="100%" height="1000">)
expect(page.html).to include(expected)
end
end

View File

@ -1,13 +1,13 @@
import { GlDropdownItem } from '@gitlab/ui';
import { shallowMount } from '@vue/test-utils';
import Actions from '~/admin/users/components/actions';
import Delete from '~/admin/users/components/actions/delete.vue';
import eventHub, {
EVENT_OPEN_DELETE_USER_MODAL,
} from '~/admin/users/components/modals/delete_user_modal_event_hub';
import { capitalizeFirstCharacter } from '~/lib/utils/text_utility';
import { OBSTACLE_TYPES } from '~/vue_shared/components/user_deletion_obstacles/constants';
import { CONFIRMATION_ACTIONS, DELETE_ACTIONS } from '../../constants';
import { paths } from '../../mock_data';
import { CONFIRMATION_ACTIONS } from '../../constants';
import { paths, userDeletionObstacles } from '../../mock_data';
describe('Action components', () => {
let wrapper;
@ -41,40 +41,33 @@ describe('Action components', () => {
});
});
describe('DELETE_ACTION_COMPONENTS', () => {
describe('DELETE', () => {
beforeEach(() => {
jest.spyOn(eventHub, '$emit').mockImplementation();
});
const userDeletionObstacles = [
{ name: 'schedule1', type: OBSTACLE_TYPES.oncallSchedules },
{ name: 'policy1', type: OBSTACLE_TYPES.escalationPolicies },
];
it('renders a dropdown item that opens the delete user modal when Delete is clicked', async () => {
initComponent({
component: Delete,
props: {
username: 'John Doe',
userId: 1,
paths,
userDeletionObstacles,
},
});
it.each(DELETE_ACTIONS)(
'renders a dropdown item that opens the delete user modal when clicked for "%s"',
async (action) => {
initComponent({
component: Actions[capitalizeFirstCharacter(action)],
props: {
username: 'John Doe',
paths,
userDeletionObstacles,
},
});
await findDropdownItem().vm.$emit('click');
await findDropdownItem().vm.$emit('click');
expect(eventHub.$emit).toHaveBeenCalledWith(
EVENT_OPEN_DELETE_USER_MODAL,
expect.objectContaining({
username: 'John Doe',
blockPath: paths.block,
deletePath: paths[action],
userDeletionObstacles,
}),
);
},
);
expect(eventHub.$emit).toHaveBeenCalledWith(
EVENT_OPEN_DELETE_USER_MODAL,
expect.objectContaining({
username: 'John Doe',
blockPath: paths.block,
deletePath: paths.delete,
userDeletionObstacles,
}),
);
});
});
});

View File

@ -0,0 +1,107 @@
import { GlLoadingIcon } from '@gitlab/ui';
import { mountExtended } from 'helpers/vue_test_utils_helper';
import waitForPromises from 'helpers/wait_for_promises';
import DeleteWithContributions from '~/admin/users/components/actions/delete_with_contributions.vue';
import eventHub, {
EVENT_OPEN_DELETE_USER_MODAL,
} from '~/admin/users/components/modals/delete_user_modal_event_hub';
import { associationsCount } from '~/api/user_api';
import {
paths,
associationsCount as associationsCountData,
userDeletionObstacles,
} from '../../mock_data';
// Replace the event hub's default export with a `$emit` spy so tests can
// assert on emissions without triggering real modal listeners.
jest.mock('~/admin/users/components/modals/delete_user_modal_event_hub', () => ({
...jest.requireActual('~/admin/users/components/modals/delete_user_modal_event_hub'),
__esModule: true,
default: {
$emit: jest.fn(),
},
}));
// Stub the API module; each spec below controls how `associationsCount`
// resolves or rejects.
jest.mock('~/api/user_api', () => ({
associationsCount: jest.fn(),
}));
describe('DeleteWithContributions', () => {
let wrapper;
const defaultPropsData = {
username: 'John Doe',
userId: 1,
paths,
userDeletionObstacles,
};
const createComponent = () => {
wrapper = mountExtended(DeleteWithContributions, { propsData: defaultPropsData });
};
describe('when action is clicked', () => {
describe('when API request is loading', () => {
beforeEach(() => {
// A promise that never settles keeps the component pinned in its
// loading state for the duration of the spec.
associationsCount.mockReturnValueOnce(new Promise(() => {}));
createComponent();
});
it('displays loading icon and disables button', async () => {
await wrapper.trigger('click');
expect(wrapper.findComponent(GlLoadingIcon).exists()).toBe(true);
expect(wrapper.findByRole('menuitem').attributes()).toMatchObject({
disabled: 'disabled',
'aria-busy': 'true',
});
});
});
describe('when API request is successful', () => {
beforeEach(() => {
associationsCount.mockResolvedValueOnce({
data: associationsCountData,
});
createComponent();
});
it('emits event with association counts', async () => {
await wrapper.trigger('click');
// Let the associationsCount promise chain settle before asserting.
await waitForPromises();
expect(associationsCount).toHaveBeenCalledWith(defaultPropsData.userId);
expect(eventHub.$emit).toHaveBeenCalledWith(
EVENT_OPEN_DELETE_USER_MODAL,
expect.objectContaining({
associationsCount: associationsCountData,
username: defaultPropsData.username,
blockPath: paths.block,
deletePath: paths.deleteWithContributions,
userDeletionObstacles,
}),
);
});
});
describe('when API request is not successful', () => {
beforeEach(() => {
associationsCount.mockRejectedValueOnce();
createComponent();
});
it('emits event with error', async () => {
await wrapper.trigger('click');
await waitForPromises();
// On failure the modal is still opened; an Error instance takes the
// place of the counts payload.
expect(eventHub.$emit).toHaveBeenCalledWith(
EVENT_OPEN_DELETE_USER_MODAL,
expect.objectContaining({
associationsCount: new Error(),
}),
);
});
});
});
});

View File

@ -0,0 +1,3 @@
// Jest Snapshot v1, https://goo.gl/fbAQLP
exports[`AssociationsListItem renders interpolated message in a \`li\` element 1`] = `"<li><strong>5</strong> groups</li>"`;

View File

@ -0,0 +1,34 @@
// Jest Snapshot v1, https://goo.gl/fbAQLP
exports[`AssociationsList when counts are 0 does not render items 1`] = `""`;
exports[`AssociationsList when counts are plural renders plural counts 1`] = `
"<ul class=\\"gl-mb-5\\">
<li><strong>2</strong> groups</li>
<li><strong>3</strong> projects</li>
<li><strong>4</strong> issues</li>
<li><strong>5</strong> merge requests</li>
</ul>"
`;
exports[`AssociationsList when counts are singular renders singular counts 1`] = `
"<ul class=\\"gl-mb-5\\">
<li><strong>1</strong> group</li>
<li><strong>1</strong> project</li>
<li><strong>1</strong> issue</li>
<li><strong>1</strong> merge request</li>
</ul>"
`;
exports[`AssociationsList when there is an error displays an alert 1`] = `
"<div class=\\"gl-mb-5 gl-alert gl-alert-not-dismissible gl-alert-danger\\"><svg data-testid=\\"error-icon\\" role=\\"img\\" aria-hidden=\\"true\\" class=\\"gl-icon s16 gl-alert-icon gl-alert-icon-no-title\\">
<use href=\\"#error\\"></use>
</svg>
<div role=\\"alert\\" aria-live=\\"assertive\\" class=\\"gl-alert-content\\">
<!---->
<div class=\\"gl-alert-body\\">An error occurred while fetching this user's contributions, and the request cannot return the number of issues, merge requests, groups, and projects linked to this user. If you proceed with deleting the user, all their contributions will still be deleted.</div>
<!---->
</div>
<!---->
</div>"
`;

View File

@ -0,0 +1,25 @@
import { mountExtended } from 'helpers/vue_test_utils_helper';
import AssociationsListItem from '~/admin/users/components/associations/associations_list_item.vue';
import { n__ } from '~/locale';

describe('AssociationsListItem', () => {
  let wrapper;

  // A count greater than one exercises the pluralized message form.
  const count = 5;

  // Mounts the component with a translated, count-interpolated message.
  const mountComponent = () => {
    wrapper = mountExtended(AssociationsListItem, {
      propsData: {
        message: n__('%{count} group', '%{count} groups', count),
        count,
      },
    });
  };

  beforeEach(mountComponent);

  it('renders interpolated message in a `li` element', () => {
    expect(wrapper.html()).toMatchSnapshot();
  });
});

View File

@ -0,0 +1,78 @@
import { mountExtended } from 'helpers/vue_test_utils_helper';
import AssociationsList from '~/admin/users/components/associations/associations_list.vue';

// Small builder for the `associationsCount` prop shape used throughout.
const buildCounts = (groups, projects, issues, mergeRequests) => ({
  groups_count: groups,
  projects_count: projects,
  issues_count: issues,
  merge_requests_count: mergeRequests,
});

describe('AssociationsList', () => {
  let wrapper;

  // Singular counts are the baseline; individual specs override them.
  const defaultPropsData = {
    associationsCount: buildCounts(1, 1, 1, 1),
  };

  const createComponent = ({ propsData = {} } = {}) => {
    wrapper = mountExtended(AssociationsList, {
      propsData: {
        ...defaultPropsData,
        ...propsData,
      },
    });
  };

  describe('when there is an error', () => {
    it('displays an alert', () => {
      createComponent({ propsData: { associationsCount: new Error() } });

      expect(wrapper.html()).toMatchSnapshot();
    });
  });

  describe('when counts are singular', () => {
    it('renders singular counts', () => {
      createComponent();

      expect(wrapper.html()).toMatchSnapshot();
    });
  });

  describe('when counts are plural', () => {
    it('renders plural counts', () => {
      createComponent({ propsData: { associationsCount: buildCounts(2, 3, 4, 5) } });

      expect(wrapper.html()).toMatchSnapshot();
    });
  });

  describe('when counts are 0', () => {
    it('does not render items', () => {
      createComponent({ propsData: { associationsCount: buildCounts(0, 0, 0, 0) } });

      expect(wrapper.html()).toMatchSnapshot();
    });
  });
});

View File

@ -1,10 +1,12 @@
import { GlButton, GlFormInput, GlSprintf } from '@gitlab/ui';
import { nextTick } from 'vue';
import { shallowMountExtended } from 'helpers/vue_test_utils_helper';
import eventHub, {
EVENT_OPEN_DELETE_USER_MODAL,
} from '~/admin/users/components/modals/delete_user_modal_event_hub';
import DeleteUserModal from '~/admin/users/components/modals/delete_user_modal.vue';
import UserDeletionObstaclesList from '~/vue_shared/components/user_deletion_obstacles/user_deletion_obstacles_list.vue';
import AssociationsList from '~/admin/users/components/associations/associations_list.vue';
import ModalStub from './stubs/modal_stub';
const TEST_DELETE_USER_URL = 'delete-url';
@ -200,4 +202,24 @@ describe('Delete user modal', () => {
expect(obstacles.props('obstacles')).toEqual(userDeletionObstacles);
});
});
it('renders `AssociationsList` component and passes `associationsCount` prop', async () => {
const associationsCount = {
groups_count: 5,
projects_count: 0,
issues_count: 5,
merge_requests_count: 5,
};
createComponent();
emitOpenModalEvent({
...mockModalData,
associationsCount,
});
await nextTick();
expect(wrapper.findComponent(AssociationsList).props('associationsCount')).toEqual(
associationsCount,
);
});
});

View File

@ -121,8 +121,11 @@ describe('AdminUserActions component', () => {
it.each(DELETE_ACTIONS)('renders a delete action component item for "%s"', (action) => {
const component = wrapper.findComponent(Actions[capitalizeFirstCharacter(action)]);
expect(component.props('username')).toBe(user.name);
expect(component.props('paths')).toEqual(userPaths);
expect(component.props()).toMatchObject({
username: user.name,
userId: user.id,
paths: userPaths,
});
expect(component.text()).toBe(I18N_USER_ACTIONS[action]);
});
});

View File

@ -1,3 +1,5 @@
import { OBSTACLE_TYPES } from '~/vue_shared/components/user_deletion_obstacles/constants';
export const users = [
{
id: 2177,
@ -48,3 +50,15 @@ export const createGroupCountResponse = (groupCounts) => ({
},
},
});
// Counts as returned by the users `associations_count` API endpoint; the
// admin delete-user specs assert these are passed through to the modal.
export const associationsCount = {
groups_count: 5,
projects_count: 5,
issues_count: 5,
merge_requests_count: 5,
};
// Obstacles that complicate user deletion (an on-call schedule and an
// escalation policy), keyed by the shared OBSTACLE_TYPES constants.
export const userDeletionObstacles = [
{ name: 'schedule1', type: OBSTACLE_TYPES.oncallSchedules },
{ name: 'policy1', type: OBSTACLE_TYPES.escalationPolicies },
];

View File

@ -1,7 +1,8 @@
import MockAdapter from 'axios-mock-adapter';
import { followUser, unfollowUser } from '~/api/user_api';
import { followUser, unfollowUser, associationsCount } from '~/api/user_api';
import axios from '~/lib/utils/axios_utils';
import { associationsCount as associationsCountData } from 'jest/admin/users/mock_data';
describe('~/api/user_api', () => {
let axiosMock;
@ -47,4 +48,18 @@ describe('~/api/user_api', () => {
expect(axiosMock.history.post[0].url).toBe(expectedUrl);
});
});
describe('associationsCount', () => {
it('calls correct URL and returns expected response', async () => {
const expectedUrl = '/api/v4/users/1/associations_count';
// NOTE(review): `expectedResponse` already wraps the fixture in `data`,
// and the axios response wraps the reply body in `data` again, so the
// assertion below matches a doubly nested shape — confirm this nesting
// is intentional rather than replying with `associationsCountData` directly.
const expectedResponse = { data: associationsCountData };
axiosMock.onGet(expectedUrl).replyOnce(200, expectedResponse);
await expect(associationsCount(1)).resolves.toEqual(
expect.objectContaining({ data: expectedResponse }),
);
expect(axiosMock.history.get[0].url).toBe(expectedUrl);
});
});
});

View File

@ -21,7 +21,7 @@ describe('OpenAPI blob viewer', () => {
it('initializes SwaggerUI with the correct configuration', () => {
expect(document.body.innerHTML).toContain(
'<iframe src="/-/sandbox/swagger" sandbox="allow-scripts" frameborder="0" width="100%" height="1000"></iframe>',
'<iframe src="/-/sandbox/swagger" sandbox="allow-scripts allow-popups" frameborder="0" width="100%" height="1000"></iframe>',
);
});
});

View File

@ -6,7 +6,6 @@ RSpec.describe Mutations::Ci::Runner::BulkDelete do
include GraphqlHelpers
let_it_be(:admin_user) { create(:user, :admin) }
let_it_be(:user) { create(:user) }
let(:current_ctx) { { current_user: user } }
@ -19,24 +18,14 @@ RSpec.describe Mutations::Ci::Runner::BulkDelete do
sync(resolve(described_class, args: mutation_params, ctx: current_ctx))
end
context 'when the user cannot admin the runner' do
let(:runner) { create(:ci_runner) }
let(:mutation_params) do
{ ids: [runner.to_global_id] }
end
it 'generates an error' do
expect_graphql_error_to_be_created(Gitlab::Graphql::Errors::ResourceNotAvailable) { response }
end
end
context 'when user can delete runners' do
let(:user) { admin_user }
let(:group) { create(:group) }
let!(:runners) do
create_list(:ci_runner, 2, :instance)
create_list(:ci_runner, 2, :group, groups: [group])
end
context 'when required arguments are missing' do
context 'when runner IDs are missing' do
let(:mutation_params) { {} }
context 'when admin mode is enabled', :enable_admin_mode do
@ -47,43 +36,48 @@ RSpec.describe Mutations::Ci::Runner::BulkDelete do
end
context 'with runners specified by id' do
let(:mutation_params) do
let!(:mutation_params) do
{ ids: runners.map(&:to_global_id) }
end
context 'when admin mode is enabled', :enable_admin_mode do
it 'deletes runners', :aggregate_failures do
expect_next_instance_of(
::Ci::Runners::BulkDeleteRunnersService, { runners: runners }
) do |service|
expect(service).to receive(:execute).once.and_call_original
end
expect { response }.to change { Ci::Runner.count }.by(-2)
expect(response[:errors]).to be_empty
end
context 'when runner list is is above limit' do
before do
stub_const('::Ci::Runners::BulkDeleteRunnersService::RUNNER_LIMIT', 1)
end
it 'only deletes up to the defined limit', :aggregate_failures do
expect { response }.to change { Ci::Runner.count }
.by(-::Ci::Runners::BulkDeleteRunnersService::RUNNER_LIMIT)
expect(response[:errors]).to be_empty
end
end
end
context 'when admin mode is disabled', :aggregate_failures do
it 'returns error', :aggregate_failures do
expect do
expect_graphql_error_to_be_created(Gitlab::Graphql::Errors::ResourceNotAvailable) do
response
end
end.not_to change { Ci::Runner.count }
it 'ignores unknown keys from service response payload', :aggregate_failures do
expect_next_instance_of(
::Ci::Runners::BulkDeleteRunnersService, { runners: runners, current_user: user }
) do |service|
expect(service).to receive(:execute).once.and_return(
ServiceResponse.success(
payload: {
extra_key: 'extra_value',
deleted_count: 10,
deleted_ids: (1..10).to_a,
errors: []
}))
end
expect(response).not_to include(extra_key: 'extra_value')
end
end
end
context 'when the user cannot delete the runner' do
let(:runner) { create(:ci_runner) }
let!(:mutation_params) do
{ ids: [runner.to_global_id] }
end
context 'when user is admin and admin mode is not enabled' do
let(:user) { admin_user }
it 'returns error', :aggregate_failures do
expect { response }.not_to change { Ci::Runner.count }
expect(response[:errors]).to match_array("User does not have permission to delete any of the runners")
end
end
end

View File

@ -0,0 +1,33 @@
# frozen_string_literal: true
require 'fast_spec_helper'
# Specs for Gitlab::Observability.observability_url, which resolves the
# Observability backend URL from the configured GitLab instance URL.
RSpec.describe Gitlab::Observability do
describe '.observability_url' do
let(:gitlab_url) { 'https://example.com' }
subject { described_class.observability_url }
before do
stub_config_setting(url: gitlab_url)
end
# For an ordinary instance URL the production observe host is returned.
it { is_expected.to eq('https://observe.gitlab.com') }
context 'when on staging.gitlab.com' do
let(:gitlab_url) { Gitlab::Saas.staging_com_url }
it { is_expected.to eq('https://observe.staging.gitlab.com') }
end
# The environment variable override takes precedence over URL-based
# resolution.
context 'when overriden via ENV' do
let(:observe_url) { 'https://example.net' }
before do
stub_env('OVERRIDE_OBSERVABILITY_URL', observe_url)
end
it { is_expected.to eq(observe_url) }
end
end
end

View File

@ -0,0 +1,157 @@
# frozen_string_literal: true
require 'spec_helper'
# Specs for Gitlab::PaginationDelegate, whose pagination values are computed
# purely from the page/per_page/count arguments it is constructed with.
RSpec.describe Gitlab::PaginationDelegate do
# Empty collection: page 1 is simultaneously the first and last page and
# there are no neighbouring pages.
context 'when there is no data' do
let(:delegate) do
described_class.new(page: 1,
per_page: 10,
count: 0)
end
it 'shows the correct total count' do
expect(delegate.total_count).to eq(0)
end
it 'shows the correct total pages' do
expect(delegate.total_pages).to eq(0)
end
it 'shows the correct next page' do
expect(delegate.next_page).to be_nil
end
it 'shows the correct previous page' do
expect(delegate.prev_page).to be_nil
end
it 'shows the correct current page' do
expect(delegate.current_page).to eq(1)
end
it 'shows the correct limit value' do
expect(delegate.limit_value).to eq(10)
end
it 'shows the correct first page' do
expect(delegate.first_page?).to be true
end
it 'shows the correct last page' do
expect(delegate.last_page?).to be true
end
it 'shows the correct offset' do
expect(delegate.offset).to eq(0)
end
end
# A middle page: both next and previous pages exist, and the offset matches
# (page - 1) * per_page for the expectations below.
context 'with data' do
let(:delegate) do
described_class.new(page: 5,
per_page: 100,
count: 1000)
end
it 'shows the correct total count' do
expect(delegate.total_count).to eq(1000)
end
it 'shows the correct total pages' do
expect(delegate.total_pages).to eq(10)
end
it 'shows the correct next page' do
expect(delegate.next_page).to eq(6)
end
it 'shows the correct previous page' do
expect(delegate.prev_page).to eq(4)
end
it 'shows the correct current page' do
expect(delegate.current_page).to eq(5)
end
it 'shows the correct limit value' do
expect(delegate.limit_value).to eq(100)
end
it 'shows the correct first page' do
expect(delegate.first_page?).to be false
end
it 'shows the correct last page' do
expect(delegate.last_page?).to be false
end
it 'shows the correct offset' do
expect(delegate.offset).to eq(400)
end
end
# The final page: no next page, and last_page? flips to true.
context 'for last page' do
let(:delegate) do
described_class.new(page: 10,
per_page: 100,
count: 1000)
end
it 'shows the correct total count' do
expect(delegate.total_count).to eq(1000)
end
it 'shows the correct total pages' do
expect(delegate.total_pages).to eq(10)
end
it 'shows the correct next page' do
expect(delegate.next_page).to be_nil
end
it 'shows the correct previous page' do
expect(delegate.prev_page).to eq(9)
end
it 'shows the correct current page' do
expect(delegate.current_page).to eq(10)
end
it 'shows the correct limit value' do
expect(delegate.limit_value).to eq(100)
end
it 'shows the correct first page' do
expect(delegate.first_page?).to be false
end
it 'shows the correct last page' do
expect(delegate.last_page?).to be true
end
it 'shows the correct offset' do
expect(delegate.offset).to eq(900)
end
end
# Defensive behaviour: per_page is capped at MAX_PER_PAGE, a nil per_page
# falls back to DEFAULT_PER_PAGE, and a page past the end is clamped back
# into range.
context 'with limits and defaults' do
it 'has a maximum limit per page' do
expect(described_class.new(page: nil,
per_page: 1000,
count: 0).limit_value).to eq(described_class::MAX_PER_PAGE)
end
it 'has a default per page' do
expect(described_class.new(page: nil,
per_page: nil,
count: 0).limit_value).to eq(described_class::DEFAULT_PER_PAGE)
end
it 'has a maximum page' do
expect(described_class.new(page: 100,
per_page: 10,
count: 1).current_page).to eq(1)
end
end
end

View File

@ -77,463 +77,10 @@ RSpec.describe Gitlab::SidekiqMiddleware::DuplicateJobs::DuplicateJob, :clean_gi
end
end
shared_examples 'with multiple Redis keys' do
let(:deduplicated_flag_key) do
"#{idempotency_key}:deduplicate_flag"
end
describe '#check!' do
context 'when there was no job in the queue yet' do
it { expect(duplicate_job.check!).to eq('123') }
shared_examples 'sets Redis keys with correct TTL' do
it "adds an idempotency key with correct ttl" do
expect { duplicate_job.check! }
.to change { read_idempotency_key_with_ttl(idempotency_key) }
.from([nil, -2])
.to(['123', be_within(1).of(expected_ttl)])
end
context 'when wal locations is not empty' do
it "adds an existing wal locations key with correct ttl" do
expect { duplicate_job.check! }
.to change { read_idempotency_key_with_ttl(existing_wal_location_key(idempotency_key, 'main')) }
.from([nil, -2])
.to([wal_locations['main'], be_within(1).of(expected_ttl)])
.and change { read_idempotency_key_with_ttl(existing_wal_location_key(idempotency_key, 'ci')) }
.from([nil, -2])
.to([wal_locations['ci'], be_within(1).of(expected_ttl)])
end
end
end
context 'when TTL option is not set' do
let(:expected_ttl) { described_class::DEFAULT_DUPLICATE_KEY_TTL }
it_behaves_like 'sets Redis keys with correct TTL'
end
context 'when TTL option is set' do
let(:expected_ttl) { 5.minutes }
before do
allow(duplicate_job).to receive(:options).and_return({ ttl: expected_ttl })
end
it_behaves_like 'sets Redis keys with correct TTL'
end
it "adds the idempotency key to the jobs payload" do
expect { duplicate_job.check! }.to change { job['idempotency_key'] }.from(nil).to(idempotency_key)
end
end
context 'when there was already a job with same arguments in the same queue' do
before do
set_idempotency_key(idempotency_key, 'existing-key')
wal_locations.each do |config_name, location|
set_idempotency_key(existing_wal_location_key(idempotency_key, config_name), location)
end
end
it { expect(duplicate_job.check!).to eq('existing-key') }
it "does not change the existing key's TTL" do
expect { duplicate_job.check! }
.not_to change { read_idempotency_key_with_ttl(idempotency_key) }
.from(['existing-key', -1])
end
it "does not change the existing wal locations key's TTL" do
expect { duplicate_job.check! }
.to not_change { read_idempotency_key_with_ttl(existing_wal_location_key(idempotency_key, 'main')) }
.from([wal_locations['main'], -1])
.and not_change { read_idempotency_key_with_ttl(existing_wal_location_key(idempotency_key, 'ci')) }
.from([wal_locations['ci'], -1])
end
it 'sets the existing jid' do
duplicate_job.check!
expect(duplicate_job.existing_jid).to eq('existing-key')
end
end
end
describe '#update_latest_wal_location!' do
before do
allow(Gitlab::Database).to receive(:database_base_models).and_return(
{ main: ::ActiveRecord::Base,
ci: ::ActiveRecord::Base })
set_idempotency_key(existing_wal_location_key(idempotency_key, 'main'), existing_wal['main'])
set_idempotency_key(existing_wal_location_key(idempotency_key, 'ci'), existing_wal['ci'])
# read existing_wal_locations
duplicate_job.check!
end
context "when the key doesn't exists in redis" do
let(:existing_wal) do
{
'main' => '0/D525E3A0',
'ci' => 'AB/12340'
}
end
let(:new_wal_location_with_offset) do
{
# offset is relative to `existing_wal`
'main' => ['0/D525E3A8', '8'],
'ci' => ['AB/12345', '5']
}
end
let(:wal_locations) { new_wal_location_with_offset.transform_values(&:first) }
it 'stores a wal location to redis with an offset relative to existing wal location' do
expect { duplicate_job.update_latest_wal_location! }
.to change { read_range_from_redis(wal_location_key(idempotency_key, 'main')) }
.from([])
.to(new_wal_location_with_offset['main'])
.and change { read_range_from_redis(wal_location_key(idempotency_key, 'ci')) }
.from([])
.to(new_wal_location_with_offset['ci'])
end
end
context "when the key exists in redis" do
before do
rpush_to_redis_key(wal_location_key(idempotency_key, 'main'), *stored_wal_location_with_offset['main'])
rpush_to_redis_key(wal_location_key(idempotency_key, 'ci'), *stored_wal_location_with_offset['ci'])
end
let(:wal_locations) { new_wal_location_with_offset.transform_values(&:first) }
context "when the new offset is bigger then the existing one" do
let(:existing_wal) do
{
'main' => '0/D525E3A0',
'ci' => 'AB/12340'
}
end
let(:stored_wal_location_with_offset) do
{
# offset is relative to `existing_wal`
'main' => ['0/D525E3A3', '3'],
'ci' => ['AB/12342', '2']
}
end
let(:new_wal_location_with_offset) do
{
# offset is relative to `existing_wal`
'main' => ['0/D525E3A8', '8'],
'ci' => ['AB/12345', '5']
}
end
it 'updates a wal location to redis with an offset' do
expect { duplicate_job.update_latest_wal_location! }
.to change { read_range_from_redis(wal_location_key(idempotency_key, 'main')) }
.from(stored_wal_location_with_offset['main'])
.to(new_wal_location_with_offset['main'])
.and change { read_range_from_redis(wal_location_key(idempotency_key, 'ci')) }
.from(stored_wal_location_with_offset['ci'])
.to(new_wal_location_with_offset['ci'])
end
end
context "when the old offset is not bigger then the existing one" do
let(:existing_wal) do
{
'main' => '0/D525E3A0',
'ci' => 'AB/12340'
}
end
let(:stored_wal_location_with_offset) do
{
# offset is relative to `existing_wal`
'main' => ['0/D525E3A8', '8'],
'ci' => ['AB/12345', '5']
}
end
let(:new_wal_location_with_offset) do
{
# offset is relative to `existing_wal`
'main' => ['0/D525E3A2', '2'],
'ci' => ['AB/12342', '2']
}
end
it "does not update a wal location to redis with an offset" do
expect { duplicate_job.update_latest_wal_location! }
.to not_change { read_range_from_redis(wal_location_key(idempotency_key, 'main')) }
.from(stored_wal_location_with_offset['main'])
.and not_change { read_range_from_redis(wal_location_key(idempotency_key, 'ci')) }
.from(stored_wal_location_with_offset['ci'])
end
end
end
end
describe '#latest_wal_locations' do
context 'when job was deduplicated and wal locations were already persisted' do
before do
rpush_to_redis_key(wal_location_key(idempotency_key, 'main'), wal_locations['main'], 1024)
rpush_to_redis_key(wal_location_key(idempotency_key, 'ci'), wal_locations['ci'], 1024)
end
it { expect(duplicate_job.latest_wal_locations).to eq(wal_locations) }
end
context 'when job is not deduplication and wal locations were not persisted' do
it { expect(duplicate_job.latest_wal_locations).to be_empty }
end
end
describe '#delete!' do
context "when we didn't track the definition" do
it { expect { duplicate_job.delete! }.not_to raise_error }
end
context 'when the key exists in redis' do
before do
set_idempotency_key(idempotency_key, 'existing-jid')
set_idempotency_key(deduplicated_flag_key, 1)
wal_locations.each do |config_name, location|
set_idempotency_key(existing_wal_location_key(idempotency_key, config_name), location)
set_idempotency_key(wal_location_key(idempotency_key, config_name), location)
end
end
shared_examples 'deleting the duplicate job' do
shared_examples 'deleting keys from redis' do |key_name|
it "removes the #{key_name} from redis" do
expect { duplicate_job.delete! }
.to change { read_idempotency_key_with_ttl(key) }
.from([from_value, -1])
.to([nil, -2])
end
end
shared_examples 'does not delete key from redis' do |key_name|
it "does not remove the #{key_name} from redis" do
expect { duplicate_job.delete! }
.to not_change { read_idempotency_key_with_ttl(key) }
.from([from_value, -1])
end
end
it_behaves_like 'deleting keys from redis', 'idempotent key' do
let(:key) { idempotency_key }
let(:from_value) { 'existing-jid' }
end
it_behaves_like 'deleting keys from redis', 'deduplication counter key' do
let(:key) { deduplicated_flag_key }
let(:from_value) { '1' }
end
it_behaves_like 'deleting keys from redis', 'existing wal location keys for main database' do
let(:key) { existing_wal_location_key(idempotency_key, 'main') }
let(:from_value) { wal_locations['main'] }
end
it_behaves_like 'deleting keys from redis', 'existing wal location keys for ci database' do
let(:key) { existing_wal_location_key(idempotency_key, 'ci') }
let(:from_value) { wal_locations['ci'] }
end
it_behaves_like 'deleting keys from redis', 'latest wal location keys for main database' do
let(:key) { wal_location_key(idempotency_key, 'main') }
let(:from_value) { wal_locations['main'] }
end
it_behaves_like 'deleting keys from redis', 'latest wal location keys for ci database' do
let(:key) { wal_location_key(idempotency_key, 'ci') }
let(:from_value) { wal_locations['ci'] }
end
end
context 'when the idempotency key is not part of the job' do
it_behaves_like 'deleting the duplicate job'
it 'recalculates the idempotency hash' do
expect(duplicate_job).to receive(:idempotency_hash).and_call_original
duplicate_job.delete!
end
end
context 'when the idempotency key is part of the job' do
let(:idempotency_key) { 'not the same as what we calculate' }
let(:job) { super().merge('idempotency_key' => idempotency_key) }
it_behaves_like 'deleting the duplicate job'
it 'does not recalculate the idempotency hash' do
expect(duplicate_job).not_to receive(:idempotency_hash)
duplicate_job.delete!
end
end
end
end
describe '#set_deduplicated_flag!' do
context 'when the job is reschedulable' do
before do
allow(duplicate_job).to receive(:reschedulable?) { true }
end
it 'sets the key in Redis' do
duplicate_job.set_deduplicated_flag!
flag = with_redis { |redis| redis.get(deduplicated_flag_key) }
expect(flag).to eq(described_class::DEDUPLICATED_FLAG_VALUE.to_s)
end
it 'sets, gets and cleans up the deduplicated flag' do
expect(duplicate_job.should_reschedule?).to eq(false)
duplicate_job.set_deduplicated_flag!
expect(duplicate_job.should_reschedule?).to eq(true)
duplicate_job.delete!
expect(duplicate_job.should_reschedule?).to eq(false)
end
end
context 'when the job is not reschedulable' do
before do
allow(duplicate_job).to receive(:reschedulable?) { false }
end
it 'does not set the key in Redis' do
duplicate_job.set_deduplicated_flag!
flag = with_redis { |redis| redis.get(deduplicated_flag_key) }
expect(flag).to be_nil
end
it 'does not set the deduplicated flag' do
expect(duplicate_job.should_reschedule?).to eq(false)
duplicate_job.set_deduplicated_flag!
expect(duplicate_job.should_reschedule?).to eq(false)
duplicate_job.delete!
expect(duplicate_job.should_reschedule?).to eq(false)
end
end
end
describe '#duplicate?' do
it "raises an error if the check wasn't performed" do
expect { duplicate_job.duplicate? }.to raise_error /Call `#check!` first/
end
it 'returns false if the existing jid equals the job jid' do
duplicate_job.check!
expect(duplicate_job.duplicate?).to be(false)
end
it 'returns false if the existing jid is different from the job jid' do
set_idempotency_key(idempotency_key, 'a different jid')
duplicate_job.check!
expect(duplicate_job.duplicate?).to be(true)
end
end
def existing_wal_location_key(idempotency_key, connection_name)
"#{idempotency_key}:#{connection_name}:existing_wal_location"
end
def wal_location_key(idempotency_key, connection_name)
"#{idempotency_key}:#{connection_name}:wal_location"
end
def set_idempotency_key(key, value = '1')
with_redis { |r| r.set(key, value) }
end
def rpush_to_redis_key(key, wal, offset)
with_redis { |r| r.rpush(key, [wal, offset]) }
end
def read_idempotency_key_with_ttl(key)
with_redis do |redis|
redis.pipelined do |p|
p.get(key)
p.ttl(key)
end
end
end
def read_range_from_redis(key)
with_redis do |redis|
redis.lrange(key, 0, -1)
end
end
end
context 'with duplicate_jobs_cookie disabled' do
before do
stub_feature_flags(duplicate_jobs_cookie: false)
end
context 'with multi-store feature flags turned on' do
def with_redis(&block)
Gitlab::Redis::DuplicateJobs.with(&block)
end
it 'use Gitlab::Redis::DuplicateJobs.with' do
expect(Gitlab::Redis::DuplicateJobs).to receive(:with).and_call_original
expect(Sidekiq).not_to receive(:redis)
duplicate_job.check!
end
it_behaves_like 'with multiple Redis keys'
end
context 'when both multi-store feature flags are off' do
def with_redis(&block)
Sidekiq.redis(&block)
end
before do
stub_feature_flags(use_primary_and_secondary_stores_for_duplicate_jobs: false)
stub_feature_flags(use_primary_store_as_default_for_duplicate_jobs: false)
end
it 'use Sidekiq.redis' do
expect(Sidekiq).to receive(:redis).and_call_original
expect(Gitlab::Redis::DuplicateJobs).not_to receive(:with)
duplicate_job.check!
end
it_behaves_like 'with multiple Redis keys'
end
end
context 'with Redis cookies' do
shared_examples 'with Redis cookies' do
let(:cookie_key) { "#{idempotency_key}:cookie:v2" }
let(:cookie) { get_redis_msgpack(cookie_key) }
def with_redis(&block)
Gitlab::Redis::DuplicateJobs.with(&block)
end
describe '#check!' do
context 'when there was no job in the queue yet' do
it { expect(duplicate_job.check!).to eq('123') }
@ -838,6 +385,41 @@ RSpec.describe Gitlab::SidekiqMiddleware::DuplicateJobs::DuplicateJob, :clean_gi
end
end
context 'with multi-store feature flags turned on' do
def with_redis(&block)
Gitlab::Redis::DuplicateJobs.with(&block)
end
it 'use Gitlab::Redis::DuplicateJobs.with' do
expect(Gitlab::Redis::DuplicateJobs).to receive(:with).and_call_original
expect(Sidekiq).not_to receive(:redis)
duplicate_job.check!
end
it_behaves_like 'with Redis cookies'
end
context 'when both multi-store feature flags are off' do
def with_redis(&block)
Sidekiq.redis(&block)
end
before do
stub_feature_flags(use_primary_and_secondary_stores_for_duplicate_jobs: false)
stub_feature_flags(use_primary_store_as_default_for_duplicate_jobs: false)
end
it 'use Sidekiq.redis' do
expect(Sidekiq).to receive(:redis).and_call_original
expect(Gitlab::Redis::DuplicateJobs).not_to receive(:with)
duplicate_job.check!
end
it_behaves_like 'with Redis cookies'
end
describe '#scheduled?' do
it 'returns false for non-scheduled jobs' do
expect(duplicate_job.scheduled?).to be(false)

View File

@ -63,7 +63,6 @@ RSpec.describe Gitlab::Usage::Metrics::NameSuggestion do
context 'for sum metrics' do
it_behaves_like 'name suggestion' do
# corresponding metric is collected with sum(JiraImportState.finished, :imported_issues_count)
let(:key_path) { 'counts.jira_imports_total_imported_issues_count' }
let(:operation) { :sum }
let(:relation) { JiraImportState.finished }
let(:column) { :imported_issues_count }
@ -74,7 +73,6 @@ RSpec.describe Gitlab::Usage::Metrics::NameSuggestion do
context 'for average metrics' do
it_behaves_like 'name suggestion' do
# corresponding metric is collected with average(Ci::Pipeline, :duration)
let(:key_path) { 'counts.ci_pipeline_duration' }
let(:operation) { :average }
let(:relation) { Ci::Pipeline }
let(:column) { :duration }
@ -100,5 +98,16 @@ RSpec.describe Gitlab::Usage::Metrics::NameSuggestion do
let(:name_suggestion) { /<please fill metric name>/ }
end
end
# Metrics whose relation filters with a HAVING clause: the constraint text is
# embedded in the suggested metric name, per the name_suggestion regex below.
context 'for metrics with `having` keyword' do
it_behaves_like 'name suggestion' do
let(:operation) { :count }
let(:relation) { Issue.with_alert_management_alerts.having('COUNT(alert_management_alerts) > 1').group(:id) }
let(:column) { nil }
let(:constraints) { /<adjective describing: '\(\(COUNT\(alert_management_alerts\) > 1\)\)'>/ }
let(:name_suggestion) { /count_#{constraints}_issues_<with>_alert_management_alerts/ }
end
end
end
end

View File

@ -0,0 +1,19 @@
# frozen_string_literal: true

require 'spec_helper'

RSpec.describe Gitlab::Usage::Metrics::NamesSuggestions::RelationParsers::HavingConstraints do
  describe '#accept' do
    let(:connection) { ApplicationRecord.connection }
    let(:collector) { Arel::Collectors::SubstituteBinds.new(connection, Arel::Collectors::SQLString.new) }

    it 'builds correct constraints description' do
      # Build a query with two aggregate HAVING conditions plus a GROUP BY;
      # the parser should collect only the HAVING clause into the collector.
      records = Arel::Table.new('records')
      having_clause = records[:attribute].sum.eq(6).and(records[:attribute].count.gt(5))
      query = records.from.project(records['id'].count).having(having_clause).group(records[:attribute2])

      described_class.new(connection).accept(query, collector)

      expect(collector.value).to eql '(SUM(records.attribute) = 6 AND COUNT(records.attribute) > 5)'
    end
  end
end

View File

@ -2,14 +2,15 @@
require 'spec_helper'
RSpec.describe Gitlab::Usage::Metrics::NamesSuggestions::RelationParsers::Constraints do
RSpec.describe Gitlab::Usage::Metrics::NamesSuggestions::RelationParsers::WhereConstraints do
describe '#accept' do
let(:collector) { Arel::Collectors::SubstituteBinds.new(ActiveRecord::Base.connection, Arel::Collectors::SQLString.new) }
let(:connection) { ApplicationRecord.connection }
let(:collector) { Arel::Collectors::SubstituteBinds.new(connection, Arel::Collectors::SQLString.new) }
it 'builds correct constraints description' do
table = Arel::Table.new('records')
arel = table.from.project(table['id'].count).where(table[:attribute].eq(true).and(table[:some_value].gt(5)))
described_class.new(ApplicationRecord.connection).accept(arel, collector)
described_class.new(connection).accept(arel, collector)
expect(collector.value).to eql '(records.attribute = true AND records.some_value > 5)'
end

View File

@ -211,6 +211,9 @@ RSpec.describe ApplicationSetting do
it { is_expected.to allow_value([]).for(:valid_runner_registrars) }
it { is_expected.to allow_value(%w(project group)).for(:valid_runner_registrars) }
it { is_expected.to allow_value(http).for(:jira_connect_proxy_url) }
it { is_expected.to allow_value(https).for(:jira_connect_proxy_url) }
context 'when deactivate_dormant_users is enabled' do
before do
stub_application_setting(deactivate_dormant_users: true)
@ -269,6 +272,7 @@ RSpec.describe ApplicationSetting do
end
it { is_expected.not_to allow_value('http://localhost:9000').for(:grafana_url) }
it { is_expected.not_to allow_value('http://localhost:9000').for(:jira_connect_proxy_url) }
end
context 'with invalid grafana URL' do

View File

@ -196,6 +196,8 @@ RSpec.describe Ci::Bridge do
end
describe '#downstream_variables' do
subject(:downstream_variables) { bridge.downstream_variables }
it 'returns variables that are going to be passed downstream' do
expect(bridge.downstream_variables)
.to include(key: 'BRIDGE', value: 'cross')
@ -320,6 +322,79 @@ RSpec.describe Ci::Bridge do
end
end
end
# Raw variables: a variable flagged raw: true must be passed downstream
# verbatim ($-references NOT expanded), while non-raw variables have
# $VAR references interpolated before being forwarded.
context 'when using raw variables' do
let(:options) do
{
trigger: {
project: 'my/project',
branch: 'master',
# Forward both YAML-defined and pipeline variables to the downstream pipeline.
forward: { yaml_variables: true,
pipeline_variables: true }.compact
}
}
end
# Two YAML variables referencing $VAR1: one expandable, one raw.
let(:yaml_variables) do
[
{
key: 'VAR6',
value: 'value6 $VAR1'
},
{
key: 'VAR7',
value: 'value7 $VAR1',
raw: true
}
]
end
let(:pipeline_schedule) { create(:ci_pipeline_schedule, :nightly, project: project) }
let(:pipeline) { create(:ci_pipeline, pipeline_schedule: pipeline_schedule) }
before do
# Pipeline variables: VAR1 is the expansion source; VAR2 expandable, VAR3 raw.
create(:ci_pipeline_variable, pipeline: pipeline, key: 'VAR1', value: 'value1')
create(:ci_pipeline_variable, pipeline: pipeline, key: 'VAR2', value: 'value2 $VAR1')
create(:ci_pipeline_variable, pipeline: pipeline, key: 'VAR3', value: 'value3 $VAR1', raw: true)
# Schedule variables: VAR4 expandable, VAR5 raw.
pipeline_schedule.variables.create!(key: 'VAR4', value: 'value4 $VAR1')
pipeline_schedule.variables.create!(key: 'VAR5', value: 'value5 $VAR1', raw: true)
bridge.yaml_variables.concat(yaml_variables)
end
it 'expands variables according to their raw attributes' do
expect(downstream_variables).to contain_exactly(
{ key: 'BRIDGE', value: 'cross' },
{ key: 'VAR1', value: 'value1' },
{ key: 'VAR2', value: 'value2 value1' },
{ key: 'VAR3', value: 'value3 $VAR1', raw: true },
{ key: 'VAR4', value: 'value4 value1' },
{ key: 'VAR5', value: 'value5 $VAR1', raw: true },
{ key: 'VAR6', value: 'value6 value1' },
{ key: 'VAR7', value: 'value7 $VAR1', raw: true }
)
end
# With the feature flag off, raw: true has no effect: every variable is
# expanded and the raw attribute is dropped from the forwarded set.
context 'when the FF ci_raw_variables_in_yaml_config is disabled' do
before do
stub_feature_flags(ci_raw_variables_in_yaml_config: false)
end
it 'ignores the raw attribute' do
expect(downstream_variables).to contain_exactly(
{ key: 'BRIDGE', value: 'cross' },
{ key: 'VAR1', value: 'value1' },
{ key: 'VAR2', value: 'value2 value1' },
{ key: 'VAR3', value: 'value3 value1' },
{ key: 'VAR4', value: 'value4 value1' },
{ key: 'VAR5', value: 'value5 value1' },
{ key: 'VAR6', value: 'value6 value1' },
{ key: 'VAR7', value: 'value7 value1' }
)
end
end
end
end
describe 'metadata support' do

View File

@ -1097,6 +1097,19 @@ RSpec.describe MergeRequestDiff do
it 'returns a non-empty CommitCollection' do
expect(mr.merge_request_diff.commits.commits.size).to be > 0
end
# Pagination support on MergeRequestDiff#commits: limit + page together
# select a single-commit window, and successive pages return successive SHAs.
context 'with a page' do
it 'returns a limited number of commits for page' do
expect(mr.merge_request_diff.commits(limit: 1, page: 1).map(&:sha)).to eq(
%w[
b83d6e391c22777fca1ed3012fce84f633d7fed0
])
expect(mr.merge_request_diff.commits(limit: 1, page: 2).map(&:sha)).to eq(
%w[
498214de67004b1da3d820901307bed2a68a8ef6
])
end
end
end
describe '.latest_diff_for_merge_requests' do

View File

@ -5008,6 +5008,19 @@ RSpec.describe MergeRequest, factory_default: :keep do
expect(subject.commits.size).to eq(29)
end
end
# Pagination support on MergeRequest#commits: mirrors the MergeRequestDiff
# behaviour — limit: 1 with increasing page numbers walks the commit list.
context 'with a page' do
it 'returns a limited number of commits for page' do
expect(subject.commits(limit: 1, page: 1).map(&:sha)).to eq(
%w[
b83d6e391c22777fca1ed3012fce84f633d7fed0
])
expect(subject.commits(limit: 1, page: 2).map(&:sha)).to eq(
%w[
498214de67004b1da3d820901307bed2a68a8ef6
])
end
end
end
context 'new merge request' do

View File

@ -63,6 +63,14 @@ RSpec.describe Preloaders::ProjectRootAncestorPreloader do
it_behaves_like 'executes N matching DB queries', 0, :full_path
end
# When the preloader receives a plain Array instead of an ActiveRecord
# relation: the shared example asserts 4 matching queries are issued
# (i.e. batching is presumably unavailable for arrays — confirm against
# the preloader implementation).
context 'when projects are an array and not an ActiveRecord::Relation' do
before do
described_class.new(projects, :namespace, additional_preloads).execute
end
it_behaves_like 'executes N matching DB queries', 4
end
end
context 'when the preloader is not used' do

View File

@ -31,31 +31,43 @@ RSpec.describe Preloaders::UserMaxAccessLevelInProjectsPreloader do
shared_examples '#execute' do
let(:projects_arg) { projects }
before do
Preloaders::UserMaxAccessLevelInProjectsPreloader.new(projects_arg, user).execute
end
it 'avoids N+1 queries' do
expect { query }.not_to make_queries
end
context 'when projects is an array of IDs' do
let(:projects_arg) { projects.map(&:id) }
it 'avoids N+1 queries' do
expect { query }.not_to make_queries
end
end
# Test for handling of SQL table name clashes.
context 'when projects is a relation including project_authorizations' do
let(:projects_arg) do
Project.where(id: ProjectAuthorization.where(project_id: projects).select(:project_id))
context 'when user is present' do
before do
Preloaders::UserMaxAccessLevelInProjectsPreloader.new(projects_arg, user).execute
end
it 'avoids N+1 queries' do
expect { query }.not_to make_queries
end
context 'when projects is an array of IDs' do
let(:projects_arg) { projects.map(&:id) }
it 'avoids N+1 queries' do
expect { query }.not_to make_queries
end
end
# Test for handling of SQL table name clashes.
context 'when projects is a relation including project_authorizations' do
let(:projects_arg) do
Project.where(id: ProjectAuthorization.where(project_id: projects).select(:project_id))
end
it 'avoids N+1 queries' do
expect { query }.not_to make_queries
end
end
end
context 'when user is not present' do
before do
Preloaders::UserMaxAccessLevelInProjectsPreloader.new(projects_arg, nil).execute
end
it 'does not avoid N+1 queries' do
expect { query }.to make_queries
end
end
end

View File

@ -591,34 +591,4 @@ RSpec.describe GlobalPolicy do
it { is_expected.not_to be_allowed(:log_in) }
end
end
# :delete_runners is denied to everyone except admins with admin mode on.
describe 'delete runners' do
context 'when anonymous' do
let(:current_user) { nil }
it { is_expected.not_to be_allowed(:delete_runners) }
end
context 'regular user' do
it { is_expected.not_to be_allowed(:delete_runners) }
end
context 'when external' do
let(:current_user) { build(:user, :external) }
it { is_expected.not_to be_allowed(:delete_runners) }
end
context 'admin user' do
let_it_be(:current_user) { create(:user, :admin) }
# Admin mode gates the ability: allowed only while admin mode is enabled.
context 'when admin mode is enabled', :enable_admin_mode do
it { is_expected.to be_allowed(:delete_runners) }
end
context 'when admin mode is disabled' do
it { is_expected.to be_disallowed(:delete_runners) }
end
end
end
end

View File

@ -323,7 +323,7 @@ RSpec.describe ProjectPolicy do
:create_environment, :read_environment, :update_environment, :admin_environment, :destroy_environment,
:create_cluster, :read_cluster, :update_cluster, :admin_cluster,
:create_deployment, :read_deployment, :update_deployment, :admin_deployment, :destroy_deployment,
:destroy_release, :download_code, :build_download_code
:download_code, :build_download_code
]
end

View File

@ -81,24 +81,20 @@ RSpec.describe API::Release::Links do
end
context 'when project is public' do
let(:project) { create(:project, :repository, :public) }
before do
project.update!(visibility_level: Gitlab::VisibilityLevel::PUBLIC)
end
it 'allows the request' do
get api("/projects/#{project.id}/releases/v0.1/assets/links", non_project_member)
expect(response).to have_gitlab_http_status(:ok)
end
end
context 'when project is public and the repository is private' do
let(:project) { create(:project, :repository, :public, :repository_private) }
it_behaves_like '403 response' do
let(:request) { get api("/projects/#{project.id}/releases/v0.1/assets/links", non_project_member) }
end
context 'when the release does not exists' do
let!(:release) {}
context 'and the releases are private' do
before do
project.project_feature.update!(releases_access_level: ProjectFeature::PRIVATE)
end
it_behaves_like '403 response' do
let(:request) { get api("/projects/#{project.id}/releases/v0.1/assets/links", non_project_member) }

Some files were not shown because too many files have changed in this diff Show More