Add latest changes from gitlab-org/gitlab@master

This commit is contained in:
GitLab Bot 2024-03-06 12:12:50 +00:00
parent 1732634005
commit e408c9c787
88 changed files with 1317 additions and 527 deletions

View File

@ -41,23 +41,11 @@ cache-workhorse:
cache-assets:test:
extends: .cache-assets-base
cache-assets:test as-if-foss:
extends:
- cache-assets:test
- .as-if-foss
- .caching:rules:cache-assets-as-if-foss
cache-assets:production:
extends:
- .cache-assets-base
- .production
cache-assets:production as-if-foss:
extends:
- cache-assets:production
- .as-if-foss
- .caching:rules:cache-assets-as-if-foss
packages-cleanup:
extends:
- .default-retry

View File

@ -1065,11 +1065,6 @@
when: manual
allow_failure: true
.caching:rules:cache-assets-as-if-foss:
rules:
- !reference [".strict-ee-only-rules", rules]
- !reference [".caching:rules:cache-assets", "rules"]
.caching:rules:packages-cleanup:
rules:
# The new strategy to cache assets as generic packages is experimental and can be disabled by removing the `CACHE_ASSETS_AS_PACKAGE` variable

View File

@ -145,8 +145,6 @@ The Geo primary site needs to checksum every replicable so secondaries can verif
FAILED_VERIFICATION_INDEX_NAME = "index_cool_widget_states_failed_verification"
NEEDS_VERIFICATION_INDEX_NAME = "index_cool_widget_states_needs_verification"
enable_lock_retries!
def up
create_table :cool_widget_states do |t|
t.datetime_with_timezone :verification_started_at

View File

@ -148,8 +148,6 @@ The Geo primary site needs to checksum every replicable so secondaries can verif
FAILED_VERIFICATION_INDEX_NAME = "index_cool_widget_states_failed_verification"
NEEDS_VERIFICATION_INDEX_NAME = "index_cool_widget_states_needs_verification"
enable_lock_retries!
def up
create_table :cool_widget_states do |t|
t.datetime_with_timezone :verification_started_at

View File

@ -33,25 +33,19 @@ export default {
};
</script>
<template>
<div>
<gl-empty-state
:title="$options.i18n.emptyState.title"
:svg-path="emptyAlertSvgPath"
:svg-height="null"
>
<template #description>
<div class="gl-display-block">
<span>{{ $options.i18n.emptyState.info }}</span>
<gl-link :href="alertsHelpUrl" target="_blank">
{{ $options.i18n.moreInformation }}
</gl-link>
</div>
<div v-if="userCanEnableAlertManagement" class="gl-display-block center gl-pt-4">
<gl-button category="primary" variant="confirm" :href="enableAlertManagementPath">
{{ $options.i18n.emptyState.buttonText }}
</gl-button>
</div>
</template>
</gl-empty-state>
</div>
<gl-empty-state :title="$options.i18n.emptyState.title" :svg-path="emptyAlertSvgPath">
<template #description>
<div class="gl-display-block">
<span>{{ $options.i18n.emptyState.info }}</span>
<gl-link :href="alertsHelpUrl" target="_blank">
{{ $options.i18n.moreInformation }}
</gl-link>
</div>
</template>
<template v-if="userCanEnableAlertManagement" #actions>
<gl-button category="primary" variant="confirm" :href="enableAlertManagementPath">
{{ $options.i18n.emptyState.buttonText }}
</gl-button>
</template>
</gl-empty-state>
</template>

View File

@ -19,6 +19,7 @@ function parseDatasetToProps(data) {
const {
id,
type,
projectId,
commentDetail,
projectKey,
learnMorePath,
@ -36,6 +37,7 @@ function parseDatasetToProps(data) {
jiraIssueTransitionAutomatic,
jiraIssueTransitionId,
artifactRegistryPath,
personalAccessTokensPath,
redirectTo,
upgradeSlackUrl,
...booleanAttributes
@ -69,6 +71,7 @@ function parseDatasetToProps(data) {
testPath,
resetPath,
formPath,
personalAccessTokensPath,
triggerFieldsProps: {
initialTriggerCommit: commitEvents,
initialTriggerMergeRequest: mergeRequestEvents,
@ -96,6 +99,7 @@ function parseDatasetToProps(data) {
inheritFromId: parseInt(inheritFromId, 10),
integrationLevel,
id: parseInt(id, 10),
projectId: parseInt(projectId, 10),
redirectTo,
shouldUpgradeSlack,
upgradeSlackUrl,

View File

@ -225,7 +225,10 @@ function tracingFilterObjToQueryParams(filterObj) {
*
* @returns Array<Trace> : A list of traces
*/
async function fetchTraces(tracingUrl, { filters = {}, pageToken, pageSize, sortBy } = {}) {
async function fetchTraces(
tracingUrl,
{ filters = {}, pageToken, pageSize, sortBy, abortController } = {},
) {
const params = tracingFilterObjToQueryParams(filters);
if (pageToken) {
params.append('page_token', pageToken);
@ -242,6 +245,7 @@ async function fetchTraces(tracingUrl, { filters = {}, pageToken, pageSize, sort
const { data } = await axios.get(tracingUrl, {
withCredentials: true,
params,
signal: abortController?.signal,
});
if (!Array.isArray(data.traces)) {
throw new Error('traces are missing/invalid in the response'); // eslint-disable-line @gitlab/require-i18n-strings
@ -252,13 +256,14 @@ async function fetchTraces(tracingUrl, { filters = {}, pageToken, pageSize, sort
}
}
async function fetchTracesAnalytics(tracingAnalyticsUrl, { filters = {} } = {}) {
async function fetchTracesAnalytics(tracingAnalyticsUrl, { filters = {}, abortController } = {}) {
const params = tracingFilterObjToQueryParams(filters);
try {
const { data } = await axios.get(tracingAnalyticsUrl, {
withCredentials: true,
params,
signal: abortController?.signal,
});
return data.results ?? [];
} catch (e) {
@ -354,7 +359,7 @@ function addMetricsAttributeFilterToQueryParams(dimensionFilter, params) {
});
}
function addMetricsDateRangeFilterToQueryParams(dateRangeFilter, params) {
function addDateRangeFilterToQueryParams(dateRangeFilter, params) {
if (!dateRangeFilter || !params) return;
const { value, endDate, startDate } = dateRangeFilter;
@ -401,7 +406,7 @@ async function fetchMetric(searchUrl, name, type, options = {}) {
}
if (dateRange) {
addMetricsDateRangeFilterToQueryParams(dateRange, params);
addDateRangeFilterToQueryParams(dateRange, params);
}
if (groupBy) {
@ -446,16 +451,21 @@ async function fetchMetricSearchMetadata(searchMetadataUrl, name, type) {
}
}
export async function fetchLogs(logsSearchUrl, { pageToken, pageSize } = {}) {
export async function fetchLogs(logsSearchUrl, { pageToken, pageSize, filters = {} } = {}) {
try {
const params = new URLSearchParams();
const { dateRange } = filters;
if (dateRange) {
addDateRangeFilterToQueryParams(dateRange, params);
}
if (pageToken) {
params.append('page_token', pageToken);
}
if (pageSize) {
params.append('page_size', pageSize);
}
const { data } = await axios.get(logsSearchUrl, {
withCredentials: true,
params,

View File

@ -0,0 +1,96 @@
<script>
import { GlDaterangePicker } from '@gitlab/ui';
import { periodToDate } from '~/observability/utils';
import DateRangesDropdown from '~/analytics/shared/components/date_ranges_dropdown.vue';
import { TIME_RANGE_OPTIONS, CUSTOM_DATE_RANGE_OPTION } from '~/observability/constants';
export default {
components: {
DateRangesDropdown,
GlDaterangePicker,
},
props: {
selected: {
type: Object,
required: false,
default: null,
},
},
data() {
return {
dateRange: this.selected ?? {
value: '',
startDate: null,
endDate: null,
},
};
},
computed: {
dateRangeOptions() {
return TIME_RANGE_OPTIONS.map((option) => {
const dateRange = periodToDate(option.value);
return {
value: option.value,
text: option.title,
startDate: dateRange.min,
endDate: dateRange.max,
};
});
},
shouldShowDateRangePicker() {
return this.dateRange.value === CUSTOM_DATE_RANGE_OPTION;
},
shouldStartOpened() {
return (
this.shouldShowDateRangePicker && (!this.dateRange.startDate || !this.dateRange.endDate)
);
},
},
methods: {
onSelectPredefinedDateRange({ value, startDate, endDate }) {
this.dateRange = {
value,
startDate: new Date(startDate),
endDate: new Date(endDate),
};
this.$emit('onDateRangeSelected', this.dateRange);
},
onSelectCustomDateRange() {
this.dateRange = {
value: CUSTOM_DATE_RANGE_OPTION,
startDate: undefined,
endDate: undefined,
};
},
onCustomRangeSelected({ startDate, endDate }) {
this.dateRange = {
value: CUSTOM_DATE_RANGE_OPTION,
startDate: new Date(startDate),
endDate: new Date(endDate),
};
this.$emit('onDateRangeSelected', this.dateRange);
},
},
};
</script>
<template>
<div class="gl-display-flex gl-flex-direction-column gl-lg-flex-direction-row gl-gap-3">
<date-ranges-dropdown
:selected="dateRange.value"
:date-range-options="dateRangeOptions"
disable-selected-day-count
tooltip=""
include-end-date-in-days-selected
@selected="onSelectPredefinedDateRange"
@customDateRangeSelected="onSelectCustomDateRange"
/>
<gl-daterange-picker
v-if="shouldShowDateRangePicker"
:start-opened="shouldStartOpened"
:default-start-date="dateRange.startDate"
:default-end-date="dateRange.endDate"
@input="onCustomRangeSelected"
/>
</div>
</template>

View File

@ -97,7 +97,11 @@ export default {
<template>
<div>
<infrastructure-title :help-url="$options.terraformRegistryHelpUrl" :count="packagesCount" />
<infrastructure-title
v-if="packagesCount > 0"
:help-url="$options.terraformRegistryHelpUrl"
:count="packagesCount"
/>
<infrastructure-search v-if="packagesCount > 0" @update="requestPackagesList" />
<package-list @page:changed="onPageChanged" @package:delete="onPackageDeleteRequest">

View File

@ -18,7 +18,6 @@ export const FAILURE_REASONS = {
not_open: __('Merge request must be open.'),
need_rebase: __('Merge request must be rebased, because a fast-forward merge is not possible.'),
not_approved: __('All required approvals must be given.'),
policies_denied: __('Denied licenses must be removed or approved.'),
merge_request_blocked: __('Merge request dependencies must be merged.'),
status_checks_must_pass: __('Status checks must pass.'),
jira_association_missing: __('Either the title or description must reference a Jira issue.'),

View File

@ -197,7 +197,6 @@ export const DETAILED_MERGE_STATUS = {
NOT_APPROVED: 'NOT_APPROVED',
DRAFT_STATUS: 'DRAFT_STATUS',
BLOCKED_STATUS: 'BLOCKED_STATUS',
POLICIES_DENIED: 'POLICIES_DENIED',
CI_MUST_PASS: 'CI_MUST_PASS',
CI_STILL_RUNNING: 'CI_STILL_RUNNING',
EXTERNAL_STATUS_CHECKS: 'EXTERNAL_STATUS_CHECKS',

View File

@ -42,9 +42,6 @@ module Types
value 'BLOCKED_STATUS',
value: :merge_request_blocked,
description: 'Merge request dependencies must be merged.'
value 'POLICIES_DENIED',
value: :policies_denied,
description: 'There are denied policies for the merge request.'
value 'EXTERNAL_STATUS_CHECKS',
value: :status_checks_must_pass,
description: 'Status checks must pass.'

View File

@ -244,7 +244,7 @@ module DiffHelper
def conflicts_with_types
return unless merge_request.cannot_be_merged? && merge_request.source_branch_exists? && merge_request.target_branch_exists?
cached_conflicts_with_types(enabled: Feature.enabled?(:cached_conflicts_with_types, merge_request.project)) do
cached_conflicts_with_types do
conflicts_service = MergeRequests::Conflicts::ListService.new(merge_request, allow_tree_conflicts: true) # rubocop:disable CodeReuse/ServiceClass
{}.tap do |h|
@ -271,9 +271,7 @@ module DiffHelper
private
def cached_conflicts_with_types(enabled: false)
return yield unless enabled
def cached_conflicts_with_types
cache_key = "merge_request_#{merge_request.id}_conflicts_with_types"
cache = Rails.cache.read(cache_key)
source_branch_sha = merge_request.source_branch_sha

View File

@ -107,6 +107,7 @@ module IntegrationsHelper
def integration_form_data(integration, project: nil, group: nil)
form_data = {
id: integration.id,
project_id: integration.project_id,
show_active: integration.show_active_box?.to_s,
activated: (integration.active || (integration.new_record? && integration.activate_disabled_reason.nil?)).to_s,
activate_disabled: integration.activate_disabled_reason.present?.to_s,
@ -212,7 +213,8 @@ module IntegrationsHelper
wiki_page_events: s_('Webhooks|Wiki page events'),
deployment_events: s_('Webhooks|Deployment events'),
feature_flag_events: s_('Webhooks|Feature flag events'),
releases_events: s_('Webhooks|Releases events')
releases_events: s_('Webhooks|Releases events'),
resource_access_token_events: s_('Webhooks|Project or group access token events')
}
event_i18n_map[event] || event.to_s.humanize

View File

@ -7,7 +7,7 @@ module Projects::AlertManagementHelper
'enable-alert-management-path' => project_settings_operations_path(project, anchor: 'js-alert-management-settings'),
'alerts-help-url' => help_page_url('operations/incident_management/alerts.md'),
'populating-alerts-help-url' => help_page_url('operations/incident_management/integrations.md', anchor: 'configuration'),
'empty-alert-svg-path' => image_path('illustrations/alert-management-empty-state.svg'),
'empty-alert-svg-path' => image_path('illustrations/empty-state/empty-scan-alert-md.svg'),
'user-can-enable-alert-management' => can?(current_user, :admin_operations, project).to_s,
'alert-management-enabled' => alert_management_enabled?(project).to_s,
'text-query': params[:search],

View File

@ -18,7 +18,8 @@ module TriggerableHooks
release_hooks: :releases_events,
member_hooks: :member_events,
subgroup_hooks: :subgroup_events,
emoji_hooks: :emoji_events
emoji_hooks: :emoji_events,
resource_access_token_hooks: :resource_access_token_events
}.freeze
extend ActiveSupport::Concern

View File

@ -24,7 +24,8 @@ class ProjectHook < WebHook
:deployment_hooks,
:feature_flag_hooks,
:release_hooks,
:emoji_hooks
:emoji_hooks,
:resource_access_token_hooks
]
belongs_to :project

View File

@ -75,6 +75,10 @@ class PersonalAccessToken < ApplicationRecord
fuzzy_search(query, [:name])
end
def hook_attrs
Gitlab::HookData::ResourceAccessTokenBuilder.new(self).build
end
protected
def validate_scopes

View File

@ -18,6 +18,7 @@ module Projects
@relations.each do |hooks|
hooks.hooks_for(@scope).select_active(@scope, @data).each do |hook|
next if @scope == :emoji_hooks && Feature.disabled?(:emoji_webhooks, hook.parent)
next if @scope == :resource_access_token_hooks && Feature.disabled?(:access_tokens_webhooks, hook.parent)
hook.async_execute(@data, @scope.to_s)
end

View File

@ -645,6 +645,14 @@ class User < MainClusterwide::ApplicationRecord
.trusted_with_spam)
end
# This scope to be used only for bot_users since for
# regular users this may lead to memory allocation issues
scope :with_personal_access_tokens_and_resources, -> do
includes(:personal_access_tokens)
.includes(:groups)
.includes(:projects)
end
scope :preload_user_detail, -> { preload(:user_detail) }
def self.supported_keyset_orderings

View File

@ -92,5 +92,18 @@ module Integrations
Gitlab::DataBuilder::Emoji.build(award_emoji, current_user, 'award')
end
def access_tokens_events_data
resource_access_token = PersonalAccessToken.new(
id: 1,
name: 'pat_for_webhook_event',
user: project.bots.first,
created_at: Time.zone.now,
updated_at: Time.zone.now,
expires_at: 2.days.from_now
)
Gitlab::DataBuilder::ResourceAccessToken.build(resource_access_token, :expiring, project)
end
end
end

View File

@ -34,6 +34,8 @@ module TestHooks
releases_events_data
when 'emoji_events'
emoji_events_data
when 'resource_access_token_events'
access_tokens_events_data
end
end
end

View File

@ -2,4 +2,4 @@
#js-vue-packages-list{ data: { resource_id: @group.id,
page_type: 'groups',
empty_list_illustration: image_path('illustrations/empty-state/empty-terraform-register-lg.svg') } }
empty_list_illustration: image_path('illustrations/empty-state/empty-environment-md.svg') } }

View File

@ -10,17 +10,14 @@
= render 'shared/milestones/header', milestone: @milestone
= render 'shared/milestones/description', milestone: @milestone
= render_if_exists 'shared/milestones/burndown', milestone: @milestone, project: @project
- if can?(current_user, :read_issue, @project) && @milestone.total_issues_count == 0
= render Pajamas::AlertComponent.new(dismissible: false,
alert_options: { class: 'gl-mt-3 gl-mb-5',
data: { testid: 'no-issues-alert' }}) do |c|
- if @milestone.complete? && @milestone.active?
= render Pajamas::AlertComponent.new(variant: :success,
alert_options: { data: { testid: 'all-issues-closed-alert' }},
dismissible: false) do |c|
- c.with_body do
= _('Assign some issues to this milestone.')
- else
= render 'shared/milestones/milestone_complete_alert', milestone: @milestone do
= _('All issues for this milestone are closed. You may close this milestone now.')
= _('All issues for this milestone are closed. You may close this milestone now.')
= render_if_exists 'shared/milestones/burndown', milestone: @milestone, project: @project
= render 'shared/milestones/tabs', milestone: @milestone
= render 'shared/milestones/sidebar', milestone: @milestone, project: @project, affix_offset: 153

View File

@ -2,4 +2,4 @@
#js-vue-packages-list{ data: { resource_id: @project.id,
page_type: 'project',
empty_list_illustration: image_path('illustrations/empty-state/empty-terraform-register-lg.svg') } }
empty_list_illustration: image_path('illustrations/empty-state/empty-environment-md.svg') } }

View File

@ -71,11 +71,18 @@
integration_webhook_event_human_name(:releases_events),
help_text: s_('Webhooks|A release is created, updated, or deleted.')
- if Feature.enabled?(:emoji_webhooks, hook.parent)
%li.gl-pb-5
%li.gl-pb-3
- emoji_help_link = link_to s_('Which emoji events trigger webhooks'), help_page_path('user/project/integrations/webhook_events', anchor: 'emoji-events')
= form.gitlab_ui_checkbox_component :emoji_events,
integration_webhook_event_human_name(:emoji_events),
help_text: s_('Webhooks|An emoji is awarded or revoked. %{help_link}?').html_safe % { help_link: emoji_help_link }
- if Feature.enabled?(:access_tokens_webhooks, hook.parent)
%li.gl-pb-3
- access_token_help_link = link_to s_('Which project or group access token events trigger webhooks'), help_page_path('user/project/integrations/webhook_events', anchor: 'project-and-group-access-token-events')
= form.gitlab_ui_checkbox_component :resource_access_token_events,
integration_webhook_event_human_name(:resource_access_token_events),
help_text: s_('Webhooks|An access token is going to expire in the next 7 days. %{help_link}?').html_safe % { help_link: access_token_help_link }
- if Feature.enabled?(:custom_webhook_template, hook.parent, type: :beta)
.form-group

View File

@ -91,14 +91,12 @@
.gl-spinner.gl-spinner-md
- if profile_tabs.empty?
.svg-content
= image_tag 'illustrations/profile_private_mode.svg'
.text-content.text-center
%h4
- if @user.blocked?
= s_('UserProfile|This user is blocked')
- else
= s_('UserProfile|This user has a private profile')
- if @user.blocked?
= render Pajamas::EmptyStateComponent.new(svg_path: 'illustrations/empty-state/empty-access-md.svg',
title: s_('UserProfile|This user is blocked'))
- else
= render Pajamas::EmptyStateComponent.new(svg_path: 'illustrations/empty-state/empty-private-md.svg',
title: s_('UserProfile|This user has a private profile'))
.user-profile-sidebar
.profile-header.gl-pb-5.gl-pt-3.gl-overflow-y-auto.gl-sm-pr-4
.gl-vertical-align-top.gl-text-left.gl-max-w-80.gl-overflow-wrap-anywhere

View File

@ -18,10 +18,21 @@ module PersonalAccessTokens
BATCH_SIZE = 100
def perform(*args)
process_user_tokens
process_project_access_tokens
end
private
def process_user_tokens
# rubocop: disable CodeReuse/ActiveRecord -- We need to specify batch size to avoid timing out of worker
loop do
tokens = PersonalAccessToken.expiring_and_not_notified_without_impersonation
.select(:user_id).limit(BATCH_SIZE).to_a
tokens = PersonalAccessToken
.expiring_and_not_notified_without_impersonation
.owner_is_human
.select(:user_id)
.limit(BATCH_SIZE)
.load
break if tokens.empty?
@ -39,20 +50,46 @@ module PersonalAccessTokens
# We're limiting to 100 tokens so we avoid loading too many tokens into memory.
# At the time of writing this would only affect 69 users on GitLab.com
# rubocop: enable CodeReuse/ActiveRecord
if user.project_bot?
deliver_bot_notifications(token_names, user)
else
deliver_user_notifications(token_names, user)
end
deliver_user_notifications(token_names, user)
expiring_user_tokens.update_all(expire_notification_delivered: true)
end
end
end
# rubocop: enable CodeReuse/ActiveRecord
end
private
def process_project_access_tokens
# rubocop: disable CodeReuse/ActiveRecord -- We need to specify batch size to avoid timing out of worker
notifications_delivered = 0
loop do
tokens = PersonalAccessToken
.without_impersonation
.expiring_and_not_notified_without_impersonation
.project_access_token
.select(:id, :user_id)
.limit(BATCH_SIZE)
.load
break if tokens.empty?
bot_users = User.id_in(tokens.pluck(:user_id).uniq).with_personal_access_tokens_and_resources
bot_users.each do |user|
with_context(user: user) do
expiring_user_token = user.personal_access_tokens.first
execute_web_hooks(expiring_user_token, user)
deliver_bot_notifications(expiring_user_token.name, user)
end
end
tokens.update_all(expire_notification_delivered: true)
notifications_delivered += tokens.count
end
log_extra_metadata_on_done(:total_notification_delivered_for_bot_personal_access_tokens, notifications_delivered)
# rubocop: enable CodeReuse/ActiveRecord
end
def deliver_bot_notifications(token_names, user)
notification_service.resource_access_tokens_about_to_expire(user, token_names)
@ -74,6 +111,16 @@ module PersonalAccessTokens
)
end
def execute_web_hooks(token, bot_user)
resource = bot_user.resource_bot_resource
return unless ::Feature.enabled?(:access_tokens_webhooks, resource)
return if resource.is_a?(Project) && !resource.has_active_hooks?(:resource_access_token_hooks)
hook_data = Gitlab::DataBuilder::ResourceAccessToken.build(token, :expiring, resource)
resource.execute_hooks(hook_data, :resource_access_token_hooks)
end
def notification_service
NotificationService.new
end

View File

@ -1,9 +1,9 @@
---
name: cached_conflicts_with_types
feature_issue_url: https://gitlab.com/gitlab-org/gitlab/-/issues/439695
introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/145107
rollout_issue_url: https://gitlab.com/gitlab-org/gitlab/-/issues/442303
name: access_tokens_webhooks
feature_issue_url: https://gitlab.com/gitlab-org/gitlab/-/issues/426147
introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/141907
rollout_issue_url: https://gitlab.com/gitlab-org/gitlab/-/issues/439379
milestone: '16.10'
group: group::code review
group: group::authentication
type: gitlab_com_derisk
default_enabled: false

View File

@ -1,8 +0,0 @@
---
name: redis_hll_property_name_tracking
introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/137890
rollout_issue_url: https://gitlab.com/gitlab-org/gitlab/-/issues/432866
milestone: '16.9'
type: wip
group: group::analytics instrumentation
default_enabled: false

View File

@ -0,0 +1,14 @@
# frozen_string_literal: true
# See https://docs.gitlab.com/ee/development/migration_style_guide.html
# for more information on how to write migrations for GitLab.
class AddAccessTokenEventsToWebHooks < Gitlab::Database::Migration[2.2]
milestone '16.10'
enable_lock_retries!
def change
add_column :web_hooks, :resource_access_token_events, :boolean, null: false, default: false
end
end

View File

@ -0,0 +1 @@
fd2a8b5d4b87c7328bf6d20aa2c45a92781f84b0d53216a1d21157d6c5fa0c09

View File

@ -17538,6 +17538,7 @@ CREATE TABLE web_hooks (
name text,
description text,
custom_webhook_template text,
resource_access_token_events boolean DEFAULT false NOT NULL,
CONSTRAINT check_1e4d5cbdc5 CHECK ((char_length(name) <= 255)),
CONSTRAINT check_23a96ad211 CHECK ((char_length(description) <= 2048)),
CONSTRAINT check_69ef76ee0c CHECK ((char_length(custom_webhook_template) <= 4096))

View File

@ -15130,6 +15130,7 @@ Represents the approval policy.
| <a id="approvalpolicyenabled"></a>`enabled` | [`Boolean!`](#boolean) | Indicates whether this policy is enabled. |
| <a id="approvalpolicygroupapprovers"></a>`groupApprovers` **{warning-solid}** | [`[Group!]`](#group) | **Deprecated** in GitLab 16.5. Use `allGroupApprovers`. |
| <a id="approvalpolicyname"></a>`name` | [`String!`](#string) | Name of the policy. |
| <a id="approvalpolicypolicyscope"></a>`policyScope` **{warning-solid}** | [`PolicyScope`](#policyscope) | **Introduced** in GitLab 16.10. **Status**: Experiment. Scope of the policy. Returns `null` if Security Policy Scope experimental feature is disabled. |
| <a id="approvalpolicyroleapprovers"></a>`roleApprovers` | [`[MemberAccessLevelName!]`](#memberaccesslevelname) | Approvers of the role type. Users belonging to these role(s) alone will be approvers. |
| <a id="approvalpolicysource"></a>`source` | [`SecurityPolicySource!`](#securitypolicysource) | Source of the policy. Its fields depend on the source type. |
| <a id="approvalpolicyupdatedat"></a>`updatedAt` | [`Time!`](#time) | Timestamp of when the policy YAML was last updated. |
@ -25194,6 +25195,16 @@ Check permissions for the current user on a vulnerability finding.
| <a id="policyapprovalgroupid"></a>`id` | [`ID!`](#id) | ID of the namespace. |
| <a id="policyapprovalgroupweburl"></a>`webUrl` | [`String!`](#string) | Web URL of the group. |
### `PolicyScope`
#### Fields
| Name | Type | Description |
| ---- | ---- | ----------- |
| <a id="policyscopecomplianceframeworks"></a>`complianceFrameworks` | [`ComplianceFrameworkConnection!`](#complianceframeworkconnection) | Compliance Frameworks linked to the policy. (see [Connections](#connections)) |
| <a id="policyscopeexcludingprojects"></a>`excludingProjects` | [`ProjectConnection!`](#projectconnection) | Projects to which the policy should not be applied to. (see [Connections](#connections)) |
| <a id="policyscopeincludingprojects"></a>`includingProjects` | [`ProjectConnection!`](#projectconnection) | Projects to which the policy should be applied to. (see [Connections](#connections)) |
### `PreviewBillableUserChange`
#### Fields
@ -27948,6 +27959,7 @@ Represents the scan execution policy.
| <a id="scanexecutionpolicyeditpath"></a>`editPath` | [`String!`](#string) | URL of policy edit page. |
| <a id="scanexecutionpolicyenabled"></a>`enabled` | [`Boolean!`](#boolean) | Indicates whether this policy is enabled. |
| <a id="scanexecutionpolicyname"></a>`name` | [`String!`](#string) | Name of the policy. |
| <a id="scanexecutionpolicypolicyscope"></a>`policyScope` **{warning-solid}** | [`PolicyScope`](#policyscope) | **Introduced** in GitLab 16.10. **Status**: Experiment. Scope of the policy. Returns `null` if Security Policy Scope experimental feature is disabled. |
| <a id="scanexecutionpolicysource"></a>`source` | [`SecurityPolicySource!`](#securitypolicysource) | Source of the policy. Its fields depend on the source type. |
| <a id="scanexecutionpolicyupdatedat"></a>`updatedAt` | [`Time!`](#time) | Timestamp of when the policy YAML was last updated. |
| <a id="scanexecutionpolicyyaml"></a>`yaml` | [`String!`](#string) | YAML definition of the policy. |
@ -27966,6 +27978,7 @@ Represents the scan result policy.
| <a id="scanresultpolicyenabled"></a>`enabled` | [`Boolean!`](#boolean) | Indicates whether this policy is enabled. |
| <a id="scanresultpolicygroupapprovers"></a>`groupApprovers` **{warning-solid}** | [`[Group!]`](#group) | **Deprecated** in GitLab 16.5. Use `allGroupApprovers`. |
| <a id="scanresultpolicyname"></a>`name` | [`String!`](#string) | Name of the policy. |
| <a id="scanresultpolicypolicyscope"></a>`policyScope` **{warning-solid}** | [`PolicyScope`](#policyscope) | **Introduced** in GitLab 16.10. **Status**: Experiment. Scope of the policy. Returns `null` if Security Policy Scope experimental feature is disabled. |
| <a id="scanresultpolicyroleapprovers"></a>`roleApprovers` | [`[MemberAccessLevelName!]`](#memberaccesslevelname) | Approvers of the role type. Users belonging to these role(s) alone will be approvers. |
| <a id="scanresultpolicysource"></a>`source` | [`SecurityPolicySource!`](#securitypolicysource) | Source of the policy. Its fields depend on the source type. |
| <a id="scanresultpolicyupdatedat"></a>`updatedAt` | [`Time!`](#time) | Timestamp of when the policy YAML was last updated. |
@ -31525,7 +31538,6 @@ Detailed representation of whether a GitLab merge request can be merged.
| <a id="detailedmergestatusneed_rebase"></a>`NEED_REBASE` | Merge request needs to be rebased. |
| <a id="detailedmergestatusnot_approved"></a>`NOT_APPROVED` | Merge request must be approved before merging. |
| <a id="detailedmergestatusnot_open"></a>`NOT_OPEN` | Merge request must be open before merging. |
| <a id="detailedmergestatuspolicies_denied"></a>`POLICIES_DENIED` | There are denied policies for the merge request. |
| <a id="detailedmergestatuspreparing"></a>`PREPARING` | Merge request diff is being created. |
| <a id="detailedmergestatusunchecked"></a>`UNCHECKED` | Merge status has not been checked. |
@ -34776,6 +34788,7 @@ Implementations:
| <a id="orchestrationpolicyeditpath"></a>`editPath` | [`String!`](#string) | URL of policy edit page. |
| <a id="orchestrationpolicyenabled"></a>`enabled` | [`Boolean!`](#boolean) | Indicates whether this policy is enabled. |
| <a id="orchestrationpolicyname"></a>`name` | [`String!`](#string) | Name of the policy. |
| <a id="orchestrationpolicypolicyscope"></a>`policyScope` **{warning-solid}** | [`PolicyScope`](#policyscope) | **Introduced** in GitLab 16.10. **Status**: Experiment. Scope of the policy. Returns `null` if Security Policy Scope experimental feature is disabled. |
| <a id="orchestrationpolicyupdatedat"></a>`updatedAt` | [`Time!`](#time) | Timestamp of when the policy YAML was last updated. |
| <a id="orchestrationpolicyyaml"></a>`yaml` | [`String!`](#string) | YAML definition of the policy. |

View File

@ -1578,7 +1578,8 @@ GET /groups/:id/hooks/:hook_id
"alert_status": "executable",
"disabled_until": null,
"url_variables": [ ],
"created_at": "2012-10-12T17:04:47Z"
"created_at": "2012-10-12T17:04:47Z",
"resource_access_token_events": true
}
```
@ -1610,6 +1611,7 @@ POST /groups/:id/hooks
| `subgroup_events` | boolean | no | Trigger hook on subgroup events |
| `enable_ssl_verification` | boolean | no | Do SSL verification when triggering the hook |
| `token` | string | no | Secret token to validate received payloads; not returned in the response |
| `resource_access_token_events` | boolean | no | Trigger hook on project access token expiry events. |
### Edit group hook
@ -1641,6 +1643,7 @@ PUT /groups/:id/hooks/:hook_id
| `enable_ssl_verification` | boolean | no | Do SSL verification when triggering the hook. |
| `service_access_tokens_expiration_enforced` | boolean | no | Require service account access tokens to have an expiration date. |
| `token` | string | no | Secret token to validate received payloads. Not returned in the response. When you change the webhook URL, the secret token is reset and not retained. |
| `resource_access_token_events` | boolean | no | Trigger hook on project access token expiry events. |
### Delete group hook

View File

@ -839,7 +839,6 @@ Use `detailed_merge_status` instead of `merge_status` to account for all potenti
- `mergeable`: The branch can merge cleanly into the target branch.
- `not_approved`: Approval is required before merge.
- `not_open`: The merge request must be open before merge.
- `policies_denied`: The merge request contains denied policies.
- `jira_association_missing`: The title or description must reference a Jira issue.
### Preparation steps

View File

@ -2813,7 +2813,8 @@ GET /projects/:id/hooks/:hook_id
"alert_status": "executable",
"disabled_until": null,
"url_variables": [ ],
"created_at": "2012-10-12T17:04:47Z"
"created_at": "2012-10-12T17:04:47Z",
"resource_access_token_events": true
}
```
@ -2844,6 +2845,7 @@ POST /projects/:id/hooks
| `tag_push_events` | boolean | No | Trigger hook on tag push events. |
| `token` | string | No | Secret token to validate received payloads; the token isn't returned in the response. |
| `wiki_page_events` | boolean | No | Trigger hook on wiki events. |
| `resource_access_token_events` | boolean | No | Trigger hook on project access token expiry events. |
### Edit project hook
@ -2873,6 +2875,7 @@ PUT /projects/:id/hooks/:hook_id
| `tag_push_events` | boolean | No | Trigger hook on tag push events. |
| `token` | string | No | Secret token to validate received payloads. Not returned in the response. When you change the webhook URL, the secret token is reset and not retained. |
| `wiki_page_events` | boolean | No | Trigger hook on wiki page events. |
| `resource_access_token_events` | boolean | No | Trigger hook on project access token expiry events. |
### Delete project hook

View File

@ -41,8 +41,6 @@ Settings are not cascading by default. To define a cascading setting, take the f
class AddDelayedProjectRemovalCascadingSetting < Gitlab::Database::Migration[2.1]
include Gitlab::Database::MigrationHelpers::CascadingNamespaceSettings
enable_lock_retries!
def up
add_cascading_namespace_setting :delayed_project_removal, :boolean, default: false, null: false
end

View File

@ -20,14 +20,17 @@ for a list of terms used throughout the document.
A GitLab Rails instance accesses backend services by means of a Cloud Connector Service Access Token.
This is a token provided by the GitLab Rails application and holds information about which backend services and features in these services it can access.
To connect a feature using Cloud Connector:
The following sections cover the necessary steps to expose features both from existing and newly built
backend services through Cloud Connector.
### Connect a feature to an existing service
To connect a feature in an existing backend service to Cloud Connector:
1. [Complete the steps in GitLab Rails](#gitlab-rails)
1. [Complete the steps in CustomersDot](#customersdot)
1. [Complete the steps in the backend service](#backend-service)
### Connect a feature to an existing service
#### GitLab Rails
1. Call `CloudConnector::AccessService.new.access_token(scopes: [...])` with the list of scopes your feature requires and include
@ -139,6 +142,114 @@ and the [FastAPI](https://fastapi.tiangolo.com/) framework.
...
```
##### Testing
### Connect a new backend service to Cloud Connector
To integrate a new backend service that isn't already accessible by Cloud Connector features:
1. [Set up JWT validation](#set-up-jwt-validation).
1. [Make it available at `cloud.gitlab.com`](#add-a-new-cloud-connector-route).
#### Set up JWT validation
As mentioned in the [backend service section](#backend-service) for services that already use
Cloud Connector, each service must verify that the JWT sent by a GitLab instance is legitimate.
To accomplish this, a backend service must:
1. [Maintain a JSON Web Key Set (JWKS)](#maintain-jwks-for-token-validation).
1. [Validate JWTs with keys in this set](#validate-jwts-with-jwks).
For a detailed explanation of the mechanism behind this, refer to
[Architecture: Access control](architecture.md#access-control).
We strongly suggest using existing software libraries to handle JWKS and JWT authentication.
Examples include:
- [`go-jwt`](https://github.com/golang-jwt/)
- [`ruby-jwt`](https://github.com/jwt/ruby-jwt)
- [`python-jose`](https://github.com/mpdavis/python-jose)
##### Maintain JWKS for token validation
JWTs are cryptographically signed by the token authority when first issued.
GitLab instances then attach the JWTs in requests made to backend services.
To validate JWT service access tokens, the backend service must first obtain the JWKS
containing the public validation key that corresponds to the private signing key used
to sign the token. Because both GitLab.com and CustomersDot issue tokens,
the backend service must fetch the JWKS from both.
To fetch the JWKS, use the OIDC discovery endpoints exposed by GitLab.com and CustomersDot.
For each of these token authorities:
1. `GET /.well-known/openid-configuration`
Example response:
```json
{
"issuer": "https://customers.gitlab.com/",
"jwks_uri": "https://customers.gitlab.com/oauth/discovery/keys",
"id_token_signing_alg_values_supported": [
"RS256"
]
}
```
1. `GET <jwks_uri>`
Example response:
```json
{
"keys": [
{
"kty": "RSA",
"n": "sGy_cbsSmZ_Y4XV80eK_ICmz46XkyWVf6O667-mhDcN5FcSfPW7gqhyn7s052fWrZYmJJZ4PPyh6ZzZ_gZAaQM7Oe2VrpbFdCeJW0duR51MZj52FwShLfi-NOBz2GH9XuUsRBKnXt7wwKQTabH4WW7XL23Hi0eDjc9dyQmsr2-AbH05yVsrgvEYSsWiCGEgobPgNc51DwBoIcsJ-kFN591aO_qAkbpf1j7yAuAVG7TUxaditQhyZKkourPXXyx1R-u0Lx9UJyAV8ySqFxq3XDE_pg6ZuJ7M0zS0XnGI82g3Js5zAughrQyJMhKd8j5c8UfSGxhRBQh58QNl3UwoMjQ",
"e": "AQAB",
"kid": "ZoObkdsnUfqW_C_EfXp9DM6LUdzl0R-eXj6Hrb2lrNU",
"use": "sig",
"alg": "RS256"
}
]
}
```
1. Cache the response. We suggest letting the cache expire once a day.
The keys obtained this way can be used to validate JWTs issued by the respective token authority.
Exactly how this works depends on the programming language and libraries used. General instructions
can be found in [Locate JSON Web Key Sets](https://auth0.com/docs/secure/tokens/json-web-tokens/locate-json-web-key-sets).
Backend services may merge responses from both token authorities into a single cached result set.
##### Validate JWTs with JWKS
To validate a JWT:
1. Read the token string from the HTTP `Authorization` header.
1. Validate it using a JWT library object and the JWKS [obtained previously](#maintain-jwks-for-token-validation).
When validating a token, ensure that:
1. The token signature is correct.
1. The `aud` claim equals or contains the backend service (this field can be a string or an array).
1. The `iss` claim matches the issuer URL of the key used to validate it.
1. The `scopes` claim covers the functionality exposed by the requested endpoint (see [Backend service](#backend-service)).
#### Add a new Cloud Connector route
All Cloud Connector features must be accessed through `cloud.gitlab.com`, a global load-balancer that
routes requests to backend services based on path prefixes. For example, AI features must be requested
from `cloud.gitlab.com/ai/<AI-specific-path>`. The load-balancer then routes `<AI-specific-path>` to the AI gateway.
To connect a new backend service to Cloud Connector, you must claim a new path-prefix to route requests to your
service. For example, if you connect `foo-service`, a new route must be added that routes `cloud.gitlab.com/foo`
to `foo-service`.
Adding new routes requires access to production infrastructure configuration. If you require a new route to be
added, open an issue in the [`gitlab-org/gitlab` issue tracker](https://gitlab.com/gitlab-org/gitlab/-/issues/new)
and assign it to the Cloud Connector group.
## Testing
An example for how to set up an end-to-end integration with the AI gateway as the backend service can be found [here](../ai_features/index.md#setup).

View File

@ -95,10 +95,6 @@ class RemoveUsersUpdatedAtColumn < Gitlab::Database::Migration[2.1]
end
```
You can consider [enabling lock retries](../migration_style_guide.md#usage-with-transactional-migrations)
when you run a migration on big tables, because it might take some time to
acquire a lock on this table.
#### The removed column has an index or constraint that belongs to it
If the `down` method requires adding back any dropped indexes or constraints, that cannot
@ -127,7 +123,7 @@ end
In the `down` method, we check to see if the column already exists before adding it again.
We do this because the migration is non-transactional and might have failed while it was running.
The [`disable_ddl_transaction!`](../migration_style_guide.md#usage-with-non-transactional-migrations-disable_ddl_transaction)
The [`disable_ddl_transaction!`](../migration_style_guide.md#usage-with-non-transactional-migrations)
is used to disable the transaction that wraps the whole migration.
You can refer to the page [Migration Style Guide](../migration_style_guide.md)
@ -355,18 +351,12 @@ bundle exec rails g post_deployment_migration change_ci_builds_default
```ruby
class ChangeCiBuildsDefault < Gitlab::Database::Migration[2.1]
enable_lock_retries!
def change
change_column_default('ci_builds', 'partition_id', from: 100, to: 101)
end
end
```
[Enable lock retries](../migration_style_guide.md#usage-with-transactional-migrations)
when you run a migration on big tables, because it might take some time to
acquire a lock on this table.
### Clean up the `SafelyChangeColumnDefault` concern in the next minor release
In the next minor release, create a new merge request to remove the `columns_changing_default` call. Also remove the `SafelyChangeColumnDefault` include

View File

@ -204,8 +204,6 @@ trigger needs to be configured only once. If the model already has at least one
class TrackProjectRecordChanges < Gitlab::Database::Migration[2.1]
include Gitlab::Database::MigrationHelpers::LooseForeignKeyHelpers
enable_lock_retries!
def up
track_record_deletions(:projects)
end
@ -274,8 +272,6 @@ Migration for removing the trigger:
class UnTrackProjectRecordChanges < Gitlab::Database::Migration[2.1]
include Gitlab::Database::MigrationHelpers::LooseForeignKeyHelpers
enable_lock_retries!
def up
untrack_record_deletions(:projects)
end

View File

@ -60,8 +60,6 @@ Consider the next release as "Release N.M".
Execute a standard migration (not a post-migration):
```ruby
enable_lock_retries!
def up
rename_table_safely(:issues, :tickets)
end

View File

@ -121,7 +121,7 @@ the following preparations into account.
- Ensure the `db:check-schema` job has run successfully and no unexpected schema changes are introduced in a rollback. This job may only trigger a warning if the schema was changed.
- Verify that the previously mentioned jobs continue to succeed whenever you modify the migrations during the review process.
- Add tests for the migration in `spec/migrations` if necessary. See [Testing Rails migrations at GitLab](testing_guide/testing_migrations_guide.md) for more details.
- When [high-traffic](https://gitlab.com/gitlab-org/gitlab/-/blob/master/rubocop/rubocop-migrations.yml#L3) tables are involved in the migration, use the [`enable_lock_retries`](migration_style_guide.md#retry-mechanism-when-acquiring-database-locks) method to enable lock-retries. Review the relevant [examples in our documentation](migration_style_guide.md#usage-with-transactional-migrations) for use cases and solutions.
- [Lock retries](migration_style_guide.md#retry-mechanism-when-acquiring-database-locks) are enabled by default for all transactional migrations. For non-transactional migrations review the relevant [documentation](migration_style_guide.md#usage-with-non-transactional-migrations) for use cases and solutions.
- Ensure RuboCop checks are not disabled unless there's a valid reason to.
- When adding an index to a [large table](https://gitlab.com/gitlab-org/gitlab/-/blob/master/rubocop/rubocop-migrations.yml#L3),
test its execution using `CREATE INDEX CONCURRENTLY` in [Database Lab](database/database_lab.md) and add the execution time to the MR description:

View File

@ -280,8 +280,6 @@ depending on [how long a migration takes](#how-long-a-migration-should-take)
When you use the existing helpers including `add_concurrent_index`,
they automatically turn off the statement timeout as needed.
In rare cases, you might need to set the timeout limit yourself by [using `disable_statement_timeout`](#temporarily-turn-off-the-statement-timeout-limit).
- Lock timeout: if your migration must execute as a transaction but can possibly time out while
acquiring a lock, [use `enable_lock_retries!`](#usage-with-transactional-migrations).
NOTE:
To run migrations, we directly connect to the primary database, bypassing PgBouncer
@ -500,38 +498,22 @@ offers a method to retry the operations with different `lock_timeout` settings
and wait time between the attempts. Multiple shorter attempts to acquire the necessary
lock allow the database to process other statements.
There are two distinct ways to use lock retries:
Lock retries are controlled by two different helpers:
1. Inside a transactional migration: use `enable_lock_retries!`.
1. Inside a non-transactional migration: use `with_lock_retries`.
1. `enable_lock_retries!`: enabled by default for all `transactional` migrations.
1. `with_lock_retries`: enabled manually for a block within `non-transactional` migrations.
If possible, enable lock-retries for any migration that touches a [high-traffic table](#high-traffic-tables).
### Transactional migrations
### Usage with transactional migrations
Regular migrations execute the full migration in a transaction. The lock-retry mechanism is enabled by default (unless `disable_ddl_transaction!` is used).
Regular migrations execute the full migration in a transaction. We can enable the
lock-retry methodology by calling `enable_lock_retries!` at the migration level.
This leads to the lock timeout being controlled for this migration. Also, it can lead to retrying the full
This leads to the lock timeout being controlled for the migration. Also, it can lead to retrying the full
migration if the lock could not be granted within the timeout.
Note that, while this is currently an opt-in setting, we prefer to use lock-retries for all migrations and
plan to make this the default going forward.
Occasionally a migration may need to acquire multiple locks on different objects.
To prevent catalog bloat, ask for all those locks explicitly before performing any DDL.
A better strategy is to split the migration, so that we only need to acquire one lock at a time.
#### Removing a column
```ruby
enable_lock_retries!
def change
remove_column :users, :full_name, :string
end
```
#### Multiple changes on the same table
With the lock-retry methodology enabled, all operations wrap into a single transaction. When you have the lock,
@ -539,8 +521,6 @@ you should do as much as possible inside the transaction rather than trying to g
Be careful about running long database statements within the block. The acquired locks are kept until the transaction (block) finishes and depending on the lock type, it might block other database operations.
```ruby
enable_lock_retries!
def up
add_column :users, :full_name, :string
add_column :users, :bio, :string
@ -552,28 +532,12 @@ def down
end
```
#### Removing a foreign key
```ruby
enable_lock_retries!
def up
remove_foreign_key :issues, :projects
end
def down
add_foreign_key :issues, :projects
end
```
#### Changing default value for a column
Note that changing column defaults can cause application downtime if a multi-release process is not followed.
See [avoiding downtime in migrations for changing column defaults](database/avoiding_downtime_in_migrations.md#changing-column-defaults) for details.
```ruby
enable_lock_retries!
def up
change_column_default :merge_requests, :lock_version, from: nil, to: 0
end
@ -583,25 +547,6 @@ def down
end
```
#### Creating a new table with a foreign key
We can wrap the `create_table` method with `with_lock_retries`:
```ruby
enable_lock_retries!
def up
create_table :issues do |t|
t.references :project, index: true, null: false, foreign_key: { on_delete: :cascade }
t.string :title, limit: 255
end
end
def down
drop_table :issues
end
```
#### Creating a new table when we have two foreign keys
Only one foreign key should be created per transaction. This is because [the addition of a foreign key constraint requires a `SHARE ROW EXCLUSIVE` lock on the referenced table](https://www.postgresql.org/docs/12/sql-createtable.html#:~:text=The%20addition%20of%20a%20foreign%20key%20constraint%20requires%20a%20SHARE%20ROW%20EXCLUSIVE%20lock%20on%20the%20referenced%20table), and locking multiple tables in the same transaction should be avoided.
@ -666,7 +611,7 @@ def down
end
```
### Usage with non-transactional migrations (`disable_ddl_transaction!`)
### Usage with non-transactional migrations
Only when we disable transactional migrations using `disable_ddl_transaction!` can we use
the `with_lock_retries` helper to guard an individual sequence of steps. It opens a transaction

View File

@ -30,6 +30,7 @@ Event type | Trigger
[Feature flag event](#feature-flag-events) | A feature flag is turned on or off.
[Release event](#release-events) | A release is created, updated, or deleted.
[Emoji event](#emoji-events) | An emoji reaction is added or removed.
[Project or group access token event](#project-and-group-access-token-events) | A project or group access token will expire in seven days.
**Events triggered for group webhooks only:**
@ -2048,3 +2049,77 @@ Payload example:
}
}
```
## Project and group access token events
[Introduced](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/141907) in GitLab 16.10 [with a flag](../../../administration/feature_flags.md) named `access_token_webhooks`. Disabled by default.
An access token event is triggered when a [project or group access token](../../../security/token_overview.md) will expire in seven days or less.
The available values for `event_name` in the payload are:
- `expiring`
Request header:
```plaintext
X-Gitlab-Event: Resource Access Token Hook
```
Payload example for project:
```json
{
"object_kind": "access_token",
"project_id": 7,
"project": {
"id": 7,
"name": "Flight",
"description": "Eum dolore maxime atque reprehenderit voluptatem.",
"web_url": "https://example.com/flightjs/Flight",
"avatar_url": null,
"git_ssh_url": "ssh://git@example.com/flightjs/Flight.git",
"git_http_url": "https://example.com/flightjs/Flight.git",
"namespace": "Flightjs",
"visibility_level": 0,
"path_with_namespace": "flightjs/Flight",
"default_branch": "master",
"ci_config_path": null,
"homepage": "https://example.com/flightjs/Flight",
"url": "ssh://git@example.com/flightjs/Flight.git",
"ssh_url": "ssh://git@example.com/flightjs/Flight.git",
"http_url": "https://example.com/flightjs/Flight.git"
},
"object_attributes": {
"user_id": 90,
"created_at": "2024-02-05T03:13:44.855Z",
"id": 25,
"name": "acd",
"expires_at": "2024-01-26"
},
"event_name": "expiring_access_token"
}
```
Payload example for group:
```json
{
"object_kind": "access_token",
"group_id": 35,
"group": {
"group_name": "Twitter",
"group_path": "twitter",
"full_path": "twitter",
"group_id": 35
},
"object_attributes": {
"user_id": 90,
"created_at": "2024-01-24 16:27:40 UTC",
"id": 25,
"name": "acd",
"expires_at": "2024-01-26"
},
"event_name": "expiring_access_token"
}
```

View File

@ -15,6 +15,7 @@ module API
expose :releases_events, documentation: { type: 'boolean' }
expose :push_events_branch_filter, documentation: { type: 'string', example: 'my-branch-*' }
expose :emoji_events, documentation: { type: 'boolean' }
expose :resource_access_token_events, documentation: { type: 'boolean' }
end
end
end

View File

@ -32,6 +32,7 @@ module API
optional :deployment_events, type: Boolean, desc: "Trigger hook on deployment events"
optional :releases_events, type: Boolean, desc: "Trigger hook on release events"
optional :emoji_events, type: Boolean, desc: "Trigger hook on emoji events"
optional :resource_access_token_events, type: Boolean, desc: "Trigger hook on project access token expiry events"
optional :enable_ssl_verification, type: Boolean, desc: "Do SSL verification when triggering the hook"
optional :token, type: String, desc: "Secret token to validate received payloads; this will not be returned in the response"
optional :push_events_branch_filter, type: String, desc: "Trigger hook on specified branch only"

View File

@ -0,0 +1,42 @@
# frozen_string_literal: true
module Gitlab
  module DataBuilder
    # Assembles the webhook payload for resource access token events
    # (project and group access tokens that are about to expire).
    module ResourceAccessToken
      extend self

      # Returns the hook payload hash for the given token.
      #
      # resource_access_token - the token record; must respond to #hook_attrs.
      # event                 - event symbol (currently only :expiring).
      # resource              - owning Project or Group.
      def build(resource_access_token, event, resource)
        data = { object_kind: 'access_token' }

        if resource.is_a?(Project)
          data[:project] = resource.hook_attrs
        else
          data[:group] = group_data(resource)
        end

        data.merge(
          object_attributes: resource_access_token.hook_attrs,
          event_name: event_data(event)
        )
      end

      private

      # Maps the internal event symbol to the public event name string.
      # Unknown events map to nil, mirroring an unmatched `case`.
      def event_data(event)
        'expiring_access_token' if event == :expiring
      end

      # Minimal set of group attributes exposed in the payload.
      def group_data(group)
        {
          group_name: group.name,
          group_path: group.path,
          group_id: group.id
        }
      end
    end
  end
end

View File

@ -16,6 +16,20 @@ module Gitlab
end
end
included do
private
attr_accessor :with_lock_retries_used
end
def with_lock_retries_used!
self.with_lock_retries_used = true
end
def with_lock_retries_used?
with_lock_retries_used
end
delegate :enable_lock_retries?, to: :class
end

View File

@ -53,8 +53,7 @@ module Gitlab
#
# In order to retry the block, the method wraps the block into a transaction.
#
# When called inside an open transaction it will execute the block directly if lock retries are enabled
# with `enable_lock_retries!` at migration level, otherwise it will raise an error.
# When called inside an open transaction it will execute the block directly.
#
# ==== Examples
# # Invoking without parameters
@ -85,17 +84,20 @@ module Gitlab
# * +env+ - [Hash] custom environment hash, see the example with `DISABLE_LOCK_RETRIES`
def with_lock_retries(*args, **kwargs, &block)
if transaction_open?
if enable_lock_retries?
Gitlab::AppLogger.warn 'Lock retries already enabled, executing the block directly'
if with_lock_retries_used?
Gitlab::AppLogger.warn 'WithLockRetries used already, executing the block directly'
yield
else
raise <<~EOF
#{__callee__} can not be run inside an already open transaction
#{__callee__} can not be run inside an already open transaction.
Use migration-level lock retries instead, see https://docs.gitlab.com/ee/development/migration_style_guide.html#retry-mechanism-when-acquiring-database-locks
Lock retries are enabled by default for transactional migrations, so this can be run without `#{__callee__}`.
For more details, see: https://docs.gitlab.com/ee/development/migration_style_guide.html#transactional-migrations
EOF
end
else
with_lock_retries_used!
super(*args, **kwargs.merge(allow_savepoints: false), &block)
end
end

View File

@ -20,13 +20,31 @@ module Gitlab
migration.enable_lock_retries?
end
def with_lock_retries_used!
# regular AR migrations don't have this,
# only ones inheriting from Gitlab::Database::Migration have
return unless migration.respond_to?(:with_lock_retries_used!)
migration.with_lock_retries_used!
end
def with_lock_retries_used?
# regular AR migrations don't have this,
# only ones inheriting from Gitlab::Database::Migration have
return false unless migration.respond_to?(:with_lock_retries_used?)
migration.with_lock_retries_used?
end
end
module ActiveRecordMigratorLockRetries
# We patch the original method to start a transaction
# using the WithLockRetries methodology for the whole migration.
def ddl_transaction(migration, &block)
if use_transaction?(migration) && migration.enable_lock_retries?
if use_transaction?(migration)
migration.with_lock_retries_used!
Gitlab::Database::WithLockRetries.new(
connection: migration.migration_connection,
klass: migration.migration_class,

View File

@ -0,0 +1,24 @@
# frozen_string_literal: true
module Gitlab
  module HookData
    # Serializes a project or group access token into the safe subset of
    # attributes that may appear in webhook payloads.
    class ResourceAccessTokenBuilder < BaseBuilder
      # Allow-list of token attributes safe to expose externally.
      SAFE_HOOK_ATTRIBUTES = %i[
        user_id
        created_at
        id
        name
        expires_at
      ].freeze

      alias_method :resource_access_token, :object

      # Returns a hash (with indifferent access) containing only the
      # allow-listed attributes of the token.
      def build
        attrs = resource_access_token.attributes.with_indifferent_access
        attrs.slice(*SAFE_HOOK_ATTRIBUTES)
      end
    end
  end
end

View File

@ -98,10 +98,6 @@ module Gitlab
unique_properties = EventDefinitions.unique_properties(event_name)
return if unique_properties.empty?
if Feature.disabled?(:redis_hll_property_name_tracking, type: :wip)
unique_properties = handle_legacy_property_names(unique_properties, event_name)
end
unique_properties.each do |property_name|
unless kwargs[property_name]
message = "#{event_name} should be triggered with a named parameter '#{property_name}'."
@ -115,18 +111,6 @@ module Gitlab
end
end
def handle_legacy_property_names(unique_properties, event_name)
# make sure we're not incrementing the user_id counter with project_id value
return [:user] if event_name.to_s == 'user_visited_dashboard'
return unique_properties if unique_properties.length == 1
# in case a new event got defined with multiple unique_properties, raise an error
raise Gitlab::InternalEvents::EventDefinitions::InvalidMetricConfiguration,
"The same event cannot have several unique properties defined. " \
"Event: #{event_name}, unique values: #{unique_properties}"
end
def trigger_snowplow_event(event_name, category, additional_properties, kwargs)
user = kwargs[:user]
project = kwargs[:project]

View File

@ -148,7 +148,7 @@ module Gitlab
key = event_name
legacy_event_with_property_name = used_in_aggregate_metric && legacy_events.include?(event_name)
if Feature.enabled?(:redis_hll_property_name_tracking, type: :wip) && property_name && !legacy_event_with_property_name
if property_name && !legacy_event_with_property_name
key = "#{key}-#{formatted_property_name(property_name)}"
end

View File

@ -3,8 +3,7 @@
# This file has been generated using the script included in
# the description of https://gitlab.com/gitlab-org/gitlab/-/merge_requests/137890
#
# It is only safe to regenerate it using the same script if the
# :redis_hll_property_name_tracking feature flag is disabled on prod environment.
# It is not safe to regenerate it using the same script
---
agent_users_using_ci_tunnel-user: agent_users_using_ci_tunnel
ci_template_included-project: ci_template_included
@ -117,7 +116,6 @@ user_viewed_dashboard_list-user: user_viewed_dashboard_list
user_viewed_instrumentation_directions-user: user_viewed_instrumentation_directions
user_viewed_visualization_designer-user: user_viewed_visualization_designer
user_visited_dashboard-user: user_visited_dashboard
user_visited_dashboard-project: user_visited_dashboard
value_streams_dashboard_change_failure_rate_link_clicked-user: value_streams_dashboard_change_failure_rate_link_clicked
value_streams_dashboard_contributor_count_link_clicked-user: value_streams_dashboard_contributor_count_link_clicked
value_streams_dashboard_cycle_time_link_clicked-user: value_streams_dashboard_cycle_time_link_clicked

View File

@ -916,6 +916,9 @@ msgstr ""
msgid "%{linkStart}Advanced search%{linkEnd} is enabled."
msgstr ""
msgid "%{link_start}Add a start date and due date%{link_end} to view a burndown chart."
msgstr ""
msgid "%{listToShow}, and %{awardsListLength} more"
msgstr ""
@ -3031,9 +3034,6 @@ msgstr ""
msgid "Add request manually"
msgstr ""
msgid "Add start and due date"
msgstr ""
msgid "Add suggestion to batch"
msgstr ""
@ -8199,12 +8199,27 @@ msgstr ""
msgid "Billing|An error occurred while removing a billable member."
msgstr ""
msgid "Billing|Are you sure you want to continue?"
msgstr ""
msgid "Billing|Assign seat"
msgstr ""
msgid "Billing|Assign seats"
msgstr ""
msgid "Billing|Awaiting member signup"
msgstr ""
msgid "Billing|Cannot remove user"
msgstr ""
msgid "Billing|Confirm bulk seat allocation"
msgstr ""
msgid "Billing|Confirm bulk seat unassignment"
msgstr ""
msgid "Billing|Direct memberships"
msgstr ""
@ -8246,6 +8261,12 @@ msgstr ""
msgid "Billing|Project invite"
msgstr ""
msgid "Billing|Remove seat"
msgstr ""
msgid "Billing|Remove seats"
msgstr ""
msgid "Billing|Remove user %{username} from your subscription"
msgstr ""
@ -8255,6 +8276,16 @@ msgstr ""
msgid "Billing|Something went wrong when un-assigning the add-on to this member. If the problem persists, please %{supportLinkStart}contact support%{supportLinkEnd}."
msgstr ""
msgid "Billing|This action will assign a GitLab Duo Pro seat to 1 user"
msgid_plural "Billing|This action will assign a GitLab Duo Pro seat to %d users"
msgstr[0] ""
msgstr[1] ""
msgid "Billing|This action will remove GitLab Duo Pro seat from 1 user"
msgid_plural "Billing|This action will remove GitLab Duo Pro seats from %d users"
msgstr[0] ""
msgstr[1] ""
msgid "Billing|To ensure all members can access the group when your trial ends, you can upgrade to a paid tier."
msgstr ""
@ -16790,9 +16821,6 @@ msgstr ""
msgid "Denied authorization of chat nickname %{user_name}."
msgstr ""
msgid "Denied licenses must be removed or approved."
msgstr ""
msgid "Deny"
msgstr ""
@ -23433,18 +23461,33 @@ msgid_plural "GoogleArtifactRegistry|%d more tags"
msgstr[0] ""
msgstr[1] ""
msgid "GoogleArtifactRegistry|After the policies have been created, select %{strongStart}Save changes%{strongEnd} to continue."
msgstr ""
msgid "GoogleArtifactRegistry|An error occurred while fetching the artifact details."
msgstr ""
msgid "GoogleArtifactRegistry|An error occurred while fetching the artifacts."
msgstr ""
msgid "GoogleArtifactRegistry|Before you begin, %{linkStart}install the Google Cloud CLI%{linkEnd}."
msgstr ""
msgid "GoogleArtifactRegistry|Built"
msgstr ""
msgid "GoogleArtifactRegistry|Configuration instructions"
msgstr ""
msgid "GoogleArtifactRegistry|Configure Google Cloud IAM policies"
msgstr ""
msgid "GoogleArtifactRegistry|Configure in settings"
msgstr ""
msgid "GoogleArtifactRegistry|Copy command"
msgstr ""
msgid "GoogleArtifactRegistry|Copy digest"
msgstr ""
@ -23487,12 +23530,21 @@ msgstr ""
msgid "GoogleArtifactRegistry|Push new ones from your CI/CD pipeline."
msgstr ""
msgid "GoogleArtifactRegistry|Replace %{codeStart}your_access_token%{codeEnd} with a new %{linkStart}personal access token%{linkEnd} with the %{strongStart}read_api%{strongEnd} scope. This token gets information from your Google Cloud IAM integration in GitLab."
msgstr ""
msgid "GoogleArtifactRegistry|Replace %{codeStart}your_google_cloud_project_id%{codeEnd} with your Google Cloud project ID."
msgstr ""
msgid "GoogleArtifactRegistry|Repository"
msgstr ""
msgid "GoogleArtifactRegistry|Repository: %{repository}"
msgstr ""
msgid "GoogleArtifactRegistry|Run the following command to setup IAM read and write policies in your Google Cloud project."
msgstr ""
msgid "GoogleArtifactRegistry|Tags"
msgstr ""
@ -23508,6 +23560,12 @@ msgstr ""
msgid "GoogleArtifactRegistry|Virtual size"
msgstr ""
msgid "GoogleArtifactRegistry|You might be prompted to sign into Google."
msgstr ""
msgid "GoogleArtifactRegistry|Your Google Cloud project must have specific Identity and Access Management (IAM) policies to use the Artifact Registry repository in this GitLab project."
msgstr ""
msgid "GoogleCloudPlatformService|%{link_start}Explore Google Cloud integration with GitLab%{link_end}, for CI/CD and more."
msgstr ""
@ -52811,6 +52869,9 @@ msgstr ""
msgid "Tracing|Service"
msgstr ""
msgid "Tracing|Showing %{count} traces"
msgstr ""
msgid "Tracing|Status"
msgstr ""
@ -56174,6 +56235,9 @@ msgstr ""
msgid "Webhooks|A wiki page is created or updated."
msgstr ""
msgid "Webhooks|An access token is going to expire in the next 7 days. %{help_link}?"
msgstr ""
msgid "Webhooks|An emoji is awarded or revoked. %{help_link}?"
msgstr ""
@ -56255,6 +56319,9 @@ msgstr ""
msgid "Webhooks|Pipeline events"
msgstr ""
msgid "Webhooks|Project or group access token events"
msgstr ""
msgid "Webhooks|Regular expression"
msgstr ""
@ -56474,6 +56541,9 @@ msgstr ""
msgid "Which emoji events trigger webhooks"
msgstr ""
msgid "Which project or group access token events trigger webhooks"
msgstr ""
msgid "While it's rare to have no vulnerabilities, it can happen. In any event, we ask that you please double check your settings to make sure you've set up your dashboard correctly."
msgstr ""

View File

@ -59,7 +59,7 @@
"@gitlab/cluster-client": "^2.1.0",
"@gitlab/favicon-overlay": "2.0.0",
"@gitlab/fonts": "^1.3.0",
"@gitlab/svgs": "3.88.0",
"@gitlab/svgs": "3.89.0",
"@gitlab/ui": "78.1.1",
"@gitlab/visual-review-tools": "1.7.3",
"@gitlab/web-ide": "^0.0.1-dev-20240226152102",

View File

@ -25,7 +25,6 @@ RSpec.describe 'Milestone', feature_category: :team_planning do
click_button 'Create milestone'
expect(find_by_testid('no-issues-alert')).to have_content('Assign some issues to this milestone.')
expect(page).to have_content('Nov 16, 2016Dec 16, 2016')
end

View File

@ -23,7 +23,8 @@
"alert_status",
"disabled_until",
"emoji_events",
"custom_webhook_template"
"custom_webhook_template",
"resource_access_token_events"
],
"optional": [
"url_variables"
@ -99,6 +100,9 @@
"emoji_events": {
"type": "boolean"
},
"resource_access_token_events": {
"type": "boolean"
},
"alert_status": {
"type": "string",
"enum": [

View File

@ -1,4 +1,4 @@
import { GlEmptyState } from '@gitlab/ui';
import { GlEmptyState, GlButton } from '@gitlab/ui';
import { shallowMount } from '@vue/test-utils';
import AlertManagementEmptyState from '~/alert_management/components/alert_management_empty_state.vue';
import defaultProvideValues from '../mocks/alerts_provide_config.json';
@ -19,11 +19,28 @@ describe('AlertManagementEmptyState', () => {
mountComponent();
});
const EmptyState = () => wrapper.findComponent(GlEmptyState);
const findEmptyState = () => wrapper.findComponent(GlEmptyState);
const findButton = () => wrapper.findComponent(GlButton);
describe('Empty state', () => {
it('shows empty state', () => {
expect(EmptyState().exists()).toBe(true);
it('renders empty state', () => {
expect(findEmptyState().exists()).toBe(true);
});
it("does not show the button is user can't enable alert management", () => {
expect(findButton().exists()).toBe(false);
});
it('shows the button if user can enable alert management', () => {
mountComponent({
provide: {
userCanEnableAlertManagement: true,
alertManagementEnabled: true,
},
});
expect(findButton().exists()).toBe(true);
expect(findButton().text()).toBe('Authorize external service');
});
});
});

View File

@ -203,6 +203,19 @@ describe('buildClient', () => {
expectErrorToBeReported(new Error(FETCHING_TRACES_ERROR));
});
it('passes the abort controller to axios', async () => {
axiosMock.onGet(tracingUrl).reply(200, { traces: [] });
const abortController = new AbortController();
await client.fetchTraces({ abortController });
expect(axios.get).toHaveBeenCalledWith(tracingUrl, {
withCredentials: true,
params: expect.any(URLSearchParams),
signal: abortController.signal,
});
});
describe('sort order', () => {
beforeEach(() => {
axiosMock.onGet(tracingUrl).reply(200, {
@ -431,6 +444,19 @@ describe('buildClient', () => {
expect(await client.fetchTracesAnalytics()).toEqual([]);
});
it('passes the abort controller to axios', async () => {
axiosMock.onGet(tracingAnalyticsUrl).reply(200, {});
const abortController = new AbortController();
await client.fetchTracesAnalytics({ abortController });
expect(axios.get).toHaveBeenCalledWith(tracingAnalyticsUrl, {
withCredentials: true,
params: expect.any(URLSearchParams),
signal: abortController.signal,
});
});
describe('query filter', () => {
beforeEach(() => {
axiosMock.onGet(tracingAnalyticsUrl).reply(200, {
@ -1058,7 +1084,7 @@ describe('buildClient', () => {
axiosMock.onGet(logsSearchUrl).reply(200, mockResponse);
});
it('fetches logs from the tracing URL', async () => {
it('fetches logs from the logs URL', async () => {
const result = await client.fetchLogs();
expect(axios.get).toHaveBeenCalledTimes(1);
@ -1097,5 +1123,45 @@ describe('buildClient', () => {
await expect(client.fetchLogs()).rejects.toThrow(FETCHING_LOGS_ERROR);
expectErrorToBeReported(new Error(FETCHING_LOGS_ERROR));
});
describe('filters', () => {
describe('date range filter', () => {
it('handle predefined date range value', async () => {
await client.fetchLogs({
filters: { dateRange: { value: '5m' } },
});
expect(getQueryParam()).toContain(`period=5m`);
});
it('handle custom date range value', async () => {
await client.fetchLogs({
filters: {
dateRange: {
endDate: new Date('2020-07-06'),
startDate: new Date('2020-07-05'),
value: 'custom',
},
},
});
expect(getQueryParam()).toContain(
'start_time=2020-07-05T00:00:00.000Z&end_time=2020-07-06T00:00:00.000Z',
);
});
});
it('ignores empty filter', async () => {
await client.fetchLogs({
filters: { dateRange: {} },
});
expect(getQueryParam()).toBe('');
});
it('ignores undefined filter', async () => {
await client.fetchLogs({
filters: { dateRange: undefined },
});
expect(getQueryParam()).toBe('');
});
});
});
});

View File

@ -0,0 +1,174 @@
import { GlDaterangePicker } from '@gitlab/ui';
import { shallowMountExtended } from 'helpers/vue_test_utils_helper';
import DateRangesDropdown from '~/analytics/shared/components/date_ranges_dropdown.vue';
import DateRangeFilter from '~/observability/components/date_range_filter.vue';
describe('DateRangeFilter', () => {
let wrapper;
const defaultTimeRange = {
value: '1h',
startDate: new Date(),
endDate: new Date(),
};
const mount = (selected) => {
wrapper = shallowMountExtended(DateRangeFilter, {
propsData: {
selected,
},
});
};
beforeEach(() => {
mount(defaultTimeRange);
});
const findDateRangesDropdown = () => wrapper.findComponent(DateRangesDropdown);
const findDateRangesPicker = () => wrapper.findComponent(GlDaterangePicker);
it('renders the date ranges dropdown with the default selected value and options', () => {
const dateRangesDropdown = findDateRangesDropdown();
expect(dateRangesDropdown.exists()).toBe(true);
expect(dateRangesDropdown.props('selected')).toBe(defaultTimeRange.value);
expect(dateRangesDropdown.props('dateRangeOptions')).toMatchInlineSnapshot(`
Array [
Object {
"endDate": 2020-07-06T00:00:00.000Z,
"startDate": 2020-07-05T23:55:00.000Z,
"text": "Last 5 minutes",
"value": "5m",
},
Object {
"endDate": 2020-07-06T00:00:00.000Z,
"startDate": 2020-07-05T23:45:00.000Z,
"text": "Last 15 minutes",
"value": "15m",
},
Object {
"endDate": 2020-07-06T00:00:00.000Z,
"startDate": 2020-07-05T23:30:00.000Z,
"text": "Last 30 minutes",
"value": "30m",
},
Object {
"endDate": 2020-07-06T00:00:00.000Z,
"startDate": 2020-07-05T23:00:00.000Z,
"text": "Last 1 hour",
"value": "1h",
},
Object {
"endDate": 2020-07-06T00:00:00.000Z,
"startDate": 2020-07-05T20:00:00.000Z,
"text": "Last 4 hours",
"value": "4h",
},
Object {
"endDate": 2020-07-06T00:00:00.000Z,
"startDate": 2020-07-05T12:00:00.000Z,
"text": "Last 12 hours",
"value": "12h",
},
Object {
"endDate": 2020-07-06T00:00:00.000Z,
"startDate": 2020-07-05T00:00:00.000Z,
"text": "Last 24 hours",
"value": "24h",
},
Object {
"endDate": 2020-07-06T00:00:00.000Z,
"startDate": 2020-06-29T00:00:00.000Z,
"text": "Last 7 days",
"value": "7d",
},
Object {
"endDate": 2020-07-06T00:00:00.000Z,
"startDate": 2020-06-22T00:00:00.000Z,
"text": "Last 14 days",
"value": "14d",
},
Object {
"endDate": 2020-07-06T00:00:00.000Z,
"startDate": 2020-06-06T00:00:00.000Z,
"text": "Last 30 days",
"value": "30d",
},
]
`);
});
it('does not set the selected value if not specified', () => {
mount(undefined);
expect(findDateRangesDropdown().props('selected')).toBe('');
});
it('renders the daterange-picker if custom option is selected', () => {
const timeRange = {
startDate: new Date('2022-01-01'),
endDate: new Date('2022-01-02'),
};
mount({ value: 'custom', startDate: timeRange.startDate, endDate: timeRange.endDate });
expect(findDateRangesPicker().exists()).toBe(true);
expect(findDateRangesPicker().props('defaultStartDate')).toBe(timeRange.startDate);
expect(findDateRangesPicker().props('defaultEndDate')).toBe(timeRange.endDate);
});
it('emits the onDateRangeSelected event when the time range is selected', async () => {
const timeRange = {
value: '24h',
startDate: new Date('2022-01-01'),
endDate: new Date('2022-01-02'),
};
await findDateRangesDropdown().vm.$emit('selected', timeRange);
expect(wrapper.emitted('onDateRangeSelected')).toEqual([[{ ...timeRange }]]);
});
it('emits the onDateRangeSelected event when a custom time range is selected', async () => {
const timeRange = {
startDate: new Date('2021-01-01'),
endDate: new Date('2021-01-02'),
};
await findDateRangesDropdown().vm.$emit('customDateRangeSelected');
expect(findDateRangesPicker().props('startOpened')).toBe(true);
expect(wrapper.emitted('onDateRangeSelected')).toBeUndefined();
await findDateRangesPicker().vm.$emit('input', timeRange);
expect(wrapper.emitted('onDateRangeSelected')).toEqual([
[
{
...timeRange,
value: 'custom',
},
],
]);
});
describe('start opened', () => {
it('sets startOpend to true if custom date is selected without start and end date', () => {
mount({ value: 'custom' });
expect(findDateRangesPicker().props('startOpened')).toBe(true);
});
it('sets startOpend to false if custom date is selected with start and end date', () => {
mount({
value: 'custom',
startDate: new Date('2022-01-01'),
endDate: new Date('2022-01-02'),
});
expect(findDateRangesPicker().props('startOpened')).toBe(false);
});
it('sets startOpend to true if customDateRangeSelected is emitted', async () => {
await findDateRangesDropdown().vm.$emit('customDateRangeSelected');
expect(findDateRangesPicker().props('startOpened')).toBe(true);
});
});
});

View File

@ -13,6 +13,7 @@ import { SHOW_DELETE_SUCCESS_ALERT } from '~/packages_and_registries/shared/cons
import * as packageUtils from '~/packages_and_registries/shared/utils';
import InfrastructureSearch from '~/packages_and_registries/infrastructure_registry/list/components/infrastructure_search.vue';
import InfrastructureTitle from '~/packages_and_registries/infrastructure_registry/list/components/infrastructure_title.vue';
import { FILTERED_SEARCH_TERM } from '~/vue_shared/components/filtered_search_bar/constants';
jest.mock('~/lib/utils/common_utils');
@ -33,6 +34,7 @@ describe('packages_list_app', () => {
const findEmptyState = () => wrapper.findComponent(GlEmptyState);
const findListComponent = () => wrapper.findComponent(PackageList);
const findInfrastructureSearch = () => wrapper.findComponent(InfrastructureSearch);
const findInfrastructureTitle = () => wrapper.findComponent(InfrastructureTitle);
const createStore = ({ isGroupPage = false, filter = [], packageCount = 0 } = {}) => {
store = new Vuex.Store({
@ -158,6 +160,10 @@ describe('packages_list_app', () => {
expect(heading().text()).toBe('You have no Terraform modules in your project');
});
it('does not show infrastructure registry title', () => {
expect(findInfrastructureTitle().exists()).toBe(false);
});
describe('when group page', () => {
beforeEach(() => {
createStore({ isGroupPage: true });
@ -201,6 +207,10 @@ describe('packages_list_app', () => {
expect(findInfrastructureSearch().exists()).toBe(true);
});
it('shows infrastructure registry title', () => {
expect(findInfrastructureTitle().exists()).toBe(true);
});
it('on update fetches data from the store', () => {
store.dispatch.mockClear();

View File

@ -140,7 +140,6 @@ describe('Merge request merge checks component', () => {
${'conflict'} | ${'conflict'}
${'discussions_not_resolved'} | ${'discussions_not_resolved'}
${'need_rebase'} | ${'need_rebase'}
${'policies_denied'} | ${'default'}
`('renders $identifier merge check', async ({ identifier, componentName }) => {
shallowMountComponent({ mergeabilityChecks: [{ status: 'failed', identifier }] });

View File

@ -790,18 +790,6 @@ RSpec.describe DiffHelper, feature_category: :code_review_workflow do
)
end
context 'when cached_conflicts_with_types is disabled' do
before do
stub_feature_flags(cached_conflicts_with_types: false)
end
it 'still calls MergeRequests::Conflicts::ListService' do
expect(MergeRequests::Conflicts::ListService).to receive(:new)
helper.conflicts_with_types
end
end
context 'when source branch SHA changes' do
before do
allow(merge_request).to receive(:source_branch_sha).and_return('123abc')

View File

@ -55,6 +55,7 @@ RSpec.describe IntegrationsHelper, feature_category: :integrations do
let(:fields) do
[
:id,
:project_id,
:show_active,
:activated,
:activate_disabled,

View File

@ -31,7 +31,7 @@ RSpec.describe Projects::AlertManagementHelper do
'enable-alert-management-path' => setting_path,
'alerts-help-url' => 'http://test.host/help/operations/incident_management/alerts.md',
'populating-alerts-help-url' => 'http://test.host/help/operations/incident_management/integrations.md#configuration',
'empty-alert-svg-path' => match_asset_path('/assets/illustrations/alert-management-empty-state.svg'),
'empty-alert-svg-path' => match_asset_path('/assets/illustrations/empty-state/empty-scan-alert-md.svg'),
'user-can-enable-alert-management' => 'true',
'alert-management-enabled' => 'false',
'text-query': nil,

View File

@ -0,0 +1,53 @@
# frozen_string_literal: true
require 'spec_helper'
RSpec.describe Gitlab::DataBuilder::ResourceAccessToken, feature_category: :system_access do
let_it_be_with_reload(:user) { create(:user, :project_bot) }
let(:event) { :expiring }
let(:personal_access_token) { create(:personal_access_token, user: user) }
let(:data) { described_class.build(personal_access_token, event, resource) }
shared_examples 'includes standard data' do
specify do
expect(data[:object_attributes]).to eq(personal_access_token.hook_attrs)
expect(data[:object_kind]).to eq('access_token')
end
end
context 'when token belongs to a project' do
let_it_be_with_reload(:resource) { create(:project) }
let_it_be_with_reload(:project) { resource }
before_all do
resource.add_developer(user)
end
it_behaves_like 'includes standard data'
it_behaves_like 'project hook data'
it "contains project data" do
expect(data).to have_key(:project)
expect(data[:event_name]).to eq("expiring_access_token")
end
end
context 'when token belongs to a group' do
let_it_be_with_reload(:resource) { create(:group) }
before_all do
resource.add_developer(user)
end
it_behaves_like 'includes standard data'
it "contains group data" do
expect(data[:group]).to eq({
group_name: resource.name,
group_path: resource.path,
group_id: resource.id
})
expect(data[:event_name]).to eq("expiring_access_token")
end
end
end

View File

@ -7,7 +7,7 @@ RSpec.describe Gitlab::Database::MigrationHelpers::V2, feature_category: :databa
include Database::TableSchemaHelpers
let(:migration) do
ActiveRecord::Migration.new.extend(described_class)
Gitlab::Database::Migration[2.0].new.extend(described_class)
end
before do
@ -346,7 +346,7 @@ RSpec.describe Gitlab::Database::MigrationHelpers::V2, feature_category: :databa
describe '#with_lock_retries' do
let(:model) do
ActiveRecord::Migration.new.extend(described_class)
Gitlab::Database::Migration::V2_0.new.extend(described_class)
end
let(:buffer) { StringIO.new }
@ -380,7 +380,7 @@ RSpec.describe Gitlab::Database::MigrationHelpers::V2, feature_category: :databa
model.with_lock_retries(env: env, logger: in_memory_logger) {}
end
it 'defaults to disallowing subtransactions' do
it 'defaults to disallowing sub-transactions' do
with_lock_retries = double
expect(Gitlab::Database::WithLockRetries).to receive(:new).with(hash_including(allow_savepoints: false)).and_return(with_lock_retries)
expect(with_lock_retries).to receive(:run).with(raise_on_exhaustion: false)
@ -393,9 +393,9 @@ RSpec.describe Gitlab::Database::MigrationHelpers::V2, feature_category: :databa
allow(model).to receive(:transaction_open?).and_return(true)
end
context 'when lock retries are enabled' do
context 'with WithLockRetries already used' do
before do
allow(model).to receive(:enable_lock_retries?).and_return(true)
allow(model).to receive(:with_lock_retries_used?).and_return(true)
end
it 'does not use Gitlab::Database::WithLockRetries and executes the provided block directly' do
@ -405,13 +405,24 @@ RSpec.describe Gitlab::Database::MigrationHelpers::V2, feature_category: :databa
end
end
context 'when lock retries are not enabled' do
context 'without WithLockRetries being used' do
before do
allow(model).to receive(:enable_lock_retries?).and_return(false)
allow(model).to receive(:with_lock_retries_used?).and_return(false)
end
it 'raises an error' do
expect { model.with_lock_retries(env: env, logger: in_memory_logger) {} }.to raise_error /can not be run inside an already open transaction/
let(:error_msg) do
<<~MESSAGE
with_lock_retries can not be run inside an already open transaction.
Lock retries are enabled by default for transactional migrations, so this can be run without `with_lock_retries`.
For more details, see: https://docs.gitlab.com/ee/development/migration_style_guide.html#transactional-migrations
MESSAGE
end
it 'raises an exception' do
expect do
model.with_lock_retries(env: env, logger: in_memory_logger) {}
end.to raise_error(error_msg)
end
end
end

View File

@ -2,7 +2,7 @@
require 'spec_helper'
RSpec.describe Gitlab::Database::Migration do
RSpec.describe Gitlab::Database::Migration, feature_category: :database do
describe '.[]' do
context 'version: 1.0' do
subject { described_class[1.0] }
@ -45,19 +45,19 @@ RSpec.describe Gitlab::Database::Migration do
describe Gitlab::Database::Migration::LockRetriesConcern do
subject { class_def.new }
context 'when not explicitly called' do
let(:class_def) do
Class.new do
include Gitlab::Database::Migration::LockRetriesConcern
end
let(:class_def) do
Class.new do
include Gitlab::Database::Migration::LockRetriesConcern
end
end
it 'does not disable lock retries by default' do
context 'when not explicitly called' do
it 'does not enable lock retries' do
expect(subject.enable_lock_retries?).not_to be_truthy
end
end
context 'when explicitly disabled' do
context 'when explicitly called' do
let(:class_def) do
Class.new do
include Gitlab::Database::Migration::LockRetriesConcern
@ -66,9 +66,21 @@ RSpec.describe Gitlab::Database::Migration do
end
end
it 'does not disable lock retries by default' do
it 'enables lock retries when used in the class definition' do
expect(subject.enable_lock_retries?).to be_truthy
end
end
describe '#with_lock_retries_used?' do
it 'returns false without using with_lock_retries' do
expect(subject.with_lock_retries_used?).not_to be_truthy
end
it 'returns true on using with_lock_retries' do
subject.with_lock_retries_used!
expect(subject.with_lock_retries_used?).to be_truthy
end
end
end
end

View File

@ -1,7 +1,7 @@
# frozen_string_literal: true
require 'spec_helper'
RSpec.describe Gitlab::Database::Migrations::LockRetryMixin do
RSpec.describe Gitlab::Database::Migrations::LockRetryMixin, feature_category: :database do
describe Gitlab::Database::Migrations::LockRetryMixin::ActiveRecordMigrationProxyLockRetries do
let(:connection) { ActiveRecord::Base.connection }
let(:migration) { double(connection: connection) }
@ -18,18 +18,28 @@ RSpec.describe Gitlab::Database::Migrations::LockRetryMixin do
end
end
describe '#enable_lock_retries?' do
subject { class_def.new(migration).enable_lock_retries? }
shared_examples 'delegable' do |method|
subject { class_def.new(migration).public_send(method) }
it 'delegates to #migration' do
expect(migration).to receive(:enable_lock_retries?).and_return(return_value)
it 'delegates to migration' do
expect(migration).to receive(method).and_return(return_value)
result = subject
expect(result).to eq(return_value)
expect(subject).to eq(return_value)
end
end
describe '#enable_lock_retries?' do
it_behaves_like 'delegable', :enable_lock_retries?
end
describe '#with_lock_retries_used!' do
it_behaves_like 'delegable', :with_lock_retries_used!
end
describe '#with_lock_retries_used?' do
it_behaves_like 'delegable', :with_lock_retries_used?
end
describe '#migration_class' do
subject { class_def.new(migration).migration_class }
@ -82,7 +92,7 @@ RSpec.describe Gitlab::Database::Migrations::LockRetryMixin do
end
context 'with transactions disabled' do
let(:migration) { double('migration', enable_lock_retries?: false) }
let(:migration) { double('migration') }
let(:receiver) { double('receiver', use_transaction?: false) }
it 'calls super method' do
@ -94,33 +104,37 @@ RSpec.describe Gitlab::Database::Migrations::LockRetryMixin do
end
end
context 'with transactions enabled, but lock retries disabled' do
let(:receiver) { double('receiver', use_transaction?: true) }
let(:migration) { double('migration', enable_lock_retries?: false) }
it 'calls super method' do
p = proc {}
expect(receiver).to receive(:ddl_transaction).with(migration, &p)
subject.ddl_transaction(migration, &p)
context 'with transactions enabled' do
before do
allow(migration).to receive(:migration_connection).and_return(connection)
end
end
context 'with transactions enabled and lock retries enabled' do
let(:receiver) { double('receiver', use_transaction?: true) }
let(:migration) { double('migration', migration_connection: connection, enable_lock_retries?: true) }
let(:migration) do
Class.new(Gitlab::Database::Migration[2.2]) do
milestone 16.10
def change
# no-op
end
end.new
end
let(:connection) { ActiveRecord::Base.connection }
it 'calls super method' do
it 'calls super method and sets with_lock_retries_used! on the migration' do
p = proc {}
expect(receiver).not_to receive(:ddl_transaction)
expect_next_instance_of(Gitlab::Database::WithLockRetries) do |retries|
expect(retries).to receive(:run).with(raise_on_exhaustion: false, &p)
end
subject.ddl_transaction(migration, &p)
expect(migration.with_lock_retries_used?).to be_truthy
end
end
end

View File

@ -0,0 +1,25 @@
# frozen_string_literal: true
require 'spec_helper'
RSpec.describe Gitlab::HookData::ResourceAccessTokenBuilder, feature_category: :system_access do
let_it_be(:personal_access_token) { create(:personal_access_token, user: create(:user)) }
let(:builder) { described_class.new(personal_access_token) }
describe '#build' do
let(:data) { builder.build }
it 'includes safe attributes' do
expect(data.keys).to match_array(
%w[
user_id
created_at
id
name
expires_at
]
)
end
end
end

View File

@ -65,6 +65,7 @@ RSpec.describe Gitlab::ImportExport::Project::RelationFactory, :use_clean_rails_
'wiki_page_events' => true,
'releases_events' => false,
'emoji_events' => false,
'resource_access_token_events' => false,
'token' => token
}
end

View File

@ -609,6 +609,7 @@ ProjectHook:
- repository_update_events
- releases_events
- emoji_events
- resource_access_token_events
ProtectedBranch:
- id
- project_id

View File

@ -322,60 +322,13 @@ RSpec.describe Gitlab::InternalEvents, :snowplow, feature_category: :product_ana
context 'when there are multiple unique keys' do
let(:property_names) { [:project, :user] }
before do
stub_feature_flags(redis_hll_property_name_tracking: property_name_flag_enabled)
end
it 'all of them are used when logging to RedisHLL', :aggregate_failures do
described_class.track_event(event_name, user: user, project: project)
context "with the property_name tracking feature flag enabled" do
let(:property_name_flag_enabled) { true }
it 'all of them are used when logging to RedisHLL', :aggregate_failures do
described_class.track_event(event_name, user: user, project: project)
expect_redis_tracking
expect_redis_hll_tracking(user.id, :user)
expect_redis_hll_tracking(project.id, :project)
expect_snowplow_tracking
end
end
context "with the property_name tracking feature flag disabled" do
let(:property_name_flag_enabled) { false }
context "with multiple property_names defined" do
it 'logs an error', :aggregate_failures do
described_class.track_event(event_name, user: user, project: project)
expect(Gitlab::ErrorTracking).to have_received(:track_and_raise_for_dev_exception).with(
Gitlab::InternalEvents::EventDefinitions::InvalidMetricConfiguration, anything
)
expect(Gitlab::UsageDataCounters::HLLRedisCounter).not_to have_received(:track_event)
end
end
context "with single property_names defined" do
let(:property_names) { [:project] }
it 'logs to RedisHLL only once' do
described_class.track_event(event_name, user: user, project: project)
expect(Gitlab::UsageDataCounters::HLLRedisCounter).to have_received(:track_event).once
end
end
context "when event_name is user_visited_dashboard" do
let(:event_name) { 'user_visited_dashboard' }
it 'logs to RedisHLL only once with user_id' do
# make it defined also on FOSS tests
allow(Gitlab::InternalEvents::EventDefinitions).to receive(:known_event?).with(event_name).and_return(true)
described_class.track_event(event_name, user: user, project: project)
expect(Gitlab::UsageDataCounters::HLLRedisCounter).to have_received(:track_event).once
.with(event_name, values: user.id, property_name: :user)
end
end
expect_redis_tracking
expect_redis_hll_tracking(user.id, :user)
expect_redis_hll_tracking(project.id, :project)
expect_snowplow_tracking
end
end

View File

@ -7,8 +7,6 @@ RSpec.describe Gitlab::Usage::Metrics::Instrumentations::AggregatedMetric, :clea
using RSpec::Parameterized::TableSyntax
before do
stub_feature_flags(redis_hll_property_name_tracking: property_name_flag_enabled)
redis_counter_class = Gitlab::UsageDataCounters::HLLRedisCounter
# weekly AND 1 weekly OR 2
@ -43,13 +41,11 @@ RSpec.describe Gitlab::Usage::Metrics::Instrumentations::AggregatedMetric, :clea
)
end
where(:data_source, :time_frame, :attribute, :expected_value, :property_name_flag_enabled) do
'redis_hll' | '28d' | 'user_id' | 3 | true
'redis_hll' | '28d' | 'user_id' | 4 | false
'redis_hll' | '28d' | 'project_id' | 4 | false
'redis_hll' | '7d' | 'user_id' | 2 | true
'redis_hll' | '7d' | 'project_id' | 1 | true
'database' | '7d' | 'user_id' | 3.0 | true
where(:data_source, :time_frame, :attribute, :expected_value) do
'redis_hll' | '28d' | 'user_id' | 3
'redis_hll' | '7d' | 'user_id' | 2
'redis_hll' | '7d' | 'project_id' | 1
'database' | '7d' | 'user_id' | 3.0
end
with_them do
@ -80,7 +76,6 @@ RSpec.describe Gitlab::Usage::Metrics::Instrumentations::AggregatedMetric, :clea
end
context "with not allowed aggregate attribute" do
let(:property_name_flag_enabled) { true }
let(:metric_definition) do
{
data_source: 'redis_hll',

View File

@ -29,11 +29,8 @@ RSpec.describe Gitlab::Usage::Metrics::Instrumentations::RedisHLLMetric, :clean_
context "with events attribute defined" do
let(:expected_value) { 2 }
let(:flag_enabled) { true }
before do
stub_feature_flags(redis_hll_property_name_tracking: flag_enabled)
Gitlab::UsageDataCounters::HLLRedisCounter.track_event(:g_project_management_issue_iteration_changed, values: 1, time: 1.week.ago, property_name: 'user')
Gitlab::UsageDataCounters::HLLRedisCounter.track_event(:g_project_management_issue_iteration_changed, values: 2, time: 2.weeks.ago, property_name: 'user')
Gitlab::UsageDataCounters::HLLRedisCounter.track_event(:g_project_management_issue_iteration_changed, values: 1, time: 2.weeks.ago, property_name: 'user')
@ -42,13 +39,6 @@ RSpec.describe Gitlab::Usage::Metrics::Instrumentations::RedisHLLMetric, :clean_
it_behaves_like 'a correct instrumented metric value', { time_frame: '28d', events: [name: 'g_project_management_issue_iteration_changed', unique: 'user.id'] }
context "with feature flag disabled" do
let(:expected_value) { 3 }
let(:flag_enabled) { false }
it_behaves_like 'a correct instrumented metric value', { time_frame: '28d', events: [name: 'g_project_management_issue_iteration_changed', unique: 'user.id'] }
end
context "with events having different `unique` values" do
let(:expected_value) { 3 }
let(:flag_enabled) { false }

View File

@ -92,7 +92,6 @@ RSpec.describe Gitlab::UsageDataCounters::HLLRedisCounter, :clean_gitlab_redis_s
let(:productivity_category) { 'productivity' }
let(:analytics_category) { 'analytics' }
let(:other_category) { 'other' }
let(:property_name_flag_enabled) { false }
let(:known_events) do
[
@ -109,7 +108,6 @@ RSpec.describe Gitlab::UsageDataCounters::HLLRedisCounter, :clean_gitlab_redis_s
before do
skip_default_enabled_yaml_check
allow(described_class).to receive(:known_events).and_return(known_events)
stub_feature_flags(redis_hll_property_name_tracking: property_name_flag_enabled)
end
describe '.track_event' do
@ -209,53 +207,25 @@ RSpec.describe Gitlab::UsageDataCounters::HLLRedisCounter, :clean_gitlab_redis_s
described_class.track_event(event_name, values: entity1)
end
end
context "with the file including overrides" do
let(:overrides_file_content) { "#{event_name}1: new_key2\n#{event_name}: new_key" }
context "when the event is included in overrides file" do
it "tracks the events using overridden Redis key" do
expected_key = "{hll_counters}_new_key-2020-23"
expect(Gitlab::Redis::HLL).to receive(:add).with(hash_including(key: expected_key))
described_class.track_event(:g_analytics_contribution, values: entity1)
end
end
context "when the event is not included in overrides file" do
let(:not_overridden_name) { "g_compliance_dashboard" }
it "tracks the events using original Redis key" do
expected_key = "{hll_counters}_#{not_overridden_name}-2020-23"
expect(Gitlab::Redis::HLL).to receive(:add).with(hash_including(key: expected_key))
described_class.track_event(not_overridden_name, values: entity1, property_name: :user)
end
end
end
end
describe "property_name" do
context "with enabled feature flag" do
let(:property_name_flag_enabled) { true }
context "with a property_name for an overridden event" do
context "with a property_name sent as a symbol" do
it "tracks the events using the Redis key override" do
expected_key = "{hll_counters}_#{event_overridden_for_user}-2020-23"
expect(Gitlab::Redis::HLL).to receive(:add).with(hash_including(key: expected_key))
context "with a property_name for an overridden event" do
context "with a property_name sent as a symbol" do
it "tracks the events using the Redis key override" do
expected_key = "{hll_counters}_#{event_overridden_for_user}-2020-23"
expect(Gitlab::Redis::HLL).to receive(:add).with(hash_including(key: expected_key))
described_class.track_event(event_overridden_for_user, values: entity1, property_name: :user)
end
described_class.track_event(event_overridden_for_user, values: entity1, property_name: :user)
end
end
context "with a property_name sent in string format" do
it "tracks the events using the Redis key override" do
expected_key = "{hll_counters}_#{event_overridden_for_user}-2020-23"
expect(Gitlab::Redis::HLL).to receive(:add).with(hash_including(key: expected_key))
context "with a property_name sent in string format" do
it "tracks the events using the Redis key override" do
expected_key = "{hll_counters}_#{event_overridden_for_user}-2020-23"
expect(Gitlab::Redis::HLL).to receive(:add).with(hash_including(key: expected_key))
described_class.track_event(event_overridden_for_user, values: entity1, property_name: 'user.id')
end
described_class.track_event(event_overridden_for_user, values: entity1, property_name: 'user.id')
end
end
@ -301,28 +271,6 @@ RSpec.describe Gitlab::UsageDataCounters::HLLRedisCounter, :clean_gitlab_redis_s
end
end
end
context "with disabled feature flag" do
it "uses old Redis key for overridden events" do
expected_key = "{hll_counters}_#{event_overridden_for_user}-2020-23"
expect(Gitlab::Redis::HLL).to receive(:add).with(hash_including(key: expected_key))
described_class.track_event(event_overridden_for_user, values: entity1, property_name: 'user')
end
it "uses old Redis key for new events" do
expected_key = "{hll_counters}_#{no_slot}-2020-23"
expect(Gitlab::Redis::HLL).to receive(:add).with(hash_including(key: expected_key))
described_class.track_event(no_slot, values: entity1, property_name: 'project')
end
it "raises an error for new events when no property_name sent" do
expect do
described_class.track_event(no_slot, values: entity1)
end.to raise_error(described_class::UnknownLegacyEventError, /hll_redis_legacy_events.yml/)
end
end
end
end
@ -393,93 +341,61 @@ RSpec.describe Gitlab::UsageDataCounters::HLLRedisCounter, :clean_gitlab_redis_s
end
describe "property_names" do
before do
stub_feature_flags(redis_hll_property_name_tracking: property_name_flag_enabled)
end
context "with enabled feature flag" do
let(:property_name_flag_enabled) { true }
context "with a property_name for an overridden event" do
context "with a property_name sent as a symbol" do
it "tracks the events using the Redis key override" do
expected_key = "{hll_counters}_#{event_overridden_for_user}-2020-22"
expect(Gitlab::Redis::HLL).to receive(:count).with(keys: [expected_key])
described_class.unique_events(event_names: [event_overridden_for_user], property_name: :user, start_date: 7.days.ago, end_date: Date.current)
end
end
context "with a property_name sent in string format" do
it "tracks the events using the Redis key override" do
expected_key = "{hll_counters}_#{event_overridden_for_user}-2020-22"
expect(Gitlab::Redis::HLL).to receive(:count).with(keys: [expected_key])
described_class.unique_events(event_names: [event_overridden_for_user], property_name: 'user.id', start_date: 7.days.ago, end_date: Date.current)
end
end
end
context "with a property_name for an overridden event that doesn't include this property_name" do
it "tracks the events using a Redis key with the property_name" do
expected_key = "{hll_counters}_#{no_slot}-user-2020-22"
context "with a property_name for an overridden event" do
context "with a property_name sent as a symbol" do
it "tracks the events using the Redis key override" do
expected_key = "{hll_counters}_#{event_overridden_for_user}-2020-22"
expect(Gitlab::Redis::HLL).to receive(:count).with(keys: [expected_key])
described_class.unique_events(event_names: [no_slot], property_name: 'user', start_date: 7.days.ago, end_date: Date.current)
described_class.unique_events(event_names: [event_overridden_for_user], property_name: :user, start_date: 7.days.ago, end_date: Date.current)
end
end
context "with a property_name for a new event" do
it "tracks the events using a Redis key with the property_name" do
expected_key = "{hll_counters}_#{no_slot}-project-2020-22"
context "with a property_name sent in string format" do
it "tracks the events using the Redis key override" do
expected_key = "{hll_counters}_#{event_overridden_for_user}-2020-22"
expect(Gitlab::Redis::HLL).to receive(:count).with(keys: [expected_key])
described_class.unique_events(event_names: [no_slot], property_name: 'project', start_date: 7.days.ago, end_date: Date.current)
end
end
context "with a property_name for a legacy event" do
it "raises an error with an instructive message" do
expect do
described_class.unique_events(event_names: 'g_analytics_productivity', property_name: 'project', start_date: 7.days.ago, end_date: Date.current)
end.to raise_error(described_class::UnfinishedEventMigrationError, /migration\.html/)
end
end
context "with no property_name for a overridden event" do
it "raises an error with an instructive message" do
expect do
described_class.unique_events(event_names: [event_overridden_for_user], start_date: 7.days.ago, end_date: Date.current)
end.to raise_error(described_class::UnknownLegacyEventError, /hll_redis_legacy_events.yml/)
end
end
context "with no property_name for a new event" do
it "raises an error with an instructive message" do
expect do
described_class.unique_events(event_names: [no_slot], start_date: 7.days.ago, end_date: Date.current)
end.to raise_error(described_class::UnknownLegacyEventError, /hll_redis_legacy_events.yml/)
described_class.unique_events(event_names: [event_overridden_for_user], property_name: 'user.id', start_date: 7.days.ago, end_date: Date.current)
end
end
end
context "with disabled feature flag" do
let(:property_name_flag_enabled) { false }
it "uses old Redis key for overridden events" do
expected_key = "{hll_counters}_#{event_overridden_for_user}-2020-22"
context "with a property_name for an overridden event that doesn't include this property_name" do
it "tracks the events using a Redis key with the property_name" do
expected_key = "{hll_counters}_#{no_slot}-user-2020-22"
expect(Gitlab::Redis::HLL).to receive(:count).with(keys: [expected_key])
described_class.unique_events(event_names: [event_overridden_for_user], property_name: 'user', start_date: 7.days.ago, end_date: Date.current)
described_class.unique_events(event_names: [no_slot], property_name: 'user', start_date: 7.days.ago, end_date: Date.current)
end
end
it "uses old Redis key for new events" do
expected_key = "{hll_counters}_#{no_slot}-2020-22"
context "with a property_name for a new event" do
it "tracks the events using a Redis key with the property_name" do
expected_key = "{hll_counters}_#{no_slot}-project-2020-22"
expect(Gitlab::Redis::HLL).to receive(:count).with(keys: [expected_key])
described_class.unique_events(event_names: [no_slot], property_name: 'project', start_date: 7.days.ago, end_date: Date.current)
end
end
context "with a property_name for a legacy event" do
it "raises an error with an instructive message" do
expect do
described_class.unique_events(event_names: 'g_analytics_productivity', property_name: 'project', start_date: 7.days.ago, end_date: Date.current)
end.to raise_error(described_class::UnfinishedEventMigrationError, /migration\.html/)
end
end
context "with no property_name for a overridden event" do
it "raises an error with an instructive message" do
expect do
described_class.unique_events(event_names: [event_overridden_for_user], start_date: 7.days.ago, end_date: Date.current)
end.to raise_error(described_class::UnknownLegacyEventError, /hll_redis_legacy_events.yml/)
end
end
context "with no property_name for a new event" do
it "raises an error with an instructive message" do
expect do
described_class.unique_events(event_names: [no_slot], start_date: 7.days.ago, end_date: Date.current)

View File

@ -55,7 +55,7 @@ RSpec.describe SensitiveSerializableHash do
expect(model.attributes).to include(attribute) # double-check the attribute does exist
expect(model.serializable_hash).not_to include(attribute)
expect(model.to_json).not_to include(attribute)
expect(model.to_json).not_to include(attribute.to_json)
expect(model.as_json).not_to include(attribute)
end
end

View File

@ -6357,6 +6357,14 @@ RSpec.describe Project, factory_default: :keep, feature_category: :groups_and_pr
expect(project.has_active_hooks?(:emoji_hooks)).to eq(true)
end
end
context 'with :access_token_hooks scope' do
it 'returns true when a matching access token hook exists' do
create(:project_hook, resource_access_token_events: true, project: project)
expect(project.has_active_hooks?(:resource_access_token_hooks)).to eq(true)
end
end
end
describe '#has_active_integrations?' do

View File

@ -41,6 +41,28 @@ RSpec.describe Projects::TriggeredHooks, feature_category: :webhooks do
run_hooks(:push_hooks, data)
end
context 'with access token hooks' do
let_it_be(:resource_access_token_hook) { create(:project_hook, project: project, resource_access_token_events: true) }
it 'executes hook' do
expect_hook_execution(resource_access_token_hook, data, 'resource_access_token_hooks')
run_hooks(:resource_access_token_hooks, data)
end
context 'when access_tokens_webhooks feature flag is disabled' do
before do
stub_feature_flags(access_tokens_webhooks: false)
end
it 'does not execute the hook' do
expect(WebHookService).not_to receive(:new)
run_hooks(:resource_access_token_hooks, data)
end
end
end
context 'with emoji hooks' do
let_it_be(:emoji_hook) { create(:project_hook, project: project, emoji_events: true) }
@ -61,18 +83,6 @@ RSpec.describe Projects::TriggeredHooks, feature_category: :webhooks do
run_hooks(:emoji_hooks, data)
end
end
context 'when emoji_webhooks feature flag is enabled for the project' do
before do
stub_feature_flags(emoji_webhooks: emoji_hook.project)
end
it 'executes the hook' do
expect_hook_execution(emoji_hook, data, 'emoji_hooks')
run_hooks(:emoji_hooks, data)
end
end
end
def expect_hook_execution(hook, data, scope)

View File

@ -1433,6 +1433,26 @@ RSpec.describe User, feature_category: :user_profile do
end
end
describe '.with_personal_access_tokens_and_resources' do
let_it_be(:user1) { create(:user) }
let_it_be(:user2) { create(:user) }
let_it_be(:user3) { create(:user) }
subject(:users) { described_class.with_personal_access_tokens_and_resources }
it 'includes expiring personal access tokens' do
expect(users.first.personal_access_tokens).to be_loaded
end
it 'includes groups' do
expect(users.first.groups).to be_loaded
end
it 'includes projects' do
expect(users.first.projects).to be_loaded
end
end
describe '.active_without_ghosts' do
let_it_be(:user1) { create(:user, :external) }
let_it_be(:user2) { create(:user, state: 'blocked') }

View File

@ -60,6 +60,7 @@ RSpec.describe API::ProjectHooks, 'ProjectHooks', feature_category: :webhooks do
deployment_events
releases_events
emoji_events
resource_access_token_events
]
end

View File

@ -228,5 +228,17 @@ RSpec.describe TestHooks::ProjectService, feature_category: :code_testing do
expect(service.execute).to include(success_result)
end
end
context 'when resource access token events hook' do
let(:trigger) { 'resource_access_token_events' }
let(:trigger_key) { :resource_access_token_hooks }
it 'executes hook' do
allow(Gitlab::DataBuilder::ResourceAccessToken).to receive(:build).and_return(sample_data)
expect(hook).to receive(:execute).with(sample_data, trigger_key, force: true).and_return(success_result)
expect(service.execute).to include(success_result)
end
end
end
end

View File

@ -5,6 +5,21 @@ require 'spec_helper'
RSpec.describe PersonalAccessTokens::ExpiringWorker, type: :worker, feature_category: :system_access do
subject(:worker) { described_class.new }
shared_examples 'sends notification about expiry of bot user tokens' do
it 'uses notification service to send the email' do
expect_next_instance_of(NotificationService) do |notification_service|
expect(notification_service).to receive(:resource_access_tokens_about_to_expire)
.with(project_bot, expiring_token.name)
end
worker.perform
end
it 'marks the notification as delivered' do
expect { worker.perform }.to change { expiring_token.reload.expire_notification_delivered }.from(false).to(true)
end
end
describe '#perform' do
context 'when a token needs to be notified' do
let_it_be(:user) { create(:user) }
@ -36,9 +51,15 @@ RSpec.describe PersonalAccessTokens::ExpiringWorker, type: :worker, feature_cate
create(:personal_access_token, user: user2, expires_at: 5.days.from_now)
# Query count increased for the user look up
# there are still 2 N+1 queries one for token name look up and another for token update.
# there are still two N+1 queries one for token name look up and another for token update.
expect { worker.perform }.not_to exceed_all_query_limit(control).with_threshold(2)
end
it 'does not execute webhook' do
expect(::Projects::TriggeredHooks).not_to receive(:execute)
worker.perform
end
end
context 'when no tokens need to be notified' do
@ -70,30 +91,65 @@ RSpec.describe PersonalAccessTokens::ExpiringWorker, type: :worker, feature_cate
end
context 'when a token is owned by a project bot' do
let_it_be(:maintainer1) { create(:user) }
let_it_be(:maintainer2) { create(:user) }
let_it_be(:project_bot) { create(:user, :project_bot) }
let_it_be(:project) { create(:project) }
let_it_be(:expiring_token) { create(:personal_access_token, user: project_bot, expires_at: 5.days.from_now) }
let(:fake_wh_service) { double }
before_all do
project.add_developer(project_bot)
project.add_maintainer(maintainer1)
project.add_maintainer(maintainer2)
end
it 'uses notification service to send the email' do
expect_next_instance_of(NotificationService) do |notification_service|
expect(notification_service).to receive(:resource_access_tokens_about_to_expire)
.with(project_bot, match_array([expiring_token.name]))
end
it_behaves_like 'sends notification about expiry of bot user tokens'
it 'executes access token webhook' do
hook_data = {}
project_hook = create(:project_hook, project: project, resource_access_token_events: true)
expect(Gitlab::DataBuilder::ResourceAccessToken).to receive(:build).and_return(hook_data)
expect(fake_wh_service).to receive(:async_execute).once
expect(WebHookService)
.to receive(:new).with(project_hook, {}, 'resource_access_token_hooks') { fake_wh_service }
worker.perform
end
it 'marks the notification as delivered' do
expect { worker.perform }.to change { expiring_token.reload.expire_notification_delivered }.from(false).to(true)
it 'avoids N+1 queries', :use_sql_query_cache do
control = ActiveRecord::QueryRecorder.new(skip_cached: false) { worker.perform }
user1 = create(:user, :project_bot, developer_projects: [project])
create(:personal_access_token, user: user1, expires_at: 5.days.from_now)
user2 = create(:user, :project_bot, developer_projects: [project])
create(:personal_access_token, user: user2, expires_at: 5.days.from_now)
expect { worker.perform }.not_to exceed_all_query_limit(control)
end
context 'when access_tokens_webhooks feature is disabled' do
before do
stub_feature_flags(access_tokens_webhooks: false)
end
it "does not execute access token webhook" do
expect(::Projects::TriggeredHooks).not_to receive(:execute)
worker.perform
end
end
end
context 'when a token is owned by a group bot' do
let_it_be(:project_bot) { create(:user, :project_bot) }
let_it_be(:group) { create(:group) }
let_it_be(:expiring_token) { create(:personal_access_token, user: project_bot, expires_at: 5.days.from_now) }
before_all do
group.add_developer(project_bot)
end
it_behaves_like 'sends notification about expiry of bot user tokens'
end
end
end

View File

@ -22,7 +22,7 @@ require (
github.com/sirupsen/logrus v1.9.3
github.com/smartystreets/goconvey v1.8.1
github.com/stretchr/testify v1.8.4
gitlab.com/gitlab-org/gitaly/v16 v16.9.0-rc3
gitlab.com/gitlab-org/gitaly/v16 v16.9.1
gitlab.com/gitlab-org/labkit v1.21.0
gocloud.dev v0.36.0
golang.org/x/image v0.14.0

View File

@ -442,8 +442,8 @@ github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
github.com/yusufpapurcu/wmi v1.2.2 h1:KBNDSne4vP5mbSWnJbO+51IMOXJB67QiYCSBrubbPRg=
github.com/yusufpapurcu/wmi v1.2.2/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
gitlab.com/gitlab-org/gitaly/v16 v16.9.0-rc3 h1:ln4coycTiIuE3Gn1fpvOtywBQqlDINOuIoXVxortSzY=
gitlab.com/gitlab-org/gitaly/v16 v16.9.0-rc3/go.mod h1:gVfBijGmC5ORMvoXpN8TwBumKvKMA0zzm91Tnv5fR1w=
gitlab.com/gitlab-org/gitaly/v16 v16.9.1 h1:XFoZyeV3HCGIB6pnQj6r5ZYsUYa34HoVOyod7XegIdI=
gitlab.com/gitlab-org/gitaly/v16 v16.9.1/go.mod h1:K2zAXwDEEwnm9NLxboCllREyS4Rx1yRBRxLBC0EcakA=
gitlab.com/gitlab-org/labkit v1.21.0 h1:hLmdBDtXjD1yOmZ+uJOac3a5Tlo83QaezwhES4IYik4=
gitlab.com/gitlab-org/labkit v1.21.0/go.mod h1:zeATDAaSBelPcPLbTTq8J3ZJEHyPTLVBM1q3nva+/W4=
go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=

View File

@ -1321,10 +1321,10 @@
stylelint-declaration-strict-value "1.10.4"
stylelint-scss "6.0.0"
"@gitlab/svgs@3.88.0":
version "3.88.0"
resolved "https://registry.yarnpkg.com/@gitlab/svgs/-/svgs-3.88.0.tgz#f57bd0ea94866038b19d94a2fa815d54f10c2c09"
integrity sha512-j2C+Ddt2LcJTf2hQZZ6sybsiQ/mMnZG4wKmJfM5YjXR5qPVaAUyH+MHnvPGcHnt+tMYr2P52zlcpsQa6WB5xeQ==
"@gitlab/svgs@3.89.0":
version "3.89.0"
resolved "https://registry.yarnpkg.com/@gitlab/svgs/-/svgs-3.89.0.tgz#4f7eb0babe18e5a2320a43e665d8901f906fb640"
integrity sha512-utf2MjgLwlhhv4ltHkGIJyO278EKXtXC76Kz4DiEgt7ECTsA+otj9hDCXaA6+8kCA1bLTICVJM/RiAS3Iu9TBw==
"@gitlab/ui@78.1.1":
version "78.1.1"