Add latest changes from gitlab-org/gitlab@master

This commit is contained in:
GitLab Bot 2025-02-04 00:10:03 +00:00
parent 9251853fdd
commit 1b859cccfe
85 changed files with 1839 additions and 236 deletions

View File

@ -64,11 +64,12 @@ praefect:
- .qa
variables:
QA_SCENARIO: Test::Integration::Praefect
QA_SUITE_CLASS_NAME: Test::Instance::Create # overrides default 'All' suite to only run 'Create' group specs
QA_CAN_TEST_PRAEFECT: "true"
QA_GITALY_TRANSACTIONS_ENABLED: "false"
rules:
- !reference [.rules:test:smoke-for-omnibus-mr, rules]
- if: $QA_SUITES =~ /Test::Instance::All/
- if: $QA_SUITES =~ /Test::Instance::Create/
# ========== gitaly transactions enabled ===========
# https://docs.gitlab.com/ee/architecture/blueprints/gitaly_transaction_management/
@ -79,12 +80,13 @@ gitaly-transactions:
- .with-ignored-runtime-data
variables:
QA_SCENARIO: Test::Integration::Praefect
QA_SUITE_CLASS_NAME: Test::Instance::Create # overrides default 'All' suite to only run 'Create' group specs
QA_CAN_TEST_PRAEFECT: "true"
QA_GITALY_TRANSACTIONS_ENABLED: "true"
QA_RUN_IN_PARALLEL: true
QA_PARALLEL_PROCESSES: 6
rules:
- if: $QA_SUITES =~ /Test::Instance::All/
- if: $QA_SUITES =~ /Test::Instance::Create/
# ========== gitaly reftables backend ===========
# Verifies that E2E tests that interact with the gitaly backend work when the reftables backend is in use
@ -97,10 +99,11 @@ gitaly-reftables-backend:
- .with-ignored-runtime-data
variables:
QA_SCENARIO: Test::Integration::Praefect
QA_SUITE_CLASS_NAME: Test::Instance::Create # overrides default 'All' suite to only run 'Create' group specs
QA_CAN_TEST_PRAEFECT: "true"
QA_FEATURE_FLAGS: "gitaly_new_repo_reftable_backend=enabled"
rules:
- if: $QA_SUITES =~ /Test::Instance::All/
- if: $QA_SUITES =~ /Test::Instance::Create/
# ========== git sha256 enabled ===========
git-sha256-repositories:

View File

@ -528,8 +528,6 @@ group :development, :test do
gem 'gitlab-styles', '~> 13.0.2', feature_category: :tooling
gem 'haml_lint', '~> 0.58', feature_category: :tooling
gem 'bundler-audit', '~> 0.9.1', require: false, feature_category: :shared
# Benchmarking & profiling
gem 'benchmark-ips', '~> 2.11.0', require: false, feature_category: :shared
gem 'benchmark-memory', '~> 0.1', require: false, feature_category: :shared

View File

@ -65,7 +65,6 @@
{"name":"browser","version":"5.3.1","platform":"ruby","checksum":"62745301701ff2c6c5d32d077bb12532b20be261929dcb52c6781ed0d5658b3c"},
{"name":"builder","version":"3.2.4","platform":"ruby","checksum":"99caf08af60c8d7f3a6b004029c4c3c0bdaebced6c949165fe98f1db27fbbc10"},
{"name":"bullet","version":"7.1.2","platform":"ruby","checksum":"429725c174cb74ca0ae99b9720bf22cab80be59ee9401805f7ecc9ac62cbb3bb"},
{"name":"bundler-audit","version":"0.9.1","platform":"ruby","checksum":"bdc716fc21cd8652a6507b137e5bc51f5e0e4f6f106a114ab004c89d0200bd3d"},
{"name":"byebug","version":"11.1.3","platform":"ruby","checksum":"2485944d2bb21283c593d562f9ae1019bf80002143cc3a255aaffd4e9cf4a35b"},
{"name":"capybara","version":"3.40.0","platform":"ruby","checksum":"42dba720578ea1ca65fd7a41d163dd368502c191804558f6e0f71b391054aeef"},
{"name":"capybara-screenshot","version":"1.0.26","platform":"ruby","checksum":"816b9370a07752097c82a05f568aaf5d3b7f45c3db5d3aab2014071e1b3c0c77"},

View File

@ -394,9 +394,6 @@ GEM
bullet (7.1.2)
activesupport (>= 3.0.0)
uniform_notifier (~> 1.11)
bundler-audit (0.9.1)
bundler (>= 1.2.0, < 3)
thor (~> 1.0)
byebug (11.1.3)
capybara (3.40.0)
addressable
@ -2019,7 +2016,6 @@ DEPENDENCIES
bootsnap (~> 1.18.3)
browser (~> 5.3.1)
bullet (~> 7.1.2)
bundler-audit (~> 0.9.1)
bundler-checksum (~> 0.1.0)!
capybara (~> 3.40)
capybara-screenshot (~> 1.0.26)

View File

@ -65,7 +65,6 @@
{"name":"browser","version":"5.3.1","platform":"ruby","checksum":"62745301701ff2c6c5d32d077bb12532b20be261929dcb52c6781ed0d5658b3c"},
{"name":"builder","version":"3.2.4","platform":"ruby","checksum":"99caf08af60c8d7f3a6b004029c4c3c0bdaebced6c949165fe98f1db27fbbc10"},
{"name":"bullet","version":"7.1.2","platform":"ruby","checksum":"429725c174cb74ca0ae99b9720bf22cab80be59ee9401805f7ecc9ac62cbb3bb"},
{"name":"bundler-audit","version":"0.9.1","platform":"ruby","checksum":"bdc716fc21cd8652a6507b137e5bc51f5e0e4f6f106a114ab004c89d0200bd3d"},
{"name":"byebug","version":"11.1.3","platform":"ruby","checksum":"2485944d2bb21283c593d562f9ae1019bf80002143cc3a255aaffd4e9cf4a35b"},
{"name":"capybara","version":"3.40.0","platform":"ruby","checksum":"42dba720578ea1ca65fd7a41d163dd368502c191804558f6e0f71b391054aeef"},
{"name":"capybara-screenshot","version":"1.0.26","platform":"ruby","checksum":"816b9370a07752097c82a05f568aaf5d3b7f45c3db5d3aab2014071e1b3c0c77"},

View File

@ -406,9 +406,6 @@ GEM
bullet (7.1.2)
activesupport (>= 3.0.0)
uniform_notifier (~> 1.11)
bundler-audit (0.9.1)
bundler (>= 1.2.0, < 3)
thor (~> 1.0)
byebug (11.1.3)
capybara (3.40.0)
addressable
@ -2054,7 +2051,6 @@ DEPENDENCIES
bootsnap (~> 1.18.3)
browser (~> 5.3.1)
bullet (~> 7.1.2)
bundler-audit (~> 0.9.1)
bundler-checksum (~> 0.1.0)!
capybara (~> 3.40)
capybara-screenshot (~> 1.0.26)

View File

@ -1,8 +1,16 @@
<script>
import { GlTable, GlFormInput } from '@gitlab/ui';
import { memoize } from 'lodash';
import { __ } from '~/locale';
import { sanitize } from '~/lib/dompurify';
import SafeHtml from '~/vue_shared/directives/safe_html';
const domParser = new DOMParser();
export default {
directives: {
SafeHtml,
},
components: {
GlTable,
GlFormInput,
@ -26,6 +34,11 @@ export default {
required: false,
default: __('Generated with JSON data'),
},
isHtmlSafe: {
type: Boolean,
required: false,
default: false,
},
},
data() {
return {
@ -42,11 +55,25 @@ export default {
key: field.key,
label: field.label,
sortable: field.sortable || false,
sortByFormatted: field.sortable && this.isHtmlSafe ? this.getSortableFieldValue : false,
class: field.class || [],
markdown: field.markdown || false,
};
});
},
},
created() {
this.getSortableFieldValue = memoize((value) => {
const document = domParser.parseFromString(sanitize(value), 'text/html');
return document.documentElement.innerText.trim();
});
},
methods: {
cellSlot(field) {
return `cell(${field.key})`;
},
},
};
</script>
<template>
@ -64,8 +91,13 @@ export default {
show-empty
class="!gl-mt-0"
>
<template v-if="isHtmlSafe" #cell()="data">
<div v-safe-html="data.value"></div>
</template>
<template v-else #cell()="data">{{ data.value }}</template>
<template v-if="caption" #table-caption>
<small>{{ caption }}</small>
<small v-if="isHtmlSafe" v-safe-html="caption"></small>
<small v-else>{{ caption }}</small>
</template>
</gl-table>
</div>

View File

@ -4,7 +4,7 @@ import { renderKroki } from './render_kroki';
import renderMath from './render_math';
import renderSandboxedMermaid from './render_sandboxed_mermaid';
import { renderGlql } from './render_glql';
import { renderJSONTable } from './render_json_table';
import { renderJSONTable, renderJSONTableHTML } from './render_json_table';
function initPopovers(elements) {
if (!elements.length) return;
@ -21,12 +21,23 @@ export function renderGFM(element) {
return;
}
const [highlightEls, krokiEls, mathEls, mermaidEls, tableEls, glqlEls, userEls, popoverEls] = [
const [
highlightEls,
krokiEls,
mathEls,
mermaidEls,
tableEls,
tableHTMLEls,
glqlEls,
userEls,
popoverEls,
] = [
'.js-syntax-highlight',
'.js-render-kroki[hidden]',
'.js-render-math',
'.js-render-mermaid',
'[data-canonical-lang="json"][data-lang-params="table"]',
'table[data-table-fields]',
'[data-canonical-lang="glql"]',
'.gfm-project_member',
'.gfm-issue, .gfm-work_item, .gfm-merge_request, .gfm-epic, .gfm-milestone',
@ -37,6 +48,7 @@ export function renderGFM(element) {
renderMath(mathEls);
renderSandboxedMermaid(mermaidEls);
renderJSONTable(tableEls.map((e) => e.parentNode));
renderJSONTableHTML(tableHTMLEls);
highlightCurrentUser(userEls);
initPopovers(popoverEls);

View File

@ -2,6 +2,7 @@ import { memoize } from 'lodash';
import Vue from 'vue';
import { __ } from '~/locale';
import { createAlert } from '~/alert';
import { parseBoolean } from '~/lib/utils/common_utils';
// Async import component since we might not need it...
const JSONTable = memoize(
@ -29,23 +30,27 @@ const mountParseError = (element) => {
};
const mountJSONTableVueComponent = (userData, element) => {
const { fields = [], items = [], filter, caption } = userData;
const { fields = [], items = [], filter, caption, isHtmlSafe } = userData;
const container = document.createElement('div');
element.innerHTML = '';
element.appendChild(container);
element.classList.add('js-json-table');
element.replaceChildren(container);
const props = {
fields,
items,
hasFilter: filter,
isHtmlSafe,
};
if (caption) {
props.caption = caption;
}
return new Vue({
el: container,
render(h) {
return h(JSONTable, {
props: {
fields,
items,
hasFilter: filter,
caption,
},
});
return h(JSONTable, { props });
},
});
};
@ -56,8 +61,6 @@ const renderTable = (element) => {
return;
}
element.classList.add('js-json-table');
try {
mountJSONTableVueComponent(JSON.parse(element.textContent), element);
} catch (e) {
@ -65,6 +68,45 @@ const renderTable = (element) => {
}
};
// Hydrate a server-rendered markdown table (`<table data-table-fields>`) into the
// interactive JSON-table Vue component, mounted on the table's parent element.
const renderTableHTML = (element) => {
  const wrapper = element.parentElement;
  // Guard: bail out when the element is detached or the wrapper was already upgraded.
  if (!wrapper || wrapper.classList.contains('js-json-table')) {
    return;
  }

  try {
    // Pull the table configuration out of the data attributes rendered by the backend.
    const fields = JSON.parse(element.dataset.tableFields);
    const filter = parseBoolean(element.dataset.tableFilter);
    const markdown = parseBoolean(element.dataset.tableMarkdown);

    // The caption was processed with markdown, so it arrives wrapped in a <p>;
    // unwrap it so it sits semantically inside the component's <small>.
    const captionParagraph = element.querySelector('caption p');
    const caption = captionParagraph ? captionParagraph.innerHTML : null;

    // Rebuild the row objects from the rendered cells, keyed by field key.
    const rows = Array.from(element.querySelectorAll('tbody tr'));
    const items = rows.map((row) => {
      const cells = row.querySelectorAll('td');
      const item = {};
      fields.forEach((field, index) => {
        item[field.key] = cells.item(index).innerHTML;
      });
      return item;
    });

    mountJSONTableVueComponent({ fields, filter, caption, items, isHtmlSafe: markdown }, wrapper);
  } catch (e) {
    // Malformed data attributes or an unexpected table shape: show the parse error UI.
    mountParseError(wrapper);
  }
};
// Mount the JSON-table Vue component for each `lang=json table` code block wrapper.
export const renderJSONTable = (elements) => {
  for (const el of elements) renderTable(el);
};

// Mount the component for each server-rendered `<table data-table-fields>` element.
export const renderJSONTableHTML = (elements) => {
  for (const el of elements) renderTableHTML(el);
};

View File

@ -50,6 +50,9 @@ export const config = {
toReference({ __typename: 'LocalWorkItemChildIsExpanded', id: variables.id }),
},
},
MergeRequestConnection: {
merge: true,
},
DesignManagement: {
merge(existing = {}, incoming) {
return { ...existing, ...incoming };

View File

@ -141,6 +141,7 @@ export default {
initialEmail: { default: '' },
getMergeRequestsQuery: { default: undefined },
getMergeRequestsCountsQuery: { default: undefined },
getMergeRequestsApprovalsQuery: { default: undefined },
isProject: { default: true },
groupId: { default: undefined },
showNewResourceDropdown: { default: undefined },
@ -190,6 +191,29 @@ export default {
return !this.hasAnyMergeRequests || isEmpty(this.pageParams) || !this.getMergeRequestsQuery;
},
},
// The approvals data gets loaded in a separate request so that, if it times out due to
// the large amount of data getting processed on the backend, we can still render the
// merge request list.
// The data here gets stored in the cache and then loaded through the `@client` directives
// in the merge request query.
// eslint-disable-next-line @gitlab/vue-no-undef-apollo-properties
mergeRequestApprovals: {
query() {
return this.getMergeRequestsApprovalsQuery;
},
variables() {
return this.queryVariables;
},
skip() {
return (
!this.hasAnyMergeRequests ||
isEmpty(this.pageParams) ||
!this.getMergeRequestsApprovalsQuery
);
},
manual: true,
result() {},
},
mergeRequestCounts: {
query() {
return this.getMergeRequestsCountsQuery;
@ -842,7 +866,12 @@ export default {
</template>
<template #approval-status="{ issuable = {} }">
<li v-if="issuable.approvalsRequired || issuable.approvedBy.nodes.length" class="!gl-mr-0">
<li
v-if="
issuable.approvalsRequired || (issuable.approvedBy && issuable.approvedBy.nodes.length)
"
class="!gl-mr-0"
>
<approval-count :merge-request="issuable" full-text class="gl-mt-1" />
</li>
</template>

View File

@ -9,6 +9,7 @@ import MoreactionsDropdown from './components/more_actions_dropdown.vue';
export async function mountMergeRequestListsApp({
getMergeRequestsQuery,
getMergeRequestsCountsQuery,
getMergeRequestsApprovalsQuery,
isProject = true,
} = {}) {
const el = document.querySelector('.js-merge-request-list-root');
@ -85,6 +86,7 @@ export async function mountMergeRequestListsApp({
resetPath,
getMergeRequestsQuery,
getMergeRequestsCountsQuery,
getMergeRequestsApprovalsQuery,
isProject,
groupId: groupId ? `${groupId}` : null,
showNewResourceDropdown: parseBoolean(showNewResourceDropdown),

View File

@ -69,6 +69,15 @@ query getGroupMergeRequests(
nodes {
...MergeRequestFragment
reference(full: true)
# This data gets loaded through the `mergeRequestApprovals` request.
# We do it this way so that timeouts while computing approval data
# cannot prevent the whole merge request list from rendering.
approved @client
approvedBy @client {
nodes {
id
}
}
}
}
}

View File

@ -0,0 +1,69 @@
#import "ee_else_ce/merge_request_dashboard/queries/merge_request_approval.fragment.graphql"
# Approval-only companion to the group merge request list query: it fetches just
# the approval fields (MergeRequestApprovalFragment) for the same filtered page
# of merge requests, so slow approval data cannot block rendering the list itself.
query getGroupMergeRequestsApprovals(
$fullPath: ID!
$sort: MergeRequestSort
$state: MergeRequestState
$search: String
$approvedBy: [String!]
$assigneeUsernames: String
$assigneeWildcardId: AssigneeWildcardId
$reviewerUsername: String
$reviewerWildcardId: ReviewerWildcardId
$authorUsername: String
$draft: Boolean
$labelName: [String!]
$releaseTag: String
$mergeUser: String
$milestoneTitle: String
$milestoneWildcardId: MilestoneWildcardId
$myReactionEmoji: String
$sourceBranches: [String!]
$targetBranches: [String!]
$deployedBefore: Time
$deployedAfter: Time
$environmentName: String
$not: MergeRequestsResolverNegatedParams
$beforeCursor: String
$afterCursor: String
$firstPageSize: Int
$lastPageSize: Int
) {
namespace: group(fullPath: $fullPath) {
id
mergeRequests(
sort: $sort
state: $state
search: $search
approvedBy: $approvedBy
assigneeUsername: $assigneeUsernames
assigneeWildcardId: $assigneeWildcardId
reviewerUsername: $reviewerUsername
reviewerWildcardId: $reviewerWildcardId
authorUsername: $authorUsername
draft: $draft
labelName: $labelName
releaseTag: $releaseTag
mergedBy: $mergeUser
milestoneTitle: $milestoneTitle
milestoneWildcardId: $milestoneWildcardId
myReactionEmoji: $myReactionEmoji
sourceBranches: $sourceBranches
targetBranches: $targetBranches
deployedBefore: $deployedBefore
deployedAfter: $deployedAfter
environmentName: $environmentName
not: $not
# Group-level lists include merge requests from subgroup projects.
includeSubgroups: true
before: $beforeCursor
after: $afterCursor
first: $firstPageSize
last: $lastPageSize
) {
nodes {
# `id` lets the client cache merge these nodes into the entries
# fetched by the main merge request list query.
id
...MergeRequestApprovalFragment
}
}
}
}

View File

@ -1,5 +1,4 @@
#import "~/graphql_shared/fragments/user.fragment.graphql"
#import "ee_else_ce/merge_request_dashboard/queries/merge_request_approval.fragment.graphql"
fragment MergeRequestFragment on MergeRequest {
id
@ -62,5 +61,4 @@ fragment MergeRequestFragment on MergeRequest {
count
}
hidden
...MergeRequestApprovalFragment
}

View File

@ -67,6 +67,15 @@ query getProjectMergeRequests(
}
nodes {
...MergeRequestFragment
# This data gets loaded through the `mergeRequestApprovals` request.
# We do it this way so that timeouts while computing approval data
# cannot prevent the whole merge request list from rendering.
approved @client
approvedBy @client {
nodes {
id
}
}
}
}
}

View File

@ -0,0 +1,68 @@
#import "ee_else_ce/merge_request_dashboard/queries/merge_request_approval.fragment.graphql"
# Approval-only companion to the project merge request list query: it fetches just
# the approval fields (MergeRequestApprovalFragment) for the same filtered page
# of merge requests, so slow approval data cannot block rendering the list itself.
query getProjectMergeRequestsApprovals(
$fullPath: ID!
$sort: MergeRequestSort
$state: MergeRequestState
$search: String
$approvedBy: [String!]
$assigneeUsernames: String
$assigneeWildcardId: AssigneeWildcardId
$reviewerUsername: String
$reviewerWildcardId: ReviewerWildcardId
$authorUsername: String
$draft: Boolean
$labelName: [String!]
$releaseTag: String
$mergeUser: String
$milestoneTitle: String
$milestoneWildcardId: MilestoneWildcardId
$myReactionEmoji: String
$sourceBranches: [String!]
$targetBranches: [String!]
$deployedBefore: Time
$deployedAfter: Time
$environmentName: String
$not: MergeRequestsResolverNegatedParams
$beforeCursor: String
$afterCursor: String
$firstPageSize: Int
$lastPageSize: Int
) {
namespace: project(fullPath: $fullPath) {
id
mergeRequests(
sort: $sort
state: $state
search: $search
approvedBy: $approvedBy
assigneeUsername: $assigneeUsernames
assigneeWildcardId: $assigneeWildcardId
reviewerUsername: $reviewerUsername
reviewerWildcardId: $reviewerWildcardId
authorUsername: $authorUsername
draft: $draft
labelName: $labelName
releaseTag: $releaseTag
mergedBy: $mergeUser
milestoneTitle: $milestoneTitle
milestoneWildcardId: $milestoneWildcardId
myReactionEmoji: $myReactionEmoji
sourceBranches: $sourceBranches
targetBranches: $targetBranches
deployedBefore: $deployedBefore
deployedAfter: $deployedAfter
environmentName: $environmentName
not: $not
before: $beforeCursor
after: $afterCursor
first: $firstPageSize
last: $lastPageSize
) {
nodes {
# `id` lets the client cache merge these nodes into the entries
# fetched by the main merge request list query.
id
...MergeRequestApprovalFragment
}
}
}
}

View File

@ -1,6 +1,7 @@
import addExtraTokensForMergeRequests from 'ee_else_ce/filtered_search/add_extra_tokens_for_merge_requests';
import getMergeRequestsQuery from 'ee_else_ce/merge_requests/list/queries/group/get_merge_requests.query.graphql';
import getMergeRequestsCountsQuery from 'ee_else_ce/merge_requests/list/queries/group/get_merge_requests_counts.query.graphql';
import getMergeRequestsApprovalsQuery from 'ee_else_ce/merge_requests/list/queries/group/get_merge_requests_approvals.query.graphql';
import IssuableFilteredSearchTokenKeys from '~/filtered_search/issuable_filtered_search_token_keys';
import { FILTERED_SEARCH } from '~/filtered_search/constants';
import { initBulkUpdateSidebar } from '~/issuable';
@ -26,4 +27,9 @@ initNewResourceDropdown({
query: searchUserGroupProjectsWithMergeRequestsEnabled,
extractProjects: (data) => data?.group?.projects?.nodes,
});
mountMergeRequestListsApp({ getMergeRequestsQuery, getMergeRequestsCountsQuery, isProject: false });
mountMergeRequestListsApp({
getMergeRequestsQuery,
getMergeRequestsCountsQuery,
getMergeRequestsApprovalsQuery,
isProject: false,
});

View File

@ -1,6 +1,7 @@
import addExtraTokensForMergeRequests from 'ee_else_ce/filtered_search/add_extra_tokens_for_merge_requests';
import getMergeRequestsQuery from 'ee_else_ce/merge_requests/list/queries/project/get_merge_requests.query.graphql';
import getMergeRequestsCountsQuery from 'ee_else_ce/merge_requests/list/queries/project/get_merge_requests_counts.query.graphql';
import getMergeRequestsApprovalsQuery from 'ee_else_ce/merge_requests/list/queries/project/get_merge_requests_approvals.query.graphql';
import { addShortcutsExtension } from '~/behaviors/shortcuts';
import ShortcutsNavigation from '~/behaviors/shortcuts/shortcuts_navigation';
import IssuableFilteredSearchTokenKeys from '~/filtered_search/issuable_filtered_search_token_keys';
@ -25,4 +26,8 @@ addShortcutsExtension(ShortcutsNavigation);
initIssuableByEmail();
initCsvImportExportButtons();
mountMoreActionsDropdown();
mountMergeRequestListsApp({ getMergeRequestsQuery, getMergeRequestsCountsQuery });
mountMergeRequestListsApp({
getMergeRequestsQuery,
getMergeRequestsApprovalsQuery,
getMergeRequestsCountsQuery,
});

View File

@ -80,6 +80,12 @@ module GraphqlTriggers
:issuable_todo_updated, { issuable_id: issuable.to_gid }, issuable
)
end
# Publishes the `user_merge_request_updated` GraphQL subscription event for
# `user`, carrying `merge_request` as the payload. No-op unless the
# :merge_request_dashboard_realtime feature flag (wip type) is enabled for
# that user.
def self.user_merge_request_updated(user, merge_request)
  if Feature.enabled?(:merge_request_dashboard_realtime, user, type: :wip)
    GitlabSchema.subscriptions.trigger(:user_merge_request_updated, { user_id: user.to_gid }, merge_request)
  end
end
end
GraphqlTriggers.prepend_mod

View File

@ -0,0 +1,29 @@
# frozen_string_literal: true
module Subscriptions # rubocop:disable Gitlab/BoundedContexts -- Existing module
module User
# GraphQL subscription: notifies a user's clients when a merge request they
# can read is updated. Events are published server-side via
# GraphqlTriggers.user_merge_request_updated.
class MergeRequestUpdated < ::Subscriptions::BaseSubscription
# Provides `force` to resolve the lazy value returned by find_by_gid.
include Gitlab::Graphql::Laziness
# Subscribers pass the global ID of the user whose updates they want.
argument :user_id, ::Types::GlobalIDType[::User],
required: true,
description: 'ID of the user.'
# Each event delivers the updated merge request.
payload_type Types::MergeRequestType
# Only the user themselves may subscribe to their own update stream.
def authorized?(user_id:)
user = force(GitlabSchema.find_by_gid(user_id))
unauthorized! unless user && current_user.id == user.id
true
end
# Skip delivery when the subscriber cannot read the triggering merge
# request (`object` is the merge request passed to the trigger).
def update(user_id:)
return NO_UPDATE unless Ability.allowed?(current_user, :read_merge_request, object)
super
end
end
end
end

View File

@ -72,6 +72,12 @@ module Types
subscription: Subscriptions::IssuableUpdated, null: true,
description: 'Triggered when a todo on an issuable is updated.',
experiment: { milestone: '17.5' }
field :user_merge_request_updated,
subscription: Subscriptions::User::MergeRequestUpdated,
null: true,
description: 'Triggered when a merge request the user is an assignee or a reviewer of is updated.',
experiment: { milestone: '17.9' }
end
end

View File

@ -89,6 +89,7 @@ module MergeRequests
trigger_merge_request_reviewers_updated(merge_request)
set_first_reviewer_assigned_at_metrics(merge_request) if new_reviewers.any?
trigger_user_merge_request_updated(merge_request)
end
def cleanup_environments(merge_request)
@ -281,6 +282,12 @@ module MergeRequests
GraphqlTriggers.merge_request_approval_state_updated(merge_request)
end
# Fans out the `user_merge_request_updated` GraphQL subscription event to every
# user involved with the merge request (assignees and reviewers), notifying
# each distinct user exactly once.
def trigger_user_merge_request_updated(merge_request)
  recipients = (merge_request.assignees.to_a + merge_request.reviewers.to_a).uniq
  recipients.each { |user| GraphqlTriggers.user_merge_request_updated(user, merge_request) }
end
def set_first_reviewer_assigned_at_metrics(merge_request)
metrics = merge_request.metrics
return unless metrics

View File

@ -28,6 +28,8 @@ module MergeRequests
invalidate_cache_counts(merge_request, users: old_assignees)
trigger_user_merge_request_updated(merge_request)
execute_assignees_hooks(merge_request, old_assignees) if options['execute_hooks']
end

View File

@ -16,6 +16,7 @@ module MergeRequests
trigger_merge_request_merge_status_updated(merge_request)
trigger_merge_request_reviewers_updated(merge_request)
trigger_merge_request_approval_state_updated(merge_request)
trigger_user_merge_request_updated(merge_request)
create_system_note(merge_request, user, has_unapproved)
user.invalidate_merge_request_cache_counts if user.merge_request_dashboard_enabled?

View File

@ -15,6 +15,7 @@ module MergeRequests
return error("Failed to update reviewer") unless reviewer.update(state: state)
trigger_merge_request_reviewers_updated(merge_request)
trigger_user_merge_request_updated(merge_request)
if current_user.merge_request_dashboard_enabled?
invalidate_cache_counts(merge_request, users: merge_request.assignees)

View File

@ -0,0 +1,12 @@
---
api_type:
attr: anti_abuse_settings
clusterwide: true
column: anti_abuse_settings
db_type: jsonb
default: "'{}'::jsonb"
description: Configuration settings related to anti-abuse features
encrypted: false
gitlab_com_different_than_default: true
jihu: false
not_null: true

View File

@ -0,0 +1,9 @@
---
name: merge_request_dashboard_realtime
feature_issue_url: https://gitlab.com/gitlab-org/gitlab/-/issues/512629
introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/179560
rollout_issue_url: https://gitlab.com/gitlab-org/gitlab/-/issues/516000
milestone: '17.9'
group: group::code review
type: wip
default_enabled: false

View File

@ -0,0 +1,17 @@
- title: "REST API endpoint `pre_receive_secret_detection_enabled` is deprecated"
removal_milestone: "18.0"
announcement_milestone: "17.9"
breaking_change: true
window: "3"
reporter: abellucci
stage: application_security_testing
issue_url: https://gitlab.com/gitlab-org/gitlab/-/issues/514413
impact: medium
scope: instance
resolution_role: Admin
manual_task: true
body: | # (required) Don't change this line.
The REST API endpoint `pre_receive_secret_detection_enabled` is deprecated in favor of `secret_push_protection_enabled`. We are renaming some API fields to reflect the name change of the feature `pre_receive_secret_detection` to `secret_push_protection`.
To avoid breaking workflows that use the old name, you should stop using the `pre_receive_secret_detection_enabled` endpoint before GitLab 18.0. Instead, use the new `secret_push_protection_enabled` endpoint.
tiers: ultimate
documentation_url: https://docs.gitlab.com/ee/api/projects.html#secret-push-protection-status

View File

@ -5,4 +5,4 @@ feature_category: database
introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/172230
milestone: '17.6'
queued_migration_version: 20241111055711
finalized_by: # version of the migration that finalized this BBM
finalized_by: 20250123165457

View File

@ -7,4 +7,4 @@ milestone: '17.6'
queued_migration_version: 20240908225334
# Replace with the approximate date you think it's best to ensure the completion of this BBM.
finalize_after: '2024-09-16'
finalized_by: # version of the migration that finalized this BBM
finalized_by: 20250123165451

View File

@ -7,4 +7,4 @@ milestone: '17.6'
queued_migration_version: 20240911101712
# Replace with the approximate date you think it's best to ensure the completion of this BBM.
finalize_after: '2024-09-16'
finalized_by: # version of the migration that finalized this BBM
finalized_by: 20250123165454

View File

@ -0,0 +1,8 @@
---
migration_job_name: SplitMicrosoftApplicationsTable
description: Splits group-specific entries out of SystemAccess::MicrosoftApplication to SystemAccess::GroupMicrosoftApplication
feature_category: system_access
introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/177531
milestone: '17.9'
queued_migration_version: 20250127052138
finalized_by: # version of the migration that finalized this BBM

View File

@ -0,0 +1,25 @@
# frozen_string_literal: true
# Replaces the non-unique group_id index on
# system_access_group_microsoft_applications with a unique index, enforcing at
# most one Microsoft application row per group.
class SystemAccessGroupMicrosoftApplicationsUniqueGroupId < Gitlab::Database::Migration[2.2]
milestone '17.9'
# rubocop:disable Migration/AddIndex, Migration/RemoveIndex -- these are on empty tables
def up
# these tables are currently unused, there should be no data in them
# truncating for unexpected cases (local dev, failed migrations, etc)
truncate_tables! 'system_access_group_microsoft_graph_access_tokens', 'system_access_group_microsoft_applications'
remove_index :system_access_group_microsoft_applications,
name: 'index_system_access_group_microsoft_applications_on_group_id'
# Recreated under the same name, now with the uniqueness guarantee.
add_index :system_access_group_microsoft_applications, :group_id, unique: true
end
# Restores the original non-unique index.
def down
remove_index :system_access_group_microsoft_applications,
name: 'index_system_access_group_microsoft_applications_on_group_id'
add_index :system_access_group_microsoft_applications, :group_id
end
# rubocop:enable Migration/AddIndex, Migration/RemoveIndex
end

View File

@ -0,0 +1,21 @@
# frozen_string_literal: true
# Adds a jsonb `anti_abuse_settings` column to application_settings together
# with a check constraint guaranteeing the stored value is always a JSON object.
class AddAntiAbuseSettingsToApplicationSettings < Gitlab::Database::Migration[2.2]
milestone '17.9'
# Runs outside a single DDL transaction -- presumably required by the
# add_check_constraint helper; confirm against the migration style guide.
disable_ddl_transaction!
def up
add_column :application_settings, :anti_abuse_settings, :jsonb, default: {}, null: false
# Rejects scalars/arrays: only JSON objects are valid settings payloads.
add_check_constraint(
:application_settings,
"(jsonb_typeof(anti_abuse_settings) = 'object')",
'check_anti_abuse_settings_is_hash'
)
end
def down
# Dropping the column also drops its check constraint.
remove_column :application_settings, :anti_abuse_settings
end
end

View File

@ -0,0 +1,22 @@
# frozen_string_literal: true
# Finalizes the 'MarkDuplicateIssuableResourceLinks' batched background
# migration so later changes can rely on it having fully completed.
class FinalizeTheDeduplicationOfIssuableResourceLinks < Gitlab::Database::Migration[2.2]
DEDUPLICATION_MIGRATION = 'MarkDuplicateIssuableResourceLinks'
disable_ddl_transaction!
milestone '17.9'
restrict_gitlab_migration gitlab_schema: :gitlab_main
def up
# Ensures the queued BBM on issuable_resource_links (batched by issue_id)
# has completed, finishing any remaining batches inline (finalize: true).
ensure_batched_background_migration_is_finished(
job_class_name: DEDUPLICATION_MIGRATION,
table_name: :issuable_resource_links,
column_name: :issue_id,
job_arguments: [],
finalize: true
)
end
# No-op: finalizing a background migration cannot be undone.
def down; end
end

View File

@ -0,0 +1,22 @@
# frozen_string_literal: true
# Finalizes the 'MarkSingleIssuableResourceLinks' batched background migration
# so later changes can rely on it having fully completed.
class FinalizeTheDeduplicationOfIssuableResourceLinks2 < Gitlab::Database::Migration[2.2]
DEDUPLICATION_MIGRATION = 'MarkSingleIssuableResourceLinks'
disable_ddl_transaction!
milestone '17.9'
restrict_gitlab_migration gitlab_schema: :gitlab_main
def up
# Ensures the queued BBM on issuable_resource_links (batched by issue_id)
# has completed, finishing any remaining batches inline (finalize: true).
ensure_batched_background_migration_is_finished(
job_class_name: DEDUPLICATION_MIGRATION,
table_name: :issuable_resource_links,
column_name: :issue_id,
job_arguments: [],
finalize: true
)
end
# No-op: finalizing a background migration cannot be undone.
def down; end
end

View File

@ -0,0 +1,22 @@
# frozen_string_literal: true
# Finalizes the 'DeleteDuplicateIssuableResourceLinks' batched background
# migration so later changes can rely on it having fully completed.
class FinalizeTheDeduplicationOfIssuableResourceLinks3 < Gitlab::Database::Migration[2.2]
DELETION_MIGRATION = 'DeleteDuplicateIssuableResourceLinks'
disable_ddl_transaction!
milestone '17.9'
restrict_gitlab_migration gitlab_schema: :gitlab_main
def up
# Ensures the queued BBM on issuable_resource_links (batched by primary key
# id, unlike its siblings) has completed, finishing remaining batches inline.
ensure_batched_background_migration_is_finished(
job_class_name: DELETION_MIGRATION,
table_name: :issuable_resource_links,
column_name: :id,
job_arguments: [],
finalize: true
)
end
# No-op: finalizing a background migration cannot be undone.
def down; end
end

View File

@ -0,0 +1,27 @@
# frozen_string_literal: true
# Enqueues the SplitMicrosoftApplicationsTable batched background migration,
# which splits group-specific entries out of
# system_access_microsoft_applications (see its BBM dictionary entry).
class QueueSplitMicrosoftApplicationsTable < Gitlab::Database::Migration[2.2]
milestone '17.9'
restrict_gitlab_migration gitlab_schema: :gitlab_main
MIGRATION = "SplitMicrosoftApplicationsTable"
DELAY_INTERVAL = 2.minutes # pause between batch jobs
BATCH_SIZE = 1000 # rows claimed per batch job
SUB_BATCH_SIZE = 100 # rows processed per sub-batch within a job
def up
# Queue the BBM over system_access_microsoft_applications, batched by id.
queue_batched_background_migration(
MIGRATION,
:system_access_microsoft_applications,
:id,
job_interval: DELAY_INTERVAL,
batch_size: BATCH_SIZE,
sub_batch_size: SUB_BATCH_SIZE
)
end
def down
# Removes the queued BBM record; work already performed is not reverted.
delete_batched_background_migration(MIGRATION, :system_access_microsoft_applications, :id, [])
end
end

View File

@ -0,0 +1,21 @@
# frozen_string_literal: true
# Drops the encrypted_credentials/encrypted_credentials_iv columns from
# virtual_registries_packages_maven_upstreams -- presumably superseded by the
# separate encrypted_username/encrypted_password columns; confirm with the
# introducing MR.
class DropVirtualRegistriesPackagesMavenUpstreamsEncryptedCredentials < Gitlab::Database::Migration[2.2]
disable_ddl_transaction!
milestone '17.9'
TABLE_NAME = :virtual_registries_packages_maven_upstreams
def up
# if_exists makes the drop idempotent if the migration is retried.
remove_column TABLE_NAME, :encrypted_credentials, if_exists: true
remove_column TABLE_NAME, :encrypted_credentials_iv, if_exists: true
end
def down
add_column TABLE_NAME, :encrypted_credentials, :binary, null: true, if_not_exists: true
add_column TABLE_NAME, :encrypted_credentials_iv, :binary, null: true, if_not_exists: true
# Restore the original octet-length limits under their hash-derived names.
add_check_constraint(TABLE_NAME, 'octet_length(encrypted_credentials) <= 1020', 'check_b9e3bfa31a')
add_check_constraint(TABLE_NAME, 'octet_length(encrypted_credentials_iv) <= 1020', 'check_4af2999ab8')
end
end

View File

@ -0,0 +1 @@
843abc3d6c94d7d36e83303843c2edcfe68026754514f2a4ad7ad15ae692d8a6

View File

@ -0,0 +1 @@
983461e9c960bf4e1d2eee9a0e5de6abbf1ca2df6a39daa207ea74e9cc819deb

View File

@ -0,0 +1 @@
67f03f88827c018b30125ecc4a1e1f2c87feff0f0bda2b6f06989d9e0b4ca318

View File

@ -0,0 +1 @@
5a7d6d8a68527847d3c2fe3cbfd9692388248677438ede13e8b56997a741fb8a

View File

@ -0,0 +1 @@
f59db429605963c3433a251be69cdbce9db69240fb168a98b14c9a5228ff562e

View File

@ -0,0 +1 @@
94f1f49da59a704cb2840afe857d6396145ba91343afa9267cc9337bde55ee91

View File

@ -0,0 +1 @@
8cee35d14e50e690d692fa106c5063b428d1d820148c68023c42ec56275503e6

View File

@ -7964,6 +7964,7 @@ CREATE TABLE application_settings (
elasticsearch_max_code_indexing_concurrency integer DEFAULT 30 NOT NULL,
observability_settings jsonb DEFAULT '{}'::jsonb NOT NULL,
search jsonb DEFAULT '{}'::jsonb NOT NULL,
anti_abuse_settings jsonb DEFAULT '{}'::jsonb NOT NULL,
CONSTRAINT app_settings_container_reg_cleanup_tags_max_list_size_positive CHECK ((container_registry_cleanup_tags_service_max_list_size >= 0)),
CONSTRAINT app_settings_dep_proxy_ttl_policies_worker_capacity_positive CHECK ((dependency_proxy_ttl_group_policy_worker_capacity >= 0)),
CONSTRAINT app_settings_ext_pipeline_validation_service_url_text_limit CHECK ((char_length(external_pipeline_validation_service_url) <= 255)),
@ -8011,6 +8012,7 @@ CREATE TABLE application_settings (
CONSTRAINT check_9c6c447a13 CHECK ((char_length(maintenance_mode_message) <= 255)),
CONSTRAINT check_a5704163cc CHECK ((char_length(secret_detection_revocation_token_types_url) <= 255)),
CONSTRAINT check_ae53cf7f82 CHECK ((char_length(vertex_ai_host) <= 255)),
CONSTRAINT check_anti_abuse_settings_is_hash CHECK ((jsonb_typeof(anti_abuse_settings) = 'object'::text)),
CONSTRAINT check_app_settings_namespace_storage_forks_cost_factor_range CHECK (((namespace_storage_forks_cost_factor >= (0)::double precision) AND (namespace_storage_forks_cost_factor <= (1)::double precision))),
CONSTRAINT check_app_settings_sentry_clientside_traces_sample_rate_range CHECK (((sentry_clientside_traces_sample_rate >= (0)::double precision) AND (sentry_clientside_traces_sample_rate <= (1)::double precision))),
CONSTRAINT check_application_settings_clickhouse_is_hash CHECK ((jsonb_typeof(clickhouse) = 'object'::text)),
@ -22513,8 +22515,6 @@ CREATE TABLE virtual_registries_packages_maven_upstreams (
created_at timestamp with time zone NOT NULL,
updated_at timestamp with time zone NOT NULL,
url text NOT NULL,
encrypted_credentials bytea,
encrypted_credentials_iv bytea,
cache_validity_hours smallint DEFAULT 24 NOT NULL,
encrypted_username bytea,
encrypted_username_iv bytea,
@ -22522,9 +22522,7 @@ CREATE TABLE virtual_registries_packages_maven_upstreams (
encrypted_password_iv bytea,
CONSTRAINT check_2366658457 CHECK ((octet_length(encrypted_username) <= 1020)),
CONSTRAINT check_26c0572777 CHECK ((char_length(url) <= 255)),
CONSTRAINT check_4af2999ab8 CHECK ((octet_length(encrypted_credentials_iv) <= 1020)),
CONSTRAINT check_a3593dca3a CHECK ((cache_validity_hours >= 0)),
CONSTRAINT check_b9e3bfa31a CHECK ((octet_length(encrypted_credentials) <= 1020)),
CONSTRAINT check_c3977cdb0c CHECK ((octet_length(encrypted_username_iv) <= 1020)),
CONSTRAINT check_e4b6e651bf CHECK ((octet_length(encrypted_password_iv) <= 1020)),
CONSTRAINT check_e57d1f3005 CHECK ((octet_length(encrypted_password) <= 1020))
@ -34393,7 +34391,7 @@ CREATE INDEX index_successful_authentication_events_for_metrics ON authenticatio
CREATE UNIQUE INDEX index_suggestions_on_note_id_and_relative_order ON suggestions USING btree (note_id, relative_order);
CREATE INDEX index_system_access_group_microsoft_applications_on_group_id ON system_access_group_microsoft_applications USING btree (group_id);
CREATE UNIQUE INDEX index_system_access_group_microsoft_applications_on_group_id ON system_access_group_microsoft_applications USING btree (group_id);
CREATE UNIQUE INDEX index_system_access_microsoft_applications_on_namespace_id ON system_access_microsoft_applications USING btree (namespace_id);

View File

@ -42,96 +42,182 @@ To disable the agent server on a single node:
1. [Reconfigure GitLab](../restart_gitlab.md#reconfigure-a-linux-package-installation).
##### Configure KAS to listen on a UNIX socket
#### Turn on KAS on multiple nodes
If you use GitLab behind a proxy, KAS might not work correctly. On a single-node installation, you can resolve this issue by configuring KAS to listen on a UNIX socket.
KAS instances communicate with each other by registering their private addresses in Redis at a well-known location. Each KAS must be configured with its specific location details so that other instances can reach it.
To configure KAS to listen on a UNIX socket:
To turn on KAS on multiple nodes:
1. Create a directory for the KAS sockets:
1. Add the [common configuration](#common-configuration).
1. Add the configuration from one of the following options:
```shell
sudo mkdir -p /var/opt/gitlab/gitlab-kas/sockets/
```
1. Edit `/etc/gitlab/gitlab.rb`:
```ruby
gitlab_kas['internal_api_listen_network'] = 'unix'
gitlab_kas['internal_api_listen_address'] = '/var/opt/gitlab/gitlab-kas/sockets/internal-api.socket'
gitlab_kas['private_api_listen_network'] = 'unix'
gitlab_kas['private_api_listen_address'] = '/var/opt/gitlab/gitlab-kas/sockets/private-api.socket'
gitlab_kas['client_timeout_seconds'] = '5'
gitlab_kas['env'] = {
'SSL_CERT_DIR' => "/opt/gitlab/embedded/ssl/certs/",
'OWN_PRIVATE_API_URL' => 'unix:///var/opt/gitlab/gitlab-kas/sockets/private-api.socket'
}
```
- [Option 1 - explicit manual configuration](#option-1---explicit-manual-configuration)
- [Option 2 - automatic CIDR-based configuration](#option-2---automatic-cidr-based-configuration)
- [Option 3 - automatic configuration based on listener configuration](#option-3---automatic-configuration-based-on-listener-configuration)
1. [Reconfigure GitLab](../restart_gitlab.md#reconfigure-a-linux-package-installation).
1. (Optional) If you use a multi-server environment with separate GitLab Rails and Sidekiq nodes, enable KAS on the Sidekiq nodes.
For additional configuration options, see the **GitLab Kubernetes agent server** section of
[`gitlab.rb.template`](https://gitlab.com/gitlab-org/omnibus-gitlab/blob/master/files/gitlab-config-template/gitlab.rb.template).
##### Common configuration
#### Enable on multiple nodes
For each KAS node, edit the file at `/etc/gitlab/gitlab.rb` and add the following configuration:
To enable the agent server on multiple nodes:
```ruby
gitlab_kas_external_url 'wss://kas.gitlab.example.com/'
1. For each agent server node, edit `/etc/gitlab/gitlab.rb`:
gitlab_kas['api_secret_key'] = '<32_bytes_long_base64_encoded_value>'
gitlab_kas['private_api_secret_key'] = '<32_bytes_long_base64_encoded_value>'
```ruby
gitlab_kas_external_url 'wss://kas.gitlab.example.com/'
# private_api_listen_address examples, pick one:
gitlab_kas['api_secret_key'] = '<32_bytes_long_base64_encoded_value>'
gitlab_kas['private_api_secret_key'] = '<32_bytes_long_base64_encoded_value>'
gitlab_kas['private_api_listen_address'] = '0.0.0.0:8155'
gitlab_kas['client_timeout_seconds'] = '5'
gitlab_kas['env'] = {
'SSL_CERT_DIR' => "/opt/gitlab/embedded/ssl/certs/",
'OWN_PRIVATE_API_URL' => 'grpc://<ip_or_hostname_of_this_host>:8155' # use grpcs:// when using TLS on the private API endpoint
gitlab_kas['private_api_listen_address'] = 'A.B.C.D:8155' # Listen on a particular IPv4. Each node must use its own unique IP.
# gitlab_kas['private_api_listen_address'] = '[A:B:C::D]:8155' # Listen on a particular IPv6. Each node must use its own unique IP.
# gitlab_kas['private_api_listen_address'] = 'kas-N.gitlab.example.com:8155' # Listen on all IPv4 and IPv6 interfaces that the DNS name resolves to. Each node must use its own unique domain.
# gitlab_kas['private_api_listen_address'] = ':8155' # Listen on all IPv4 and IPv6 interfaces.
# gitlab_kas['private_api_listen_address'] = '0.0.0.0:8155' # Listen on all IPv4 interfaces.
# gitlab_kas['private_api_listen_address'] = '[::]:8155' # Listen on all IPv6 interfaces.
# 'OWN_PRIVATE_API_HOST' => '<server-name-from-cert>' # Add if you want to use TLS for KAS->KAS communication. This is used to verify the TLS certificate host name.
gitlab_kas['env'] = {
# 'OWN_PRIVATE_API_HOST' => '<server-name-from-cert>' # Add if you want to use TLS for KAS->KAS communication. This name is used to verify the TLS certificate host name instead of the host in the URL of the destination KAS.
'SSL_CERT_DIR' => "/opt/gitlab/embedded/ssl/certs/",
}
# 'OWN_PRIVATE_API_CIDR' => '10.0.0.0/8', # IPv4 example
# 'OWN_PRIVATE_API_CIDR' => '2001:db8:8a2e:370::7334/64', # IPv6 example
# 'OWN_PRIVATE_API_PORT' => '8155',
# 'OWN_PRIVATE_API_SCHEME' => 'grpc',
}
gitlab_rails['gitlab_kas_external_url'] = 'wss://gitlab.example.com/-/kubernetes-agent/'
gitlab_rails['gitlab_kas_internal_url'] = 'grpc://kas.internal.gitlab.example.com'
gitlab_rails['gitlab_kas_external_k8s_proxy_url'] = 'https://gitlab.example.com/-/kubernetes-agent/k8s-proxy/'
```
gitlab_rails['gitlab_kas_external_url'] = 'wss://gitlab.example.com/-/kubernetes-agent/'
gitlab_rails['gitlab_kas_internal_url'] = 'grpc://kas.internal.gitlab.example.com'
gitlab_rails['gitlab_kas_external_k8s_proxy_url'] = 'https://gitlab.example.com/-/kubernetes-agent/k8s-proxy/'
```
**Do not** set `private_api_listen_address` to listen on an internal address, such as:
You might not be able to specify an exact IP address or host name in the `OWN_PRIVATE_API_URL` variable.
For example, if the kas host is assigned an IP dynamically.
- `localhost`
- Loopback IP addresses, like `127.0.0.1` or `::1`
- A UNIX socket
In this situation, you can configure `OWN_PRIVATE_API_CIDR` instead to set up kas to dynamically construct `OWN_PRIVATE_API_URL`:
Other KAS nodes cannot reach these addresses.
- Comment out `OWN_PRIVATE_API_URL` to disable this variable.
- Configure `OWN_PRIVATE_API_CIDR` to specify what network kas listens on. When you start kas, kas looks at
the IP addresses the host is assigned, and uses the address that matches the specified CIDR as its own private IP address.
- By default, kas uses the port from the `private_api_listen_address` parameter. Configure `OWN_PRIVATE_API_PORT` to use a different port.
- Optional. By default, kas uses the `grpc` scheme. If you use TLS on the private API endpoint, configure `OWN_PRIVATE_API_SCHEME=grpcs`.
- Optional. By default, the `client_timeout_seconds` parameter is configured to wait for the kas response for 5 seconds.
For single-node configurations, you can set `private_api_listen_address` to listen on an internal address.
1. [Reconfigure GitLab](../restart_gitlab.md#reconfigure-a-linux-package-installation).
1. Optional. If you use a multi-server environment with separate GitLab Rails and Sidekiq nodes, enable the agent server on the Sidekiq nodes.
##### Option 1 - explicit manual configuration
For each KAS node, edit the file at `/etc/gitlab/gitlab.rb` and set the `OWN_PRIVATE_API_URL` environment variable explicitly:
```ruby
gitlab_kas['env'] = {
# OWN_PRIVATE_API_URL examples, pick one. Each node must use its own unique IP or DNS name.
# Use grpcs:// when using TLS on the private API endpoint.
'OWN_PRIVATE_API_URL' => 'grpc://A.B.C.D:8155' # IPv4
# 'OWN_PRIVATE_API_URL' => 'grpcs://A.B.C.D:8155' # IPv4 + TLS
# 'OWN_PRIVATE_API_URL' => 'grpc://[A:B:C::D]:8155' # IPv6
# 'OWN_PRIVATE_API_URL' => 'grpc://kas-N-private-api.gitlab.example.com:8155' # DNS name
}
```
##### Option 2 - automatic CIDR-based configuration
> - [Introduced](https://gitlab.com/gitlab-org/cluster-integration/gitlab-agent/-/issues/464) in GitLab 16.5.0.
> - [Added](https://gitlab.com/gitlab-org/cluster-integration/gitlab-agent/-/merge_requests/2183) multiple CIDR support to `OWN_PRIVATE_API_CIDR` in GitLab 17.8.1.
You might not be able to set an exact IP address or hostname in the `OWN_PRIVATE_API_URL` variable if, for example,
the KAS host is assigned an IP address and a hostname dynamically.
If you cannot set an exact IP address or hostname, you can configure `OWN_PRIVATE_API_CIDR` to set up KAS to dynamically construct
`OWN_PRIVATE_API_URL` based on one or more [CIDRs](https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing):
This approach allows each KAS node to use a static configuration that works as long as
the CIDR doesn't change.
For each KAS node, edit the file at `/etc/gitlab/gitlab.rb` to dynamically construct the
`OWN_PRIVATE_API_URL` URL:
1. Comment out `OWN_PRIVATE_API_URL` in your common configuration to turn off this variable.
1. Configure `OWN_PRIVATE_API_CIDR` to specify what networks the KAS nodes listen on.
When you start KAS, it determines which private IP address to use by selecting the host address that matches the specified CIDR.
1. Configure `OWN_PRIVATE_API_PORT` to use a different port. By default, KAS uses the port from the `private_api_listen_address` parameter.
1. If you use TLS on the private API endpoint, configure `OWN_PRIVATE_API_SCHEME=grpcs`. By default, KAS uses the `grpc` scheme.
```ruby
gitlab_kas['env'] = {
# 'OWN_PRIVATE_API_CIDR' => '10.0.0.0/8', # IPv4 example
# 'OWN_PRIVATE_API_CIDR' => '2001:db8:8a2e:370::7334/64', # IPv6 example
# 'OWN_PRIVATE_API_CIDR' => '10.0.0.0/8,2001:db8:8a2e:370::7334/64', # multiple CIDRs example
# 'OWN_PRIVATE_API_PORT' => '8155',
# 'OWN_PRIVATE_API_SCHEME' => 'grpc',
}
```
##### Option 3 - automatic configuration based on listener configuration
> - [Introduced](https://gitlab.com/gitlab-org/cluster-integration/gitlab-agent/-/issues/464) in GitLab 16.5.0.
> - [Updated](https://gitlab.com/gitlab-org/cluster-integration/gitlab-agent/-/issues/510) KAS to listen on and publish all non-loopback IP addresses and filter out IPv4 and IPv6 addresses based on the value of `private_api_listen_network`.
A KAS node can determine what IP addresses are available based on the `private_api_listen_network` and
`private_api_listen_address` settings:
- If `private_api_listen_address` is set to a fixed IP address and port number (for example, `ip:port`), it uses this IP address.
- If `private_api_listen_address` has no IP address (for example, `:8155`), or has an unspecified IP address
(for example, `[::]:8155` or `0.0.0.0:8155`), KAS assigns all non-loopback and non-link-local IP addresses to the node.
IPv4 and IPv6 addresses are filtered based on the value of `private_api_listen_network`.
- If `private_api_listen_address` is a `hostname:PORT` (for example, `kas-N-private-api.gitlab.example.com:8155`), KAS
resolves the DNS name and assigns all IP addresses to the node.
In this mode, KAS listens only on the first IP address (this behavior is defined by the [Go standard library](https://pkg.go.dev/net#Listen)).
IPv4 and IPv6 addresses are filtered based on the value of `private_api_listen_network`.
Before exposing the private API address of a KAS on all IP addresses, make sure this action does not conflict with your organization's security policy.
The private API endpoint requires a valid authentication token for all requests.
For each KAS node, edit the file at `/etc/gitlab/gitlab.rb`:
Example 1. Listen on all IPv4 and IPv6 interfaces:
```ruby
# gitlab_kas['private_api_listen_network'] = 'tcp' # this is the default value, no need to set it.
gitlab_kas['private_api_listen_address'] = ':8155' # Listen on all IPv4 and IPv6 interfaces
```
Example 2. Listen on all IPv4 interfaces:
```ruby
gitlab_kas['private_api_listen_network'] = 'tcp4'
gitlab_kas['private_api_listen_address'] = ':8155'
```
Example 3. Listen on all IPv6 interfaces:
```ruby
gitlab_kas['private_api_listen_network'] = 'tcp6'
gitlab_kas['private_api_listen_address'] = ':8155'
```
You can use environment variables to override the scheme and port that
construct the `OWN_PRIVATE_API_URL`:
```ruby
gitlab_kas['env'] = {
# 'OWN_PRIVATE_API_PORT' => '8155',
# 'OWN_PRIVATE_API_SCHEME' => 'grpc',
}
```
##### Agent server node settings
| Setting | Description |
|---------|-------------|
| `gitlab_kas['private_api_listen_address']` | The address the agent server listens on. Set to `0.0.0.0` or to an IP address reachable by other nodes in the cluster. |
| `gitlab_kas['api_secret_key']` | The shared secret used for authentication between KAS and GitLab. The value must be Base64-encoded and exactly 32 bytes long. |
| `gitlab_kas['private_api_secret_key']` | The shared secret used for authentication between different KAS instances. The value must be Base64-encoded and exactly 32 bytes long. |
| Setting | Description |
|---------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| `gitlab_kas['private_api_listen_network']` | The network family KAS listens on. Defaults to `tcp` for both IPv4 and IPv6 networks. Set to `tcp4` for IPv4 or `tcp6` for IPv6. |
| `gitlab_kas['private_api_listen_address']` | The address the KAS listens on. Set to `0.0.0.0:8155` or to an IP:PORT reachable by other nodes in the cluster. |
| `gitlab_kas['api_secret_key']` | The shared secret used for authentication between KAS and GitLab. The value must be Base64-encoded and exactly 32 bytes long. |
| `gitlab_kas['private_api_secret_key']` | The shared secret used for authentication between different KAS instances. The value must be Base64-encoded and exactly 32 bytes long. |
| `OWN_PRIVATE_API_SCHEME` | Optional value used to specify what scheme to use when constructing `OWN_PRIVATE_API_URL`. Can be `grpc` or `grpcs`. |
| `OWN_PRIVATE_API_URL` | The environment variable used by KAS for service discovery. Set to the hostname or IP address of the node you're configuring. The node must be reachable by other nodes in the cluster. |
| `OWN_PRIVATE_API_HOST` | Optional value used to verify the TLS certificate host name. <sup>1</sup> A client compares this value to the host name in the server's TLS certificate file.|
| `gitlab_kas['client_timeout_seconds']` | The timeout for the client to connect to the agent server. |
| `gitlab_kas_external_url` | The user-facing URL for the in-cluster `agentk`. Can be a fully qualified domain or subdomain, <sup>2</sup> or a GitLab external URL. <sup>3</sup> If blank, defaults to a GitLab external URL. |
| `gitlab_rails['gitlab_kas_external_url']` | The user-facing URL for the in-cluster `agentk`. If blank, defaults to the `gitlab_kas_external_url`. |
| `gitlab_rails['gitlab_kas_external_k8s_proxy_url']` | The user-facing URL for Kubernetes API proxying. If blank, defaults to a URL based on `gitlab_kas_external_url`. |
| `gitlab_rails['gitlab_kas_internal_url']` | The internal URL the GitLab backend uses to communicate with KAS. |
| `OWN_PRIVATE_API_HOST` | Optional value used to verify the TLS certificate hostname. <sup>1</sup> A client compares this value to the hostname in the server's TLS certificate file. |
| `OWN_PRIVATE_API_PORT` | Optional value used to specify what port to use when constructing `OWN_PRIVATE_API_URL`. |
| `OWN_PRIVATE_API_CIDR` | Optional value used to specify which IP addresses from the available networks to use when constructing `OWN_PRIVATE_API_URL`. |
| `gitlab_kas['client_timeout_seconds']` | The timeout for the client to connect to the KAS. |
| `gitlab_kas_external_url` | The user-facing URL for the in-cluster `agentk`. Can be a fully qualified domain or subdomain, <sup>2</sup> or a GitLab external URL. <sup>3</sup> If blank, defaults to a GitLab external URL. |
| `gitlab_rails['gitlab_kas_external_url']` | The user-facing URL for the in-cluster `agentk`. If blank, defaults to the `gitlab_kas_external_url`. |
| `gitlab_rails['gitlab_kas_external_k8s_proxy_url']` | The user-facing URL for Kubernetes API proxying. If blank, defaults to a URL based on `gitlab_kas_external_url`. |
| `gitlab_rails['gitlab_kas_internal_url']` | The internal URL the GitLab backend uses to communicate with KAS. |
**Footnotes:**
@ -231,7 +317,7 @@ gitlab_kas['gitlab_address'] = 'http://gitlab.example.com'
If you encounter this error when trying to reach the GitLab URL, it means it doesn't trust the GitLab certificate.
You might see a similar error in the Kubernetes Agent Server (KAS) logs of your GitLab application server:
You might see a similar error in the KAS logs of your GitLab application server:
```json
{"level":"error","time":"2023-03-07T20:19:48.151Z","msg":"AgentInfo()","grpc_service":"gitlab.agent.agent_configuration.rpc.AgentConfiguration","grpc_method":"GetConfiguration","error":"Get \"https://gitlab.example.com/api/v4/internal/kubernetes/agent_info\": x509: certificate signed by unknown authority"}
@ -239,7 +325,7 @@ You might see a similar error in the Kubernetes Agent Server (KAS) logs of your
To fix this error, install the public certificate of your internal CA in the `/etc/gitlab/trusted-certs` directory.
Alternatively, you can configure your KAS to read the certificate from a custom directory. To do this, add to `/etc/gitlab/gitlab.rb` the following configuration:
Alternatively, you can configure KAS to read the certificate from a custom directory. To do this, add the following configuration to the file at `/etc/gitlab/gitlab.rb`:
```ruby
gitlab_kas['env'] = {
@ -255,7 +341,7 @@ To apply the changes:
sudo gitlab-ctl reconfigure
```
1. Restart GitLab KAS:
1. Restart agent server:
```shell
gitlab-ctl restart gitlab-kas

View File

@ -285,6 +285,10 @@ end
Read the documentation on [`ReactiveCaching`](reactive_caching.md).
## `TokenAuthenticatable`
Read the documentation on [`TokenAuthenticatable`](token_authenticatable.md).
## `CircuitBreaker`
The `Gitlab::CircuitBreaker` can be wrapped around any class that needs to run code with circuit breaker protection. It provides a `run_with_circuit` method that wraps a code block with circuit breaker functionality, which helps prevent cascading failures and improves system resilience. For more information about the circuit breaker pattern, see:

View File

@ -88,6 +88,7 @@ This window takes place on May 5 - 7, 2025 from 09:00 UTC to 22:00 UTC.
| [Updated tooling to release CI/CD components to the Catalog](https://gitlab.com/groups/gitlab-org/-/epics/12788) | High | Verify | Instance |
| [Increased default security for use of pipeline variables](https://gitlab.com/gitlab-org/gitlab/-/issues/502382) | Medium | Verify | Project |
| [Amazon S3 Signature Version 2](https://gitlab.com/gitlab-org/container-registry/-/issues/1449) | Low | Package | Project |
| [REST API endpoint `pre_receive_secret_detection_enabled` is deprecated](https://gitlab.com/gitlab-org/gitlab/-/issues/514413) | Medium | Application_security_testing | Instance |
| [Remove duoProAssignedUsersCount GraphQL field](https://gitlab.com/gitlab-org/gitlab/-/issues/498671) | Low | Plan | Group, project |
| [Secret detection analyzer doesn't run as root user by default](https://gitlab.com/gitlab-org/gitlab/-/issues/476160) | Low | Application_security_testing | Instance |
| [Remove `previousStageJobsOrNeeds` from GraphQL](https://gitlab.com/gitlab-org/gitlab/-/issues/424417) | Low | Verify | Instance |

View File

@ -1124,6 +1124,23 @@ to ensure the correct locations are being used to mirror the required scanner im
<div class="deprecation breaking-change" data-milestone="18.0">
### REST API endpoint `pre_receive_secret_detection_enabled` is deprecated
<div class="deprecation-notes">
- Announced in GitLab <span class="milestone">17.9</span>
- Removal in GitLab <span class="milestone">18.0</span> ([breaking change](https://docs.gitlab.com/ee/update/terminology.html#breaking-change))
- To discuss this change or learn more, see the [deprecation issue](https://gitlab.com/gitlab-org/gitlab/-/issues/514413).
</div>
The REST API endpoint `pre_receive_secret_detection_enabled` is deprecated in favor of `secret_push_protection_enabled`. We are renaming some API fields to reflect the name change of the feature `pre_receive_secret_detection` to `secret_push_protection`.
To avoid breaking workflows that use the old name, you should stop using the `pre_receive_secret_detection_enabled` endpoint before GitLab 18.0. Instead, use the new `secret_push_protection_enabled` endpoint.
</div>
<div class="deprecation breaking-change" data-milestone="18.0">
### Rate limits for common User, Project, and Group API endpoints
<div class="deprecation-notes">

View File

@ -1218,6 +1218,7 @@ entry and paste the spreadsheet:
### JSON
> - [Introduced](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/86353) in GitLab 15.3.
> - Ability to use Markdown [introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/375177) in GitLab 17.9.
To render tables with JSON code blocks, use the following syntax:
@ -1343,6 +1344,26 @@ You can use the `filter` attribute to render a table with content filtered dynam
```
````
You can use the `markdown` attribute to allow for GLFM Markdown in the items and caption,
including GitLab references. Fields do not support Markdown.
````markdown
```json:table
{
"fields" : [
{"key": "a", "label": "AA"},
{"key": "b", "label": "BB"},
{"key": "c", "label": "CC"}
],
"items" : [
{"a": "11", "b": "**22**", "c": "33"},
{"a": "#1", "b": "222", "c": "233"}
],
"markdown" : true
}
```
````
By default, every JSON table has the caption `Generated with JSON data`.
You can override this caption by specifying the `caption` attribute.

View File

@ -0,0 +1,111 @@
# frozen_string_literal: true

module Banzai
  module Filter
    # Prepares a `json:table` if it's been tagged as supporting markdown
    #
    # If the `markdown` option is not specified, or is not enabled, then we do
    # nothing and allow the frontend to sanitize it and display it.
    #
    # If `markdown: true` is included in the table, then we
    # - extract the data from the JSON
    # - build a markdown pipe table with the data
    # - run the markdown pipe table through the MarkdownFilter
    # - run the caption through markdown and add as <caption> to table
    # - add the table options as `data-` attributes so the frontend can properly display
    # - note that this filter is handled _before_ the SanitizationFilter, which means
    #   the resulting HTML will get properly sanitized at that point.
    class JsonTableFilter < HTML::Pipeline::Filter
      include Concerns::OutputSafety

      # Matches the <code> child of a fenced code block tagged `json` with lang param `table`
      CSS = '[data-canonical-lang="json"][data-lang-params="table"] > code:only-child'
      XPATH = Gitlab::Utils::Nokogiri.css_to_xpath(CSS).freeze

      # Processes every json:table code node in the document and returns the doc
      def call
        doc.xpath(XPATH).each do |node|
          process_json_table(node)
        end

        doc
      end

      private

      # Populated from the parsed JSON payload in #process_json_table
      attr_reader :fields, :items

      # Replaces the code block with a markdown-rendered <table> wrapped in a div.
      # Leaves the node untouched (for the frontend to handle) when the JSON is
      # invalid or does not opt in via `markdown: true`.
      def process_json_table(code_node)
        # need both the <pre> wrapper and its parent to exist before replacing
        return if code_node.parent&.parent.nil?

        json = begin
          Gitlab::Json.parse(code_node.text)
        rescue JSON::ParserError
          nil
        end

        # JSON not valid, let the frontend handle this block
        return unless json
        return unless json['markdown']

        @fields = json['fields']
        @items = json['items']

        table = table_header
        table << table_body

        # no_sourcepos: generated markdown has no mapping back to original source lines
        table_context = context.merge(no_sourcepos: true)
        html = Banzai::Filter::MarkdownFilter.new(table, table_context).call

        table_node = Nokogiri::HTML::DocumentFragment.parse(html)
        table_node = table_node.children.first
        table_node.set_attribute('data-table-fields', field_data.to_json)
        table_node.set_attribute('data-table-filter', 'true') if json['filter']
        table_node.set_attribute('data-table-markdown', 'true') if json['markdown']

        if json['caption'].present?
          html = Banzai::Filter::MarkdownFilter.new(json['caption'], table_context).call
          caption_node = doc.document.create_element('caption')
          caption_node << html
          table_node.prepend_child(caption_node)
        end

        # frontend needs a wrapper div
        wrapper = doc.document.create_element('div')
        wrapper.add_child(table_node)

        code_node.parent.replace(wrapper)
      end

      # Markdown pipe-table header built from field labels, falling back to the
      # keys of the first item.
      # NOTE(review): assumes `items` is a non-empty array when `fields` is absent - confirm upstream validation
      def table_header
        labels = fields ? fields.pluck('label') : items.first.keys

        <<~TABLE_HEADER
          | #{labels.join(' | ')} |
          #{'| --- ' * labels.size} |
        TABLE_HEADER
      end

      # Markdown pipe-table body, one row per item; missing values render as a space
      def table_body
        body = +''
        item_keys = fields ? fields.pluck('key') : items.first.keys

        items.each do |item|
          row = item_keys.map { |key| item[key] || ' ' }
          body << "| #{row.join(' | ')} |\n"
        end

        body
      end

      # Field metadata serialized into `data-table-fields`; synthesized from the
      # first item's keys when no explicit `fields` are provided
      def field_data
        return fields if fields

        array = []
        items.first.each_key { |value| array.push({ 'key' => value }) }

        array
      end
    end
  end
end

View File

@ -18,6 +18,9 @@ module Banzai
allowlist[:attributes]['td'] = %w[style]
allowlist[:css] = { properties: ['text-align'] }
# Allow json table attributes
allowlist[:attributes]['table'] = %w[data-table-fields data-table-filter data-table-markdown]
# Allow the 'data-sourcepos' from CommonMark on all elements
allowlist[:attributes][:all].push('data-sourcepos')
allowlist[:attributes][:all].push('data-escaped-char')

View File

@ -12,6 +12,7 @@ module Banzai
def self.filters
@filters ||= FilterArray[
Filter::CodeLanguageFilter,
Filter::JsonTableFilter, # process before sanitization
Filter::PlantumlFilter,
# Must always be before the SanitizationFilter/SanitizeLinkFilter to prevent XSS attacks
Filter::SpacedLinkFilter,

View File

@ -0,0 +1,40 @@
# frozen_string_literal: true
# See https://docs.gitlab.com/ee/development/database/batched_background_migrations.html
# for more information on how to use batched background migrations
# Update below commented lines with appropriate values.
module Gitlab
module BackgroundMigration
class SplitMicrosoftApplicationsTable < BatchedMigrationJob
operation_name :split_microsoft_applications_table
feature_category :system_access
def perform
each_sub_batch do |sub_batch|
connection.execute <<~SQL
INSERT INTO system_access_group_microsoft_applications
(temp_source_id, group_id, enabled, tenant_xid, client_xid, login_endpoint,
graph_endpoint, encrypted_client_secret, encrypted_client_secret_iv, created_at, updated_at)
SELECT
id,
namespace_id,
enabled,
tenant_xid,
client_xid,
login_endpoint,
graph_endpoint,
encrypted_client_secret,
encrypted_client_secret_iv,
created_at,
updated_at
FROM
(#{sub_batch.where.not(namespace_id: nil).to_sql}) AS sama
ON CONFLICT DO NOTHING
SQL
end
end
end
end
end

View File

@ -62,7 +62,7 @@
"@gitlab/fonts": "^1.3.0",
"@gitlab/query-language-rust": "0.3.2",
"@gitlab/svgs": "3.122.0",
"@gitlab/ui": "107.0.1",
"@gitlab/ui": "107.7.1",
"@gitlab/vue-router-vue3": "npm:vue-router@4.1.6",
"@gitlab/vuex-vue3": "npm:vuex@4.0.0",
"@gitlab/web-ide": "^0.0.1-dev-20250128095641",
@ -142,7 +142,7 @@
"deckar01-task_list": "^2.3.1",
"dexie": "^3.2.3",
"diff": "^3.4.0",
"dompurify": "^3.2.3",
"dompurify": "^3.2.4",
"dropzone": "^4.2.0",
"editorconfig": "^0.15.3",
"emoji-regex": "^10.3.0",

View File

@ -44,7 +44,7 @@ gem 'factory_bot', '~> 6.5.1'
group :development do
gem 'pry-byebug', '~> 3.10.1', platform: :mri
gem "ruby-debug-ide", "~> 0.7.4"
gem "ruby-debug-ide", "~> 0.7.5"
end
group :ci do

View File

@ -297,7 +297,7 @@ GEM
rspec-support (3.13.0)
rspec_junit_formatter (0.6.0)
rspec-core (>= 2, < 4, != 2.12.0)
ruby-debug-ide (0.7.4)
ruby-debug-ide (0.7.5)
rake (>= 0.8.1)
rubyzip (2.3.2)
sawyer (0.9.2)
@ -387,7 +387,7 @@ DEPENDENCIES
rspec (~> 3.13)
rspec-parameterized (~> 1.0.2)
rspec_junit_formatter (~> 0.6.0)
ruby-debug-ide (~> 0.7.4)
ruby-debug-ide (~> 0.7.5)
selenium-webdriver (= 4.28.0)
slack-notifier (~> 2.4)
terminal-table (~> 3.0.2)

View File

@ -33,7 +33,19 @@ module QA
@pipeline_mapping = kwargs
end
attr_reader :pipeline_mapping
# Glob pattern limiting which specs scenario can run
#
# This is a class-level DSL method (defined in the singleton class), so `self`
# is already the scenario class when it runs. The previous interpolation used
# `self.class.name`, which always evaluated to "Class" and produced a useless
# error message; interpolating `self` yields the actual scenario class name.
#
# @param pattern [String] glob that must match only `_spec.rb` files
# @raise [ArgumentError] when pattern is not a String ending in "_spec.rb"
# @return [String] the stored pattern
def spec_glob_pattern(pattern)
  unless pattern.is_a?(String) && pattern.end_with?("_spec.rb")
    raise ArgumentError, "Scenario #{self} defines pattern that is not matching only spec files"
  end

  @spec_pattern = pattern
end
attr_reader :pipeline_mapping, :spec_pattern
end
def perform(options, *args)
@ -51,6 +63,7 @@ module QA
Specs::Runner.perform do |specs|
specs.tty = true
specs.tags = self.class.focus
specs.spec_pattern = self.class.spec_pattern
specs.options = args if args.any?
end
end

View File

@ -14,19 +14,13 @@ module QA
pipeline_mappings test_on_cng: %w[cng-instance],
test_on_gdk: %w[gdk-instance gdk-instance-gitaly-transactions gdk-instance-ff-inverse],
test_on_omnibus: %w[instance git-sha256-repositories],
test_on_omnibus_nightly: %w[
instance-image-slow-network
nplus1-instance-image
relative-url
decomposition-single-db
decomposition-multiple-db
],
test_on_omnibus: %w[
instance
praefect
gitaly-transactions
gitaly-reftables-backend
git-sha256-repositories
]
end
end

View File

@ -0,0 +1,15 @@
# frozen_string_literal: true
module QA
module Scenario
module Test
module Instance
class Create < All
spec_glob_pattern "qa/specs/features/**/3_create/**/*_spec.rb"
pipeline_mappings test_on_omnibus: %w[praefect gitaly-transactions gitaly-reftables-backend]
end
end
end
end
end

View File

@ -7,7 +7,7 @@ require 'tempfile'
module QA
module Specs
class Runner < Scenario::Template
attr_accessor :tty, :tags, :options
attr_accessor :tty, :tags, :spec_pattern, :options
RegexMismatchError = Class.new(StandardError)
@ -16,6 +16,8 @@ module QA
DEFAULT_STD_ARGS = [$stderr, $stdout].freeze
DEFAULT_SKIPPED_TAGS = %w[~orchestrated].freeze
ABSOLUTE_PATH_PREFIX_PATTERN = %r{^(\./|#{Runtime::Path.qa_root})?}
def initialize
@tty = false
@tags = []
@ -64,8 +66,28 @@ module QA
@spec_paths ||= options.select { |opt| opt.include?(DEFAULT_TEST_PATH) }
end
# Spec files matched by the scenario's custom spec pattern
#
# @return [Array<String>] matched spec paths, or [] when no pattern is defined
def scenario_specs
  spec_pattern ? (@scenario_specs ||= Dir.glob(spec_pattern)) : []
end
# Paths handed to rspec, filtered down by the scenario spec pattern when one is set
#
# @return [Array<String>]
def rspec_paths
  return @rspec_paths if @rspec_paths

  candidate_paths = custom_spec_paths.presence || DEFAULT_TEST_PATH_ARGS
  return @rspec_paths = candidate_paths unless spec_pattern

  @rspec_paths = candidate_paths.flat_map do |candidate|
    if File.file?(candidate) && File.fnmatch(spec_pattern, candidate)
      candidate
    elsif File.directory?(candidate)
      specs_in_path(candidate)
    else
      []
    end
  end
end
# Scenario specs that live under the given path
#
# @param path [String]
# @return [Array<String>]
def specs_in_path(path)
  # build the anchored regex once instead of per element
  path_regex = %r{#{ABSOLUTE_PATH_PREFIX_PATTERN}#{path}}
  scenario_specs.select { |spec| spec.match?(path_regex) }
end
def build_initial_args
@ -79,6 +101,13 @@ module QA
def run_rspec(args)
full_arg_list = [*args, "--", *rspec_paths]
if rspec_paths.empty?
Runtime::Logger.error("Scenario #{Runtime::Scenario.klass} has no specs to run!")
Runtime::Logger.error("Check if all tests have been filtered out by custom spec pattern!")
abort
end
if Runtime::Scenario.attributes[:count_examples_only]
count_examples_only(full_arg_list)
elsif Runtime::Scenario.attributes[:test_metadata_only]

View File

@ -16,17 +16,20 @@ module QA
].freeze
def self.fetch(qa_tests = nil)
new.fetch(qa_tests)
new(qa_tests).fetch
end
def initialize(qa_tests = nil)
@qa_tests = qa_tests
end
# Return list of executable examples for each scenario class
#
# @param qa_tests [Array<String>]
# @return [Hash<Class, Array<Hash>>]
def fetch(qa_tests = nil)
def fetch
logger.info("Fetching executable examples for all scenario classes")
(all_scenario_classes - ignored_scenarios).each_with_object({}) do |scenario, scenarios|
examples = fetch_examples(scenario, qa_tests)
examples = fetch_examples(scenario)
skipped_examples = examples.select { |example| example[:status] == "pending" }
logger.info(" detected examples, total: #{examples.size}, skipped: #{skipped_examples.size}")
@ -36,6 +39,8 @@ module QA
private
attr_reader :qa_tests
# Ignored scenarios classes
#
# @return [Array<Class>]
@ -47,7 +52,7 @@ module QA
# Get all defined scenarios
#
# @return [Array<String>]
# @return [Array<Class>]
def all_scenario_classes
foss_scenarios = scenario_classes(QA::Scenario::Test)
return foss_scenarios unless QA.const_defined?("QA::EE")
@ -71,14 +76,45 @@ module QA
# Fetch list of executable examples for scenario class
#
# @param klass [Class]
# @param qa_tests [Array<String>]
# @return [Array<Hash>]
def fetch_examples(klass, tests)
def fetch_examples(klass)
logger.info("Fetching examples for scenario '#{klass}'")
Support::ExampleData.fetch(klass.focus, tests, logger: logger).map do |example|
spec_pattern = klass.spec_pattern
scenario_tests = scenario_class_tests(spec_pattern)
return [] if spec_pattern && scenario_tests.empty? # no executable specs for this scenario class
Support::ExampleData.fetch(klass.focus, scenario_tests, logger: logger).map do |example|
example.slice(:id, :status)
end
end
# Specs for particular scenario class if it defines specific spec pattern
#
# @param pattern [String, nil]
# @return [Array]
def scenario_class_tests(pattern)
return qa_tests if pattern.nil?
scenario_tests = Dir.glob(pattern)
return scenario_tests if qa_tests.nil? || qa_tests.empty?
qa_tests.flat_map do |path|
next path if File.file?(path) && File.fnmatch(pattern, path)
next specs_in_path(path, scenario_tests) if File.directory?(path)
[]
end
end
# List of specs within a path
#
# @param path [String]
# @param scenario_specs [Array]
# @return [Array]
def specs_in_path(path, scenario_specs)
scenario_specs.select { |spec| spec.match?(%r{#{Specs::Runner::ABSOLUTE_PATH_PREFIX_PATTERN}#{path}}) }
end
end
end
end

View File

@ -1,7 +1,9 @@
# frozen_string_literal: true
RSpec.describe QA::Tools::Ci::ScenarioExamples do
let(:runnable_specs) { described_class.fetch(tests) }
subject(:example_fetcher) { described_class.new(tests) }
let(:runnable_specs) { example_fetcher.fetch }
let(:tests) { nil }
let(:examples) { [] }
@ -11,6 +13,73 @@ RSpec.describe QA::Tools::Ci::ScenarioExamples do
allow(QA::Support::ExampleData).to receive(:fetch).and_return(examples)
end
context "with custom spec pattern in scenario class" do
let(:specs) { ["specs/feature/fake_spec.rb"] }
let(:scenario_class) do
Class.new(QA::Scenario::Template) do
spec_glob_pattern "specs/feature/*_spec.rb"
end
end
before do
allow(Dir).to receive(:glob).and_return(specs)
allow(example_fetcher).to receive(:all_scenario_classes).and_return([scenario_class])
end
context "without specific specs" do
it "uses pattern defined in scenario class" do
example_fetcher.fetch
expect(QA::Support::ExampleData).to have_received(:fetch).with([], specs, logger: kind_of(Logger))
end
end
context "with specific tests" do
let(:tests) { ["specs/feature/fake_spec.rb", "specs/ee/feature/fake_spec.rb"] }
before do
tests.each { |spec| allow(File).to receive(:file?).with(spec).and_return(true) }
end
it "uses only tests matching pattern in scenario class" do
example_fetcher.fetch
expect(QA::Support::ExampleData).to have_received(:fetch).with([], specs, logger: kind_of(Logger))
end
end
context "with folder in specific test list" do
let(:tests) { ["specs/feature", "specs/ee/feature"] }
before do
tests.each do |spec|
allow(File).to receive(:file?).with(spec).and_return(false)
allow(File).to receive(:directory?).with(spec).and_return(true)
end
end
it "uses only tests matching pattern within folder" do
example_fetcher.fetch
expect(QA::Support::ExampleData).to have_received(:fetch).with([], specs, logger: kind_of(Logger))
end
end
context "with specific tests not matching custom pattern" do
let(:tests) { ["specs/ee/feature/fake_spec.rb"] }
before do
tests.each { |spec| allow(File).to receive(:file?).with(spec).and_return(true) }
end
it "returns empty list" do
expect(runnable_specs).to eq(scenario_class => [])
expect(QA::Support::ExampleData).not_to have_received(:fetch)
end
end
end
context "with rspec returning runnable specs" do
let(:examples) do
[

View File

@ -31,6 +31,7 @@ namespace :ci do
diff = mr_diff
qa_changes = QA::Tools::Ci::QaChanges.new(diff)
tests = []
if diff.empty?
logger.info("No changed file diff provided, full test suite will be executed")
@ -48,7 +49,7 @@ namespace :ci do
feature_flags_changes = QA::Tools::Ci::FfChanges.new(diff).fetch
# on run-all label or framework changes do not infer specific tests
run_all_tests = run_all_label_present || qa_changes.framework_changes? || !feature_flags_changes.nil?
tests = run_all_tests ? [] : qa_changes.qa_tests
tests = qa_changes.qa_tests unless run_all_tests
if run_all_label_present
logger.info("Merge request has pipeline:run-all-e2e label, full test suite will be executed")

View File

@ -5,7 +5,7 @@ require 'spec_helper'
RSpec.describe 'Rendering json:table code block in markdown', :js, feature_category: :markdown do
let_it_be(:project) { create(:project, :public) }
it 'creates table correctly' do
it 'creates regular JSON table correctly' do
description = <<~JSONTABLE
Hello world!
@ -37,4 +37,38 @@ RSpec.describe 'Rendering json:table code block in markdown', :js, feature_categ
expect(data).to eql(%w[11 22 211 222])
end
end
it 'creates markdown JSON table correctly' do
description = <<~JSONTABLE
Hello world!
```json:table
{
"fields" : [
{"key": "a", "label": "AA"},
{"key": "b", "label": "BB"}
],
"items" : [
{"a": "11", "b": "22"},
{"a": "211", "b": "222"}
],
"markdown": true
}
```
JSONTABLE
issue = create(:issue, project: project, description: description)
visit project_issue_path(project, issue)
wait_for_requests
within ".js-json-table table" do
headers = all("thead th").collect { |column| column.text.strip }
data = all("tbody td").collect { |column| column.text.strip }
expect(headers).to eql(%w[AA BB])
expect(data).to eql(%w[11 22 211 222])
end
end
end

View File

@ -1,119 +1,248 @@
import { nextTick } from 'vue';
import { renderJSONTable } from '~/behaviors/markdown/render_json_table';
import { renderJSONTable, renderJSONTableHTML } from '~/behaviors/markdown/render_json_table';
describe('behaviors/markdown/render_json_table', () => {
let element;
const TEST_DATA = {
fields: [
{ label: 'Field 1', key: 'a' },
{ label: 'F 2', key: 'b' },
{ label: 'F 3', key: 'c' },
],
items: [
{
a: '1',
b: 'b',
c: 'c',
},
{
a: '2',
b: 'd',
c: 'e',
},
],
};
const TEST_LABELS = TEST_DATA.fields.map((x) => x.label);
const tableAsData = (table) => ({
head: Array.from(table.querySelectorAll('thead th')).map((td) => td.textContent.trim()),
body: Array.from(table.querySelectorAll('tbody > tr')).map((tr) =>
Array.from(tr.querySelectorAll('td')).map((x) => x.textContent),
Array.from(tr.querySelectorAll('td')).map((x) => x.innerHTML),
),
});
const createTestSubject = async (json) => {
if (element) {
throw new Error('element has already been initialized');
}
const parent = document.createElement('div');
const pre = document.createElement('pre');
pre.textContent = json;
parent.appendChild(pre);
document.body.appendChild(parent);
renderJSONTable([parent]);
element = parent;
jest.runAllTimers();
await nextTick();
};
const findPres = () => document.querySelectorAll('pre');
const findTables = () => document.querySelectorAll('table');
const findAlerts = () => document.querySelectorAll('.gl-alert');
const findInputs = () => document.querySelectorAll('.gl-form-input');
const findCaption = () => document.querySelector('caption');
const findJsonTables = () => document.querySelectorAll('.js-json-table');
afterEach(() => {
document.body.innerHTML = '';
element = null;
});
describe('default', () => {
beforeEach(async () => {
await createTestSubject(JSON.stringify(TEST_DATA, null, 2));
});
describe('standard JSON table', () => {
const TEST_DATA = {
fields: [
{ label: 'Field 1', key: 'a' },
{ label: 'F 2', key: 'b' },
{ label: 'F 3', key: 'c' },
],
items: [
{
a: '1',
b: 'b',
c: 'c',
},
{
a: '2',
b: 'd',
c: 'e',
},
],
};
const TEST_LABELS = TEST_DATA.fields.map((x) => x.label);
it('removes pre', () => {
expect(findPres()).toHaveLength(0);
});
const createTestSubject = async (json) => {
if (element) {
throw new Error('element has already been initialized');
}
it('replaces pre with table', () => {
const tables = findTables();
const parent = document.createElement('div');
const pre = document.createElement('pre');
expect(tables).toHaveLength(1);
expect(tableAsData(tables[0])).toEqual({
head: TEST_LABELS,
body: [
['1', 'b', 'c'],
['2', 'd', 'e'],
],
pre.textContent = json;
parent.appendChild(pre);
document.body.appendChild(parent);
renderJSONTable([parent]);
element = parent;
jest.runAllTimers();
await nextTick();
};
describe('default', () => {
beforeEach(async () => {
await createTestSubject(JSON.stringify(TEST_DATA, null, 2));
});
it('removes pre', () => {
expect(findPres()).toHaveLength(0);
});
it('replaces pre with table', () => {
const tables = findTables();
const jsonTables = findJsonTables();
expect(tables).toHaveLength(1);
expect(jsonTables).toHaveLength(1);
expect(tableAsData(tables[0])).toEqual({
head: TEST_LABELS,
body: [
['1', 'b', 'c'],
['2', 'd', 'e'],
],
});
});
it('does not show filter', () => {
expect(findInputs()).toHaveLength(0);
});
});
it('does not show filter', () => {
expect(findInputs()).toHaveLength(0);
describe('with invalid json', () => {
beforeEach(() => {
createTestSubject('funky but not json');
});
it('preserves pre', () => {
expect(findPres()).toHaveLength(1);
});
it('shows alert', () => {
const alerts = findAlerts();
expect(alerts).toHaveLength(1);
expect(alerts[0].textContent).toMatchInterpolatedText('Unable to parse JSON');
});
});
describe('with filter set', () => {
beforeEach(() => {
createTestSubject(JSON.stringify({ ...TEST_DATA, filter: true }));
});
it('shows filter', () => {
expect(findInputs()).toHaveLength(1);
});
});
});
describe('with invalid json', () => {
beforeEach(() => {
createTestSubject('funky but not json');
describe('markdown JSON table', () => {
const TEST_MARKDOWN_DATA = `
<table data-table-fields='[{"key":"starts_at","label":"Date \\u003c \\u0026 \\u003e","sortable":false},{"key":"url","label":"URL"}]' data-table-filter="false" data-table-markdown="true">
<caption><p>Markdown <em>enabled</em> table</p></caption>
<thead>
<tr>
<th>Date &lt; &amp; &gt;</th>
<th>URL</th>
</tr>
</thead>
<tbody>
<tr>
<td><em>2024-10-07</em></td>
<td></td>
</tr>
<tr>
<td></td>
<td><a href="https://example.com/page2.html">https://example.com/page2.html</a></td>
</tr>
</tbody>
</table>
`;
const TEST_MARKDOWN_INVALID_DATA = `
<table data-table-fields='[{"key""starts_at","label":"Date"}]' data-table-filter="false" data-table-markdown="true">
<caption><p>Markdown <em>enabled</em> table</p></caption>
<thead>
<tr><th>Date</th></tr>
</thead>
<tbody>
<tr><td><em>2024-10-07</em></td></tr>
</tbody>
</table>
`;
const TEST_MARKDOWN_FILTERABLE_DATA = `
<table data-table-fields='[{"key":"starts_at","label":"Date"}]' data-table-filter="true" data-table-markdown="true">
<caption>foo</caption>
<thead>
<tr><th>Date</th></tr>
</thead>
<tbody>
<tr><td>bar</td></tr>
</tbody>
</table>
`;
const createTestSubject = async (html) => {
if (element) {
throw new Error('element has already been initialized');
}
const parent = document.createElement('div');
parent.innerHTML = html;
document.body.appendChild(parent);
renderJSONTableHTML([parent.firstElementChild]);
element = parent;
jest.runAllTimers();
await nextTick();
};
describe('default', () => {
beforeEach(async () => {
await createTestSubject(TEST_MARKDOWN_DATA);
});
it('handles existing table with embedded HTML', () => {
const tables = findTables();
const jsonTables = findJsonTables();
expect(tables).toHaveLength(1);
expect(jsonTables).toHaveLength(1);
expect(tableAsData(tables[0])).toEqual({
head: ['Date < & >', 'URL'],
body: [
['<div><em>2024-10-07</em></div>', '<div></div>'],
[
'<div></div>',
'<div><a href="https://example.com/page2.html">https://example.com/page2.html</a></div>',
],
],
});
});
it('caption is allowed HTML', () => {
const caption = findCaption().innerHTML;
expect(caption).toEqual('<small>Markdown <em>enabled</em> table</small>');
});
it('does not show filter', () => {
expect(findInputs()).toHaveLength(0);
});
});
it('preserves pre', () => {
expect(findPres()).toHaveLength(1);
describe('with invalid data-table-fields json', () => {
beforeEach(() => {
createTestSubject(TEST_MARKDOWN_INVALID_DATA);
});
it('shows alert', () => {
const alerts = findAlerts();
expect(alerts).toHaveLength(1);
expect(alerts[0].textContent).toMatchInterpolatedText('Unable to parse JSON');
});
});
it('shows alert', () => {
const alerts = findAlerts();
describe('with filter set', () => {
beforeEach(() => {
createTestSubject(TEST_MARKDOWN_FILTERABLE_DATA);
});
expect(alerts).toHaveLength(1);
expect(alerts[0].textContent).toMatchInterpolatedText('Unable to parse JSON');
});
});
describe('with filter set', () => {
beforeEach(() => {
createTestSubject(JSON.stringify({ ...TEST_DATA, filter: true }));
});
it('shows filter', () => {
expect(findInputs()).toHaveLength(1);
it('shows filter', () => {
expect(findInputs()).toHaveLength(1);
});
});
});
});

View File

@ -42,6 +42,7 @@ import MergeRequestsListApp from '~/merge_requests/list/components/merge_request
import { BRANCH_LIST_REFRESH_INTERVAL } from '~/merge_requests/list/constants';
import getMergeRequestsQuery from 'ee_else_ce/merge_requests/list/queries/project/get_merge_requests.query.graphql';
import getMergeRequestsCountsQuery from 'ee_else_ce/merge_requests/list/queries/project/get_merge_requests_counts.query.graphql';
import getMergeRequestsApprovalsQuery from 'ee_else_ce/merge_requests/list/queries/group/get_merge_requests_approvals.query.graphql';
import IssuableList from '~/vue_shared/issuable/list/components/issuable_list_root.vue';
import MergeRequestReviewers from '~/issuable/components/merge_request_reviewers.vue';
import issuableEventHub from '~/issues/list/eventhub';
@ -65,10 +66,28 @@ function createComponent({
} = {}) {
getQueryResponseMock = jest.fn().mockResolvedValue(response);
getCountsQueryResponseMock = jest.fn().mockResolvedValue(getCountsQueryResponse);
const apolloProvider = createMockApollo([
[getMergeRequestsCountsQuery, getCountsQueryResponseMock],
[getMergeRequestsQuery, getQueryResponseMock],
]);
const getApprovalsQueryResponseMock = jest.fn().mockResolvedValue(response);
const apolloProvider = createMockApollo(
[
[getMergeRequestsCountsQuery, getCountsQueryResponseMock],
[getMergeRequestsQuery, getQueryResponseMock],
[getMergeRequestsApprovalsQuery, getApprovalsQueryResponseMock],
],
{},
{
typePolicies: {
Query: {
fields: {
project: { merge: true },
},
},
MergeRequestConnection: {
merge: true,
},
},
},
);
router = new VueRouter({ mode: 'history' });
router.push = jest.fn();
@ -93,6 +112,7 @@ function createComponent({
defaultBranch: 'main',
getMergeRequestsCountsQuery,
getMergeRequestsQuery,
getMergeRequestsApprovalsQuery,
...provide,
},
apolloProvider,
@ -147,7 +167,7 @@ describe('Merge requests list app', () => {
describe('fetching branches', () => {
const apiVersion = 1;
const projectId = 2;
const projectId = 1;
const fullPath = 'gitlab-org/gitlab';
const allBranchesPath = `/api/${apiVersion}/projects/${encodeURIComponent(fullPath)}/repository/branches`;
const sourceBranchPath = `/-/autocomplete/merge_request_source_branches.json?project_id=${projectId}`;
@ -205,6 +225,10 @@ describe('Merge requests list app', () => {
beforeEach(() => {
axiosMock.resetHistory();
const initialTime = new Date(2025, 0, 1, 12, 0, 0).getTime();
jest.useFakeTimers({ legacyFakeTimers: false });
jest.setSystemTime(initialTime);
createComponent();
return waitForPromises();

View File

@ -1,7 +1,7 @@
export const getQueryResponse = {
data: {
namespace: {
id: '1',
id: 1,
__typename: 'Project',
mergeRequests: {
pageInfo: {

View File

@ -49,6 +49,9 @@ exports[`PypiInstallation renders all the messages 1`] = `
data-testid="base-dropdown-menu"
id="reference-1"
>
<div
class="gl-new-dropdown-arrow"
/>
<div
class="gl-new-dropdown-inner"
>

View File

@ -186,4 +186,35 @@ RSpec.describe GraphqlTriggers, feature_category: :shared do
described_class.issuable_todo_updated(issuable)
end
end
describe '.user_merge_request_updated' do
let_it_be(:user) { create(:user) }
let_it_be(:merge_request) { create(:merge_request) }
it 'triggers the user_merge_request_updated subscription' do
expect(GitlabSchema.subscriptions).to receive(:trigger).with(
:user_merge_request_updated,
{ user_id: user.to_gid },
merge_request
).and_call_original
described_class.user_merge_request_updated(user, merge_request)
end
describe 'when merge_request_dashboard_realtime is disabled' do
before do
stub_feature_flags(merge_request_dashboard_realtime: false)
end
it 'does not trigger the user_merge_request_updated subscription' do
expect(GitlabSchema.subscriptions).not_to receive(:trigger).with(
:user_merge_request_updated,
{ user_id: user.id },
merge_request
).and_call_original
described_class.user_merge_request_updated(user, merge_request)
end
end
end
end

View File

@ -0,0 +1,76 @@
# frozen_string_literal: true
require 'spec_helper'
# Specs for the GraphQL subscription that notifies a user when one of their
# merge requests is updated.
RSpec.describe Subscriptions::User::MergeRequestUpdated, feature_category: :code_review_workflow do
include GraphqlHelpers
# The subscription takes a single :user_id argument and resolves to a merge request.
it { expect(described_class).to have_graphql_arguments(:user_id) }
it { expect(described_class.payload_type).to eq(Types::MergeRequestType) }
describe '#resolve' do
let_it_be(:unauthorized_user) { create(:user) }
let_it_be(:merge_request) { create(:merge_request) }
# By default subscribe as the MR author, who is authorized to receive updates.
let(:current_user) { merge_request.author }
let(:user_id) { merge_request.author.to_gid }
subject(:subscription) { resolver.resolve_with_support(user_id: user_id) }
# Initial subscription: establishing the subscription returns no payload.
context 'for initial subscription' do
let(:resolver) { resolver_instance(described_class, ctx: query_context, subscription_update: false) }
it 'returns nil' do
expect(subscription).to be_nil
end
# Subscribing on behalf of a different user is rejected outright.
context 'when user is unauthorized' do
let(:current_user) { unauthorized_user }
it 'raises an exception' do
expect { subscription }.to raise_error(GraphQL::ExecutionError)
end
end
context 'when user does not exist' do
let(:user_id) { GlobalID.parse("gid://gitlab/User/#{non_existing_record_id}") }
it 'raises an exception' do
expect { subscription }.to raise_error(GraphQL::ExecutionError)
end
end
end
# Subsequent updates pushed through an already-established subscription.
context 'with subscription updates' do
let(:resolver) do
resolver_instance(described_class, obj: merge_request, ctx: query_context, subscription_update: true)
end
it 'returns the resolved object' do
expect(subscription).to eq(merge_request)
end
# Losing read access after subscribing silently unsubscribes instead of erroring.
context 'when user can not read the merge request' do
before do
allow(Ability).to receive(:allowed?)
.with(current_user, :read_merge_request, merge_request)
.and_return(false)
end
it 'unsubscribes the user' do
# GraphQL::Execution::Skip is returned when unsubscribed
expect(subscription).to be_an(GraphQL::Execution::Skip)
end
end
context 'when user is unauthorized' do
let(:current_user) { unauthorized_user }
it 'unsubscribes the user' do
# GraphQL::Execution::Skip is returned when unsubscribed
expect(subscription).to be_an(GraphQL::Execution::Skip)
end
end
end
end
end

View File

@ -18,6 +18,7 @@ RSpec.describe GitlabSchema.types['Subscription'], feature_category: :subscripti
merge_request_diff_generated
work_item_updated
issuable_todo_updated
user_merge_request_updated
]
expect(described_class).to include_graphql_fields(*expected_fields)

View File

@ -0,0 +1,159 @@
# frozen_string_literal: true
require 'spec_helper'
# Specs for the Banzai filter that converts `json:table` code blocks with
# "markdown": true into HTML tables carrying data-table-* attributes.
RSpec.describe Banzai::Filter::JsonTableFilter, feature_category: :markdown do
include FilterSpecHelper
# Input: JSON table with explicit field definitions, filter, caption and
# markdown rendering enabled (heredoc content is the filter's raw input).
let_it_be(:table_with_fields) do
<<~TEXT
<pre data-canonical-lang="json" data-lang-params="table">
<code>
{
"fields": [
{
"key": "starts_at",
"label": "Date < & >",
"sortable": true
},
{
"key": "url",
"label": "URL"
}
],
"items": [
{
"starts_at": "_2024-10-07_"
},
{
"url": "https://example.com/page2.html"
}
],
"filter": true,
"caption": "Markdown enabled table",
"markdown": true
}
</code>
</pre>
TEXT
end
# Expected output for table_with_fields: labels are HTML-escaped, markdown in
# cell values ("_2024-10-07_") is rendered, fields serialized into
# data-table-fields, filter/markdown flags become data attributes.
let_it_be(:table_with_fields_html) do
<<~HTML
<div><table data-table-fields='[{"key":"starts_at","label":"Date \\u003c \\u0026 \\u003e","sortable":true},{"key":"url","label":"URL"}]' data-table-filter="true" data-table-markdown="true">
<caption><p>Markdown enabled table</p></caption>
<thead>
<tr>
<th>Date &lt; &amp; &gt;</th>
<th>URL</th>
</tr>
</thead>
<tbody>
<tr>
<td><em>2024-10-07</em></td>
<td></td>
</tr>
<tr>
<td></td>
<td><a href="https://example.com/page2.html">https://example.com/page2.html</a></td>
</tr>
</tbody>
</table></div>
HTML
end
# Input without a "fields" array: field keys are derived from item keys.
let_it_be(:table_without_fields) do
<<~TEXT
<pre data-canonical-lang="json" data-lang-params="table">
<code>
{
"items": [
{
"starts_at": "_2024-10-07_",
"url": "https://example.com/page2.html"
}
],
"markdown": true
}
</code>
</pre>
TEXT
end
# Expected output for table_without_fields: headers fall back to raw keys.
let_it_be(:table_without_fields_html) do
<<~HTML
<div><table data-table-fields='[{"key":"starts_at"},{"key":"url"}]' data-table-markdown="true">
<thead>
<tr>
<th>starts_at</th>
<th>url</th>
</tr>
</thead>
<tbody>
<tr>
<td><em>2024-10-07</em></td>
<td><a href="https://example.com/page2.html">https://example.com/page2.html</a></td>
</tr>
</tbody>
</table></div>
HTML
end
# Without "markdown": true the filter must leave the block untouched.
let_it_be(:table_no_markdown) do
<<~TEXT
<pre data-canonical-lang="json" data-lang-params="table">
<code>
{
"items": [
{
"starts_at": "_2024-10-07_",
"url": "https://example.com/page2.html"
}
]
}
</code>
</pre>
TEXT
end
# Malformed JSON (stray brace) must also pass through unchanged.
let_it_be(:table_invalid_json) do
<<~TEXT
<pre data-canonical-lang="json" data-lang-params="table">
<code>
{
{
"starts_at": "_2024-10-07_",
"url": "https://example.com/page2.html"
}
],
"markdown": true
}
</code>
</pre>
TEXT
end
context 'when fields are provided' do
it 'generates the correct HTML' do
expect(filter(table_with_fields).to_html).to eq table_with_fields_html
end
end
context 'when fields are not provided' do
it 'generates the correct HTML' do
expect(filter(table_without_fields).to_html).to eq table_without_fields_html
end
end
context 'when markdown is not enabled' do
it 'does not change the HTML' do
expect(filter(table_no_markdown).to_html).to eq table_no_markdown
end
end
context 'when json is invalid' do
it 'does not change the HTML' do
expect(filter(table_invalid_json).to_html).to eq table_invalid_json
end
end
end

View File

@ -40,6 +40,19 @@ RSpec.describe Banzai::Filter::SanitizationFilter, feature_category: :markdown d
expect(filter(act).to_html).to eq %q(<span>def</span>)
end
it 'allows `data-table-*` attributes on `table` elements' do
html = <<-HTML
<table data-table-fields="foo" data-table-filter="true" data-table-markdown="true">
</table>
HTML
doc = filter(html)
expect(doc.at_css('table')['data-table-fields']).to eq 'foo'
expect(doc.at_css('table')['data-table-filter']).to eq 'true'
expect(doc.at_css('table')['data-table-markdown']).to eq 'true'
end
it 'allows `text-align` property in `style` attribute on table elements' do
html = <<~HTML
<table>

View File

@ -0,0 +1,96 @@
# frozen_string_literal: true
require 'spec_helper'
# Specs for the batched background migration that copies group-scoped rows
# out of system_access_microsoft_applications into the new
# system_access_group_microsoft_applications table.
RSpec.describe Gitlab::BackgroundMigration::SplitMicrosoftApplicationsTable, feature_category: :system_access do
let(:organizations) { table(:organizations) }
let(:namespaces) { table(:namespaces) }
let(:system_access_microsoft_applications) { table(:system_access_microsoft_applications) }
let(:system_access_group_microsoft_applications) { table(:system_access_group_microsoft_applications) }
let(:organization) { organizations.create!(name: 'organization', path: 'organization') }
let(:group) do
namespaces.create!(name: 'test-group', path: 'test-group', type: 'Group', organization_id: organization.id)
end
# Arguments for the batched migration job: one batch covering every row,
# sub-batched in pairs, with no pause between sub-batches.
let(:migration_attrs) do
{
start_id: system_access_microsoft_applications.minimum(:id),
end_id: system_access_microsoft_applications.maximum(:id),
batch_table: :system_access_microsoft_applications,
batch_column: :id,
sub_batch_size: 2,
pause_ms: 0,
connection: ApplicationRecord.connection
}
end
# Common column values; secrets are placeholders, not real encrypted data.
let(:base_app_attributes) do
{
enabled: true,
tenant_xid: 'abc123',
client_xid: 'def456',
login_endpoint: 'http://microsoft-login.test',
graph_endpoint: 'http://microsoft-graph.test',
encrypted_client_secret: 'fake-data-not-real',
encrypted_client_secret_iv: 'fake-data-not-real-2'
}
end
# One group-scoped app (should migrate) and one instance-wide app with no
# namespace_id (should be skipped).
let!(:group_app) do
system_access_microsoft_applications.create!(
base_app_attributes.merge(namespace_id: group.id)
)
end
let!(:instance_app) do
system_access_microsoft_applications.create!(base_app_attributes)
end
let(:instance) { described_class.new(**migration_attrs) }
describe '#perform' do
subject(:perform) { instance.perform }
it 'transfers all attributes of microsoft applications' do
perform
expect(system_access_group_microsoft_applications.count).to eq(1)
record = system_access_group_microsoft_applications.first
%w[enabled tenant_xid client_xid login_endpoint graph_endpoint
encrypted_client_secret encrypted_client_secret_iv].each do |field|
expect(record[field]).to eq(group_app[field])
end
# namespace_id maps to group_id; temp_source_id keeps a back-reference
# to the source row, and timestamps are carried over.
expect(record.group_id).to eq(group_app.namespace_id)
expect(record.temp_source_id).to eq(group_app.id)
expect(record.created_at).to be_within(1.second).of(group_app.created_at)
expect(record.updated_at).to be_within(1.second).of(group_app.updated_at)
end
it 'does not migrate apps without namespace_id' do
perform
relation = system_access_group_microsoft_applications.where(temp_source_id: instance_app.id)
expect(relation.count).to eq(0)
end
# A pre-existing row for the same group must win: the migration neither
# raises nor overwrites it.
it 'handles conflicts on group_id gracefully' do
system_access_group_microsoft_applications.create!(
base_app_attributes.merge(
group_id: group.id,
tenant_xid: 'zxc123'
)
)
expect { perform }.not_to raise_error
expect(system_access_group_microsoft_applications.count).to eq(1)
record = system_access_group_microsoft_applications.first
expect(record.tenant_xid).to eq('zxc123')
end
end
end

View File

@ -0,0 +1,27 @@
# frozen_string_literal: true
require 'spec_helper'
require_migration!
# Spec for the post-deploy migration that enqueues the
# SplitMicrosoftApplicationsTable batched background migration.
RSpec.describe QueueSplitMicrosoftApplicationsTable, migration: :gitlab_main, feature_category: :system_access do
let!(:batched_migration) { described_class::MIGRATION }
it 'schedules a new batched migration' do
# reversible_migration verifies both directions: nothing scheduled before
# (and after rollback), scheduled with the expected parameters after up.
reversible_migration do |migration|
migration.before -> {
expect(batched_migration).not_to have_scheduled_batched_migration
}
migration.after -> {
expect(batched_migration).to have_scheduled_batched_migration(
gitlab_schema: :gitlab_main,
table_name: :system_access_microsoft_applications,
column_name: :id,
interval: described_class::DELAY_INTERVAL,
batch_size: described_class::BATCH_SIZE,
sub_batch_size: described_class::SUB_BATCH_SIZE
)
}
end
end
end

View File

@ -130,6 +130,12 @@ RSpec.describe MergeRequests::ApprovalService, feature_category: :code_review_wo
it_behaves_like 'triggers GraphQL subscription mergeRequestApprovalStateUpdated' do
let(:action) { service.execute(merge_request) }
end
it 'triggers GraphQL subscription userMergeRequestUpdated' do
expect(GraphqlTriggers).to receive(:user_merge_request_updated).with(user, merge_request)
service.execute(merge_request)
end
end
end
end

View File

@ -119,6 +119,12 @@ RSpec.describe MergeRequests::HandleAssigneesChangeService, feature_category: :c
execute
end
it 'triggers GraphQL subscription userMergeRequestUpdated' do
expect(GraphqlTriggers).to receive(:user_merge_request_updated).with(assignee, merge_request)
execute
end
context 'when execute_hooks option is set to true' do
let(:options) { { 'execute_hooks' => true } }

View File

@ -128,6 +128,12 @@ RSpec.describe MergeRequests::RemoveApprovalService, feature_category: :code_rev
it_behaves_like 'triggers GraphQL subscription mergeRequestApprovalStateUpdated' do
let(:action) { execute! }
end
it 'triggers GraphQL subscription userMergeRequestUpdated' do
expect(GraphqlTriggers).to receive(:user_merge_request_updated).with(user, merge_request)
execute!
end
end
context 'with a user who has not approved' do

View File

@ -78,6 +78,12 @@ RSpec.describe MergeRequests::UpdateReviewerStateService, feature_category: :cod
let(:action) { result }
end
it 'triggers GraphQL subscription userMergeRequestUpdated' do
expect(GraphqlTriggers).to receive(:user_merge_request_updated).with(current_user, merge_request)
result
end
context 'when merge_request_dashboard feature flag is enabled' do
before do
stub_feature_flags(merge_request_dashboard: true)

View File

@ -726,6 +726,13 @@ RSpec.describe MergeRequests::UpdateService, :mailer, feature_category: :code_re
let(:action) { update_merge_request({ reviewer_ids: [user2.id] }) }
end
it 'triggers GraphQL subscription userMergeRequestUpdated' do
expect(GraphqlTriggers).to receive(:user_merge_request_updated).with(user3, merge_request)
expect(GraphqlTriggers).to receive(:user_merge_request_updated).with(user2, merge_request)
update_merge_request(reviewer_ids: [user2.id])
end
describe 'recording the first reviewer assigned at timestamp' do
subject(:metrics) { merge_request.reload.metrics }

View File

@ -1436,10 +1436,10 @@
resolved "https://registry.yarnpkg.com/@gitlab/svgs/-/svgs-3.122.0.tgz#f3d21ff18d60510c4b6983a211dfbc39304a517b"
integrity sha512-2ED6E6aYTpzOv4Tz2zFtPnBP/ZTSR1mNebHYkMfm+kx9u61ILwfVJjaQ3vu73BviToAs9+1bs7RIe3iAtzCB2g==
"@gitlab/ui@107.0.1":
version "107.0.1"
resolved "https://registry.yarnpkg.com/@gitlab/ui/-/ui-107.0.1.tgz#3698ae3d8f7764d5bc9e491bacabbe9b488377ed"
integrity sha512-LFAwVTiap0zg1B6AF9xa6R9PFqEKxwXiErG7lkq6aFQPKz7eH64+DIbLj4qrPp5BgSvyV+9tViYtgVO06MOwHg==
"@gitlab/ui@107.7.1":
version "107.7.1"
resolved "https://registry.yarnpkg.com/@gitlab/ui/-/ui-107.7.1.tgz#8d46342114b35227853b7631837d66c362c9713f"
integrity sha512-WEQILs6Tq3DRVGMmJrlqKy2M8ao/B/V2JAPHoAkV1LsAfDHwbUlLxwPRaO/xgivucqzOZe3jHEFpt5hqb+/iGA==
dependencies:
"@floating-ui/dom" "1.4.3"
echarts "^5.3.2"
@ -6749,10 +6749,10 @@ domexception@^4.0.0:
dependencies:
webidl-conversions "^7.0.0"
dompurify@^3.0.5, dompurify@^3.2.3:
version "3.2.3"
resolved "https://registry.yarnpkg.com/dompurify/-/dompurify-3.2.3.tgz#05dd2175225324daabfca6603055a09b2382a4cd"
integrity sha512-U1U5Hzc2MO0oW3DF+G9qYN0aT7atAou4AgI0XjWz061nyBPbdxkfdhfy5uMgGn6+oLFCfn44ZGbdDqCzVmlOWA==
dompurify@^3.0.5, dompurify@^3.2.4:
version "3.2.4"
resolved "https://registry.yarnpkg.com/dompurify/-/dompurify-3.2.4.tgz#af5a5a11407524431456cf18836c55d13441cd8e"
integrity sha512-ysFSFEDVduQpyhzAob/kkuJjf5zWkZD8/A9ywSp1byueyuCfHamrCBa14/Oc2iiB0e51B+NpxSl5gmzn+Ms/mg==
optionalDependencies:
"@types/trusted-types" "^2.0.7"