Add latest changes from gitlab-org/gitlab@master

This commit is contained in:
GitLab Bot 2024-10-01 12:11:45 +00:00
parent cd3fcd1c22
commit 9bfe01f361
152 changed files with 6788 additions and 937 deletions

View File

@ -1 +1 @@
a37c345ec156988d06821339d64bac81b29d464e
4f63f997dcb7582af0c3ca6e510295c972bf2674

View File

@ -422,7 +422,7 @@ export function setHighlightedRow({ lineCode, event }) {
// This is adding line discussions to the actual lines in the diff tree
// once for parallel and once for inline mode
export function assignDiscussionsToDiff(discussions) {
const targetDiscussions = discussions || useNotes().notes.discussions;
const targetDiscussions = discussions || useNotes().discussions;
const id = window?.location?.hash;
const isNoteLink = id.indexOf('#note') === 0;
const diffPositionByLineCode = getDiffPositionByLineCode(this.diffFiles);
@ -463,7 +463,7 @@ export function toggleLineDiscussions(options) {
}
export function renderFileForDiscussionId(discussionId) {
const discussion = useNotes().notes.discussions.find((d) => d.id === discussionId);
const discussion = useNotes().discussions.find((d) => d.id === discussionId);
if (discussion && discussion.diff_file) {
const file = this.diffFiles.find((f) => f.file_hash === discussion.diff_file.file_hash);

View File

@ -0,0 +1,844 @@
import $ from 'jquery';
import Vue from 'vue';
import { debounce } from 'lodash';
import actionCable from '~/actioncable_consumer';
import Api from '~/api';
import { createAlert, VARIANT_INFO } from '~/alert';
import { EVENT_ISSUABLE_VUE_APP_CHANGE } from '~/issuable/constants';
import { STATUS_CLOSED, STATUS_REOPENED, TYPE_ISSUE } from '~/issues/constants';
import axios from '~/lib/utils/axios_utils';
import { __, sprintf } from '~/locale';
import toast from '~/vue_shared/plugins/global_toast';
import { confidentialWidget } from '~/sidebar/components/confidential/sidebar_confidentiality_widget.vue';
import updateIssueLockMutation from '~/sidebar/queries/update_issue_lock.mutation.graphql';
import updateMergeRequestLockMutation from '~/sidebar/queries/update_merge_request_lock.mutation.graphql';
import loadAwardsHandler from '~/awards_handler';
import { isInMRPage } from '~/lib/utils/common_utils';
import { mergeUrlParams } from '~/lib/utils/url_utility';
import sidebarTimeTrackingEventHub from '~/sidebar/event_hub';
import TaskList from '~/task_list';
import mrWidgetEventHub from '~/vue_merge_request_widget/event_hub';
import { convertToGraphQLId } from '~/graphql_shared/utils';
import { TYPENAME_NOTE } from '~/graphql_shared/constants';
import notesEventHub from '../../event_hub';
import promoteTimelineEvent from '../../graphql/promote_timeline_event.mutation.graphql';
import * as constants from '../../constants';
import * as types from '../../stores/mutation_types';
import * as utils from '../../stores/utils';
// Toggles the "locked" (discussion locked) state of an issue or merge request
// via the matching GraphQL mutation, then stores the resulting flag.
export const updateLockedAttribute = ({ commit, getters }, { locked, fullPath }) => {
  const { iid, targetType } = getters.getNoteableData;
  const isIssue = targetType === TYPE_ISSUE;

  return utils.gqClient
    .mutate({
      mutation: isIssue ? updateIssueLockMutation : updateMergeRequestLockMutation,
      variables: {
        input: {
          projectPath: fullPath,
          iid: String(iid),
          locked,
        },
      },
    })
    .then(({ data }) => {
      // The mutation payload shape differs between issues and MRs.
      const { discussionLocked } = isIssue
        ? data.issueSetLocked.issue
        : data.mergeRequestSetLocked.mergeRequest;
      commit(types.SET_ISSUABLE_LOCK, discussionLocked);
    });
};
// Expands a discussion; when a discussionId is given, first ask the diffs app
// to render the discussion's file so the thread has somewhere to attach.
export const expandDiscussion = ({ commit, dispatch }, data) => {
if (data.discussionId) {
dispatch('diffs/renderFileForDiscussionId', data.discussionId, { root: true });
}
commit(types.EXPAND_DISCUSSION, data);
};
// Collapses a single discussion thread.
export const collapseDiscussion = ({ commit }, data) => commit(types.COLLAPSE_DISCUSSION, data);
// Simple commit wrappers seeding the store with server-provided data.
export const setNotesData = ({ commit }, data) => commit(types.SET_NOTES_DATA, data);
export const setNoteableData = ({ commit }, data) => commit(types.SET_NOTEABLE_DATA, data);
export const setConfidentiality = ({ commit }, data) => commit(types.SET_ISSUE_CONFIDENTIAL, data);
export const setUserData = ({ commit }, data) => commit(types.SET_USER_DATA, data);
export const setLastFetchedAt = ({ commit }, data) => commit(types.SET_LAST_FETCHED_AT, data);
export const setInitialNotes = ({ commit }, discussions) =>
commit(types.ADD_OR_UPDATE_DISCUSSIONS, discussions);
export const setTargetNoteHash = ({ commit }, data) => commit(types.SET_TARGET_NOTE_HASH, data);
export const setNotesFetchedState = ({ commit }, state) =>
commit(types.SET_NOTES_FETCHED_STATE, state);
// Toggles one discussion's expanded state.
export const toggleDiscussion = ({ commit }, data) => commit(types.TOGGLE_DISCUSSION, data);
// Expands every discussion when the current state is collapsed, and vice versa.
export const toggleAllDiscussions = ({ commit, getters }) => {
const expanded = getters.allDiscussionsExpanded;
commit(types.SET_EXPAND_ALL_DISCUSSIONS, !expanded);
};
// Fetches discussions for the current noteable.
// Issues and merge requests use cursor-based batched fetching
// (`fetchDiscussionsBatch`); for MRs the notes filter is forced to
// "all activity" (0) without persisting that choice.
export const fetchDiscussions = (
{ commit, dispatch, getters },
{ path, filter, persistFilter },
) => {
let config =
filter !== undefined
? { params: { notes_filter: filter, persist_filter: persistFilter } }
: null;
if (getters.noteableType === constants.MERGE_REQUEST_NOTEABLE_TYPE) {
config = { params: { notes_filter: 0, persist_filter: false } };
}
if (
getters.noteableType === constants.ISSUE_NOTEABLE_TYPE ||
getters.noteableType === constants.MERGE_REQUEST_NOTEABLE_TYPE
) {
return dispatch('fetchDiscussionsBatch', { path, config, perPage: 20 });
}
// Fallback for other noteable types: single unpaginated request.
return axios.get(path, config).then(({ data }) => {
commit(types.ADD_OR_UPDATE_DISCUSSIONS, data);
commit(types.SET_FETCHING_DISCUSSIONS, false);
dispatch('updateResolvableDiscussionsCounts');
});
};
// Entry point for loading notes: fetches discussions, starts live polling,
// then flips the loading/fetched flags. No-ops while a fetch is in flight.
export const fetchNotes = ({ dispatch, getters }) => {
if (getters.isFetching) return null;
dispatch('setFetchingState', true);
return dispatch('fetchDiscussions', getters.getFetchDiscussionsConfig)
.then(() => dispatch('initPolling'))
.then(() => {
dispatch('setLoadingState', false);
dispatch('setNotesFetchedState', true);
notesEventHub.$emit('fetchedNotesData');
dispatch('setFetchingState', false);
})
.catch(() => {
// Mark as fetched even on failure so the UI leaves the loading state.
dispatch('setLoadingState', false);
dispatch('setNotesFetchedState', true);
createAlert({
message: __('Something went wrong while fetching comments. Please try again.'),
});
});
};
// Subscribes to the noteable's ActionCable notes channel for live updates.
// Guarded so the subscription is created only once per page load.
export const initPolling = ({ state, dispatch, getters, commit }) => {
if (state.isPollingInitialized) {
return;
}
dispatch('setLastFetchedAt', getters.getNotesDataByProp('lastFetchedAt'));
// Debounce bursts of "updated" events into a single refetch.
const debouncedFetchUpdatedNotes = debounce(() => {
dispatch('fetchUpdatedNotes');
}, constants.FETCH_UPDATED_NOTES_DEBOUNCE_TIMEOUT);
actionCable.subscriptions.create(
{
channel: 'Noteable::NotesChannel',
project_id: state.notesData.projectId,
group_id: state.notesData.groupId,
noteable_type: state.notesData.noteableType,
noteable_id: state.notesData.noteableId,
},
{
connected() {
// Catch up on anything missed before the subscription connected.
dispatch('fetchUpdatedNotes');
},
received(data) {
if (data.event === 'updated') {
debouncedFetchUpdatedNotes();
}
},
},
);
commit(types.SET_IS_POLLING_INITIALIZED, true);
};
// Fetches discussions in growing pages using cursor-based pagination. The
// page size grows by 1.5x per request (capped at 100) so long discussion
// lists load progressively.
export const fetchDiscussionsBatch = ({ commit, dispatch }, { path, config, cursor, perPage }) => {
  const params = { ...config?.params, per_page: perPage };

  if (cursor) {
    params.cursor = cursor;
  }

  return axios.get(path, { params }).then(({ data, headers }) => {
    commit(types.ADD_OR_UPDATE_DISCUSSIONS, data);

    if (headers && headers['x-next-page-cursor']) {
      // Deep-copy `params` as well: the previous shallow `{ ...config }`
      // shared the `params` object with the caller, so the `delete`s below
      // silently mutated the caller's config.
      const nextConfig = { ...config, params: { ...config?.params } };

      // Only send the filter once; follow-up pages must not re-persist it.
      if (config?.params?.persist_filter) {
        delete nextConfig.params.notes_filter;
        delete nextConfig.params.persist_filter;
      }

      return dispatch('fetchDiscussionsBatch', {
        path,
        config: nextConfig,
        cursor: headers['x-next-page-cursor'],
        perPage: Math.min(Math.round(perPage * 1.5), 100),
      });
    }

    commit(types.SET_DONE_FETCHING_BATCH_DISCUSSIONS, true);
    commit(types.SET_FETCHING_DISCUSSIONS, false);
    dispatch('updateResolvableDiscussionsCounts');

    return undefined;
  });
};
// Replaces a discussion in the store and returns the stored copy.
export const updateDiscussion = ({ commit, state }, discussion) => {
commit(types.UPDATE_DISCUSSION, discussion);
return utils.findNoteObjectById(state.discussions, discussion.id);
};
// Sort direction for the discussion list; `persist` saves the preference.
export const setDiscussionSortDirection = ({ commit }, { direction, persist = true }) => {
commit(types.SET_DISCUSSIONS_SORT, { direction, persist });
};
// Enables/disables the incident timeline view.
export const setTimelineView = ({ commit }, enabled) => {
commit(types.SET_TIMELINE_VIEW, enabled);
};
export const setSelectedCommentPosition = ({ commit }, position) => {
commit(types.SET_SELECTED_COMMENT_POSITION, position);
};
export const setSelectedCommentPositionHover = ({ commit }, position) => {
commit(types.SET_SELECTED_COMMENT_POSITION_HOVER, position);
};
// Removes a note from the store and keeps the MR widget, discussion counts,
// and (on MR pages) the diff view in sync.
export const removeNote = ({ commit, dispatch, state }, note) => {
// Look up the discussion before the note is deleted from the store.
const discussion = state.discussions.find(({ id }) => id === note.discussion_id);
commit(types.DELETE_NOTE, note);
dispatch('updateMergeRequestWidget');
dispatch('updateResolvableDiscussionsCounts');
if (isInMRPage()) {
dispatch('diffs/removeDiscussionsFromDiff', discussion);
}
};
// Deletes a note on the server, then removes it from the store.
export const deleteNote = ({ dispatch }, note) =>
axios.delete(note.path).then(() => {
dispatch('removeNote', note);
});
// Persists an edited note, then refreshes task-list checkbox handling.
export const updateNote = ({ commit, dispatch }, { endpoint, note }) =>
axios.put(endpoint, note).then(({ data }) => {
commit(types.UPDATE_NOTE, data);
dispatch('startTaskList');
});
// Merges freshly polled notes into the store: updates known notes, appends
// replies to known discussions, and refetches discussions (debounced) when a
// diff note arrives for a discussion that has not been loaded yet.
export const updateOrCreateNotes = ({ commit, state, getters, dispatch }, notes) => {
const { notesById } = getters;
// `isFetching` is either falsy (no refetch pending), `true` (just started),
// or a timeout id for an in-flight debounced refetch.
const debouncedFetchDiscussions = (isFetching) => {
if (!isFetching) {
commit(types.SET_FETCHING_DISCUSSIONS, true);
dispatch('fetchDiscussions', { path: state.notesData.discussionsPath });
} else {
if (isFetching !== true) {
clearTimeout(state.currentlyFetchingDiscussions);
}
// Store the new timeout id so the next call can cancel/restart it.
commit(
types.SET_FETCHING_DISCUSSIONS,
setTimeout(() => {
dispatch('fetchDiscussions', { path: state.notesData.discussionsPath });
}, constants.DISCUSSION_FETCH_TIMEOUT),
);
}
};
notes.forEach((note) => {
if (notesById[note.id]) {
commit(types.UPDATE_NOTE, note);
} else if (note.type === constants.DISCUSSION_NOTE || note.type === constants.DIFF_NOTE) {
const discussion = utils.findNoteObjectById(state.discussions, note.discussion_id);
if (discussion) {
commit(types.ADD_NEW_REPLY_TO_DISCUSSION, note);
} else if (note.type === constants.DIFF_NOTE && !note.base_discussion) {
// Diff note for an unknown discussion: the thread metadata is missing,
// so refetch discussions instead of guessing.
debouncedFetchDiscussions(state.currentlyFetchingDiscussions);
} else {
commit(types.ADD_NEW_NOTE, note);
}
} else {
commit(types.ADD_NEW_NOTE, note);
}
});
};
// Promotes a comment to an incident timeline event via GraphQL, surfacing
// mutation errors (or a generic message) in an alert.
export const promoteCommentToTimelineEvent = (
  { commit },
  { noteId, addError, addGenericError },
) => {
  commit(types.SET_PROMOTE_COMMENT_TO_TIMELINE_PROGRESS, true); // Set loading state
  return utils.gqClient
    .mutate({
      mutation: promoteTimelineEvent,
      variables: {
        input: {
          noteId: convertToGraphQLId(TYPENAME_NOTE, noteId),
        },
      },
    })
    .then(({ data = {} }) => {
      const errors = data.timelineEventPromoteFromNote?.errors;
      // Guard with `?.` — `errors` is undefined when the mutation payload is
      // missing, and the previous unguarded `.length` read would itself throw
      // a TypeError, masking the real outcome.
      if (errors?.length) {
        const errorMessage = sprintf(addError, {
          error: errors.join('. '),
        });
        throw new Error(errorMessage);
      } else {
        notesEventHub.$emit('comment-promoted-to-timeline-event');
        toast(__('Comment added to the timeline.'));
      }
    })
    .catch((error) => {
      const message = error.message || addGenericError;
      let captureError = false;
      let errorObj = null;
      // Only capture unexpected failures, not user-facing mutation errors.
      if (message === addGenericError) {
        captureError = true;
        errorObj = error;
      }
      createAlert({
        message,
        captureError,
        error: errorObj,
      });
    })
    .finally(() => {
      commit(types.SET_PROMOTE_COMMENT_TO_TIMELINE_PROGRESS, false); // Revert loading state
    });
};
// Posts a reply to an existing discussion. When the response contains the
// full discussion, the stored thread is replaced wholesale; otherwise the
// reply is simply appended to its thread.
export const replyToDiscussion = (
{ commit, state, getters, dispatch },
{ endpoint, data: reply },
) =>
axios.post(endpoint, reply).then(({ data }) => {
if (data.discussion) {
commit(types.UPDATE_DISCUSSION, data.discussion);
updateOrCreateNotes({ commit, state, getters, dispatch }, data.discussion.notes);
dispatch('updateMergeRequestWidget');
dispatch('startTaskList');
dispatch('updateResolvableDiscussionsCounts');
} else {
commit(types.ADD_NEW_REPLY_TO_DISCUSSION, data);
}
return data;
});
// Creates a new top-level note; server-side validation errors are returned
// to the caller rather than committed to the store.
export const createNewNote = ({ commit, dispatch }, { endpoint, data: reply }) =>
axios.post(endpoint, reply).then(({ data }) => {
if (!data.errors) {
commit(types.ADD_NEW_NOTE, data);
dispatch('updateMergeRequestWidget');
dispatch('startTaskList');
dispatch('updateResolvableDiscussionsCounts');
}
return data;
});
// Drops optimistic placeholder notes once the real note has arrived.
export const removePlaceholderNotes = ({ commit }) => commit(types.REMOVE_PLACEHOLDER_NOTES);
// Resolves a discussion by id. Rejects when the discussion is unknown and
// short-circuits (resolves) when it is already resolved.
export const resolveDiscussion = ({ state, dispatch, getters }, { discussionId }) => {
const discussion = utils.findNoteObjectById(state.discussions, discussionId);
const isResolved = getters.isDiscussionResolved(discussionId);
if (!discussion) {
return Promise.reject();
}
if (isResolved) {
return Promise.resolve();
}
return dispatch('toggleResolveNote', {
endpoint: discussion.resolve_path,
isResolved,
discussion: true,
});
};
// Resolves or unresolves a note (or, when `discussion` is set, the whole
// thread) by calling the matching endpoint, then refreshes the resolvable
// counts and the MR widget.
export const toggleResolveNote = ({ commit, dispatch }, { endpoint, isResolved, discussion }) => {
  const method = isResolved
    ? constants.UNRESOLVE_NOTE_METHOD_NAME
    : constants.RESOLVE_NOTE_METHOD_NAME;
  const mutationType = discussion ? types.UPDATE_DISCUSSION : types.UPDATE_NOTE;

  return axios[method](endpoint).then((response) => {
    commit(mutationType, response.data);
    dispatch('updateResolvableDiscussionsCounts');
    dispatch('updateMergeRequestWidget');
  });
};
// Closes the issuable on the server, then updates store and page state.
export const closeIssuable = ({ commit, dispatch, state }) => {
dispatch('toggleStateButtonLoading', true);
return axios.put(state.notesData.closePath).then(({ data }) => {
commit(types.CLOSE_ISSUE);
dispatch('emitStateChangedEvent', data);
dispatch('toggleStateButtonLoading', false);
});
};
// Reopens the issuable on the server, then updates store and page state.
export const reopenIssuable = ({ commit, dispatch, state }) => {
dispatch('toggleStateButtonLoading', true);
return axios.put(state.notesData.reopenPath).then(({ data }) => {
commit(types.REOPEN_ISSUE);
dispatch('emitStateChangedEvent', data);
dispatch('toggleStateButtonLoading', false);
});
};
export const toggleStateButtonLoading = ({ commit }, value) =>
commit(types.TOGGLE_STATE_BUTTON_LOADING, value);
// Broadcasts open/closed state changes to non-Vuex parts of the page via a
// DOM CustomEvent.
export const emitStateChangedEvent = ({ getters }, data) => {
const event = new CustomEvent(EVENT_ISSUABLE_VUE_APP_CHANGE, {
detail: {
data,
isClosed: getters.openState === STATUS_CLOSED,
},
});
document.dispatchEvent(event);
};
// Mirrors an externally triggered state change into the store without a
// server round-trip.
export const toggleIssueLocalState = ({ commit }, newState) => {
if (newState === STATUS_CLOSED) {
commit(types.CLOSE_ISSUE);
} else if (newState === STATUS_REOPENED) {
commit(types.REOPEN_ISSUE);
}
};
// Saves a note or draft: shows optimistic placeholder notes (including a
// system-note placeholder for quick actions), dispatches to the appropriate
// create/reply/draft action, then post-processes quick-action, award-emoji,
// and time-tracking side effects from the response.
export const saveNote = ({ commit, dispatch }, noteData) => {
// For MR discussions we need to post as `note[note]` and issue we use `note.note`.
// For batch comments, we use draft_note
const note = noteData.data.draft_note || noteData.data['note[note]'] || noteData.data.note.note;
let placeholderText = note;
const hasQuickActions = utils.hasQuickActions(placeholderText);
const replyId = noteData.data.in_reply_to_discussion_id;
let methodToDispatch;
const postData = { ...noteData };
if (postData.isDraft === true) {
methodToDispatch = replyId
? 'batchComments/addDraftToDiscussion'
: 'batchComments/createNewDraft';
// Normalize the payload key: drafts are posted as `draft_note`.
if (!postData.draft_note && noteData.note) {
postData.draft_note = postData.note;
delete postData.note;
}
} else {
methodToDispatch = replyId ? 'replyToDiscussion' : 'createNewNote';
}
$('.notes-form .flash-container').hide(); // hide previous flash notification
commit(types.REMOVE_PLACEHOLDER_NOTES); // remove previous placeholders
if (hasQuickActions) {
placeholderText = utils.stripQuickActions(placeholderText);
}
// Optimistic placeholder for the note body (minus quick-action lines).
if (placeholderText.length) {
commit(types.SHOW_PLACEHOLDER_NOTE, {
noteBody: placeholderText,
replyId,
});
}
// Separate system-note placeholder describing the quick actions being run.
if (hasQuickActions) {
commit(types.SHOW_PLACEHOLDER_NOTE, {
isSystemNote: true,
noteBody: utils.getQuickActionText(note),
replyId,
});
}
// Surfaces "Commands applied" feedback and clears review drafts after a
// `submit_review` quick action.
const processQuickActions = (res) => {
const {
errors: { commands_only: commandsOnly } = {
commands_only: null,
command_names: [],
},
command_names: commandNames,
} = res;
const message = commandsOnly;
if (commandNames?.indexOf('submit_review') >= 0) {
dispatch('batchComments/clearDrafts');
}
/*
The following reply means that quick actions have been successfully applied:
{"commands_changes":{},"valid":false,"errors":{"commands_only":["Commands applied"]}}
*/
if (hasQuickActions && message) {
// synchronizing the quick action with the sidebar widget
// this is a temporary solution until we have confidentiality real-time updates
if (
confidentialWidget.setConfidentiality &&
message.some((m) => m.includes('Made this issue confidential'))
) {
confidentialWidget.setConfidentiality();
}
$('.js-gfm-input').trigger('clear-commands-cache.atwho');
createAlert({
message: message || __('Commands applied'),
variant: VARIANT_INFO,
parent: noteData.flashContainer,
});
}
return res;
};
// Reflects an award-emoji quick action in the awards block UI.
const processEmojiAward = (res) => {
const { commands_changes: commandsChanges } = res;
const { emoji_award: emojiAward } = commandsChanges || {};
if (!emojiAward) {
return res;
}
const votesBlock = $('.js-awards-block').eq(0);
return loadAwardsHandler()
.then((awardsHandler) => {
awardsHandler.addAwardToEmojiBar(votesBlock, emojiAward);
awardsHandler.scrollToAwards();
})
.catch(() => {
createAlert({
message: __('Something went wrong while adding your award. Please try again.'),
parent: noteData.flashContainer,
});
})
.then(() => res);
};
// Notifies the sidebar when the response changed time tracking data.
const processTimeTracking = (res) => {
const { commands_changes: commandsChanges } = res;
const { spend_time: spendTime, time_estimate: timeEstimate } = commandsChanges || {};
if (spendTime != null || timeEstimate != null) {
sidebarTimeTrackingEventHub.$emit('timeTrackingUpdated', {
commands_changes: commandsChanges,
});
}
return res;
};
// The real note has arrived (or failed) — drop the optimistic placeholders.
const removePlaceholder = (res) => {
commit(types.REMOVE_PLACEHOLDER_NOTES);
return res;
};
return dispatch(methodToDispatch, postData, { root: true })
.then(processQuickActions)
.then(processEmojiAward)
.then(processTimeTracking)
.then(removePlaceholder);
};
export const setFetchingState = ({ commit }, fetchingState) =>
commit(types.SET_NOTES_FETCHING_STATE, fetchingState);
// Handles a successful poll response: merges new/updated notes into the
// store and records the server's last-fetched timestamp. Skipped while a
// discussion is being resolved to avoid clobbering optimistic UI state.
// eslint-disable-next-line max-params
const pollSuccessCallBack = async (resp, commit, state, getters, dispatch) => {
if (state.isResolvingDiscussion) {
return null;
}
if (resp.notes?.length) {
await dispatch('updateOrCreateNotes', resp.notes);
dispatch('startTaskList');
dispatch('updateResolvableDiscussionsCounts');
}
commit(types.SET_LAST_FETCHED_AT, resp.last_fetched_at);
return resp;
};
// Builds the polling request: the notes endpoint plus an X-Last-Fetched-At
// header so the server only returns changes since the previous poll.
const getFetchDataParams = (state) => {
const endpoint = state.notesData.notesPath;
const options = {
headers: {
'X-Last-Fetched-At': state.lastFetchedAt ? `${state.lastFetchedAt}` : undefined,
},
};
return { endpoint, options };
};
// Polls the notes endpoint for changes since `lastFetchedAt` and merges any
// updated notes into the store. Network errors are intentionally ignored —
// the next poll will retry.
export const fetchUpdatedNotes = ({ commit, state, getters, dispatch }) => {
  const { endpoint, options } = getFetchDataParams(state);

  return axios
    .get(endpoint, options)
    // Return the callback's promise so callers can await the full update;
    // the previous version dropped it, leaving a floating promise.
    .then(({ data }) => pollSuccessCallBack(data, commit, state, getters, dispatch))
    .catch(() => {});
};
// Optimistically toggles an award emoji on a note in the store.
export const toggleAward = ({ commit, getters }, { awardName, noteId }) => {
commit(types.TOGGLE_AWARD, { awardName, note: getters.notesById[noteId] });
};
// Persists an award toggle on the server, then mirrors it in the store.
export const toggleAwardRequest = ({ dispatch }, data) => {
const { endpoint, awardName } = data;
return axios.post(endpoint, { name: awardName }).then(() => {
dispatch('toggleAward', data);
});
};
// Loads the truncated diff context shown above a diff discussion.
export const fetchDiscussionDiffLines = ({ commit }, discussion) =>
axios.get(discussion.truncated_diff_lines_path).then(({ data }) => {
commit(types.SET_DISCUSSION_DIFF_LINES, {
discussionId: discussion.id,
diffLines: data.truncated_diff_lines,
});
});
// Tells the MR widget to re-evaluate discussion state.
export const updateMergeRequestWidget = () => {
mrWidgetEventHub.$emit('mr.discussion.updated');
};
export const setLoadingState = ({ commit }, data) => {
commit(types.SET_NOTES_LOADING_STATE, data);
};
// Re-fetches discussions with a new activity filter, clearing the current
// ones and showing the loading state while the request is in flight.
export const filterDiscussion = ({ commit, dispatch }, { path, filter, persistFilter }) => {
commit(types.CLEAR_DISCUSSIONS);
dispatch('setLoadingState', true);
dispatch('fetchDiscussions', { path, filter, persistFilter })
.then(() => {
dispatch('setLoadingState', false);
dispatch('setNotesFetchedState', true);
})
.catch(() => {
dispatch('setLoadingState', false);
dispatch('setNotesFetchedState', true);
createAlert({
message: __('Something went wrong while fetching comments. Please try again.'),
});
});
};
export const setCommentsDisabled = ({ commit }, data) => {
commit(types.DISABLE_COMMENTS, data);
};
// (Re)initializes task-list checkbox handling for editable notes after the
// DOM has updated; re-arms itself after each successful toggle.
export const startTaskList = ({ dispatch }) =>
Vue.nextTick(
() =>
new TaskList({
dataType: 'note',
fieldName: 'note',
selector: '.notes .is-editable',
onSuccess: () => dispatch('startTaskList'),
}),
);
export const updateResolvableDiscussionsCounts = ({ commit }) =>
commit(types.UPDATE_RESOLVABLE_DISCUSSIONS_COUNTS);
// Applies a single code suggestion, then resolves its discussion
// (best-effort: resolution failures are swallowed).
export const submitSuggestion = (
  { commit, dispatch },
  { discussionId, suggestionId, flashContainer, message },
) => {
  const dispatchResolveDiscussion = () =>
    dispatch('resolveDiscussion', { discussionId }).catch(() => {});

  commit(types.SET_RESOLVING_DISCUSSION, true);

  return Api.applySuggestion(suggestionId, message)
    .then(dispatchResolveDiscussion)
    .catch((err) => {
      const defaultMessage = __(
        'Something went wrong while applying the suggestion. Please try again.',
      );
      // `err.response` is absent for network-level failures — guard it so
      // the error handler itself cannot throw.
      const errorMessage = err.response?.data?.message;
      const alertMessage = errorMessage || defaultMessage;

      createAlert({
        message: alertMessage,
        parent: flashContainer,
      });
    })
    .finally(() => {
      commit(types.SET_RESOLVING_DISCUSSION, false);
    });
};
// Applies all batched suggestions in one request, resolves each affected
// discussion (best-effort), then clears the batch.
export const submitSuggestionBatch = ({ commit, dispatch, state }, { message, flashContainer }) => {
  const suggestionIds = state.batchSuggestionsInfo.map(({ suggestionId }) => suggestionId);

  const resolveAllDiscussions = () =>
    state.batchSuggestionsInfo.map((suggestionInfo) => {
      const { discussionId } = suggestionInfo;
      return dispatch('resolveDiscussion', { discussionId }).catch(() => {});
    });

  commit(types.SET_APPLYING_BATCH_STATE, true);
  commit(types.SET_RESOLVING_DISCUSSION, true);

  return Api.applySuggestionBatch(suggestionIds, message)
    .then(() => Promise.all(resolveAllDiscussions()))
    .then(() => commit(types.CLEAR_SUGGESTION_BATCH))
    .catch((err) => {
      const defaultMessage = __(
        'Something went wrong while applying the batch of suggestions. Please try again.',
      );
      // `err.response` is absent for network-level failures — guard it so
      // the error handler itself cannot throw.
      const errorMessage = err.response?.data?.message;
      const alertMessage = errorMessage || defaultMessage;

      createAlert({
        message: alertMessage,
        parent: flashContainer,
      });
    })
    .finally(() => {
      commit(types.SET_APPLYING_BATCH_STATE, false);
      commit(types.SET_RESOLVING_DISCUSSION, false);
    });
};
// Batch-suggestion bookkeeping: add/remove a suggestion from the pending batch.
export const addSuggestionInfoToBatch = ({ commit }, { suggestionId, noteId, discussionId }) =>
commit(types.ADD_SUGGESTION_TO_BATCH, { suggestionId, noteId, discussionId });
export const removeSuggestionInfoFromBatch = ({ commit }, suggestionId) =>
commit(types.REMOVE_SUGGESTION_FROM_BATCH, suggestionId);
// Marks a single note as converted to/from a discussion thread.
export const convertToDiscussion = ({ commit }, noteId) =>
commit(types.CONVERT_TO_DISCUSSION, noteId);
export const removeConvertedDiscussion = ({ commit }, noteId) =>
commit(types.REMOVE_CONVERTED_DISCUSSION, noteId);
export const setCurrentDiscussionId = ({ commit }, discussionId) =>
commit(types.SET_CURRENT_DISCUSSION_ID, discussionId);
// Loads a specific version of the issuable description, optionally diffed
// against a starting version.
export const fetchDescriptionVersion = ({ dispatch }, { endpoint, startingVersion, versionId }) => {
let requestUrl = endpoint;
if (startingVersion) {
requestUrl = mergeUrlParams({ start_version_id: startingVersion }, requestUrl);
}
dispatch('requestDescriptionVersion');
return axios
.get(requestUrl)
.then((res) => {
dispatch('receiveDescriptionVersion', { descriptionVersion: res.data, versionId });
})
.catch((error) => {
dispatch('receiveDescriptionVersionError', error);
createAlert({
message: __('Something went wrong while fetching description changes. Please try again.'),
});
});
};
// Loading / success / error commits for description-version fetching.
export const requestDescriptionVersion = ({ commit }) => {
commit(types.REQUEST_DESCRIPTION_VERSION);
};
export const receiveDescriptionVersion = ({ commit }, descriptionVersion) => {
commit(types.RECEIVE_DESCRIPTION_VERSION, descriptionVersion);
};
export const receiveDescriptionVersionError = ({ commit }, error) => {
commit(types.RECEIVE_DESCRIPTION_VERSION_ERROR, error);
};
// Soft-deletes a description version on the server. Rethrows on failure so
// callers (e.g. SystemNote) can reset their internal state.
export const softDeleteDescriptionVersion = (
{ dispatch },
{ endpoint, startingVersion, versionId },
) => {
let requestUrl = endpoint;
if (startingVersion) {
requestUrl = mergeUrlParams({ start_version_id: startingVersion }, requestUrl);
}
dispatch('requestDeleteDescriptionVersion');
return axios
.delete(requestUrl)
.then(() => {
dispatch('receiveDeleteDescriptionVersion', versionId);
})
.catch((error) => {
dispatch('receiveDeleteDescriptionVersionError', error);
createAlert({
message: __('Something went wrong while deleting description changes. Please try again.'),
});
// Throw an error here because a component like SystemNote -
// needs to know if the request failed to reset its internal state.
throw new Error();
});
};
export const requestDeleteDescriptionVersion = ({ commit }) => {
commit(types.REQUEST_DELETE_DESCRIPTION_VERSION);
};
// Replaces the deleted version's body with a localized "Deleted" marker.
export const receiveDeleteDescriptionVersion = ({ commit }, versionId) => {
commit(types.RECEIVE_DELETE_DESCRIPTION_VERSION, { [versionId]: __('Deleted') });
};
export const receiveDeleteDescriptionVersionError = ({ commit }, error) => {
commit(types.RECEIVE_DELETE_DESCRIPTION_VERSION_ERROR, error);
};
export const updateAssignees = ({ commit }, assignees) => {
commit(types.UPDATE_ASSIGNEES, assignees);
};
export const updateDiscussionPosition = ({ commit }, updatedPosition) => {
commit(types.UPDATE_DISCUSSION_POSITION, updatedPosition);
};
export const updateMergeRequestFilters = ({ commit }, newFilters) =>
commit(types.SET_MERGE_REQUEST_FILTERS, newFilters);

View File

@ -0,0 +1,389 @@
import { flattenDeep, clone } from 'lodash';
import { match } from '~/diffs/utils/diff_file';
import { isInMRPage } from '~/lib/utils/common_utils';
import { doesHashExistInUrl } from '~/lib/utils/url_utility';
import { badgeState } from '~/merge_requests/components/merge_request_header.vue';
import * as constants from '../../constants';
import { collapseSystemNotes } from '../../stores/collapse_utils';
// Top-level draft notes (no file path, not replies) from the batchComments
// module, marked as individual notes and ordered by ascending id.
const getDraftComments = (state) => {
  const batch = state.batchComments;
  if (!batch) {
    return [];
  }

  return batch.drafts
    .filter(({ file_path: filePath, discussion_id: discussionId }) => !filePath && !discussionId)
    .map((draft) => ({
      ...draft,
      // Treat a top-level draft note as individual_note so it's not included in
      // expand/collapse threads
      individual_note: true,
    }))
    .sort((first, second) => first.id - second.id);
};
// Decides whether a discussion should be hidden for the currently selected
// merge-request activity filters. `filters` holds the *selected* filter
// values; a discussion matching any deselected option is hidden.
const hideActivity = (filters, discussion) => {
  // All filter options selected: nothing is hidden. Compare against the
  // option *count* — the previous comparison against the array itself was
  // always false, so this early exit could never fire.
  if (filters.length === constants.MR_FILTER_OPTIONS.length) return false;
  // No filters selected: everything is hidden.
  if (filters.length === 0) return true;
  const firstNote = discussion.notes[0];
  const hidingFilters = constants.MR_FILTER_OPTIONS.filter(({ value }) => !filters.includes(value));
  for (let i = 0, len = hidingFilters.length; i < len; i += 1) {
    const filter = hidingFilters[i];
    if (
      // For all of the below firstNote is the first note of a discussion, whether that be
      // the first in a discussion or a single note
      // If the filter option filters based on icon check against the first notes system note icon
      filter.systemNoteIcons?.includes(firstNote.system_note_icon_name) ||
      // If the filter option filters based on note type use the first notes type
      (filter.noteType?.includes(firstNote.type) && !firstNote.author?.bot) ||
      // If the filter option filters based on the note text then check if it is system
      // and filter based on the text of the system note
      (firstNote.system && filter.noteText?.some((t) => firstNote.note.includes(t))) ||
      // For individual notes we filter if the discussion is a single note and is not a system note
      (filter.individualNote === discussion.individual_note &&
        !firstNote.system &&
        !firstNote.author?.bot) ||
      // For bot comments we filter on the authors `bot` boolean attribute
      (filter.bot && firstNote.author?.bot)
    ) {
      return true;
    }
  }
  return false;
};
// was 'discussions', replace all mapGetters using this
// Primary getter for the rendered discussion list: applies MR activity
// filters, optionally explodes discussions into per-note entries for the
// timeline view, collapses runs of system notes, appends top-level draft
// comments, and finally applies the chosen sort order.
export const filteredDiscussions = (state, getters, rootState) => {
let discussionsInState = clone(state.discussions);
// NOTE: not testing bc will be removed when backend is finished.
if (state.noteableData.targetType === 'merge_request') {
discussionsInState = discussionsInState.reduce((acc, discussion) => {
if (hideActivity(state.mergeRequestFilters, discussion)) {
return acc;
}
acc.push(discussion);
return acc;
}, []);
}
if (state.isTimelineEnabled) {
// Timeline view: each note becomes its own one-note "discussion",
// ordered chronologically by note creation time.
discussionsInState = discussionsInState
.reduce((acc, discussion) => {
const transformedToIndividualNotes = discussion.notes.map((note) => ({
...discussion,
id: note.id,
created_at: note.created_at,
individual_note: true,
notes: [note],
}));
return acc.concat(transformedToIndividualNotes);
}, [])
.sort((a, b) => new Date(a.created_at) - new Date(b.created_at));
}
discussionsInState = collapseSystemNotes(discussionsInState);
discussionsInState = discussionsInState.concat(getDraftComments(rootState));
if (state.discussionSortOrder === constants.DESC) {
// `reverse` mutates, but `discussionsInState` is a local clone by now.
discussionsInState = discussionsInState.reverse();
}
return discussionsInState;
};
// these should've never been getters
// export const convertedDisscussionIds = (state) => state.convertedDisscussionIds;
// export const targetNoteHash = (state) => state.targetNoteHash;
// export const isNotesFetched = (state) => state.isNotesFetched;
export const getNotesData = (state) => state.notesData;
/*
* WARNING: This is an example of an "unnecessary" getter
* more info found here: https://gitlab.com/groups/gitlab-org/-/epics/2913.
*/
export const sortDirection = (state) => state.discussionSortOrder;
export const timelineEnabled = (state) => state.isTimelineEnabled;
// these should've never been getters
// export const isFetching = (state) => state.isFetching;
// export const isLoading = (state) => state.isLoading;
// export const persistSortOrder = (state) => state.persistSortOrder;
// Generic prop accessors for notes/noteable/user data.
export const getNotesDataByProp = (state) => (prop) => state.notesData[prop];
export const getNoteableData = (state) => state.noteableData;
export const getNoteableDataByProp = (state) => (prop) => state.noteableData[prop];
export const getBlockedByIssues = (state) => state.noteableData.blocked_by_issues;
export const userCanReply = (state) => Boolean(state.noteableData.current_user.can_create_note);
// On MR pages the header badge is the source of truth for open/closed state.
export const openState = (state) => (isInMRPage() ? badgeState.state : state.noteableData.state);
export const getUserData = (state) => state.userData || {};
export const getUserDataByProp = (state) => (prop) => state.userData && state.userData[prop];
// this should've never been a getter
// export const descriptionVersions = (state) => state.descriptionVersions;
// True only for users with the timeline-events permission, and only when the
// noteable is an incident.
export const canUserAddIncidentTimelineEvents = (state) => {
return Boolean(
state.userData?.can_add_timeline_events &&
state.noteableData.type === constants.NOTEABLE_TYPE_MAPPING.Incident,
);
};
// Flat lookup of every note in every discussion, keyed by note id.
export const notesById = (state) =>
  state.discussions.reduce((acc, discussion) => {
    // `forEach` (not `every`) — we iterate purely for the side effect of
    // filling the accumulator; the previous `every` only worked by accident
    // because `Object.assign` always returns a truthy object.
    discussion.notes.forEach((n) => {
      acc[n.id] = n;
    });
    return acc;
  }, {});
// Noteable type for the current page: epics are explicit; otherwise the
// presence of `merge_params` distinguishes merge requests from issues.
export const noteableType = (state) => {
const { ISSUE_NOTEABLE_TYPE, MERGE_REQUEST_NOTEABLE_TYPE, EPIC_NOTEABLE_TYPE } = constants;
if (state.noteableData.noteableType === EPIC_NOTEABLE_TYPE) {
return EPIC_NOTEABLE_TYPE;
}
return state.noteableData.merge_params ? MERGE_REQUEST_NOTEABLE_TYPE : ISSUE_NOTEABLE_TYPE;
};
// Returns a reversed shallow copy without mutating the input.
const reverseNotes = (array) => array.slice(0).reverse();

// A note is the user's "last note" candidate when it is not a system note
// and was authored by the currently signed-in user.
const isLastNote = (note, state) =>
  !note.system && state.userData && note.author && note.author.id === state.userData.id;

// Most recent non-system note authored by the current user, searched across
// all discussions, newest first.
export const getCurrentUserLastNote = (state) => {
  const notesNewestFirst = reverseNotes(state.discussions).map((discussion) =>
    reverseNotes(discussion.notes),
  );
  return flattenDeep(notesNewestFirst).find((note) => isLastNote(note, state));
};

// Most recent non-system note authored by the current user within a single
// discussion.
export const getDiscussionLastNote = (state) => (discussion) =>
  reverseNotes(discussion.notes).find((note) => isLastNote(note, state));
// these should've never been getters
// export const unresolvedDiscussionsCount = (state) => state.unresolvedDiscussionsCount;
// export const resolvableDiscussionsCount = (state) => state.resolvableDiscussionsCount;
// Whether the "jump to next unresolved discussion" button should be shown:
// true when more than one unresolved discussion exists in the chosen order
// ('discussion' = by date, anything else = by diff position).
export const showJumpToNextDiscussion =
  (state, getters) =>
  (mode = 'discussion') => {
    const orderedIds =
      mode === 'discussion'
        ? getters.unresolvedDiscussionsIdsByDate
        : getters.unresolvedDiscussionsIdsByDiff;
    return orderedIds.length > 1;
  };
export const isDiscussionResolved = (state, getters) => (discussionId) =>
getters.resolvedDiscussionsById[discussionId] !== undefined;
// All discussions that can be resolved: real (non-individual-note) resolvable threads.
export const allResolvableDiscussions = (state) => {
  const isResolvableThread = (discussion) => !discussion.individual_note && discussion.resolvable;
  return state.discussions.filter(isResolvableThread);
};
// Map of discussion id -> discussion, containing only resolvable discussions
// whose resolvable notes are all resolved. A discussion with no notes is skipped;
// a discussion whose notes are all non-resolvable counts as resolved (vacuous every()).
export const resolvedDiscussionsById = (state) => {
  const map = {};
  state.discussions.forEach((discussion) => {
    if (!discussion.resolvable || !discussion.notes) return;
    const allResolved = discussion.notes
      .filter((note) => note.resolvable)
      .every((note) => note.resolved);
    if (allResolved) {
      map[discussion.id] = discussion;
    }
  });
  return map;
};
// Gets Discussion IDs ordered by the date of their initial note.
// Fix: the previous comparator compared Date objects with `===`, which is never
// true for two distinct Date instances — so equal timestamps produced an
// inconsistent (non-symmetric) comparator. Subtracting the dates coerces them
// to milliseconds and yields a correct total order.
export const unresolvedDiscussionsIdsByDate = (state, getters) =>
  getters.allResolvableDiscussions
    .filter((d) => !d.resolved)
    .sort((a, b) => new Date(a.notes[0].created_at) - new Date(b.notes[0].created_at))
    .map((d) => d.id);
// Gets Discussions IDs ordered by their position in the diff
//
// Sorts the array of resolvable yet unresolved discussions by
// comparing file names first. If file names are the same, compares
// line numbers.
export const unresolvedDiscussionsIdsByDiff = (state, getters, allState) => {
  // The diffs store's file list is the authority on file ordering (its `order` field).
  const authoritativeFiles = allState.diffs.diffFiles;
  return getters.allResolvableDiscussions
    .filter((d) => !d.resolved && d.active)
    .sort((a, b) => {
      let order = 0;
      // Discussions without a diff file cannot be positioned; treat them as equal.
      if (!a.diff_file || !b.diff_file) {
        return order;
      }
      // Resolve each discussion's diff file against the authoritative list
      // (`match` semantics come from the diffs module — see its definition).
      const authoritativeA = authoritativeFiles.find((source) =>
        match({ fileA: source, fileB: a.diff_file, mode: 'mr' }),
      );
      const authoritativeB = authoritativeFiles.find((source) =>
        match({ fileA: source, fileB: b.diff_file, mode: 'mr' }),
      );
      // Primary key: the files' relative order in the diff.
      if (authoritativeA && authoritativeB) {
        order = authoritativeA.order - authoritativeB.order;
      }
      // Get the line numbers, to compare within the same file
      const aLines = [a.position.new_line, a.position.old_line];
      const bLines = [b.position.new_line, b.position.old_line];
      // Secondary key (same file): the larger of new/old line numbers.
      // NOTE(review): comparator never returns 0, so ties keep engine-dependent order.
      return order < 0 ||
        (order === 0 &&
          // .max() because one of them might be zero (if removed/added)
          Math.max(aLines[0], aLines[1]) < Math.max(bLines[0], bLines[1]))
        ? -1
        : 1;
    })
    .map((d) => d.id);
};
// Number of fully resolved discussions (size of the resolved-by-id map).
export const resolvedDiscussionCount = (state, getters) =>
  Object.keys(getters.resolvedDiscussionsById).length;
// Total number of visible notes across all discussions, excluding
// system notes and optimistic placeholder notes.
export const discussionTabCounter = (state) => {
  let total = 0;
  for (const discussion of state.discussions) {
    total += discussion.notes.filter((note) => !note.system && !note.placeholder).length;
  }
  return total;
};
// Returns the list of discussion IDs ordered according to given parameter
// @param {Boolean} diffOrder - is ordered by diff?
export const unresolvedDiscussionsIdsOrdered = (state, getters) => (diffOrder) =>
  diffOrder ? getters.unresolvedDiscussionsIdsByDiff : getters.unresolvedDiscussionsIdsByDate;
// Checks if a given discussion is the last in the current order (diff or date)
// @param {String} discussionId - id of the discussion
// @param {Boolean} diffOrder - is ordered by diff?
export const isLastUnresolvedDiscussion = (state, getters) => (discussionId, diffOrder) => {
  const orderedIds = getters.unresolvedDiscussionsIdsOrdered(diffOrder);
  return orderedIds[orderedIds.length - 1] === discussionId;
};
// Finds the neighboring unresolved discussion id, `step` positions away
// (+1 = next, -1 = previous), wrapping around at either end of the list.
// Falls back to date ordering when the diff ordering is empty.
export const findUnresolvedDiscussionIdNeighbor =
  (state, getters) =>
  ({ discussionId, diffOrder, step }) => {
    const diffIds = getters.unresolvedDiscussionsIdsOrdered(diffOrder);
    const dateIds = getters.unresolvedDiscussionsIdsOrdered(false);
    const ids = diffIds.length > 0 ? diffIds : dateIds;
    const index = ids.indexOf(discussionId) + step;
    // Wrap from the first element back to the last when stepping backwards.
    if (step < 0 && index < 0) {
      return ids[ids.length - 1];
    }
    // Wrap from the last element back to the first when stepping forwards.
    if (step > 0 && index === ids.length) {
      return ids[0];
    }
    return ids[index];
  };
// Gets the ID of the discussion following the one provided, respecting order (diff or date)
// @param {String} discussionId - id of the current discussion
// @param {Boolean} diffOrder - is ordered by diff?
export const nextUnresolvedDiscussionId = (state, getters) => (discussionId, diffOrder) =>
  getters.findUnresolvedDiscussionIdNeighbor({ step: 1, discussionId, diffOrder });
// Gets the ID of the discussion preceding the one provided, respecting order (diff or date).
export const previousUnresolvedDiscussionId = (state, getters) => (discussionId, diffOrder) =>
  getters.findUnresolvedDiscussionIdNeighbor({ step: -1, discussionId, diffOrder });
// First unresolved discussion id in the requested ordering.
// @param {Boolean} diffOrder - is ordered by diff?
export const firstUnresolvedDiscussionId = (state, getters) => (diffOrder) => {
  const ids = diffOrder
    ? getters.unresolvedDiscussionsIdsByDiff
    : getters.unresolvedDiscussionsIdsByDate;
  return ids[0];
};
// Curried getter: the discussion with the given id, or undefined when absent.
export const getDiscussion = (state) => (discussionId) =>
  state.discussions.find(({ id }) => id === discussionId);
// this should've never been a getter
// export const commentsDisabled = (state) => state.commentsDisabled;
// Number of notes that carry at least one suggestion.
export const suggestionsCount = (state, getters) => {
  const allNotes = Object.values(getters.notesById);
  return allNotes.filter((note) => note.suggestions?.length).length;
};
// Whether the batchComments module currently holds any draft comments.
// eslint-disable-next-line max-params
export const hasDrafts = (state, getters, rootState, rootGetters) => {
  const draftsPresent = rootGetters['batchComments/hasDrafts'];
  return Boolean(draftsPresent);
};
// Unique file paths (insertion order) touched by the currently batched suggestions.
// Fix: the previous implementation dereferenced `discussion.diff_file.file_path`
// without a guard after an optional-chained check, so a batched suggestion whose
// discussion was missing (or had no diff_file) threw a TypeError; it could also
// push `undefined` into the result. Such entries are now skipped.
export const getSuggestionsFilePaths = (state) => () => {
  const seenPaths = new Set();
  state.batchSuggestionsInfo.forEach(({ discussionId }) => {
    const discussion = state.discussions.find((d) => d.id === discussionId);
    const filePath = discussion?.diff_file?.file_path;
    if (filePath !== undefined) {
      seenPaths.add(filePath);
    }
  });
  return [...seenPaths];
};
// Builds the request config for fetching discussions. When the URL targets a
// specific note (#note_… hash) while a non-default filter is active, the filter
// is temporarily forced back to the default (without persisting it) so the
// targeted note is guaranteed to be present in the response.
export const getFetchDiscussionsConfig = (state, getters) => {
  const path = getters.getNotesDataByProp('discussionsPath');
  const activeFilter =
    getters.getNotesDataByProp('notesFilter') || constants.DISCUSSION_FILTERS_DEFAULT_VALUE;
  const targetsNote = doesHashExistInUrl(constants.NOTE_UNDERSCORE);
  const usesNonDefaultFilter = activeFilter !== constants.DISCUSSION_FILTERS_DEFAULT_VALUE;
  if (targetsNote && usesNonDefaultFilter) {
    return {
      path,
      filter: constants.DISCUSSION_FILTERS_DEFAULT_VALUE,
      persistFilter: false,
    };
  }
  return { path };
};
// True when every discussion is expanded (vacuously true for an empty list).
export const allDiscussionsExpanded = (state) =>
  state.discussions.every(({ expanded }) => expanded);

View File

@ -1,17 +1,70 @@
import { defineStore } from 'pinia';
import { ASC, MR_FILTER_OPTIONS } from '~/notes/constants';
// import * as actions from './actions';
// import mutations from './mutations';
// import * as getters from './getters';
export const useNotes = defineStore('legacyNotes', {
state() {
return {
notes: null,
discussions: [],
discussionSortOrder: ASC,
persistSortOrder: true,
convertedDisscussionIds: [],
targetNoteHash: null,
lastFetchedAt: null,
currentDiscussionId: null,
batchSuggestionsInfo: [],
currentlyFetchingDiscussions: false,
doneFetchingBatchDiscussions: false,
/**
* selectedCommentPosition & selectedCommentPositionHover structures are the same as `position.line_range`:
* {
* start: { line_code: string, new_line: number, old_line:number, type: string },
* end: { line_code: string, new_line: number, old_line:number, type: string },
* }
*/
selectedCommentPosition: null,
selectedCommentPositionHover: null,
// View layer
isToggleStateButtonLoading: false,
isNotesFetched: false,
isLoading: true,
isLoadingDescriptionVersion: false,
isPromoteCommentToTimelineEventInProgress: false,
// holds endpoints and permissions provided through haml
notesData: {
markdownDocsPath: '',
},
userData: {},
noteableData: {
discussion_locked: false,
confidential: false, // TODO: Move data like this to Issue Store, should not be apart of notes.
current_user: {},
preview_note_path: 'path/to/preview',
},
isResolvingDiscussion: false,
commentsDisabled: false,
resolvableDiscussionsCount: 0,
unresolvedDiscussionsCount: 0,
descriptionVersions: {},
isTimelineEnabled: false,
isFetching: false,
isPollingInitialized: false,
mergeRequestFilters: MR_FILTER_OPTIONS.map((f) => f.value),
};
},
actions: {
// ...mutations,
// ...actions,
saveNote() {},
updateDiscussion() {},
updateResolvableDiscussionsCounts() {},
},
getters: {
// ...getters,
notesById() {},
getDiscussion() {},
},

View File

@ -0,0 +1,455 @@
import { isEqual } from 'lodash';
import { STATUS_CLOSED, STATUS_REOPENED } from '~/issues/constants';
import { isInMRPage } from '~/lib/utils/common_utils';
import { uuids } from '~/lib/utils/uuids';
import * as constants from '../../constants';
import * as types from '../../stores/mutation_types';
import * as utils from '../../stores/utils';
// Vuex-style mutation handlers for the notes store. Every handler mutates
// `state` in place. `state.discussions` is the canonical list: each entry is a
// discussion object holding a `notes` array (individual notes are modeled as
// single-note discussions with `individual_note: true`).
export default {
  // Adds a brand-new note. `data` is either a bare note payload or a wrapper
  // carrying a `discussion` object; a synthetic discussion is built when the
  // payload provides none. No-op when the discussion already exists locally.
  [types.ADD_NEW_NOTE](state, data) {
    const note = data.discussion ? data.discussion.notes[0] : data;
    const { discussion_id: discussionId, type } = note;
    const [exists] = state.discussions.filter((n) => n.id === note.discussion_id);
    const isDiscussion = type === constants.DISCUSSION_NOTE || type === constants.DIFF_NOTE;
    if (!exists) {
      let discussion = data.discussion || note.base_discussion;
      if (!discussion) {
        // Build a minimal discussion wrapper around the bare note.
        discussion = {
          expanded: true,
          id: discussionId,
          individual_note: !isDiscussion,
          reply_id: discussionId,
        };
        // Resolvability fields only make sense on the merge request page.
        if (isDiscussion && isInMRPage()) {
          discussion.resolvable = note.resolvable;
          discussion.resolved = false;
          discussion.active = true;
          discussion.resolve_path = note.resolve_path;
          discussion.resolve_with_issue_path = note.resolve_with_issue_path;
          discussion.diff_discussion = false;
        }
      }
      if (discussion.truncated_diff_lines) {
        discussion.truncated_diff_lines = utils.prepareDiffLines(discussion.truncated_diff_lines);
      }
      // note.base_discussion = undefined; // No point keeping a reference to this
      delete note.base_discussion;
      discussion.notes = [note];
      state.discussions.push(discussion);
    }
  },
  // Appends a reply to an existing discussion, skipping duplicates.
  [types.ADD_NEW_REPLY_TO_DISCUSSION](state, note) {
    const discussion = utils.findNoteObjectById(state.discussions, note.discussion_id);
    const existingNote = discussion && utils.findNoteObjectById(discussion.notes, note.id);
    if (discussion && !existingNote) {
      discussion.notes.push(note);
    }
  },
  // Removes a note; removes its whole discussion when it was the last note
  // (or an individual note, which is always a single-note discussion).
  [types.DELETE_NOTE](state, note) {
    const noteObj = utils.findNoteObjectById(state.discussions, note.discussion_id);
    if (noteObj.individual_note) {
      state.discussions.splice(state.discussions.indexOf(noteObj), 1);
    } else {
      const comment = utils.findNoteObjectById(noteObj.notes, note.id);
      noteObj.notes.splice(noteObj.notes.indexOf(comment), 1);
      if (!noteObj.notes.length) {
        state.discussions.splice(state.discussions.indexOf(noteObj), 1);
      }
    }
  },
  [types.EXPAND_DISCUSSION](state, { discussionId }) {
    const discussion = utils.findNoteObjectById(state.discussions, discussionId);
    Object.assign(discussion, { expanded: true });
  },
  [types.COLLAPSE_DISCUSSION](state, { discussionId }) {
    const discussion = utils.findNoteObjectById(state.discussions, discussionId);
    Object.assign(discussion, { expanded: false });
  },
  // Strips optimistic placeholder notes, both nested inside discussions and
  // at the root. Iterates backwards so splicing doesn't skip elements.
  [types.REMOVE_PLACEHOLDER_NOTES](state) {
    const { discussions } = state;
    for (let i = discussions.length - 1; i >= 0; i -= 1) {
      const note = discussions[i];
      const children = note.notes;
      if (children.length > 1) {
        // remove placeholder from discussions
        for (let j = children.length - 1; j >= 0; j -= 1) {
          if (children[j].isPlaceholderNote) {
            children.splice(j, 1);
          }
        }
      } else if (note.isPlaceholderNote) {
        // remove placeholders from state root
        discussions.splice(i, 1);
      }
    }
  },
  // Endpoints/permissions provided through haml (see `notesData` in state).
  [types.SET_NOTES_DATA](state, data) {
    Object.assign(state, { notesData: data });
  },
  [types.SET_NOTEABLE_DATA](state, data) {
    Object.assign(state, { noteableData: data });
  },
  [types.SET_ISSUE_CONFIDENTIAL](state, data) {
    state.noteableData.confidential = data;
  },
  [types.SET_ISSUABLE_LOCK](state, locked) {
    state.noteableData.discussion_locked = locked;
  },
  [types.SET_USER_DATA](state, data) {
    Object.assign(state, { userData: data });
  },
  [types.CLEAR_DISCUSSIONS](state) {
    state.discussions = [];
  },
  // Merges a fetched batch of discussions into state: updates matching
  // discussions in place (preserving their expanded flag) and appends new ones.
  [types.ADD_OR_UPDATE_DISCUSSIONS](state, discussionsData) {
    discussionsData.forEach((d) => {
      const discussion = { ...d };
      const diffData = {};
      if (discussion.diff_file) {
        diffData.file_hash = discussion.diff_file.file_hash;
        diffData.truncated_diff_lines = utils.prepareDiffLines(
          discussion.truncated_diff_lines || [],
        );
      }
      // To support legacy notes, should be very rare case.
      if (discussion.individual_note && discussion.notes.length > 1) {
        discussion.notes.forEach((n) => {
          const newDiscussion = {
            ...discussion,
            ...diffData,
            notes: [n], // override notes array to only have one item to mimic individual_note
          };
          const oldDiscussion = state.discussions.find(
            (existingDiscussion) =>
              existingDiscussion.id === discussion.id && existingDiscussion.notes[0].id === n.id,
          );
          if (oldDiscussion) {
            state.discussions.splice(state.discussions.indexOf(oldDiscussion), 1, newDiscussion);
          } else {
            state.discussions.push(newDiscussion);
          }
        });
      } else {
        const oldDiscussion = utils.findNoteObjectById(state.discussions, discussion.id);
        if (oldDiscussion) {
          state.discussions.splice(state.discussions.indexOf(oldDiscussion), 1, {
            ...discussion,
            ...diffData,
            expanded: oldDiscussion.expanded,
          });
        } else {
          state.discussions.push({ ...discussion, ...diffData });
        }
      }
    });
  },
  [types.SET_LAST_FETCHED_AT](state, fetchedAt) {
    Object.assign(state, { lastFetchedAt: fetchedAt });
  },
  [types.SET_TARGET_NOTE_HASH](state, hash) {
    Object.assign(state, { targetNoteHash: hash });
  },
  // Inserts an optimistic placeholder note: nested in the replied-to
  // discussion when `data.replyId` matches one, otherwise at the root.
  [types.SHOW_PLACEHOLDER_NOTE](state, data) {
    let notesArr = state.discussions;
    const existingDiscussion = utils.findNoteObjectById(notesArr, data.replyId);
    if (existingDiscussion) {
      notesArr = existingDiscussion.notes;
    }
    notesArr.push({
      id: uuids()[0],
      individual_note: true,
      isPlaceholderNote: true,
      placeholderType: data.isSystemNote ? constants.SYSTEM_NOTE : constants.NOTE,
      notes: [
        {
          body: data.noteBody,
        },
      ],
    });
  },
  // Toggles the current user's award emoji on a note (add when absent, remove when present).
  [types.TOGGLE_AWARD](state, data) {
    const { awardName, note } = data;
    const { id, name, username } = state.userData;
    const hasEmojiAwardedByCurrentUser = note.award_emoji.filter(
      (emoji) => `${emoji.name}` === `${data.awardName}` && emoji.user.id === id,
    );
    if (hasEmojiAwardedByCurrentUser.length) {
      // If current user has awarded this emoji, remove it.
      note.award_emoji.splice(note.award_emoji.indexOf(hasEmojiAwardedByCurrentUser[0]), 1);
    } else {
      note.award_emoji.push({
        name: awardName,
        user: { id, name, username },
      });
    }
  },
  // Toggles (or forces, when `forceExpanded` is non-null) a discussion's expanded flag.
  [types.TOGGLE_DISCUSSION](state, { discussionId, forceExpanded = null }) {
    const discussion = utils.findNoteObjectById(state.discussions, discussionId);
    Object.assign(discussion, {
      expanded: forceExpanded === null ? !discussion.expanded : forceExpanded,
    });
  },
  [types.SET_EXPAND_DISCUSSIONS](state, { discussionIds, expanded }) {
    if (discussionIds?.length) {
      discussionIds.forEach((discussionId) => {
        const discussion = utils.findNoteObjectById(state.discussions, discussionId);
        Object.assign(discussion, { expanded });
      });
    }
  },
  [types.SET_EXPAND_ALL_DISCUSSIONS](state, expanded) {
    state.discussions.forEach((discussion) => {
      Object.assign(discussion, { expanded });
    });
  },
  [types.SET_RESOLVING_DISCUSSION](state, isResolving) {
    state.isResolvingDiscussion = isResolving;
  },
  // Replaces an edited note inside its discussion and, when it is the
  // discussion's first note, syncs the discussion-level resolved fields.
  [types.UPDATE_NOTE](state, note) {
    const discussion = utils.findNoteObjectById(state.discussions, note.discussion_id);
    // Disable eslint here so we can delete the property that we no longer need
    // in the note object
    // eslint-disable-next-line no-param-reassign
    delete note.base_discussion;
    if (discussion.individual_note) {
      // A reply to an individual note promotes it to a full discussion.
      if (note.type === constants.DISCUSSION_NOTE) {
        discussion.individual_note = false;
      }
      discussion.notes.splice(0, 1, note);
    } else {
      const comment = utils.findNoteObjectById(discussion.notes, note.id);
      if (!isEqual(comment, note)) {
        discussion.notes.splice(discussion.notes.indexOf(comment), 1, note);
      }
    }
    if (note.resolvable && note.id === discussion.notes[0].id) {
      Object.assign(discussion, {
        resolvable: note.resolvable,
        resolved: note.resolved,
        resolved_at: note.resolved_at,
        resolved_by: note.resolved_by,
        resolved_by_push: note.resolved_by_push,
      });
    }
  },
  // Marks one suggestion on a note as applied and all of them as no longer appliable.
  [types.APPLY_SUGGESTION](state, { noteId, discussionId, suggestionId }) {
    const noteObj = utils.findNoteObjectById(state.discussions, discussionId);
    const comment = utils.findNoteObjectById(noteObj.notes, noteId);
    comment.suggestions = comment.suggestions.map((suggestion) => ({
      ...suggestion,
      applied: suggestion.applied || suggestion.id === suggestionId,
      appliable: false,
    }));
  },
  // Sets/clears the "applying" spinner flag on every batched suggestion.
  [types.SET_APPLYING_BATCH_STATE](state, isApplyingBatch) {
    state.batchSuggestionsInfo.forEach((suggestionInfo) => {
      const { discussionId, noteId, suggestionId } = suggestionInfo;
      const noteObj = utils.findNoteObjectById(state.discussions, discussionId);
      const comment = utils.findNoteObjectById(noteObj.notes, noteId);
      comment.suggestions = comment.suggestions.map((suggestion) => ({
        ...suggestion,
        is_applying_batch: suggestion.id === suggestionId && isApplyingBatch,
      }));
    });
  },
  [types.ADD_SUGGESTION_TO_BATCH](state, { noteId, discussionId, suggestionId }) {
    state.batchSuggestionsInfo.push({
      suggestionId,
      noteId,
      discussionId,
    });
  },
  [types.REMOVE_SUGGESTION_FROM_BATCH](state, id) {
    const index = state.batchSuggestionsInfo.findIndex(({ suggestionId }) => suggestionId === id);
    if (index !== -1) {
      state.batchSuggestionsInfo.splice(index, 1);
    }
  },
  [types.CLEAR_SUGGESTION_BATCH](state) {
    // splice (not reassignment) keeps the array reference observed elsewhere.
    state.batchSuggestionsInfo.splice(0, state.batchSuggestionsInfo.length);
  },
  [types.UPDATE_DISCUSSION](state, noteData) {
    const note = noteData;
    const selectedDiscussion = state.discussions.find((disc) => disc.id === note.id);
    note.expanded = true; // override expand flag to prevent collapse
    Object.assign(selectedDiscussion, { ...note });
  },
  [types.UPDATE_DISCUSSION_POSITION](state, { discussionId, position }) {
    const selectedDiscussion = state.discussions.find((disc) => disc.id === discussionId);
    if (selectedDiscussion) Object.assign(selectedDiscussion.position, { ...position });
  },
  [types.CLOSE_ISSUE](state) {
    Object.assign(state.noteableData, { state: STATUS_CLOSED });
  },
  [types.REOPEN_ISSUE](state) {
    Object.assign(state.noteableData, { state: STATUS_REOPENED });
  },
  [types.TOGGLE_STATE_BUTTON_LOADING](state, value) {
    Object.assign(state, { isToggleStateButtonLoading: value });
  },
  [types.SET_NOTES_FETCHED_STATE](state, value) {
    Object.assign(state, { isNotesFetched: value });
  },
  [types.SET_NOTES_LOADING_STATE](state, value) {
    state.isLoading = value;
  },
  [types.SET_NOTES_FETCHING_STATE](state, value) {
    state.isFetching = value;
  },
  [types.SET_DISCUSSION_DIFF_LINES](state, { discussionId, diffLines }) {
    const discussion = utils.findNoteObjectById(state.discussions, discussionId);
    discussion.truncated_diff_lines = utils.prepareDiffLines(diffLines);
  },
  [types.SET_DISCUSSIONS_SORT](state, { direction, persist }) {
    state.discussionSortOrder = direction;
    state.persistSortOrder = persist;
  },
  [types.SET_TIMELINE_VIEW](state, value) {
    state.isTimelineEnabled = value;
  },
  [types.SET_SELECTED_COMMENT_POSITION](state, position) {
    state.selectedCommentPosition = position;
  },
  [types.SET_SELECTED_COMMENT_POSITION_HOVER](state, position) {
    state.selectedCommentPositionHover = position;
  },
  [types.DISABLE_COMMENTS](state, value) {
    state.commentsDisabled = value;
  },
  // Recomputes the resolvable/unresolved discussion counters from scratch.
  [types.UPDATE_RESOLVABLE_DISCUSSIONS_COUNTS](state) {
    state.resolvableDiscussionsCount = state.discussions.filter(
      (discussion) => !discussion.individual_note && discussion.resolvable,
    ).length;
    state.unresolvedDiscussionsCount = state.discussions.filter(
      (discussion) =>
        !discussion.individual_note &&
        discussion.resolvable &&
        discussion.notes.some((note) => note.resolvable && !note.resolved),
    ).length;
  },
  // NOTE: the "Disscussion" misspelling is the established state key; keep it consistent.
  [types.CONVERT_TO_DISCUSSION](state, discussionId) {
    const convertedDisscussionIds = [...state.convertedDisscussionIds, discussionId];
    Object.assign(state, { convertedDisscussionIds });
  },
  [types.REMOVE_CONVERTED_DISCUSSION](state, discussionId) {
    const convertedDisscussionIds = [...state.convertedDisscussionIds];
    convertedDisscussionIds.splice(convertedDisscussionIds.indexOf(discussionId), 1);
    Object.assign(state, { convertedDisscussionIds });
  },
  [types.SET_CURRENT_DISCUSSION_ID](state, discussionId) {
    state.currentDiscussionId = discussionId;
  },
  [types.REQUEST_DESCRIPTION_VERSION](state) {
    state.isLoadingDescriptionVersion = true;
  },
  [types.RECEIVE_DESCRIPTION_VERSION](state, { descriptionVersion, versionId }) {
    const descriptionVersions = { ...state.descriptionVersions, [versionId]: descriptionVersion };
    Object.assign(state, { descriptionVersions, isLoadingDescriptionVersion: false });
  },
  [types.RECEIVE_DESCRIPTION_VERSION_ERROR](state) {
    state.isLoadingDescriptionVersion = false;
  },
  [types.REQUEST_DELETE_DESCRIPTION_VERSION](state) {
    state.isLoadingDescriptionVersion = true;
  },
  [types.RECEIVE_DELETE_DESCRIPTION_VERSION](state, descriptionVersion) {
    state.isLoadingDescriptionVersion = false;
    Object.assign(state.descriptionVersions, descriptionVersion);
  },
  [types.RECEIVE_DELETE_DESCRIPTION_VERSION_ERROR](state) {
    state.isLoadingDescriptionVersion = false;
  },
  [types.UPDATE_ASSIGNEES](state, assignees) {
    state.noteableData.assignees = assignees;
  },
  [types.SET_FETCHING_DISCUSSIONS](state, value) {
    state.currentlyFetchingDiscussions = value;
  },
  [types.SET_DONE_FETCHING_BATCH_DISCUSSIONS](state, value) {
    state.doneFetchingBatchDiscussions = value;
  },
  [types.SET_PROMOTE_COMMENT_TO_TIMELINE_PROGRESS](state, value) {
    state.isPromoteCommentToTimelineEventInProgress = value;
  },
  [types.SET_IS_POLLING_INITIALIZED](state, value) {
    state.isPollingInitialized = value;
  },
  [types.SET_MERGE_REQUEST_FILTERS](state, value) {
    state.mergeRequestFilters = value;
  },
};

View File

@ -164,7 +164,7 @@ export default {
<span
v-if="displayedCount || $scopedSlots.count"
class="gl-inline-flex gl-items-center gl-gap-2 gl-self-start gl-text-sm gl-text-subtle"
class="crud-count gl-inline-flex gl-items-center gl-gap-2 gl-self-start gl-text-sm gl-text-subtle"
data-testid="crud-count"
>
<template v-if="displayedCount">

View File

@ -1,5 +1,6 @@
<script>
import { GlAlert, GlBadge, GlKeysetPagination, GlSkeletonLoader, GlPagination } from '@gitlab/ui';
import EmptyResult from '~/vue_shared/components/empty_result.vue';
import LocalStorageSync from '~/vue_shared/components/local_storage_sync.vue';
import PageSizeSelector from '~/vue_shared/components/page_size_selector.vue';
import { updateHistory, setUrlParams } from '~/lib/utils/url_utility';
@ -40,6 +41,7 @@ export default {
VueDraggable,
PageSizeSelector,
LocalStorageSync,
EmptyResult,
},
mixins: [glFeatureFlagMixin()],
props: {
@ -424,6 +426,7 @@ export default {
<div v-else-if="issuables.length > 0 && isGridView">
<issuable-grid />
</div>
<empty-result v-else-if="initialFilterValue.length > 0" />
<slot v-else-if="!error" name="empty-state"></slot>
</template>

View File

@ -8,6 +8,10 @@ $crud-header-min-height: px-to-rem(49px);
min-height: $crud-header-min-height;
}
.crud-count .gl-badge {
@apply -gl-ml-2 -gl-my-1;
}
.crud-body:has(.gl-table) {
margin-block: -1px;
margin-inline: 0;

View File

@ -2,6 +2,10 @@
class SentNotificationsController < ApplicationController
skip_before_action :authenticate_user!
# Automatic unsubscribe by an email client should happen via a POST request.
# See https://datatracker.ietf.org/doc/html/rfc8058
# This allows POST requests without CSRF token.
skip_before_action :verify_authenticity_token, only: [:unsubscribe]
feature_category :team_planning
urgency :low
@ -11,7 +15,7 @@ class SentNotificationsController < ApplicationController
return render_404 unless unsubscribe_prerequisites_met?
unsubscribe_and_redirect if current_user || params[:force]
unsubscribe_and_redirect if current_user || params[:force] || request.post?
end
private

View File

@ -231,7 +231,9 @@ class Notify < ApplicationMailer
def add_unsubscription_headers_and_links
return unless !@labels_url && @sent_notification && @sent_notification.unsubscribable?
list_unsubscribe_methods = [unsubscribe_sent_notification_url(@sent_notification, force: true)]
@unsubscribe_url = unsubscribe_sent_notification_url(@sent_notification)
list_unsubscribe_methods = [@unsubscribe_url]
if Gitlab::Email::IncomingEmail.enabled? && Gitlab::Email::IncomingEmail.supports_wildcard?
list_unsubscribe_methods << "mailto:#{Gitlab::Email::IncomingEmail.unsubscribe_address(reply_key)}"
end
@ -241,7 +243,6 @@ class Notify < ApplicationMailer
# be signalled with using the List-Unsubscribe-Post header
# See https://datatracker.ietf.org/doc/html/rfc8058
headers['List-Unsubscribe-Post'] = 'List-Unsubscribe=One-Click'
@unsubscribe_url = unsubscribe_sent_notification_url(@sent_notification)
end
def email_with_layout(to:, subject:, layout: 'mailer')

View File

@ -19,8 +19,6 @@ module Ci
# Record in SafeRequestStore a cross-project access attempt
def self.capture(origin_project:, accessed_project:)
return if Feature.disabled?(:ci_job_token_authorizations_log, accessed_project)
# Skip self-referential accesses as they are always allowed and don't need
# to be logged neither added to the allowlist.
return if origin_project == accessed_project
@ -44,8 +42,6 @@ module Ci
return unless authorizations
accessed_project_id = authorizations[:accessed_project_id]
return if Feature.disabled?(:ci_job_token_authorizations_log, Project.actor_from_id(accessed_project_id))
Ci::JobToken::LogAuthorizationWorker # rubocop:disable CodeReuse/Worker -- This method is called from a middleware and it's better tested
.perform_in(CAPTURE_DELAY, accessed_project_id, authorizations[:origin_project_id])
end

View File

@ -224,6 +224,7 @@ module Ci
scope :with_creator, -> { preload(:creator) }
validate :tag_constraints
validates :sharding_key_id, presence: true, on: :create, unless: :instance_type?
validates :name, length: { maximum: 256 }, if: :name_changed?
validates :description, length: { maximum: 1024 }, if: :description_changed?
validates :access_level, presence: true
@ -231,6 +232,7 @@ module Ci
validates :registration_type, presence: true
validate :no_projects, unless: :project_type?
validate :no_sharding_key_id, if: :instance_type?
validate :no_groups, unless: :group_type?
validate :any_project, if: :project_type?
validate :exactly_one_group, if: :group_type?
@ -354,6 +356,7 @@ module Ci
begin
transaction do
self.sharding_key_id = project.id if self.runner_projects.empty?
self.runner_projects << ::Ci::RunnerProject.new(project: project, runner: self)
self.save!
end
@ -571,6 +574,12 @@ module Ci
end
end
def no_sharding_key_id
if sharding_key_id
errors.add(:runner, 'cannot have sharding_key_id assigned')
end
end
def no_projects
if runner_projects.any?
errors.add(:runner, 'cannot have projects assigned')

View File

@ -44,10 +44,10 @@ module Ci
{ runner_type: :instance_type }
elsif runner_registrar_valid?('project') && project = ::Project.find_by_runners_token(registration_token)
# Create a project runner
{ runner_type: :project_type, projects: [project] }
{ runner_type: :project_type, projects: [project], sharding_key_id: project.id }
elsif runner_registrar_valid?('group') && group = ::Group.find_by_runners_token(registration_token)
# Create a group runner
{ runner_type: :group_type, groups: [group] }
{ runner_type: :group_type, groups: [group], sharding_key_id: group.id }
elsif registration_token.present? && !Gitlab::CurrentSettings.allow_runner_registration_token
{} # Will result in a :runner_registration_disallowed response
end

View File

@ -12,8 +12,11 @@ module Ci
end
def normalize_params
params[:runner_type] = 'group_type'
params[:groups] = [scope]
params.merge!({
runner_type: 'group_type',
sharding_key_id: scope&.id,
groups: [scope]
})
end
def validate_params

View File

@ -12,8 +12,11 @@ module Ci
end
def normalize_params
params[:runner_type] = 'project_type'
params[:projects] = [scope]
params.merge!({
runner_type: 'project_type',
sharding_key_id: scope&.id,
projects: [scope]
})
end
def validate_params

View File

@ -0,0 +1,70 @@
# frozen_string_literal: true
module Issuable # rubocop:disable Gitlab/BoundedContexts -- existing module we need for looking up callback classes
module Callbacks
class Labels < Base
include ::Gitlab::Utils::StrongMemoize
ALLOWED_PARAMS = %i[labels add_labels remove_labels label_ids add_label_ids remove_label_ids].freeze
def after_initialize
params[:label_ids] = params[:add_label_ids] = [] if excluded_in_new_type?
return unless ALLOWED_PARAMS.any? { |param| params.key?(param) }
return unless has_permission?(:"set_#{issuable.to_ability_name}_metadata")
normalize_and_filter_label_params!
existing_label_ids = issuable.label_ids.sort
new_label_ids = compute_new_label_ids.sort
issuable.label_ids = new_label_ids
issuable.touch if issuable.persisted? && existing_label_ids != new_label_ids
end
private
def normalize_and_filter_label_params!
normalize_and_filter_param(:add_label_ids, :add_labels)
normalize_and_filter_param(:remove_label_ids, :remove_labels, create_when_missing: false)
normalize_and_filter_param(:label_ids, :labels)
end
def compute_new_label_ids
new_label_ids = params[:label_ids] || issuable.label_ids || []
new_label_ids |= params[:add_label_ids] if params[:add_label_ids]
new_label_ids -= params[:remove_label_ids] if params[:remove_label_ids]
restore_removed_locked_labels(new_label_ids.uniq)
end
# Restore any locked labels that the user is attempting to remove
def restore_removed_locked_labels(new_label_ids)
return new_label_ids unless issuable.supports_lock_on_merge?
return new_label_ids unless issuable.label_ids.present?
removed_label_ids = issuable.label_ids - new_label_ids
removed_locked_label_ids = available_labels_service.filter_locked_label_ids(removed_label_ids)
new_label_ids + removed_locked_label_ids
end
def normalize_and_filter_param(id_param_name, title_param_name, create_when_missing: true)
if params[id_param_name]
params[id_param_name] = available_labels_service.filter_labels_ids_in_param(id_param_name)
elsif params[title_param_name]
params[id_param_name] = available_labels_service.find_or_create_by_titles(
title_param_name, find_only: !create_when_missing
).map(&:id)
end
end
def available_labels_service
::Labels::AvailableLabelsService.new(current_user, issuable.resource_parent, params)
end
strong_memoize_attr :available_labels_service
end
end
end
Issuable::Callbacks::Labels.prepend_mod

View File

@ -7,6 +7,7 @@ class IssuableBaseService < ::BaseContainerService
[
Issuable::Callbacks::Description,
Issuable::Callbacks::Milestone,
Issuable::Callbacks::Labels,
Issuable::Callbacks::TimeTracking
].freeze
end
@ -66,12 +67,6 @@ class IssuableBaseService < ::BaseContainerService
def filter_params(issuable)
unless can_set_issuable_metadata?(issuable)
params.delete(:labels)
params.delete(:add_label_ids)
params.delete(:add_labels)
params.delete(:remove_label_ids)
params.delete(:remove_labels)
params.delete(:label_ids)
params.delete(:assignee_ids)
params.delete(:assignee_id)
params.delete(:add_assignee_ids)
@ -87,7 +82,6 @@ class IssuableBaseService < ::BaseContainerService
params.delete(:confidential) unless can_set_confidentiality?(issuable)
filter_contact_params(issuable)
filter_assignees(issuable)
filter_labels
filter_severity(issuable)
filter_escalation_status(issuable)
end
@ -126,26 +120,6 @@ class IssuableBaseService < ::BaseContainerService
can?(user, ability_name, issuable.resource_parent)
end
def filter_labels
label_ids_to_filter(:add_label_ids, :add_labels, false)
label_ids_to_filter(:remove_label_ids, :remove_labels, true)
label_ids_to_filter(:label_ids, :labels, false)
end
def label_ids_to_filter(label_id_key, label_key, find_only)
if params[label_id_key]
params[label_id_key] = labels_service.filter_labels_ids_in_param(label_id_key)
elsif params[label_key]
params[label_id_key] = labels_service.find_or_create_by_titles(label_key, find_only: find_only).map(&:id)
end
params.delete(label_key) if params[label_key].nil?
end
def labels_service
@labels_service ||= ::Labels::AvailableLabelsService.new(current_user, parent, params)
end
def filter_severity(issuable)
severity = params.delete(:severity)
return unless severity && issuable.supports_severity?
@ -172,31 +146,6 @@ class IssuableBaseService < ::BaseContainerService
params[:incident_management_issuable_escalation_status_attributes] = result[:escalation_status]
end
def process_label_ids(attributes, issuable:, existing_label_ids: nil, extra_label_ids: []) # rubocop:disable Lint/UnusedMethodArgument
label_ids = attributes.delete(:label_ids)
add_label_ids = attributes.delete(:add_label_ids)
remove_label_ids = attributes.delete(:remove_label_ids)
new_label_ids = label_ids || existing_label_ids || []
new_label_ids |= extra_label_ids
new_label_ids |= add_label_ids if add_label_ids
new_label_ids -= remove_label_ids if remove_label_ids
filter_locked_labels(issuable, new_label_ids.uniq, existing_label_ids)
end
# Filter out any locked labels that are attempting to be removed.
# Returns +ids+ unchanged unless the issuable supports locked labels and had
# labels before; otherwise any locked label among the removed set is re-added.
def filter_locked_labels(issuable, ids, existing_label_ids)
return ids unless issuable.supports_lock_on_merge?
return ids unless existing_label_ids.present?
removed_label_ids = existing_label_ids - ids
removed_locked_label_ids = labels_service.filter_locked_label_ids(removed_label_ids)
ids + removed_locked_label_ids
end
def process_assignee_ids(attributes, existing_assignee_ids: nil, extra_assignee_ids: [])
process = Issuable::ProcessAssignees.new(
assignee_ids: attributes.delete(:assignee_ids),
@ -245,7 +194,6 @@ class IssuableBaseService < ::BaseContainerService
filter_params(issuable)
params.delete(:state_event)
params[:label_ids] = process_label_ids(params, issuable: issuable, extra_label_ids: issuable.label_ids.to_a)
if issuable.respond_to?(:assignee_ids)
params[:assignee_ids] = process_assignee_ids(params, extra_assignee_ids: issuable.assignee_ids.to_a)
@ -344,7 +292,6 @@ class IssuableBaseService < ::BaseContainerService
change_additional_attributes(issuable)
assign_requested_labels(issuable)
assign_requested_assignees(issuable)
assign_requested_crm_contacts(issuable)
widget_params = filter_widget_params
@ -547,14 +494,6 @@ class IssuableBaseService < ::BaseContainerService
end
# rubocop: enable CodeReuse/ActiveRecord
# Applies the requested label changes to params[:label_ids] and touches the
# issuable (bumping updated_at), but only when the label set actually changes.
def assign_requested_labels(issuable)
label_ids = process_label_ids(params, issuable: issuable, existing_label_ids: issuable.label_ids)
return unless ids_changing?(issuable.label_ids, label_ids)
params[:label_ids] = label_ids
issuable.touch
end
def assign_requested_crm_contacts(issuable)
add_crm_contact_emails = params.delete(:add_contacts)
remove_crm_contact_emails = params.delete(:remove_contacts)

View File

@ -66,16 +66,12 @@ module MergeRequests
# merge_request.assign_attributes(...) below is a Rails
# method that only work if all the params it is passed have
# corresponding fields in the database. As there are no fields
# in the database for :add_label_ids, :remove_label_ids,
# :add_assignee_ids and :remove_assignee_ids, we
# in the database for :add_assignee_ids and :remove_assignee_ids, we
# need to remove them from the params before the call to
# merge_request.assign_attributes(...)
#
# IssuableBaseService#process_label_ids and
# IssuableBaseService#process_assignee_ids take care
# IssuableBaseService#process_assignee_ids takes care
# of the removal.
params[:label_ids] = process_label_ids(params, issuable: merge_request, extra_label_ids: merge_request.label_ids.to_a)
params[:assignee_ids] = process_assignee_ids(params, extra_assignee_ids: merge_request.assignee_ids.to_a)
merge_request.assign_attributes(params.to_h.compact)
@ -92,7 +88,6 @@ module MergeRequests
filter_params(merge_request)
# Filter out the following from params:
# - :add_label_ids and :remove_label_ids
# - :add_assignee_ids and :remove_assignee_ids
filter_id_params
end

View File

@ -1,18 +0,0 @@
# frozen_string_literal: true
module WorkItems
module Widgets
module LabelsService
# Shared param handling for the labels-widget create/update services.
class BaseService < WorkItems::Widgets::BaseService
private
# Copies the permitted label params into service_params.
# No-op when params are blank or the user lacks the
# :set_work_item_metadata permission.
def prepare_params(params: {}, permitted_params: [])
return if params.blank?
return unless has_permission?(:set_work_item_metadata)
service_params.merge!(params.slice(*permitted_params))
end
end
end
end
end

View File

@ -1,18 +0,0 @@
# frozen_string_literal: true
module WorkItems
module Widgets
module LabelsService
# Labels-widget param handling for work item creation.
class CreateService < BaseService
# Permits :label_ids plus the add/remove variants on create.
def prepare_create_params(params: {})
prepare_params(params: params, permitted_params: %i[add_label_ids remove_label_ids label_ids])
end
# Empties the additive label params. No caller is visible in this
# file — presumably invoked when the target type has no labels
# widget; confirm against the widget framework.
def clear_label_params(params)
params[:add_label_ids] = []
params[:label_ids] = []
end
end
end
end
end

View File

@ -1,22 +0,0 @@
# frozen_string_literal: true
module WorkItems
module Widgets
module LabelsService
# Labels-widget param handling for work item updates.
class UpdateService < BaseService
# Permits only the add/remove label params (not :label_ids) on
# update. When the work item is changing to a type that excludes the
# labels widget, all currently assigned labels are queued for removal.
def prepare_update_params(params: {})
clear_label_params(params) if new_type_excludes_widget?
prepare_params(params: params, permitted_params: %i[add_label_ids remove_label_ids])
end
private
# Schedules removal of every currently assigned label and clears adds.
def clear_label_params(params)
params[:remove_label_ids] = @work_item.labels.map(&:id)
params[:add_label_ids] = []
end
end
end
end
end

View File

@ -1,9 +0,0 @@
---
name: ci_job_token_authorizations_log
feature_issue_url: https://gitlab.com/gitlab-org/gitlab/-/issues/467292
introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/162645
rollout_issue_url: https://gitlab.com/gitlab-org/gitlab/-/issues/478869
milestone: '17.5'
group: group::pipeline security
type: gitlab_com_derisk
default_enabled: false

View File

@ -208,7 +208,7 @@ InitializerConnections.raise_if_new_database_connection do
resources :sent_notifications, only: [], constraints: { id: /\h{32}/ } do
member do
get :unsubscribe
match :unsubscribe, via: [:get, :post]
end
end

View File

@ -19,3 +19,4 @@ desired_sharding_key:
table: approval_project_rules
sharding_key: project_id
belongs_to: approval_project_rule
desired_sharding_key_migration_job_name: BackfillApprovalProjectRulesProtectedBranchesProjectId

View File

@ -0,0 +1,9 @@
---
migration_job_name: BackfillApprovalProjectRulesProtectedBranchesProjectId
description: Backfills sharding key `approval_project_rules_protected_branches.project_id` from `approval_project_rules`.
feature_category: source_code_management
introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/167648
milestone: '17.5'
queued_migration_version: 20240930160027
finalize_after: '2024-10-22'
finalized_by: # version of the migration that finalized this BBM

View File

@ -0,0 +1,9 @@
---
migration_job_name: BackfillPackagesNpmMetadataProjectId
description: Backfills sharding key `packages_npm_metadata.project_id` from `packages_packages`.
feature_category: package_registry
introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/167446
milestone: '17.5'
queued_migration_version: 20240927125019
finalize_after: '2024-10-22'
finalized_by: # version of the migration that finalized this BBM

View File

@ -0,0 +1,9 @@
---
migration_job_name: BackfillPackagesRpmMetadataProjectId
description: Backfills sharding key `packages_rpm_metadata.project_id` from `packages_packages`.
feature_category: package_registry
introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/167609
milestone: '17.5'
queued_migration_version: 20240930122236
finalize_after: '2024-10-22'
finalized_by: # version of the migration that finalized this BBM

View File

@ -0,0 +1,9 @@
---
migration_job_name: BackfillPagesDeploymentStatesProjectId
description: Backfills sharding key `pages_deployment_states.project_id` from `pages_deployments`.
feature_category: pages
introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/167613
milestone: '17.5'
queued_migration_version: 20240930123056
finalize_after: '2024-10-22'
finalized_by: # version of the migration that finalized this BBM

View File

@ -0,0 +1,10 @@
---
migration_job_name: BackfillShardingKeyIdOnCiRunners
description: >-
Backfills the `sharding_key_id` column from `ci_runner_namespaces` and `ci_runner_projects`.
The column will serve as the sharding key in the future partitioned (by `runner_type`) table.
feature_category: runner
introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/166916
milestone: '17.5'
queued_migration_version: 20240923132401
finalized_by: # version of the migration that finalized this BBM

View File

@ -19,3 +19,4 @@ desired_sharding_key:
table: packages_packages
sharding_key: project_id
belongs_to: package
desired_sharding_key_migration_job_name: BackfillPackagesNpmMetadataProjectId

View File

@ -19,3 +19,4 @@ desired_sharding_key:
table: packages_packages
sharding_key: project_id
belongs_to: package
desired_sharding_key_migration_job_name: BackfillPackagesRpmMetadataProjectId

View File

@ -19,3 +19,4 @@ desired_sharding_key:
table: pages_deployments
sharding_key: project_id
belongs_to: pages_deployment
desired_sharding_key_migration_job_name: BackfillPagesDeploymentStatesProjectId

View File

@ -0,0 +1,14 @@
# frozen_string_literal: true
# Adds a nullable bigint `sharding_key_id` column to `ci_runners`.
# The column is populated separately by the BackfillShardingKeyIdOnCiRunners
# batched background migration.
class AddShardingKeyIdToCiRunners < Gitlab::Database::Migration[2.2]
milestone '17.5'
enable_lock_retries!
def up
# if_not_exists keeps the migration safe to retry.
add_column :ci_runners, :sharding_key_id, :bigint, null: true, if_not_exists: true
end
def down
remove_column :ci_runners, :sharding_key_id, if_exists: true
end
end

View File

@ -0,0 +1,9 @@
# frozen_string_literal: true
# Adds a nullable bigint `project_id` sharding-key column to
# `packages_npm_metadata`; populated by a trigger and backfill in
# companion migrations.
class AddProjectIdToPackagesNpmMetadata < Gitlab::Database::Migration[2.2]
milestone '17.5'
def change
add_column :packages_npm_metadata, :project_id, :bigint
end
end

View File

@ -0,0 +1,9 @@
# frozen_string_literal: true
# Adds a nullable bigint `project_id` sharding-key column to
# `packages_rpm_metadata`; populated by a trigger and backfill in
# companion migrations.
class AddProjectIdToPackagesRpmMetadata < Gitlab::Database::Migration[2.2]
milestone '17.5'
def change
add_column :packages_rpm_metadata, :project_id, :bigint
end
end

View File

@ -0,0 +1,9 @@
# frozen_string_literal: true
# Adds a nullable bigint `project_id` sharding-key column to
# `pages_deployment_states`; populated by a trigger and backfill in
# companion migrations.
class AddProjectIdToPagesDeploymentStates < Gitlab::Database::Migration[2.2]
milestone '17.5'
def change
add_column :pages_deployment_states, :project_id, :bigint
end
end

View File

@ -0,0 +1,9 @@
# frozen_string_literal: true
# Adds a nullable bigint `project_id` sharding-key column to
# `approval_project_rules_protected_branches`; populated by a trigger and
# backfill in companion migrations.
class AddProjectIdToApprovalProjectRulesProtectedBranches < Gitlab::Database::Migration[2.2]
milestone '17.5'
def change
add_column :approval_project_rules_protected_branches, :project_id, :bigint
end
end

View File

@ -0,0 +1,27 @@
# frozen_string_literal: true
# Enqueues the BackfillShardingKeyIdOnCiRunners batched background migration,
# which populates ci_runners.sharding_key_id (batches of 1000, sub-batches of
# 100, one batch every 2 minutes). Restricted to the CI database.
class QueueBackfillShardingKeyIdOnCiRunners < Gitlab::Database::Migration[2.2]
milestone '17.5'
restrict_gitlab_migration gitlab_schema: :gitlab_ci
MIGRATION = 'BackfillShardingKeyIdOnCiRunners'
DELAY_INTERVAL = 2.minutes
BATCH_SIZE = 1000
SUB_BATCH_SIZE = 100
def up
queue_batched_background_migration(
MIGRATION,
:ci_runners,
:id,
job_interval: DELAY_INTERVAL,
batch_size: BATCH_SIZE,
sub_batch_size: SUB_BATCH_SIZE
)
end
def down
# No extra job arguments were queued, hence the empty array.
delete_batched_background_migration(MIGRATION, :ci_runners, :id, [])
end
end

View File

@ -0,0 +1,16 @@
# frozen_string_literal: true
# Adds an index on packages_npm_metadata.project_id. Created concurrently,
# which requires running outside a DDL transaction.
class IndexPackagesNpmMetadataOnProjectId < Gitlab::Database::Migration[2.2]
milestone '17.5'
disable_ddl_transaction!
INDEX_NAME = 'index_packages_npm_metadata_on_project_id'
def up
add_concurrent_index :packages_npm_metadata, :project_id, name: INDEX_NAME
end
def down
remove_concurrent_index_by_name :packages_npm_metadata, INDEX_NAME
end
end

View File

@ -0,0 +1,16 @@
# frozen_string_literal: true
# Adds a foreign key from packages_npm_metadata.project_id to projects,
# cascading on project deletion. Added concurrently, so DDL transactions are
# disabled; rollback drops the key under lock retries.
class AddPackagesNpmMetadataProjectIdFk < Gitlab::Database::Migration[2.2]
milestone '17.5'
disable_ddl_transaction!
def up
add_concurrent_foreign_key :packages_npm_metadata, :projects, column: :project_id, on_delete: :cascade
end
def down
with_lock_retries do
remove_foreign_key :packages_npm_metadata, column: :project_id
end
end
end

View File

@ -0,0 +1,25 @@
# frozen_string_literal: true
# Installs a before-INSERT/UPDATE trigger on packages_npm_metadata that copies
# project_id from the parent packages_packages row (joined via package_id)
# whenever the written row has a NULL project_id.
class AddPackagesNpmMetadataProjectIdTrigger < Gitlab::Database::Migration[2.2]
milestone '17.5'
def up
install_sharding_key_assignment_trigger(
table: :packages_npm_metadata,
sharding_key: :project_id,
parent_table: :packages_packages,
parent_sharding_key: :project_id,
foreign_key: :package_id
)
end
def down
remove_sharding_key_assignment_trigger(
table: :packages_npm_metadata,
sharding_key: :project_id,
parent_table: :packages_packages,
parent_sharding_key: :project_id,
foreign_key: :package_id
)
end
end

View File

@ -0,0 +1,40 @@
# frozen_string_literal: true
# Enqueues the BackfillPackagesNpmMetadataProjectId batched background
# migration: copies project_id from packages_packages onto existing
# packages_npm_metadata rows, batched over package_id (1000 per batch,
# sub-batches of 100, one batch every 2 minutes).
class QueueBackfillPackagesNpmMetadataProjectId < Gitlab::Database::Migration[2.2]
milestone '17.5'
restrict_gitlab_migration gitlab_schema: :gitlab_main_cell
MIGRATION = "BackfillPackagesNpmMetadataProjectId"
DELAY_INTERVAL = 2.minutes
BATCH_SIZE = 1000
SUB_BATCH_SIZE = 100
def up
queue_batched_background_migration(
MIGRATION,
:packages_npm_metadata,
:package_id,
# Job arguments: backfill column, parent table, parent column, join key.
:project_id,
:packages_packages,
:project_id,
:package_id,
job_interval: DELAY_INTERVAL,
batch_size: BATCH_SIZE,
sub_batch_size: SUB_BATCH_SIZE
)
end
def down
delete_batched_background_migration(
MIGRATION,
:packages_npm_metadata,
:package_id,
[
:project_id,
:packages_packages,
:project_id,
:package_id
]
)
end
end

View File

@ -0,0 +1,16 @@
# frozen_string_literal: true
# Adds an index on packages_rpm_metadata.project_id. Created concurrently,
# which requires running outside a DDL transaction.
class IndexPackagesRpmMetadataOnProjectId < Gitlab::Database::Migration[2.2]
milestone '17.5'
disable_ddl_transaction!
INDEX_NAME = 'index_packages_rpm_metadata_on_project_id'
def up
add_concurrent_index :packages_rpm_metadata, :project_id, name: INDEX_NAME
end
def down
remove_concurrent_index_by_name :packages_rpm_metadata, INDEX_NAME
end
end

View File

@ -0,0 +1,16 @@
# frozen_string_literal: true
# Adds a foreign key from packages_rpm_metadata.project_id to projects,
# cascading on project deletion. Added concurrently, so DDL transactions are
# disabled; rollback drops the key under lock retries.
class AddPackagesRpmMetadataProjectIdFk < Gitlab::Database::Migration[2.2]
milestone '17.5'
disable_ddl_transaction!
def up
add_concurrent_foreign_key :packages_rpm_metadata, :projects, column: :project_id, on_delete: :cascade
end
def down
with_lock_retries do
remove_foreign_key :packages_rpm_metadata, column: :project_id
end
end
end

View File

@ -0,0 +1,25 @@
# frozen_string_literal: true
# Installs a before-INSERT/UPDATE trigger on packages_rpm_metadata that copies
# project_id from the parent packages_packages row (joined via package_id)
# whenever the written row has a NULL project_id.
class AddPackagesRpmMetadataProjectIdTrigger < Gitlab::Database::Migration[2.2]
milestone '17.5'
def up
install_sharding_key_assignment_trigger(
table: :packages_rpm_metadata,
sharding_key: :project_id,
parent_table: :packages_packages,
parent_sharding_key: :project_id,
foreign_key: :package_id
)
end
def down
remove_sharding_key_assignment_trigger(
table: :packages_rpm_metadata,
sharding_key: :project_id,
parent_table: :packages_packages,
parent_sharding_key: :project_id,
foreign_key: :package_id
)
end
end

View File

@ -0,0 +1,40 @@
# frozen_string_literal: true
# Enqueues the BackfillPackagesRpmMetadataProjectId batched background
# migration: copies project_id from packages_packages onto existing
# packages_rpm_metadata rows, batched over package_id (1000 per batch,
# sub-batches of 100, one batch every 2 minutes).
class QueueBackfillPackagesRpmMetadataProjectId < Gitlab::Database::Migration[2.2]
milestone '17.5'
restrict_gitlab_migration gitlab_schema: :gitlab_main_cell
MIGRATION = "BackfillPackagesRpmMetadataProjectId"
DELAY_INTERVAL = 2.minutes
BATCH_SIZE = 1000
SUB_BATCH_SIZE = 100
def up
queue_batched_background_migration(
MIGRATION,
:packages_rpm_metadata,
:package_id,
# Job arguments: backfill column, parent table, parent column, join key.
:project_id,
:packages_packages,
:project_id,
:package_id,
job_interval: DELAY_INTERVAL,
batch_size: BATCH_SIZE,
sub_batch_size: SUB_BATCH_SIZE
)
end
def down
delete_batched_background_migration(
MIGRATION,
:packages_rpm_metadata,
:package_id,
[
:project_id,
:packages_packages,
:project_id,
:package_id
]
)
end
end

View File

@ -0,0 +1,16 @@
# frozen_string_literal: true
# Adds an index on pages_deployment_states.project_id. Created concurrently,
# which requires running outside a DDL transaction.
class IndexPagesDeploymentStatesOnProjectId < Gitlab::Database::Migration[2.2]
milestone '17.5'
disable_ddl_transaction!
INDEX_NAME = 'index_pages_deployment_states_on_project_id'
def up
add_concurrent_index :pages_deployment_states, :project_id, name: INDEX_NAME
end
def down
remove_concurrent_index_by_name :pages_deployment_states, INDEX_NAME
end
end

View File

@ -0,0 +1,16 @@
# frozen_string_literal: true
# Adds a foreign key from pages_deployment_states.project_id to projects,
# cascading on project deletion. Added concurrently, so DDL transactions are
# disabled; rollback drops the key under lock retries.
class AddPagesDeploymentStatesProjectIdFk < Gitlab::Database::Migration[2.2]
milestone '17.5'
disable_ddl_transaction!
def up
add_concurrent_foreign_key :pages_deployment_states, :projects, column: :project_id, on_delete: :cascade
end
def down
with_lock_retries do
remove_foreign_key :pages_deployment_states, column: :project_id
end
end
end

View File

@ -0,0 +1,25 @@
# frozen_string_literal: true
# Installs a before-INSERT/UPDATE trigger on pages_deployment_states that
# copies project_id from the parent pages_deployments row (joined via
# pages_deployment_id) whenever the written row has a NULL project_id.
class AddPagesDeploymentStatesProjectIdTrigger < Gitlab::Database::Migration[2.2]
milestone '17.5'
def up
install_sharding_key_assignment_trigger(
table: :pages_deployment_states,
sharding_key: :project_id,
parent_table: :pages_deployments,
parent_sharding_key: :project_id,
foreign_key: :pages_deployment_id
)
end
def down
remove_sharding_key_assignment_trigger(
table: :pages_deployment_states,
sharding_key: :project_id,
parent_table: :pages_deployments,
parent_sharding_key: :project_id,
foreign_key: :pages_deployment_id
)
end
end

View File

@ -0,0 +1,40 @@
# frozen_string_literal: true
# Enqueues the BackfillPagesDeploymentStatesProjectId batched background
# migration: copies project_id from pages_deployments onto existing
# pages_deployment_states rows, batched over pages_deployment_id (1000 per
# batch, sub-batches of 100, one batch every 2 minutes).
class QueueBackfillPagesDeploymentStatesProjectId < Gitlab::Database::Migration[2.2]
milestone '17.5'
restrict_gitlab_migration gitlab_schema: :gitlab_main_cell
MIGRATION = "BackfillPagesDeploymentStatesProjectId"
DELAY_INTERVAL = 2.minutes
BATCH_SIZE = 1000
SUB_BATCH_SIZE = 100
def up
queue_batched_background_migration(
MIGRATION,
:pages_deployment_states,
:pages_deployment_id,
# Job arguments: backfill column, parent table, parent column, join key.
:project_id,
:pages_deployments,
:project_id,
:pages_deployment_id,
job_interval: DELAY_INTERVAL,
batch_size: BATCH_SIZE,
sub_batch_size: SUB_BATCH_SIZE
)
end
def down
delete_batched_background_migration(
MIGRATION,
:pages_deployment_states,
:pages_deployment_id,
[
:project_id,
:pages_deployments,
:project_id,
:pages_deployment_id
]
)
end
end

View File

@ -0,0 +1,16 @@
# frozen_string_literal: true
# Adds an index on approval_project_rules_protected_branches.project_id.
# Created concurrently, which requires running outside a DDL transaction.
class IndexApprovalProjectRulesProtectedBranchesOnProjectId < Gitlab::Database::Migration[2.2]
milestone '17.5'
disable_ddl_transaction!
INDEX_NAME = 'index_approval_project_rules_protected_branches_on_project_id'
def up
add_concurrent_index :approval_project_rules_protected_branches, :project_id, name: INDEX_NAME
end
def down
remove_concurrent_index_by_name :approval_project_rules_protected_branches, INDEX_NAME
end
end

View File

@ -0,0 +1,17 @@
# frozen_string_literal: true
# Adds a foreign key from approval_project_rules_protected_branches.project_id
# to projects, cascading on project deletion. Added concurrently, so DDL
# transactions are disabled; rollback drops the key under lock retries.
class AddApprovalProjectRulesProtectedBranchesProjectIdFk < Gitlab::Database::Migration[2.2]
milestone '17.5'
disable_ddl_transaction!
def up
add_concurrent_foreign_key :approval_project_rules_protected_branches, :projects, column: :project_id,
on_delete: :cascade
end
def down
with_lock_retries do
remove_foreign_key :approval_project_rules_protected_branches, column: :project_id
end
end
end

View File

@ -0,0 +1,25 @@
# frozen_string_literal: true
# Installs a before-INSERT/UPDATE trigger on
# approval_project_rules_protected_branches that copies project_id from the
# parent approval_project_rules row (joined via approval_project_rule_id)
# whenever the written row has a NULL project_id.
class AddApprovalProjectRulesProtectedBranchesProjectIdTrigger < Gitlab::Database::Migration[2.2]
milestone '17.5'
def up
install_sharding_key_assignment_trigger(
table: :approval_project_rules_protected_branches,
sharding_key: :project_id,
parent_table: :approval_project_rules,
parent_sharding_key: :project_id,
foreign_key: :approval_project_rule_id
)
end
def down
remove_sharding_key_assignment_trigger(
table: :approval_project_rules_protected_branches,
sharding_key: :project_id,
parent_table: :approval_project_rules,
parent_sharding_key: :project_id,
foreign_key: :approval_project_rule_id
)
end
end

View File

@ -0,0 +1,41 @@
# frozen_string_literal: true
# Enqueues the BackfillApprovalProjectRulesProtectedBranchesProjectId batched
# background migration: copies project_id from approval_project_rules onto
# existing approval_project_rules_protected_branches rows, batched over
# approval_project_rule_id (1000 per batch, sub-batches of 100, every 2 min).
class QueueBackfillApprovalProjectRulesProtectedBranchesProjectId < Gitlab::Database::Migration[2.2]
milestone '17.5'
restrict_gitlab_migration gitlab_schema: :gitlab_main_cell
MIGRATION = "BackfillApprovalProjectRulesProtectedBranchesProjectId"
DELAY_INTERVAL = 2.minutes
BATCH_SIZE = 1000
SUB_BATCH_SIZE = 100
def up
queue_batched_background_migration(
MIGRATION,
:approval_project_rules_protected_branches,
:approval_project_rule_id,
# Job arguments: backfill column, parent table, parent column, join key.
:project_id,
:approval_project_rules,
:project_id,
:approval_project_rule_id,
job_interval: DELAY_INTERVAL,
batch_size: BATCH_SIZE,
# NOTE(review): loose index scan batching — presumably because
# approval_project_rule_id is not unique in this join table; confirm.
batch_class_name: 'LooseIndexScanBatchingStrategy',
sub_batch_size: SUB_BATCH_SIZE
)
end
def down
delete_batched_background_migration(
MIGRATION,
:approval_project_rules_protected_branches,
:approval_project_rule_id,
[
:project_id,
:approval_project_rules,
:project_id,
:approval_project_rule_id
]
)
end
end

View File

@ -0,0 +1 @@
6aea04fdb5dc8a8753d73cfd4c20ecdf4747875460f9081cf9c0f8b9013d2c7c

View File

@ -0,0 +1 @@
9a9d6e31fdbf283db7c22fbd0f7e4dafb6e745dab183330591c4e3e3bb901acd

View File

@ -0,0 +1 @@
27a6cac8e6cf0894d266a8ffe2f6f106779bdbf53af4507151632a946e134098

View File

@ -0,0 +1 @@
983454b0a57756b8c360deb95dcda60753c4dca1de2eb638aa7134e695e59c4b

View File

@ -0,0 +1 @@
6d2ebea17ffab5769b5c4850446e7dba1658d9d953ae25bb08d40308c9db4028

View File

@ -0,0 +1 @@
aaa07ea7ee64e446cf167a0930121476163f408489e081e401513b28c21c78dc

View File

@ -0,0 +1 @@
4d90b8071dd9a83e4a6544bc5e65d53790799a7cd88ef12f536e77ca754b3f33

View File

@ -0,0 +1 @@
fb3da584cb34c526c28e509ef84d583244088412a2c72ec642799b0ba3de787f

View File

@ -0,0 +1 @@
9d73b08593bfb593466868b6d3ee3ebba793a02329a17514d318fc3516d90df5

View File

@ -0,0 +1 @@
2120d187635ba11ddc17c7473deac1e555da4fce842f09bfa058aa4e7b3c5531

View File

@ -0,0 +1 @@
e4e2ac0b6c0089b4ee282816e1d7f7c61609a456085b6c9f1c97d16b62b2d811

View File

@ -0,0 +1 @@
51d4f367dc50a8a0f9f38407c3d2a3c9a724c3effee02fa2e3c665abc35c63ed

View File

@ -0,0 +1 @@
06fcf8b77415a84dfd30254e2ab3d220cb2bb92cdf1960cb50281f262a5815e3

View File

@ -0,0 +1 @@
97f22306864d8c4fb739d4a2df80abfbeafb8ee2e14905b692ff97acd526be33

View File

@ -0,0 +1 @@
8a6d023abf2b37d6099ee7e7373b3b153fcde822a850119c01cafc6590b12b8c

View File

@ -0,0 +1 @@
03bd5975e16db4f4e6e5ebfc7ac3b693c29fa5726cda318f1117c413fb05e230

View File

@ -0,0 +1 @@
082d0e983d22c78baabb5615894db1c48f195eec081fe701b65b8d97816a5a33

View File

@ -0,0 +1 @@
e4246085fc454e022e500fdb3b5dbb5d3c78a71fe5107d41d615e945a80f31e5

View File

@ -0,0 +1 @@
d0525a4ec76c7847e59531e5fb6d5137b2121e1dd06ade1680b283bfcf28c53d

View File

@ -0,0 +1 @@
fd42f51e9904d94a6870d24f9ca017b0047b183f92cf1481268667a17e2477f9

View File

@ -0,0 +1 @@
d53da99c66293fa9e53fdef556f3134d490fe66e7bc7bed08093bea9f686f691

View File

@ -0,0 +1 @@
6f49ec85bcf41062ff0c9165444a74a8119ab500392221bc6e8f0a75d3566c45

View File

@ -841,6 +841,22 @@ RETURN NEW;
END
$$;
CREATE FUNCTION trigger_0a29d4d42b62() RETURNS trigger
LANGUAGE plpgsql
AS $$
BEGIN
IF NEW."project_id" IS NULL THEN
SELECT "project_id"
INTO NEW."project_id"
FROM "approval_project_rules"
WHERE "approval_project_rules"."id" = NEW."approval_project_rule_id";
END IF;
RETURN NEW;
END
$$;
CREATE FUNCTION trigger_0da002390fdc() RETURNS trigger
LANGUAGE plpgsql
AS $$
@ -1017,6 +1033,22 @@ RETURN NEW;
END
$$;
CREATE FUNCTION trigger_248cafd363ff() RETURNS trigger
LANGUAGE plpgsql
AS $$
BEGIN
IF NEW."project_id" IS NULL THEN
SELECT "project_id"
INTO NEW."project_id"
FROM "packages_packages"
WHERE "packages_packages"."id" = NEW."package_id";
END IF;
RETURN NEW;
END
$$;
CREATE FUNCTION trigger_2514245c7fc5() RETURNS trigger
LANGUAGE plpgsql
AS $$
@ -1401,6 +1433,22 @@ RETURN NEW;
END
$$;
CREATE FUNCTION trigger_627949f72f05() RETURNS trigger
LANGUAGE plpgsql
AS $$
BEGIN
IF NEW."project_id" IS NULL THEN
SELECT "project_id"
INTO NEW."project_id"
FROM "packages_packages"
WHERE "packages_packages"."id" = NEW."package_id";
END IF;
RETURN NEW;
END
$$;
CREATE FUNCTION trigger_664594a3d0a7() RETURNS trigger
LANGUAGE plpgsql
AS $$
@ -1577,6 +1625,22 @@ RETURN NEW;
END
$$;
CREATE FUNCTION trigger_81b4c93e7133() RETURNS trigger
LANGUAGE plpgsql
AS $$
BEGIN
IF NEW."project_id" IS NULL THEN
SELECT "project_id"
INTO NEW."project_id"
FROM "pages_deployments"
WHERE "pages_deployments"."id" = NEW."pages_deployment_id";
END IF;
RETURN NEW;
END
$$;
CREATE FUNCTION trigger_8204480b3a2e() RETURNS trigger
LANGUAGE plpgsql
AS $$
@ -6620,7 +6684,8 @@ ALTER SEQUENCE approval_project_rules_id_seq OWNED BY approval_project_rules.id;
CREATE TABLE approval_project_rules_protected_branches (
approval_project_rule_id bigint NOT NULL,
protected_branch_id bigint NOT NULL
protected_branch_id bigint NOT NULL,
project_id bigint
);
CREATE TABLE approval_project_rules_users (
@ -8872,6 +8937,7 @@ CREATE TABLE ci_runners (
creator_id bigint,
creation_state smallint DEFAULT 0 NOT NULL,
allowed_plan_ids bigint[] DEFAULT '{}'::bigint[] NOT NULL,
sharding_key_id bigint,
CONSTRAINT check_46c685e76f CHECK ((char_length((description)::text) <= 1024)),
CONSTRAINT check_91230910ec CHECK ((char_length((name)::text) <= 256)),
CONSTRAINT check_ce275cee06 CHECK ((char_length(maintainer_note) <= 1024))
@ -15251,6 +15317,7 @@ ALTER SEQUENCE packages_maven_metadata_id_seq OWNED BY packages_maven_metadata.i
CREATE TABLE packages_npm_metadata (
package_id bigint NOT NULL,
package_json jsonb DEFAULT '{}'::jsonb NOT NULL,
project_id bigint,
CONSTRAINT chk_rails_e5cbc301ae CHECK ((char_length((package_json)::text) < 20000))
);
@ -15451,6 +15518,7 @@ CREATE TABLE packages_rpm_metadata (
license text,
url text,
epoch integer DEFAULT 0 NOT NULL,
project_id bigint,
CONSTRAINT check_3798bae3d6 CHECK ((char_length(arch) <= 255)),
CONSTRAINT check_5d29ba59ac CHECK ((char_length(description) <= 5000)),
CONSTRAINT check_6e8cbd536d CHECK ((char_length(url) <= 1000)),
@ -15574,6 +15642,7 @@ CREATE TABLE pages_deployment_states (
verification_retry_count smallint,
verification_checksum bytea,
verification_failure text,
project_id bigint,
CONSTRAINT check_15217e8c3a CHECK ((char_length(verification_failure) <= 255))
);
@ -27499,6 +27568,8 @@ CREATE INDEX index_approval_project_rules_on_project_id ON approval_project_rule
CREATE INDEX index_approval_project_rules_on_rule_type ON approval_project_rules USING btree (rule_type);
CREATE INDEX index_approval_project_rules_protected_branches_on_project_id ON approval_project_rules_protected_branches USING btree (project_id);
CREATE INDEX index_approval_project_rules_protected_branches_pb_id ON approval_project_rules_protected_branches USING btree (protected_branch_id);
CREATE INDEX index_approval_project_rules_report_type ON approval_project_rules USING btree (report_type);
@ -29869,6 +29940,8 @@ CREATE UNIQUE INDEX index_packages_npm_metadata_caches_on_object_storage_key ON
CREATE INDEX index_packages_npm_metadata_caches_on_project_id_status ON packages_npm_metadata_caches USING btree (project_id, status);
CREATE INDEX index_packages_npm_metadata_on_project_id ON packages_npm_metadata USING btree (project_id);
CREATE INDEX index_packages_nuget_dl_metadata_on_dependency_link_id ON packages_nuget_dependency_link_metadata USING btree (dependency_link_id);
CREATE UNIQUE INDEX index_packages_nuget_symbols_on_object_storage_key ON packages_nuget_symbols USING btree (object_storage_key);
@ -29925,6 +29998,8 @@ CREATE INDEX index_packages_project_id_name_partial_for_nuget ON packages_packag
CREATE INDEX index_packages_rpm_metadata_on_package_id ON packages_rpm_metadata USING btree (package_id);
CREATE INDEX index_packages_rpm_metadata_on_project_id ON packages_rpm_metadata USING btree (project_id);
CREATE INDEX index_packages_rpm_repository_files_on_project_id_and_file_name ON packages_rpm_repository_files USING btree (project_id, file_name);
CREATE INDEX index_packages_tags_on_package_id_and_updated_at ON packages_tags USING btree (package_id, updated_at DESC);
@ -29939,6 +30014,8 @@ CREATE INDEX index_pages_deployment_states_needs_verification ON pages_deploymen
CREATE INDEX index_pages_deployment_states_on_pages_deployment_id ON pages_deployment_states USING btree (pages_deployment_id);
CREATE INDEX index_pages_deployment_states_on_project_id ON pages_deployment_states USING btree (project_id);
CREATE INDEX index_pages_deployment_states_on_verification_state ON pages_deployment_states USING btree (verification_state);
CREATE INDEX index_pages_deployment_states_pending_verification ON pages_deployment_states USING btree (verified_at NULLS FIRST) WHERE (verification_state = 0);
@ -33301,6 +33378,8 @@ CREATE TRIGGER trigger_05ce163deddf BEFORE INSERT OR UPDATE ON status_check_resp
CREATE TRIGGER trigger_0a1b0adcf686 BEFORE INSERT OR UPDATE ON packages_debian_project_components FOR EACH ROW EXECUTE FUNCTION trigger_0a1b0adcf686();
CREATE TRIGGER trigger_0a29d4d42b62 BEFORE INSERT OR UPDATE ON approval_project_rules_protected_branches FOR EACH ROW EXECUTE FUNCTION trigger_0a29d4d42b62();
CREATE TRIGGER trigger_0da002390fdc BEFORE INSERT OR UPDATE ON operations_feature_flags_issues FOR EACH ROW EXECUTE FUNCTION trigger_0da002390fdc();
CREATE TRIGGER trigger_0e13f214e504 BEFORE INSERT OR UPDATE ON merge_request_assignment_events FOR EACH ROW EXECUTE FUNCTION trigger_0e13f214e504();
@ -33323,6 +33402,8 @@ CREATE TRIGGER trigger_207005e8e995 BEFORE INSERT OR UPDATE ON operations_strate
CREATE TRIGGER trigger_219952df8fc4 BEFORE INSERT OR UPDATE ON merge_request_blocks FOR EACH ROW EXECUTE FUNCTION trigger_219952df8fc4();
CREATE TRIGGER trigger_248cafd363ff BEFORE INSERT OR UPDATE ON packages_npm_metadata FOR EACH ROW EXECUTE FUNCTION trigger_248cafd363ff();
CREATE TRIGGER trigger_2514245c7fc5 BEFORE INSERT OR UPDATE ON dast_site_profile_secret_variables FOR EACH ROW EXECUTE FUNCTION trigger_2514245c7fc5();
CREATE TRIGGER trigger_25c44c30884f BEFORE INSERT OR UPDATE ON work_item_parent_links FOR EACH ROW EXECUTE FUNCTION trigger_25c44c30884f();
@ -33371,6 +33452,8 @@ CREATE TRIGGER trigger_5ca97b87ee30 BEFORE INSERT OR UPDATE ON merge_request_con
CREATE TRIGGER trigger_5f6432d2dccc BEFORE INSERT OR UPDATE ON operations_strategies_user_lists FOR EACH ROW EXECUTE FUNCTION trigger_5f6432d2dccc();
CREATE TRIGGER trigger_627949f72f05 BEFORE INSERT OR UPDATE ON packages_rpm_metadata FOR EACH ROW EXECUTE FUNCTION trigger_627949f72f05();
CREATE TRIGGER trigger_664594a3d0a7 BEFORE INSERT OR UPDATE ON merge_request_user_mentions FOR EACH ROW EXECUTE FUNCTION trigger_664594a3d0a7();
CREATE TRIGGER trigger_68435a54ee2b BEFORE INSERT OR UPDATE ON packages_debian_project_architectures FOR EACH ROW EXECUTE FUNCTION trigger_68435a54ee2b();
@ -33393,6 +33476,8 @@ CREATE TRIGGER trigger_7a8b08eed782 BEFORE INSERT OR UPDATE ON boards_epic_board
CREATE TRIGGER trigger_7de792ddbc05 BEFORE INSERT OR UPDATE ON dast_site_validations FOR EACH ROW EXECUTE FUNCTION trigger_7de792ddbc05();
CREATE TRIGGER trigger_81b4c93e7133 BEFORE INSERT OR UPDATE ON pages_deployment_states FOR EACH ROW EXECUTE FUNCTION trigger_81b4c93e7133();
CREATE TRIGGER trigger_8204480b3a2e BEFORE INSERT OR UPDATE ON incident_management_escalation_rules FOR EACH ROW EXECUTE FUNCTION trigger_8204480b3a2e();
CREATE TRIGGER trigger_84d67ad63e93 BEFORE INSERT OR UPDATE ON wiki_page_slugs FOR EACH ROW EXECUTE FUNCTION trigger_84d67ad63e93();
@ -34306,6 +34391,9 @@ ALTER TABLE ONLY related_epic_links
ALTER TABLE ONLY import_export_uploads
ADD CONSTRAINT fk_83319d9721 FOREIGN KEY (group_id) REFERENCES namespaces(id) ON DELETE CASCADE;
ALTER TABLE ONLY packages_npm_metadata
ADD CONSTRAINT fk_83625a27c0 FOREIGN KEY (project_id) REFERENCES projects(id) ON DELETE CASCADE;
ALTER TABLE ONLY push_rules
ADD CONSTRAINT fk_83b29894de FOREIGN KEY (project_id) REFERENCES projects(id) ON DELETE CASCADE;
@ -34318,6 +34406,9 @@ ALTER TABLE ONLY requirements
ALTER TABLE ONLY catalog_resource_components
ADD CONSTRAINT fk_85bb1d1e79 FOREIGN KEY (project_id) REFERENCES projects(id) ON DELETE CASCADE;
ALTER TABLE ONLY pages_deployment_states
ADD CONSTRAINT fk_8610d3d1cc FOREIGN KEY (project_id) REFERENCES projects(id) ON DELETE CASCADE;
ALTER TABLE ONLY ci_build_pending_states
ADD CONSTRAINT fk_861cd17da3_p FOREIGN KEY (partition_id, build_id) REFERENCES p_ci_builds(partition_id, id) ON UPDATE CASCADE ON DELETE CASCADE;
@ -34462,6 +34553,9 @@ ALTER TABLE ONLY alert_management_alerts
ALTER TABLE ONLY approval_policy_rule_project_links
ADD CONSTRAINT fk_9ed5cf0600 FOREIGN KEY (project_id) REFERENCES projects(id) ON DELETE CASCADE;
ALTER TABLE ONLY packages_rpm_metadata
ADD CONSTRAINT fk_9f1814eb36 FOREIGN KEY (project_id) REFERENCES projects(id) ON DELETE CASCADE;
ALTER TABLE ONLY protected_branch_push_access_levels
ADD CONSTRAINT fk_9ffc86a3d9 FOREIGN KEY (protected_branch_id) REFERENCES protected_branches(id) ON DELETE CASCADE;
@ -34912,6 +35006,9 @@ ALTER TABLE ONLY user_preferences
ALTER TABLE ONLY packages_debian_group_components
ADD CONSTRAINT fk_e63e8ee3b1 FOREIGN KEY (group_id) REFERENCES namespaces(id) ON DELETE CASCADE;
ALTER TABLE ONLY approval_project_rules_protected_branches
ADD CONSTRAINT fk_e6ee913fc2 FOREIGN KEY (project_id) REFERENCES projects(id) ON DELETE CASCADE;
ALTER TABLE ONLY merge_requests
ADD CONSTRAINT fk_e719a85f8a FOREIGN KEY (author_id) REFERENCES users(id) ON DELETE SET NULL;

View File

@ -89,7 +89,7 @@ When your read-only replica is set up, you can skip to [configure your secondary
WARNING:
The use of logical replication methods such as [AWS Database Migration Service](https://aws.amazon.com/dms/)
or [Google Cloud Database Migration Service](https://cloud.google.com/database-migration) to, for instance,
replicate from an on-premise primary database to an RDS secondary are not supported.
replicate from an on-premises primary database to an RDS secondary is not supported.
#### Manually configure the primary database for replication
@ -225,7 +225,7 @@ the tracking database on port 5432.
Create and configure the tracking database in your PostgreSQL instance:
1. Set up PostgreSQL according to the
[database requirements document](../../../install/requirements.md#database).
[database requirements document](../../../install/requirements.md#postgresql).
1. Set up a `gitlab_geo` user with a password of your choice, create the `gitlabhq_geo_production` database, and make the user an owner of the database.
You can see an example of this setup in the [self-compiled installation documentation](../../../install/installation.md#7-database).
1. If you are **not** using a cloud-managed PostgreSQL database, ensure that your secondary

View File

@ -430,7 +430,7 @@ the tracking database on port 5432.
Create and configure the tracking database in your PostgreSQL instance:
1. Set up PostgreSQL according to the
[database requirements document](../../../install/requirements.md#database).
[database requirements document](../../../install/requirements.md#postgresql).
1. Set up a `gitlab_geo` user with a password of your choice, create the `gitlabhq_geo_production` database, and make the user an owner of the database.
You can see an example of this setup in the [self-compiled installation documentation](../../../install/installation.md#7-database).
1. If you are **not** using a cloud-managed PostgreSQL database, ensure that your secondary

View File

@ -75,6 +75,19 @@ Rejecting a user:
- Prevents the user from signing in or accessing instance information.
- Deletes the user.
## View users pending role promotion
When [user promotion management](../administration/settings/sign_up_restrictions.md#enable-role-promotion-approval) is enabled, any user added or promoted to a billable role will be pending administrator approval.
To view users pending role promotion:
1. On the left sidebar, at the bottom, select **Admin**.
1. Select **Overview > Users**.
1. Select **Role Promotions**.
A list of users with the highest role requested is displayed.
You can **Approve** or **Reject** the requests.
## Block and unblock users
GitLab administrators can block and unblock users.

View File

@ -20,7 +20,7 @@ separate from the Linux package.
If you use a cloud-managed service, or provide your own PostgreSQL instance:
1. Set up PostgreSQL according to the
[database requirements document](../../install/requirements.md#database).
[database requirements document](../../install/requirements.md#postgresql).
1. Set up a `gitlab` user with a password of your choice, create the `gitlabhq_production` database, and make the user an
owner of the database. You can see an example of this setup in the
[self-compiled installation documentation](../../install/installation.md#7-database).
@ -73,6 +73,6 @@ pg_dump: error: Error message from server: SSL SYSCALL error: EOF detected
To resolve this error, ensure that you are meeting the
[minimum PostgreSQL requirements](../../install/requirements.md#postgresql). After
upgrading your RDS instance to a [supported version](../../install/requirements.md#database),
upgrading your RDS instance to a [supported version](../../install/requirements.md#postgresql),
you should be able to perform a backup without this error.
See [issue 364763](https://gitlab.com/gitlab-org/gitlab/-/issues/364763) for more information.

View File

@ -527,7 +527,7 @@ If you use a third party external service:
1. The HA Linux package PostgreSQL setup encompasses PostgreSQL, PgBouncer and Consul. These components would no longer be required when using a third party external service.
1. Set up PostgreSQL according to the
[database requirements document](../../install/requirements.md#database).
[database requirements document](../../install/requirements.md#postgresql).
1. Set up a `gitlab` username with a password of your choice. The `gitlab` user
needs privileges to create the `gitlabhq_production` database.
1. Configure the GitLab application servers with the appropriate details.

View File

@ -531,7 +531,7 @@ If you use a third party external service:
1. The HA Linux package PostgreSQL setup encompasses PostgreSQL, PgBouncer and Consul. These components would no longer be required when using a third party external service.
1. Set up PostgreSQL according to the
[database requirements document](../../install/requirements.md#database).
[database requirements document](../../install/requirements.md#postgresql).
1. Set up a `gitlab` username with a password of your choice. The `gitlab` user
needs privileges to create the `gitlabhq_production` database.
1. Configure the GitLab application servers with the appropriate details.

View File

@ -278,7 +278,7 @@ If you use a third party external service:
1. The HA Linux package PostgreSQL setup encompasses PostgreSQL, PgBouncer and Consul. All of these components would no longer be required when using a third party external service.
1. Set up PostgreSQL according to the
[database requirements document](../../install/requirements.md#database).
[database requirements document](../../install/requirements.md#postgresql).
1. Set up a `gitlab` username with a password of your choice. The `gitlab` user
needs privileges to create the `gitlabhq_production` database.
1. Configure the GitLab application servers with the appropriate details.

View File

@ -517,7 +517,7 @@ If you use a third party external service:
1. The HA Linux package PostgreSQL setup encompasses PostgreSQL, PgBouncer and Consul. All of these components would no longer be required when using a third party external service.
1. Set up PostgreSQL according to the
[database requirements document](../../install/requirements.md#database).
[database requirements document](../../install/requirements.md#postgresql).
1. Set up a `gitlab` username with a password of your choice. The `gitlab` user
needs privileges to create the `gitlabhq_production` database.
1. Configure the GitLab application servers with the appropriate details.

View File

@ -535,7 +535,7 @@ If you use a third party external service:
1. The HA Linux package PostgreSQL setup encompasses PostgreSQL, PgBouncer and Consul. All of these components would no longer be required when using a third party external service.
1. Set up PostgreSQL according to the
[database requirements document](../../install/requirements.md#database).
[database requirements document](../../install/requirements.md#postgresql).
1. Set up a `gitlab` username with a password of your choice. The `gitlab` user
needs privileges to create the `gitlabhq_production` database.
1. Configure the GitLab application servers with the appropriate details.

View File

@ -517,7 +517,7 @@ If you use a third party external service:
1. The HA Linux package PostgreSQL setup encompasses PostgreSQL, PgBouncer and Consul. All of these components would no longer be required when using a third party external service.
1. Set up PostgreSQL according to the
[database requirements document](../../install/requirements.md#database).
[database requirements document](../../install/requirements.md#postgresql).
1. Set up a `gitlab` username with a password of your choice. The `gitlab` user
needs privileges to create the `gitlabhq_production` database.
1. Configure the GitLab application servers with the appropriate details.

View File

@ -253,6 +253,35 @@ You can limit GitLab access to a subset of the LDAP users on your LDAP server.
See the [documentation on setting up an LDAP user filter](../auth/ldap/index.md#set-up-ldap-user-filter) for more information.
## Enable role promotion approval
DETAILS:
**Tier:** Ultimate
**Offering:** Self-managed, GitLab Dedicated
> - [Introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/433166) in GitLab 16.9 [with a flag](../feature_flags.md) named `member_promotion_management`.
FLAG:
The availability of this feature is controlled by a feature flag.
For more information, see the history.
This feature is available for testing, but not ready for production use.
In the **Ultimate** tier, [non-billable roles](../../subscriptions/gitlab_com/index.md#billable-users) can be promoted to a billable role in any project or group, resulting in an increase of billable seats, without administrators having any control over this promotion.
To prevent existing users of the subscription from being promoted to a billable role, you can enable role promotion approval.
When this setting is enabled, any existing user of the subscription who is added to a [group](../../user/group/index.md#add-users-to-a-group) or [project](../../user/project/members/index.md#add-users-to-a-project) in a [billable role](../../subscriptions/gitlab_com/index.md#billable-users) will be [pending administrator approval](../moderate_users.md#view-users-pending-role-promotion).
Promotions or updates of user roles for users that already occupy a billable seat do not require administrator approval.
If the user is added to a group or project by an administrator, any membership requests for this user to any other group or project will be approved automatically.
To enable role promotion approval:
1. On the left sidebar, at the bottom, select **Admin**.
1. Select **Settings > General**.
1. Expand **Sign-up restrictions**.
1. In the **Seat controls** section, select **Approve role promotions**.
<!-- ## Troubleshooting
Include any troubleshooting steps that you can foresee. If you know beforehand what issues

View File

@ -261,7 +261,7 @@ To resolve the error, run `VACUUM` manually:
### GitLab database requirements
See [database requirements](../../install/requirements.md#database) and review and install the
See [database requirements](../../install/requirements.md#postgresql) and review and install the
[required extension list](../../install/postgresql_extensions.md).
### Serialization errors in the `production/sidekiq` log

View File

@ -74,11 +74,12 @@ When there was any error sending the email:
```
NOTE:
When the setting **Manage non-billable promotions** is enabled, new invited members with billable roles must be approved by an administrator.
When the ability to **Manage Non-Billable Promotions** is enabled, the newly invited member will be pending administrator approval if the role is billable. To enable **Manage Non-Billable Promotions**:
To enable **Manage non-billable promotions**:
1. Enable `enable_member_promotion_management` Application Setting
1. Enable `member_promotion_management` Feature Flag
1. Enable the `enable_member_promotion_management` application setting.
1. Enable the `member_promotion_management` feature flag.
Example response:

View File

@ -627,11 +627,12 @@ Example response:
```
NOTE:
When the setting **Manage non-billable promotions** is enabled, new invited members with billable roles must be approved by an administrator.
When the ability to **Manage Non-Billable Promotions** is enabled, the newly added member will be pending administrator approval if the role is billable. To enable **Manage Non-Billable Promotions**:
To enable **Manage non-billable promotions**:
1. Enable `enable_member_promotion_management` Application Setting
1. Enable `member_promotion_management` Feature Flag
1. Enable the `enable_member_promotion_management` application setting.
1. Enable the `member_promotion_management` feature flag.
Example of queueing a single user:
@ -718,11 +719,12 @@ Example response:
```
NOTE:
When the setting **Manage non-billable promotions** is enabled, new invited users with billable roles must be approved by an administrator.
When the ability to **Manage Non-Billable Promotions** is enabled, if the role is changed to a billable one, the request will be sent to administrator approval. To enable **Manage Non-Billable Promotions**:
To enable **Manage non-billable promotions**:
1. Enable `enable_member_promotion_management` Application Setting
1. Enable `member_promotion_management` Feature Flag
1. Enable the `enable_member_promotion_management` application setting.
1. Enable the `member_promotion_management` feature flag.
Example response:

View File

@ -38,25 +38,31 @@ GET /projects/:id/protected_branches
| `id` | integer/string | yes | The ID or [URL-encoded path of the project](rest/index.md#namespaced-paths) |
| `search` | string | no | Name or part of the name of protected branches to be searched for |
In the following example, the project ID is `5`.
```shell
curl --header "PRIVATE-TOKEN: <your_access_token>" "https://gitlab.example.com/api/v4/projects/5/protected_branches"
```
Example response:
The following example response includes:
- Two protected branches with IDs `100` and `101`.
- `push_access_levels` with IDs `1001`, `1002`, and `1003`.
- `merge_access_levels` with IDs `2001` and `2002`.
```json
[
{
"id": 1,
"id": 100,
"name": "main",
"push_access_levels": [
{
"id": 1,
"id": 1001,
"access_level": 40,
"access_level_description": "Maintainers"
},
{
"id": 2,
"id": 1002,
"access_level": 40,
"access_level_description": "Deploy key",
"deploy_key_id": 1
@ -64,7 +70,7 @@ Example response:
],
"merge_access_levels": [
{
"id": 1,
"id": 2001,
"access_level": 40,
"access_level_description": "Maintainers"
}
@ -73,18 +79,18 @@ Example response:
"code_owner_approval_required": false
},
{
"id": 1,
"id": 101,
"name": "release/*",
"push_access_levels": [
{
"id": 1,
"id": 1003,
"access_level": 40,
"access_level_description": "Maintainers"
}
],
"merge_access_levels": [
{
"id": 1,
"id": 2002,
"access_level": 40,
"access_level_description": "Maintainers"
}
@ -100,23 +106,27 @@ Users on GitLab Premium or Ultimate also see
the `user_id`, `group_id` and `inherited` parameters. If the `inherited` parameter
exists, means the setting was inherited from the project's group.
Example response:
The following example response includes:
- One protected branch with ID `101`.
- `push_access_levels` with IDs `1001` and `1002`.
- `merge_access_levels` with ID `2001`.
```json
[
{
"id": 1,
"id": 101,
"name": "main",
"push_access_levels": [
{
"id": 1,
"id": 1001,
"access_level": 40,
"user_id": null,
"group_id": null,
"access_level_description": "Maintainers"
},
{
"id": 2,
"id": 1002,
"access_level": 40,
"access_level_description": "Deploy key",
"deploy_key_id": 1,
@ -126,7 +136,7 @@ Example response:
],
"merge_access_levels": [
{
"id": 1,
"id": 2001,
"access_level": null,
"user_id": null,
"group_id": 1234,
@ -154,6 +164,8 @@ GET /projects/:id/protected_branches/:name
| `id` | integer/string | yes | The ID or [URL-encoded path of the project](rest/index.md#namespaced-paths) |
| `name` | string | yes | The name of the branch or wildcard |
In the following example, the project ID is `5` and branch name is `main`:
```shell
curl --header "PRIVATE-TOKEN: <your_access_token>" "https://gitlab.example.com/api/v4/projects/5/protected_branches/main"
```
@ -162,18 +174,18 @@ Example response:
```json
{
"id": 1,
"id": 101,
"name": "main",
"push_access_levels": [
{
"id": 1,
"id": 1001,
"access_level": 40,
"access_level_description": "Maintainers"
}
],
"merge_access_levels": [
{
"id": 1,
"id": 2001,
"access_level": 40,
"access_level_description": "Maintainers"
}
@ -190,11 +202,11 @@ Example response:
```json
{
"id": 1,
"id": 101,
"name": "main",
"push_access_levels": [
{
"id": 1,
"id": 1001,
"access_level": 40,
"user_id": null,
"group_id": null,
@ -203,7 +215,7 @@ Example response:
],
"merge_access_levels": [
{
"id": 1,
"id": 2001,
"access_level": null,
"user_id": null,
"group_id": 1234,
@ -226,10 +238,6 @@ branches using a wildcard protected branch.
POST /projects/:id/protected_branches
```
```shell
curl --request POST --header "PRIVATE-TOKEN: <your_access_token>" "https://gitlab.example.com/api/v4/projects/5/protected_branches?name=*-stable&push_access_level=30&merge_access_level=30&unprotect_access_level=40"
```
| Attribute | Type | Required | Description |
| -------------------------------------------- | ---- | -------- | ----------- |
| `id` | integer/string | yes | The ID or [URL-encoded path of the project](rest/index.md#namespaced-paths). |
@ -243,29 +251,40 @@ curl --request POST --header "PRIVATE-TOKEN: <your_access_token>" "https://gitla
| `push_access_level` | integer | no | Access levels allowed to push. (defaults: `40`, Maintainer role) |
| `unprotect_access_level` | integer | no | Access levels allowed to unprotect. (defaults: `40`, Maintainer role) |
Example response:
In the following example, the project ID is `5` and branch name is `*-stable`.
```shell
curl --request POST --header "PRIVATE-TOKEN: <your_access_token>" "https://gitlab.example.com/api/v4/projects/5/protected_branches?name=*-stable&push_access_level=30&merge_access_level=30&unprotect_access_level=40"
```
The following example response includes:
- A protected branch with ID `101`.
- `push_access_levels` with ID `1001`.
- `merge_access_levels` with ID `2001`.
- `unprotect_access_levels` with ID `3001`.
```json
{
"id": 1,
"id": 101,
"name": "*-stable",
"push_access_levels": [
{
"id": 1,
"id": 1001,
"access_level": 30,
"access_level_description": "Developers + Maintainers"
}
],
"merge_access_levels": [
{
"id": 1,
"id": 2001,
"access_level": 30,
"access_level_description": "Developers + Maintainers"
}
],
"unprotect_access_levels": [
{
"id": 1,
"id": 3001,
"access_level": 40,
"access_level_description": "Maintainers"
}
@ -278,7 +297,12 @@ Example response:
Users on GitLab Premium or Ultimate also see
the `user_id` and `group_id` parameters:
Example response:
The following example response includes:
- A protected branch with ID `101`.
- `push_access_levels` with ID `1001`.
- `merge_access_levels` with ID `2001`.
- `unprotect_access_levels` with ID `3001`.
```json
{
@ -286,7 +310,7 @@ Example response:
"name": "*-stable",
"push_access_levels": [
{
"id": 1,
"id": 1001,
"access_level": 30,
"user_id": null,
"group_id": null,
@ -295,7 +319,7 @@ Example response:
],
"merge_access_levels": [
{
"id": 1,
"id": 2001,
"access_level": 30,
"user_id": null,
"group_id": null,
@ -304,7 +328,7 @@ Example response:
],
"unprotect_access_levels": [
{
"id": 1,
"id": 3001,
"access_level": 40,
"user_id": null,
"group_id": null,
@ -329,15 +353,20 @@ form `{user_id: integer}`, `{group_id: integer}`, or `{access_level: integer}`.
curl --request POST --header "PRIVATE-TOKEN: <your_access_token>" "https://gitlab.example.com/api/v4/projects/5/protected_branches?name=*-stable&allowed_to_push%5B%5D%5Buser_id%5D=1"
```
Example response:
The following example response includes:
- A protected branch with ID `101`.
- `push_access_levels` with ID `1001`.
- `merge_access_levels` with ID `2001`.
- `unprotect_access_levels` with ID `3001`.
```json
{
"id": 1,
"id": 101,
"name": "*-stable",
"push_access_levels": [
{
"id": 1,
"id": 1001,
"access_level": null,
"user_id": 1,
"group_id": null,
@ -346,7 +375,7 @@ Example response:
],
"merge_access_levels": [
{
"id": 1,
"id": 2001,
"access_level": 40,
"user_id": null,
"group_id": null,
@ -355,7 +384,7 @@ Example response:
],
"unprotect_access_levels": [
{
"id": 1,
"id": 3001,
"access_level": 40,
"user_id": null,
"group_id": null,
@ -381,18 +410,23 @@ The deploy key must be enabled for your project and it must have write access to
For other requirements, see [Allow deploy keys to push to a protected branch](../user/project/repository/branches/protected.md#allow-deploy-keys-to-push-to-a-protected-branch).
```shell
curl --request POST --header "PRIVATE-TOKEN: <your_access_token>" "https://gitlab.example.com/api/v4/projects/5/protected_branches?name=*-stable&allowed_to_push%5B%5D%5Bdeploy_key_id%5D=1"
curl --request POST --header "PRIVATE-TOKEN: <your_access_token>" "https://gitlab.example.com/api/v4/projects/5/protected_branches?name=*-stable&allowed_to_push[][deploy_key_id]=1"
```
Example response:
The following example response includes:
- A protected branch with ID `101`.
- `push_access_levels` with ID `1001`.
- `merge_access_levels` with ID `2001`.
- `unprotect_access_levels` with ID `3001`.
```json
{
"id": 1,
"id": 101,
"name": "*-stable",
"push_access_levels": [
{
"id": 1,
"id": 1001,
"access_level": null,
"user_id": null,
"group_id": null,
@ -402,7 +436,7 @@ Example response:
],
"merge_access_levels": [
{
"id": 1,
"id": 2001,
"access_level": 40,
"user_id": null,
"group_id": null,
@ -411,7 +445,7 @@ Example response:
],
"unprotect_access_levels": [
{
"id": 1,
"id": 3001,
"access_level": 40,
"user_id": null,
"group_id": null,
@ -439,25 +473,31 @@ curl --request POST \
--header "Content-Type: application/json" \
--data '{
"name": "main",
"allowed_to_push": [{"access_level": 30}],
"allowed_to_merge": [{
"access_level": 30
},{
"access_level": 40
}
]}'
"https://gitlab.example.com/api/v4/projects/5/protected_branches"
"allowed_to_push": [
{"access_level": 30}
],
"allowed_to_merge": [
{"access_level": 30},
{"access_level": 40}
]
}'
"https://gitlab.example.com/api/v4/projects/5/protected_branches"
```
Example response:
The following example response includes:
- A protected branch with ID `105`.
- `push_access_levels` with ID `1001`.
- `merge_access_levels` with IDs `2001` and `2002`.
- `unprotect_access_levels` with ID `3001`.
```json
{
"id": 5,
"id": 105,
"name": "main",
"push_access_levels": [
{
"id": 1,
"id": 1001,
"access_level": 30,
"access_level_description": "Developers + Maintainers",
"user_id": null,
@ -466,14 +506,14 @@ Example response:
],
"merge_access_levels": [
{
"id": 1,
"id": 2001,
"access_level": 30,
"access_level_description": "Developers + Maintainers",
"user_id": null,
"group_id": null
},
{
"id": 2,
"id": 2002,
"access_level": 40,
"access_level_description": "Maintainers",
"user_id": null,
@ -482,7 +522,7 @@ Example response:
],
"unprotect_access_levels": [
{
"id": 1,
"id": 3001,
"access_level": 40,
"access_level_description": "Maintainers",
"user_id": null,
@ -502,15 +542,17 @@ Unprotects the given protected branch or wildcard protected branch.
DELETE /projects/:id/protected_branches/:name
```
```shell
curl --request DELETE --header "PRIVATE-TOKEN: <your_access_token>" "https://gitlab.example.com/api/v4/projects/5/protected_branches/*-stable"
```
| Attribute | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `id` | integer/string | yes | The ID or [URL-encoded path of the project](rest/index.md#namespaced-paths) |
| `name` | string | yes | The name of the branch |
In the following example, the project ID is `5` and branch name is `*-stable`.
```shell
curl --request DELETE --header "PRIVATE-TOKEN: <your_access_token>" "https://gitlab.example.com/api/v4/projects/5/protected_branches/*-stable"
```
## Update a protected branch
> - [Introduced](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/101903) in GitLab 15.6.
@ -522,10 +564,6 @@ Updates a protected branch.
PATCH /projects/:id/protected_branches/:name
```
```shell
curl --request PATCH --header "PRIVATE-TOKEN: <your_access_token>" "https://gitlab.example.com/api/v4/projects/5/protected_branches/feature-branch?allow_force_push=true&code_owner_approval_required=true"
```
| Attribute | Type | Required | Description |
| -------------------------------------------- | ---- | -------- | ----------- |
| `id` | integer/string | yes | The ID or [URL-encoded path of the project](rest/index.md#namespaced-paths). |
@ -536,6 +574,12 @@ curl --request PATCH --header "PRIVATE-TOKEN: <your_access_token>" "https://gitl
| `allowed_to_unprotect` | array | no | Array of unprotect access levels, with each described by a hash of the form `{user_id: integer}`, `{group_id: integer}`, `{access_level: integer}`, or `{id: integer, _destroy: true}` to destroy an existing access level. The access level `No access` is not available for this field. Premium and Ultimate only. |
| `code_owner_approval_required` | boolean | no | Prevent pushes to this branch if it matches an item in the [`CODEOWNERS` file](../user/project/codeowners/index.md). Premium and Ultimate only. |
In the following example, the project ID is `5` and branch name is `feature-branch`.
```shell
curl --request PATCH --header "PRIVATE-TOKEN: <your_access_token>" "https://gitlab.example.com/api/v4/projects/5/protected_branches/feature-branch?allow_force_push=true&code_owner_approval_required=true"
```
Elements in the `allowed_to_push`, `allowed_to_merge` and `allowed_to_unprotect` arrays should be one of `user_id`, `group_id` or
`access_level`, and take the form `{user_id: integer}`, `{group_id: integer}` or
`{access_level: integer}`.

View File

@ -515,7 +515,7 @@ From the EC2 dashboard:
1. Use the section below titled "[Find official GitLab-created AMI IDs on AWS](#find-official-gitlab-created-ami-ids-on-aws)" to find the correct AMI and select **Launch**.
1. In the **Name and tags** section, set the **Name** to `GitLab`.
1. In the **Instance type** dropdown list, select an instance type based on your workload. Consult the [hardware requirements](../../install/requirements.md#hardware) to choose one that fits your needs (at least `c5.2xlarge`, which is sufficient to accommodate 100 users).
1. In the **Instance type** dropdown list, select an instance type based on your workload. Consult the [hardware requirements](../../install/requirements.md) to choose one that fits your needs (at least `c5.2xlarge`, which is sufficient to accommodate 100 users).
1. In the **Key pair** section, select **Create new key pair**.
1. Give the key pair a name (we use `gitlab`) and save the `gitlab.pem` file for later use.
1. In the **Network settings** section:

View File

@ -66,7 +66,7 @@ The first items you need to configure are the basic settings of the underlying v
1. In **Availability options**, select **Availability zone** and set it to `1`.
Read more about the [availability zones](https://learn.microsoft.com/en-us/azure/virtual-machines/availability).
1. Ensure the selected image is set to **GitLab - Gen1**.
1. Select the VM size based on the [hardware requirements](../requirements.md#hardware).
1. Select the VM size based on the [hardware requirements](../requirements.md).
Because the minimum system requirements to run a GitLab environment for up to 500 users
is covered by the `D4s_v3` size, select that option.
1. Set the authentication type to **SSH public key**.

View File

@ -42,7 +42,7 @@ To deploy GitLab on GCP you must create a virtual machine:
1. On the next page, you can select the type of VM as well as the
estimated costs. Provide the name of the instance, desired data center, and machine type.
Note our [hardware requirements for different user base sizes](../requirements.md#hardware).
Note our [hardware requirements for different user base sizes](../requirements.md).
![Launch on Compute Engine](img/vm_details_v13_1.png)

View File

@ -16,7 +16,7 @@ ease of administration (backups, upgrades, and troubleshooting) with the cost of
| | | |
|--|--|--|
| [**Installation system requirements**](requirements.md)<br>Prerequisites for installation. | [**Installation methods**](install_methods.md)<br>Linux, Helm, Docker, Operator, source, or scripts. | [**Install GitLab on a cloud provider**](cloud_providers.md)<br>AWS, Google Cloud Platform, Azure. |
| [**Installation requirements**](requirements.md)<br>Prerequisites for installation. | [**Installation methods**](install_methods.md)<br>Linux, Helm, Docker, Operator, source, or scripts. | [**Install GitLab on a cloud provider**](cloud_providers.md)<br>AWS, Google Cloud Platform, Azure. |
| [**Offline GitLab**](../topics/offline/index.md)<br>Isolated installation. | [**Reference architectures**](../administration/reference_architectures/index.md)<br>Recommended deployments at scale. | [**Upgrade GitLab**](../update/index.md)<br>Latest version instructions. |
| [**Install GitLab Runner**](https://docs.gitlab.com/runner/install/)<br>Software for CI/CD jobs. | [**Configure GitLab Runner**](https://docs.gitlab.com/runner/configuration/)<br>`Config.toml`, certificates, autoscaling, proxy setup. | |

View File

@ -5,7 +5,7 @@ description: Prerequisites for installation.
info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments
---
# Installation system requirements
# GitLab installation requirements
DETAILS:
**Tier:** Free, Premium, Ultimate
@ -13,30 +13,28 @@ DETAILS:
This page contains information about the system requirements to install GitLab.
## Hardware
### Storage
## Storage
The necessary storage space largely depends on the size of the repositories you want to have in GitLab.
As a guideline, you should have at least as much free space as all your repositories combined.
The Linux package requires about 2.5 GB of storage space for installation.
For storage flexibility, consider mounting your hard drive through logical volume management.
You should have a hard drive with at least 7200 RPM or a solid-state drive to improve the responsiveness of GitLab.
You should have a hard drive with at least 7,200 RPM or a solid-state drive to reduce response times.
Because file system performance might affect the overall performance of GitLab, you should
[avoid using cloud-based file systems for storage](../administration/nfs.md#avoid-using-cloud-based-file-systems).
### CPU
## CPU
CPU requirements depend on the number of users and expected workload.
The workload includes your users' activity, use of automation and mirroring, and repository size.
For a maximum of 20 requests per second or 1,000 users, you should have 8 vCPUs.
For a maximum of 20 requests per second or 1,000 users, you should have 8 vCPU.
For more users or higher workload,
see [reference architectures](../administration/reference_architectures/index.md).
### Memory
## Memory
Memory requirements depend on the number of users and expected workload.
The workload includes your users' activity, use of automation and mirroring, and repository size.
@ -49,9 +47,7 @@ In some cases, GitLab can run with at least 8 GB of memory.
For more information, see
[running GitLab in a memory-constrained environment](https://docs.gitlab.com/omnibus/settings/memory_constrained_envs.html).
## Database
### PostgreSQL
## PostgreSQL
[PostgreSQL](https://www.postgresql.org/) is the only supported database and is bundled with the Linux package.
You can also use an [external PostgreSQL database](https://docs.gitlab.com/omnibus/settings/database.html#using-a-non-packaged-postgresql-database-management-server).
@ -81,7 +77,7 @@ To use a later major version of PostgreSQL than specified, check if a
You must also ensure some extensions are loaded into every GitLab database.
For more information, see [managing PostgreSQL extensions](postgresql_extensions.md).
#### GitLab Geo
### GitLab Geo
For [GitLab Geo](../administration/geo/index.md), you should use the Linux package or
[validated cloud providers](../administration/reference_architectures/index.md#recommended-cloud-providers-and-services)
@ -90,7 +86,7 @@ Compatibility with other external databases is not guaranteed.
For more information, see [requirements for running Geo](../administration/geo/index.md#requirements-for-running-geo).
#### Locale compatibility
### Locale compatibility
When you change locale data in `glibc`, PostgreSQL database files are
no longer fully compatible between different operating systems.
@ -104,7 +100,7 @@ when you:
For more information, see [upgrading operating systems for PostgreSQL](../administration/postgresql/upgrading_os.md).
#### GitLab schemas
### GitLab schemas
You should create or use databases exclusively for GitLab, [Geo](../administration/geo/index.md),
[Gitaly Cluster](../administration/gitaly/praefect.md), or other components.
If you modify any schema, [GitLab upgrades](../update/index.md) might fail.
## Puma

The recommended [Puma](https://puma.io/) settings depend on your [installation](install_methods.md).
By default, the Linux package uses the recommended settings.

To adjust Puma settings:

- For the Linux package, see [Puma settings](../administration/operations/puma.md).
- For the GitLab Helm chart, see the
  [`webservice` chart](https://docs.gitlab.com/charts/charts/gitlab/webservice/index.html).

### Workers

The recommended number of Puma workers largely depends on CPU and memory capacity.
By default, the Linux package uses the recommended number of workers.
For more information about how this number is calculated,
see [`puma.rb`](https://gitlab.com/gitlab-org/omnibus-gitlab/-/blob/master/files/gitlab-cookbooks/gitlab/libraries/puma.rb?ref_type=heads#L46-69).

A node must never have fewer than two Puma workers.
For example, a node should have:

- Two workers for 2 CPU cores and 8 GB of memory
- Two workers for 4 CPU cores and 4 GB of memory
- Four workers for 4 CPU cores and 8 GB of memory
- Six workers for 8 CPU cores and 8 GB of memory
- Eight workers for 8 CPU cores and 16 GB of memory

By default, each Puma worker is limited to 1.2 GB of memory.
You can [adjust this setting](../administration/operations/puma.md#reducing-memory-use) in `/etc/gitlab/gitlab.rb`.

You can also increase the number of Puma workers, provided enough CPU and memory capacity is available.
More workers would reduce response times and improve the ability to handle parallel requests.
Run tests to verify the optimal number of workers for your [installation](install_methods.md).

### Threads

The recommended number of Puma threads depends on total system memory.
A node should use:

- One thread for an operating system with a maximum of 2 GB of memory
- Four threads for an operating system with more than 2 GB of memory

More threads would lead to excessive swapping and lower performance.
## Redis
---
You can give a user access to all projects in a group.
Prerequisites:
- You must have the Owner role for the group.
- If [sign-up is disabled](../../administration/settings/sign_up_restrictions.md#disable-new-sign-ups), an administrator must add the user by email first.
- If [promotion management](../../administration/settings/sign_up_restrictions.md#enable-role-promotion-approval) is enabled, an administrator must approve the invite.
1. On the left sidebar, select **Search or go to** and find your group.
1. Select **Manage > Members**.
This tab includes users who:
- Are waiting for [approval from an administrator](../../administration/moderate_users.md).
- [Exceed the group user cap](manage.md#user-cap-for-groups).
### View users pending promotion
When [promotion management](../../administration/settings/sign_up_restrictions.md#enable-role-promotion-approval) is enabled, an administrator must approve the membership requests of users who would become billable users in the subscription.
To view users pending promotion:
1. On the left sidebar, select **Search or go to** and find your group.
1. Select **Manage > Members**.
1. Select the **Promotions** tab.
If the **Promotions** tab is not displayed, the group has no pending promotions.
## Remove a member from the group
Prerequisites:
---
On GitLab.com, it takes seven days from when you delete your own account to when
- That user is [blocked](../../../administration/moderate_users.md#block-a-user).
- You cannot create a new account with the same username.
NOTE:
After the seven-day period ends, any user can create an account with that previously used username. You should not assume the username will still be available after the seven days, because another user might have taken it.
You can [create a new account with the same email address](#create-a-new-account-with-the-same-email-address)
if you remove that email address from your account first.
Some files were not shown because too many files have changed in this diff.