Add latest changes from gitlab-org/gitlab@master

GitLab Bot 2022-12-13 15:07:56 +00:00
parent 9fdb3dbd6b
commit 0d55697d64
92 changed files with 1455 additions and 796 deletions

View File

@ -100,6 +100,9 @@
.if-fork-merge-request: &if-fork-merge-request
if: '$CI_PROJECT_NAMESPACE !~ /^gitlab(-org)?($|\/)/ && $CI_MERGE_REQUEST_IID && $CI_MERGE_REQUEST_LABELS !~ /pipeline:run-all-rspec/'
.if-schedule-pipeline: &if-schedule-pipeline
if: '$CI_PIPELINE_SOURCE == "schedule"'
.if-schedule-maintenance: &if-schedule-maintenance
if: '$CI_PIPELINE_SOURCE == "schedule" && $SCHEDULE_TYPE == "maintenance"'
@ -2050,7 +2053,7 @@
rules:
- <<: *if-default-branch-or-tag
allow_failure: true
- <<: *if-schedule-maintenance
- <<: *if-schedule-pipeline
allow_failure: true
- <<: *if-auto-deploy-branches
allow_failure: true

View File

@ -535,8 +535,6 @@ RSpec/FactoryBot/AvoidCreate:
- 'spec/serializers/project_mirror_entity_spec.rb'
- 'spec/serializers/project_note_entity_spec.rb'
- 'spec/serializers/project_serializer_spec.rb'
- 'spec/serializers/prometheus_alert_entity_spec.rb'
- 'spec/serializers/release_serializer_spec.rb'
- 'spec/serializers/review_app_setup_entity_spec.rb'
- 'spec/serializers/runner_entity_spec.rb'
- 'spec/serializers/serverless/domain_entity_spec.rb'

View File

@ -2,23 +2,8 @@
# Cop supports --autocorrect.
Style/StringConcatenation:
Exclude:
- 'app/components/pajamas/button_component.rb'
- 'app/controllers/concerns/creates_commit.rb'
- 'app/controllers/groups/dependency_proxy_for_containers_controller.rb'
- 'app/controllers/jira_connect/app_descriptor_controller.rb'
- 'app/controllers/projects/labels_controller.rb'
- 'app/controllers/projects/milestones_controller.rb'
- 'app/graphql/types/global_id_type.rb'
- 'app/helpers/application_helper.rb'
- 'app/helpers/blob_helper.rb'
- 'app/helpers/dropdowns_helper.rb'
- 'app/helpers/emails_helper.rb'
- 'app/helpers/events_helper.rb'
- 'app/helpers/numbers_helper.rb'
- 'app/helpers/submodule_helper.rb'
- 'app/helpers/todos_helper.rb'
- 'app/models/application_setting.rb'
- 'app/models/commit_range.rb'
- 'app/models/concerns/counter_attribute.rb'
- 'app/models/concerns/cross_database_modification.rb'
- 'app/models/concerns/from_set_operator.rb'

View File

@ -1 +1 @@
cf32d208912de9dfbfdd4baab42655baf82bfce5
b3df64b5c2838a52aed21700299f7aa69b82c992

View File

@ -444,20 +444,27 @@ export const scrollToLineIfNeededParallel = (_, line) => {
}
};
export const loadCollapsedDiff = ({ commit, getters, state }, file) =>
axios
.get(file.load_collapsed_diff_url, {
params: {
commit_id: getters.commitId,
w: state.showWhitespace ? '0' : '1',
},
})
.then((res) => {
commit(types.ADD_COLLAPSED_DIFFS, {
file,
data: res.data,
});
export const loadCollapsedDiff = ({ commit, getters, state }, file) => {
const versionPath = state.mergeRequestDiff?.version_path;
const loadParams = {
commit_id: getters.commitId,
w: state.showWhitespace ? '0' : '1',
};
if (versionPath) {
const { diffId, startSha } = getDerivedMergeRequestInformation({ endpoint: versionPath });
loadParams.diff_id = diffId;
loadParams.start_sha = startSha;
}
return axios.get(file.load_collapsed_diff_url, { params: loadParams }).then((res) => {
commit(types.ADD_COLLAPSED_DIFFS, {
file,
data: res.data,
});
});
};
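
For context, a minimal sketch of the parameter assembly above, pulled out of the Vuex action; the import path and the shape of the arguments are assumptions for illustration:

import { getDerivedMergeRequestInformation } from '../utils/merge_request';

// Base params always carry the commit and whitespace flags; version info
// (diff_id / start_sha) is merged in only when a version path is known.
function buildCollapsedDiffParams({ commitId, showWhitespace, versionPath }) {
  const params = { commit_id: commitId, w: showWhitespace ? '0' : '1' };
  if (versionPath) {
    const { diffId, startSha } = getDerivedMergeRequestInformation({ endpoint: versionPath });
    params.diff_id = diffId;
    params.start_sha = startSha;
  }
  return params;
}
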
/**
* Toggles the file discussions after the user clicks the toggle discussions button.

View File

@ -1,14 +1,30 @@
const endpointRE = /^(\/?(.+?)\/(.+?)\/-\/merge_requests\/(\d+)).*$/i;
function getVersionInfo({ endpoint } = {}) {
const dummyRoot = 'https://gitlab.com';
const endpointUrl = new URL(endpoint, dummyRoot);
const params = Object.fromEntries(endpointUrl.searchParams.entries());
const { start_sha: startSha, diff_id: diffId } = params;
return {
diffId,
startSha,
};
}
export function getDerivedMergeRequestInformation({ endpoint } = {}) {
let mrPath;
let userOrGroup;
let project;
let id;
let diffId;
let startSha;
const matches = endpointRE.exec(endpoint);
if (matches) {
[, mrPath, userOrGroup, project, id] = matches;
({ diffId, startSha } = getVersionInfo({ endpoint }));
}
return {
@ -16,5 +32,7 @@ export function getDerivedMergeRequestInformation({ endpoint } = {}) {
userOrGroup,
project,
id,
diffId,
startSha,
};
}
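
For illustration, a hypothetical endpoint and the value the helper derives from it (all captured values are strings):

getDerivedMergeRequestInformation({
  endpoint: '/gitlab-org/gitlab/-/merge_requests/1?start_sha=abc123&diff_id=42',
});
// => {
//   mrPath: '/gitlab-org/gitlab/-/merge_requests/1',
//   userOrGroup: 'gitlab-org',
//   project: 'gitlab',
//   id: '1',
//   diffId: '42',
//   startSha: 'abc123',
// }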

View File

@ -0,0 +1,28 @@
import { __ } from '~/locale';
const rulesReg = [
{
reg: /^[a-zA-Z0-9\u{00A9}-\u{1f9ff}_]/u,
msg: __("Name must start with a letter, digit, emoji, or '_'"),
},
{
reg: /^[a-zA-Z0-9\p{Pd}\u{002B}\u{00A9}-\u{1f9ff}_. ]+$/u,
msg: __("Name can contain only letters, digits, emojis, '_', '.', '+', dashes, or spaces"),
},
];
/**
 * Checks `text` against each naming rule in order.
 * @param {string} text
 * @returns {string} the first failing rule's message, or '' when the name is valid
 */
function checkRules(text) {
for (const item of rulesReg) {
if (!item.reg.test(text)) {
return item.msg;
}
}
return '';
}
export { checkRules };
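
A few hypothetical inputs and the messages the helper returns (an empty string means the name is valid):

checkRules('gitlab-runner'); // => ''
checkRules('-runner');       // => "Name must start with a letter, digit, emoji, or '_'"
checkRules('run*ner');       // => "Name can contain only letters, digits, emojis, '_', '.', '+', dashes, or spaces"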

View File

@ -12,6 +12,7 @@ import {
slugify,
convertUnicodeToAscii,
} from '../lib/utils/text_utility';
import { checkRules } from './project_name_rules';
let hasUserDefinedProjectPath = false;
let hasUserDefinedProjectName = false;
@ -87,10 +88,23 @@ const validateGroupNamespaceDropdown = (e) => {
}
};
const checkProjectName = (projectNameInput) => {
const msg = checkRules(projectNameInput.value);
const projectNameError = document.querySelector('#project_name_error');
if (!projectNameError) return;
if (msg) {
projectNameError.innerText = msg;
projectNameError.classList.remove('hidden');
} else {
projectNameError.classList.add('hidden');
}
};
const setProjectNamePathHandlers = ($projectNameInput, $projectPathInput) => {
const specialRepo = document.querySelector('.js-user-readme-repo');
const projectNameInputListener = () => {
onProjectNameChange($projectNameInput, $projectPathInput);
checkProjectName($projectNameInput);
hasUserDefinedProjectName = $projectNameInput.value.trim().length > 0;
hasUserDefinedProjectPath = $projectPathInput.value.trim().length > 0;
};

View File

@ -1,5 +1,6 @@
<script>
import { isEmpty } from 'lodash';
import { produce } from 'immer';
import {
GlAlert,
GlSkeletonLoader,
@ -11,6 +12,7 @@ import {
GlEmptyState,
} from '@gitlab/ui';
import noAccessSvg from '@gitlab/svgs/dist/illustrations/analytics/no-access.svg';
import * as Sentry from '@sentry/browser';
import { s__ } from '~/locale';
import { parseBoolean } from '~/lib/utils/common_utils';
import { getParameterByName } from '~/lib/utils/url_utility';
@ -269,6 +271,12 @@ export default {
id: this.workItemId,
};
},
children() {
const widgetHierarchy = this.workItem.widgets.find(
(widget) => widget.type === WIDGET_TYPE_HIERARCHY,
);
return widgetHierarchy.children.nodes;
},
},
methods: {
isWidgetPresent(type) {
@ -326,6 +334,74 @@ export default {
this.error = this.$options.i18n.fetchError;
document.title = s__('404|Not found');
},
addChild(child) {
const { defaultClient: client } = this.$apollo.provider.clients;
this.toggleChildFromCache(child, child.id, client);
},
toggleChildFromCache(workItem, childId, store) {
const sourceData = store.readQuery({
query: getWorkItemQuery(this.fetchByIid),
variables: this.queryVariables,
});
const newData = produce(sourceData, (draftState) => {
const widgetHierarchy = draftState.workItem.widgets.find(
(widget) => widget.type === WIDGET_TYPE_HIERARCHY,
);
const index = widgetHierarchy.children.nodes.findIndex((child) => child.id === childId);
if (index >= 0) {
widgetHierarchy.children.nodes.splice(index, 1);
} else {
widgetHierarchy.children.nodes.unshift(workItem);
}
});
store.writeQuery({
query: getWorkItemQuery(this.fetchByIid),
variables: this.queryVariables,
data: newData,
});
},
async updateWorkItem(workItem, childId, parentId) {
return this.$apollo.mutate({
mutation: updateWorkItemMutation,
variables: { input: { id: childId, hierarchyWidget: { parentId } } },
update: (store) => this.toggleChildFromCache(workItem, childId, store),
});
},
async undoChildRemoval(workItem, childId) {
try {
const { data } = await this.updateWorkItem(workItem, childId, this.workItem.id);
if (data.workItemUpdate.errors.length === 0) {
this.activeToast?.hide();
}
} catch (error) {
this.updateError = s__('WorkItem|Something went wrong while undoing child removal.');
Sentry.captureException(error);
} finally {
this.activeToast?.hide();
}
},
async removeChild(childId) {
try {
const { data } = await this.updateWorkItem(null, childId, null);
if (data.workItemUpdate.errors.length === 0) {
this.activeToast = this.$toast.show(s__('WorkItem|Child removed'), {
action: {
text: s__('WorkItem|Undo'),
onClick: this.undoChildRemoval.bind(this, data.workItemUpdate.workItem, childId),
},
});
}
} catch (error) {
this.updateError = s__('WorkItem|Something went wrong while removing child.');
Sentry.captureException(error);
}
},
},
WORK_ITEM_TYPE_VALUE_OBJECTIVE,
};
@ -507,6 +583,11 @@ export default {
v-if="workItemType === $options.WORK_ITEM_TYPE_VALUE_OBJECTIVE"
:work-item-type="workItemType"
:work-item-id="workItem.id"
:children="children"
:can-update="canUpdate"
:project-path="fullPath"
@addWorkItemChild="addChild"
@removeChild="removeChild"
/>
<gl-empty-state
v-if="error"

View File

@ -1,5 +1,5 @@
<script>
import { GlDropdown, GlDropdownDivider, GlDropdownSectionHeader, GlDropdownItem } from '@gitlab/ui';
import { GlDropdown, GlDropdownSectionHeader, GlDropdownItem } from '@gitlab/ui';
import { s__ } from '~/locale';
@ -30,7 +30,6 @@ export default {
objectiveActionItems,
components: {
GlDropdown,
GlDropdownDivider,
GlDropdownSectionHeader,
GlDropdownItem,
},
@ -53,6 +52,10 @@ export default {
{{ item.title }}
</gl-dropdown-item>
<!-- TODO: Uncomment once the following two issues are addressed -->
<!-- https://gitlab.com/gitlab-org/gitlab/-/issues/381833 -->
<!-- https://gitlab.com/gitlab-org/gitlab/-/issues/385084 -->
<!--
<gl-dropdown-divider />
<gl-dropdown-section-header>{{ __('Key result') }}</gl-dropdown-section-header>
<gl-dropdown-item
@ -62,5 +65,6 @@ export default {
>
{{ item.title }}
</gl-dropdown-item>
-->
</gl-dropdown>
</template>

View File

@ -1,12 +1,21 @@
<script>
import { GlButton, GlIcon, GlTooltipDirective } from '@gitlab/ui';
import { __ } from '~/locale';
import { __, s__ } from '~/locale';
import { createAlert } from '~/flash';
import { getIdFromGraphQLId } from '~/graphql_shared/utils';
import RichTimestampTooltip from '~/vue_shared/components/rich_timestamp_tooltip.vue';
import { STATE_OPEN } from '../../constants';
import {
STATE_OPEN,
TASK_TYPE_NAME,
WORK_ITEM_TYPE_VALUE_OBJECTIVE,
WIDGET_TYPE_HIERARCHY,
WORK_ITEM_NAME_TO_ICON_MAP,
} from '../../constants';
import getWorkItemTreeQuery from '../../graphql/work_item_tree.query.graphql';
import WorkItemLinksMenu from './work_item_links_menu.vue';
import WorkItemTreeChildren from './work_item_tree_children.vue';
export default {
components: {
@ -14,6 +23,7 @@ export default {
GlIcon,
RichTimestampTooltip,
WorkItemLinksMenu,
WorkItemTreeChildren,
},
directives: {
GlTooltip: GlTooltipDirective,
@ -35,16 +45,45 @@ export default {
type: Object,
required: true,
},
hasIndirectChildren: {
type: Boolean,
required: false,
default: true,
},
workItemType: {
type: String,
required: false,
default: '',
},
},
data() {
return {
isExpanded: false,
children: [],
isLoadingChildren: false,
};
},
computed: {
canHaveChildren() {
return this.workItemType === WORK_ITEM_TYPE_VALUE_OBJECTIVE;
},
isItemOpen() {
return this.childItem.state === STATE_OPEN;
},
iconClass() {
return this.isItemOpen ? 'gl-text-green-500' : 'gl-text-blue-500';
childItemType() {
return this.childItem.workItemType.name;
},
iconName() {
return this.isItemOpen ? 'issue-open-m' : 'issue-close';
if (this.childItemType === TASK_TYPE_NAME) {
return this.isItemOpen ? 'issue-open-m' : 'issue-close';
}
return WORK_ITEM_NAME_TO_ICON_MAP[this.childItemType];
},
iconClass() {
if (this.childItemType === TASK_TYPE_NAME) {
return this.isItemOpen ? 'gl-text-green-500' : 'gl-text-blue-500';
}
return '';
},
stateTimestamp() {
return this.isItemOpen ? this.childItem.createdAt : this.childItem.closedAt;
@ -55,55 +94,132 @@ export default {
childPath() {
return `/${this.projectPath}/-/work_items/${getIdFromGraphQLId(this.childItem.id)}`;
},
hasChildren() {
return this.getWidgetHierarchyForChild(this.childItem)?.hasChildren;
},
chevronType() {
return this.isExpanded ? 'chevron-down' : 'chevron-right';
},
chevronTooltip() {
return this.isExpanded ? __('Collapse') : __('Expand');
},
},
methods: {
toggleItem() {
this.isExpanded = !this.isExpanded;
if (this.children.length === 0 && this.hasChildren) {
this.fetchChildren();
}
},
getWidgetHierarchyForChild(workItem) {
const widgetHierarchy = workItem?.widgets?.find(
(widget) => widget.type === WIDGET_TYPE_HIERARCHY,
);
return widgetHierarchy || {};
},
async fetchChildren() {
this.isLoadingChildren = true;
try {
const { data } = await this.$apollo.query({
query: getWorkItemTreeQuery,
variables: {
id: this.childItem.id,
},
});
this.children = this.getWidgetHierarchyForChild(data?.workItem).children.nodes;
} catch (error) {
this.isExpanded = !this.isExpanded;
createAlert({
message: s__('Hierarchy|Something went wrong while fetching children.'),
captureError: true,
error,
});
} finally {
this.isLoadingChildren = false;
}
},
},
};
</script>
<template>
<div
class="gl-relative gl-display-flex gl-overflow-break-word gl-min-w-0 gl-bg-white gl-mb-3 gl-py-3 gl-px-4 gl-border gl-border-gray-100 gl-rounded-base gl-line-height-32"
data-testid="links-child"
>
<div class="gl-overflow-hidden gl-display-flex gl-align-items-center gl-flex-grow-1">
<span :id="`stateIcon-${childItem.id}`" class="gl-mr-3" data-testid="item-status-icon">
<gl-icon :name="iconName" :class="iconClass" :aria-label="stateTimestampTypeText" />
</span>
<rich-timestamp-tooltip
:target="`stateIcon-${childItem.id}`"
:raw-timestamp="stateTimestamp"
:timestamp-type-text="stateTimestampTypeText"
/>
<gl-icon
v-if="childItem.confidential"
v-gl-tooltip.top
name="eye-slash"
class="gl-mr-2 gl-text-orange-500"
data-testid="confidential-icon"
:aria-label="__('Confidential')"
:title="__('Confidential')"
/>
<gl-button
:href="childPath"
category="tertiary"
variant="link"
class="gl-text-truncate gl-max-w-80 gl-text-black-normal!"
@click="$emit('click', $event)"
@mouseover="$emit('mouseover')"
@mouseout="$emit('mouseout')"
>
{{ childItem.title }}
</gl-button>
</div>
<div>
<div
v-if="canUpdate"
class="gl-ml-0 gl-sm-ml-auto! gl-display-inline-flex gl-align-items-center"
class="gl-display-flex gl-align-items-center gl-mb-3"
:class="{ 'gl-ml-6': canHaveChildren && !hasChildren && hasIndirectChildren }"
>
<work-item-links-menu
:work-item-id="childItem.id"
:parent-work-item-id="issuableGid"
data-testid="links-menu"
@removeChild="$emit('remove', childItem.id)"
<gl-button
v-if="hasChildren"
v-gl-tooltip.viewport
:title="chevronTooltip"
:aria-label="chevronTooltip"
:icon="chevronType"
category="tertiary"
:loading="isLoadingChildren"
class="gl-px-0! gl-py-4! gl-mr-3"
data-testid="expand-child"
@click="toggleItem"
/>
<div
class="gl-relative gl-display-flex gl-flex-grow-1 gl-overflow-break-word gl-min-w-0 gl-bg-white gl-py-3 gl-px-4 gl-border gl-border-gray-100 gl-rounded-base gl-line-height-32"
data-testid="links-child"
>
<div class="gl-overflow-hidden gl-display-flex gl-align-items-center gl-flex-grow-1">
<span :id="`stateIcon-${childItem.id}`" class="gl-mr-3" data-testid="item-status-icon">
<gl-icon
class="gl-text-secondary"
:class="iconClass"
:name="iconName"
:aria-label="stateTimestampTypeText"
/>
</span>
<rich-timestamp-tooltip
:target="`stateIcon-${childItem.id}`"
:raw-timestamp="stateTimestamp"
:timestamp-type-text="stateTimestampTypeText"
/>
<gl-icon
v-if="childItem.confidential"
v-gl-tooltip.top
name="eye-slash"
class="gl-mr-2 gl-text-orange-500"
data-testid="confidential-icon"
:aria-label="__('Confidential')"
:title="__('Confidential')"
/>
<gl-button
:href="childPath"
category="tertiary"
variant="link"
class="gl-text-truncate gl-max-w-80 gl-text-black-normal!"
@click="$emit('click', $event)"
@mouseover="$emit('mouseover')"
@mouseout="$emit('mouseout')"
>
{{ childItem.title }}
</gl-button>
</div>
<div
v-if="canUpdate"
class="gl-ml-0 gl-sm-ml-auto! gl-display-inline-flex gl-align-items-center"
>
<work-item-links-menu
:work-item-id="childItem.id"
:parent-work-item-id="issuableGid"
data-testid="links-menu"
@removeChild="$emit('removeChild', childItem.id)"
/>
</div>
</div>
</div>
<work-item-tree-children
v-if="isExpanded"
:project-path="projectPath"
:can-update="canUpdate"
:work-item-id="issuableGid"
:work-item-type="workItemType"
:children="children"
@removeChild="fetchChildren"
/>
</div>
</template>
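
The expand flow above fetches children only on first expansion; a minimal standalone sketch of that pattern, with fetchChildren standing in for the Apollo query:

let isExpanded = false;
let children = [];

// Children load lazily: the first expand triggers the fetch, later
// toggles reuse the already-loaded list.
async function toggleItem(fetchChildren, hasChildren) {
  isExpanded = !isExpanded;
  if (children.length === 0 && hasChildren) {
    children = await fetchChildren();
  }
}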

View File

@ -412,7 +412,7 @@ export default {
@click="openChild(child, $event)"
@mouseover="prefetchWorkItem(child)"
@mouseout="clearPrefetching"
@remove="removeChild"
@removeChild="removeChild"
/>
<work-item-detail-modal
ref="modal"

View File

@ -1,15 +1,26 @@
<script>
import { GlButton } from '@gitlab/ui';
import { s__ } from '~/locale';
import { isEmpty } from 'lodash';
import { __ } from '~/locale';
import { convertToGraphQLId } from '~/graphql_shared/utils';
import { TYPE_WORK_ITEM } from '~/graphql_shared/constants';
import { DEFAULT_DEBOUNCE_AND_THROTTLE_MS } from '~/lib/utils/constants';
import { parseBoolean } from '~/lib/utils/common_utils';
import { getParameterByName } from '~/lib/utils/url_utility';
import glFeatureFlagMixin from '~/vue_shared/mixins/gl_feature_flags_mixin';
import {
FORM_TYPES,
WIDGET_TYPE_HIERARCHY,
WORK_ITEMS_TREE_TEXT_MAP,
WORK_ITEM_TYPE_ENUM_OBJECTIVE,
WORK_ITEM_TYPE_ENUM_KEY_RESULT,
} from '../../constants';
import workItemQuery from '../../graphql/work_item.query.graphql';
import workItemByIidQuery from '../../graphql/work_item_by_iid.query.graphql';
import OkrActionsSplitButton from './okr_actions_split_button.vue';
import WorkItemLinksForm from './work_item_links_form.vue';
import WorkItemLinkChild from './work_item_link_child.vue';
export default {
FORM_TYPES,
@ -20,7 +31,9 @@ export default {
GlButton,
OkrActionsSplitButton,
WorkItemLinksForm,
WorkItemLinkChild,
},
mixins: [glFeatureFlagMixin()],
props: {
workItemType: {
type: String,
@ -30,6 +43,20 @@ export default {
type: String,
required: true,
},
children: {
type: Array,
required: false,
default: () => [],
},
canUpdate: {
type: Boolean,
required: false,
default: false,
},
projectPath: {
type: String,
required: true,
},
},
data() {
return {
@ -38,6 +65,7 @@ export default {
error: null,
formType: null,
childType: null,
prefetchedWorkItem: null,
};
},
computed: {
@ -45,8 +73,41 @@ export default {
return this.isOpen ? 'chevron-lg-up' : 'chevron-lg-down';
},
toggleLabel() {
return this.isOpen ? s__('WorkItem|Collapse tasks') : s__('WorkItem|Expand tasks');
return this.isOpen ? __('Collapse') : __('Expand');
},
fetchByIid() {
return this.glFeatures.useIidInWorkItemsPath && parseBoolean(getParameterByName('iid_path'));
},
childrenIds() {
return this.children.map((c) => c.id);
},
hasIndirectChildren() {
return this.children
.map(
(child) => child.widgets?.find((widget) => widget.type === WIDGET_TYPE_HIERARCHY) || {},
)
.some((hierarchy) => hierarchy.hasChildren);
},
childUrlParams() {
const params = {};
if (this.fetchByIid) {
const iid = getParameterByName('work_item_iid');
if (iid) {
params.iid = iid;
}
} else {
const workItemId = getParameterByName('work_item_id');
if (workItemId) {
params.id = convertToGraphQLId(TYPE_WORK_ITEM, workItemId);
}
}
return params;
},
},
mounted() {
if (!isEmpty(this.childUrlParams)) {
this.addWorkItemQuery(this.childUrlParams);
}
},
methods: {
toggle() {
@ -64,6 +125,37 @@ export default {
hideAddForm() {
this.isShownAddForm = false;
},
addWorkItemQuery({ id, iid }) {
const variables = this.fetchByIid
? {
fullPath: this.projectPath,
iid,
}
: {
id,
};
this.$apollo.addSmartQuery('prefetchedWorkItem', {
query() {
return this.fetchByIid ? workItemByIidQuery : workItemQuery;
},
variables,
update(data) {
return this.fetchByIid ? data.workspace.workItems.nodes[0] : data.workItem;
},
context: {
isSingleRequest: true,
},
});
},
prefetchWorkItem({ id, iid }) {
this.prefetch = setTimeout(
() => this.addWorkItemQuery({ id, iid }),
DEFAULT_DEBOUNCE_AND_THROTTLE_MS,
);
},
clearPrefetching() {
clearTimeout(this.prefetch);
},
},
};
</script>
@ -113,7 +205,7 @@ export default {
:class="{ 'gl-p-5 gl-pb-3': !error }"
data-testid="tree-body"
>
<div v-if="!isShownAddForm && !error" data-testid="tree-empty">
<div v-if="!isShownAddForm && !error && children.length === 0" data-testid="tree-empty">
<p class="gl-mb-3">
{{ $options.WORK_ITEMS_TREE_TEXT_MAP[workItemType].empty }}
</p>
@ -125,8 +217,23 @@ export default {
:issuable-gid="workItemId"
:form-type="formType"
:children-type="childType"
:children-ids="childrenIds"
@addWorkItemChild="$emit('addWorkItemChild', $event)"
@cancel="hideAddForm"
/>
<work-item-link-child
v-for="child in children"
:key="child.id"
:project-path="projectPath"
:can-update="canUpdate"
:issuable-gid="workItemId"
:child-item="child"
:work-item-type="workItemType"
:has-indirect-children="hasIndirectChildren"
@mouseover="prefetchWorkItem(child)"
@mouseout="clearPrefetching"
@removeChild="$emit('removeChild', $event)"
/>
</div>
</div>
</template>
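
A minimal sketch of the hover-prefetch used above: the query is scheduled on mouseover and cancelled on mouseout, so only a sustained hover triggers a request (the delay constant's value is an assumption here):

const DEFAULT_DEBOUNCE_AND_THROTTLE_MS = 250; // assumed value

let prefetchTimer;

function prefetchWorkItem(addWorkItemQuery, { id, iid }) {
  prefetchTimer = setTimeout(() => addWorkItemQuery({ id, iid }), DEFAULT_DEBOUNCE_AND_THROTTLE_MS);
}

function clearPrefetching() {
  clearTimeout(prefetchTimer);
}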

View File

@ -0,0 +1,68 @@
<script>
import { createAlert } from '~/flash';
import { s__ } from '~/locale';
import updateWorkItemMutation from '../../graphql/update_work_item.mutation.graphql';
export default {
components: {
WorkItemLinkChild: () => import('./work_item_link_child.vue'),
},
props: {
workItemType: {
type: String,
required: true,
},
workItemId: {
type: String,
required: true,
},
children: {
type: Array,
required: false,
default: () => [],
},
canUpdate: {
type: Boolean,
required: false,
default: false,
},
projectPath: {
type: String,
required: true,
},
},
methods: {
async updateWorkItem(childId) {
try {
await this.$apollo.mutate({
mutation: updateWorkItemMutation,
variables: { input: { id: childId, hierarchyWidget: { parentId: null } } },
});
this.$emit('removeChild');
} catch (error) {
createAlert({
message: s__('Hierarchy|Something went wrong while removing a child item.'),
captureError: true,
error,
});
}
},
},
};
</script>
<template>
<div class="gl-ml-6">
<work-item-link-child
v-for="child in children"
:key="child.id"
:project-path="projectPath"
:can-update="canUpdate"
:issuable-gid="workItemId"
:child-item="child"
:work-item-type="workItemType"
@removeChild="updateWorkItem"
/>
</div>
</template>
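
The async component registration at the top of this file is what breaks the import cycle: work_item_link_child.vue renders this component, which renders work_item_link_child.vue again. A minimal sketch of the pattern:

export default {
  components: {
    // Deferring the import resolves the circular reference at runtime.
    WorkItemLinkChild: () => import('./work_item_link_child.vue'),
  },
};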

View File

@ -116,7 +116,7 @@ export const WORK_ITEMS_TYPE_MAP = {
},
[WORK_ITEM_TYPE_ENUM_KEY_RESULT]: {
icon: `issue-type-issue`,
name: s__('WorkItem|Key result'),
name: s__('WorkItem|Key Result'),
},
};
@ -127,6 +127,14 @@ export const WORK_ITEMS_TREE_TEXT_MAP = {
},
};
export const WORK_ITEM_NAME_TO_ICON_MAP = {
Issue: 'issue-type-issue',
Task: 'issue-type-task',
Objective: 'issue-type-objective',
// eslint-disable-next-line @gitlab/require-i18n-strings
'Key Result': 'issue-type-key-result',
};
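
A minimal sketch of how this map feeds the child item icon in work_item_link_child.vue (the task type constant's value is an assumption):

const TASK_TYPE_NAME = 'Task'; // assumed constant value

function iconNameFor(childItemType, isItemOpen) {
  // Tasks keep the open/closed issue icons; other types map by name.
  if (childItemType === TASK_TYPE_NAME) {
    return isItemOpen ? 'issue-open-m' : 'issue-close';
  }
  return WORK_ITEM_NAME_TO_ICON_MAP[childItemType];
}
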
export const FORM_TYPES = {
create: 'create',
add: 'add',

View File

@ -24,6 +24,8 @@ query workItemLinksQuery($id: WorkItemID!) {
confidential
workItemType {
id
name
iconName
}
title
state

View File

@ -0,0 +1,47 @@
query workItemTreeQuery($id: WorkItemID!) {
workItem(id: $id) {
id
workItemType {
id
name
iconName
}
title
userPermissions {
deleteWorkItem
updateWorkItem
}
confidential
widgets {
type
... on WorkItemWidgetHierarchy {
type
parent {
id
}
children {
nodes {
id
iid
confidential
workItemType {
id
name
iconName
}
title
state
createdAt
closedAt
widgets {
... on WorkItemWidgetHierarchy {
type
hasChildren
}
}
}
}
}
}
}
}

View File

@ -38,6 +38,7 @@ fragment WorkItemWidgets on WorkItemWidget {
}
... on WorkItemWidgetHierarchy {
type
hasChildren
parent {
id
iid
@ -56,11 +57,19 @@ fragment WorkItemWidgets on WorkItemWidget {
confidential
workItemType {
id
name
iconName
}
title
state
createdAt
closedAt
widgets {
... on WorkItemWidgetHierarchy {
type
hasChildren
}
}
}
}
}

View File

@ -65,7 +65,7 @@ module Pajamas
classes.push(VARIANT_CLASSES[@variant])
unless NON_CATEGORY_VARIANTS.include?(@variant) || @category == :primary
classes.push(VARIANT_CLASSES[@variant] + '-' + CATEGORY_CLASSES[@category])
classes.push("#{VARIANT_CLASSES[@variant]}-#{CATEGORY_CLASSES[@category]}")
end
classes.push(@button_options[:class])

View File

@ -78,7 +78,7 @@ module CreatesCommit
_("You can now submit a merge request to get this change into the original branch.")
end
flash[:notice] += " " + mr_message
flash[:notice] += " #{mr_message}"
end
end
end

View File

@ -117,7 +117,7 @@ class Groups::DependencyProxyForContainersController < ::Groups::DependencyProxy
end
def blob_file_name
@blob_file_name ||= params[:sha].sub('sha256:', '') + '.gz'
@blob_file_name ||= "#{params[:sha].sub('sha256:', '')}.gz"
end
def manifest_file_name

View File

@ -76,7 +76,7 @@ class JiraConnect::AppDescriptorController < JiraConnect::ApplicationController
jiraDevelopmentTool: {
actions: {
createBranch: {
templateUrl: new_jira_connect_branch_url + '?issue_key={issue.key}&issue_summary={issue.summary}'
templateUrl: "#{new_jira_connect_branch_url}?issue_key={issue.key}&issue_summary={issue.summary}"
}
},
key: 'gitlab-development-tool',

View File

@ -23,10 +23,6 @@ module Projects
def destroy
image.delete_scheduled!
unless Feature.enabled?(:container_registry_delete_repository_with_cron_worker)
DeleteContainerRepositoryWorker.perform_async(current_user.id, image.id) # rubocop:disable CodeReuse/Worker
end
track_package_event(:delete_repository, :container)
respond_to do |format|

View File

@ -22,10 +22,6 @@ module Mutations
container_repository.delete_scheduled!
unless Feature.enabled?(:container_registry_delete_repository_with_cron_worker)
DeleteContainerRepositoryWorker.perform_async(current_user.id, container_repository.id) # rubocop:disable CodeReuse/Worker
end
track_event(:delete_repository, :container)
{

View File

@ -49,9 +49,7 @@ module Types
An example `#{graphql_name}` is: `"#{::Gitlab::GlobalId.build(model_name: model_name, id: 1)}"`.
#{
if deprecation = Gitlab::GlobalId::Deprecations.deprecation_by(model_name)
'The older format `"' +
::Gitlab::GlobalId.build(model_name: deprecation.old_name, id: 1).to_s +
'"` was deprecated in ' + deprecation.milestone + '.'
"The older format `\"#{::Gitlab::GlobalId.build(model_name: deprecation.old_name, id: 1)}\"` was deprecated in #{deprecation.milestone}."
end}
MD

View File

@ -234,11 +234,11 @@ module ApplicationHelper
end
def promo_url
'https://' + promo_host
"https://#{promo_host}"
end
def support_url
Gitlab::CurrentSettings.current_application_settings.help_page_support_url.presence || promo_url + '/getting-help/'
Gitlab::CurrentSettings.current_application_settings.help_page_support_url.presence || "#{promo_url}/getting-help/"
end
def instance_review_permitted?

View File

@ -113,7 +113,7 @@ module BlobHelper
end
def parent_dir_raw_path
blob_raw_path.rpartition("/").first + "/"
"#{blob_raw_path.rpartition('/').first}/"
end
# SVGs can contain malicious JavaScript; only include whitelisted

View File

@ -86,7 +86,7 @@ module DropdownsHelper
title_output = []
if has_back
title_output << content_tag(:button, class: "dropdown-title-button dropdown-menu-back " + margin_class, aria: { label: "Go back" }, type: "button") do
title_output << content_tag(:button, class: "dropdown-title-button dropdown-menu-back #{margin_class}", aria: { label: "Go back" }, type: "button") do
sprite_icon('arrow-left')
end
end
@ -94,7 +94,7 @@ module DropdownsHelper
title_output << content_tag(:span, title, class: margin_class)
if has_close
title_output << content_tag(:button, class: "dropdown-title-button dropdown-menu-close " + margin_class, aria: { label: "Close" }, type: "button") do
title_output << content_tag(:button, class: "dropdown-title-button dropdown-menu-close #{margin_class}", aria: { label: "Close" }, type: "button") do
sprite_icon('close', size: 16, css_class: 'dropdown-menu-close-icon')
end
end

View File

@ -139,7 +139,7 @@ module EmailsHelper
max_domain_length = list_id_max_length - Gitlab.config.gitlab.host.length - project.id.to_s.length - 2
if max_domain_length < 3
return project.id.to_s + "..." + Gitlab.config.gitlab.host
return "#{project.id}...#{Gitlab.config.gitlab.host}"
end
if project_path_as_domain.length > max_domain_length
@ -151,7 +151,7 @@ module EmailsHelper
project_path_as_domain = project_path_as_domain.slice(0, last_dot_index).concat("..")
end
project.id.to_s + "." + project_path_as_domain + "." + Gitlab.config.gitlab.host
"#{project.id}.#{project_path_as_domain}.#{Gitlab.config.gitlab.host}"
end
def html_header_message

View File

@ -92,7 +92,7 @@ module EventsHelper
content_tag :li, class: active do
link_to request.path, link_opts do
content_tag(:span, ' ' + text)
content_tag(:span, " #{text}")
end
end
end

View File

@ -6,7 +6,7 @@ module NumbersHelper
count = resource.page.total_count_with_limit(:all, limit: limit)
if count > limit
number_with_delimiter(count - 1, options) + '+'
"#{number_with_delimiter(count - 1, options)}+"
else
number_with_delimiter(count, options)
end

View File

@ -75,7 +75,7 @@ module SubmoduleHelper
return true if url_no_dotgit == [Gitlab.config.gitlab.url, '/', namespace, '/',
project].join('')
url_with_dotgit = url_no_dotgit + '.git'
url_with_dotgit = "#{url_no_dotgit}.git"
url_with_dotgit == Gitlab::RepositoryUrlBuilder.build([namespace, '/', project].join(''))
end
@ -108,7 +108,7 @@ module SubmoduleHelper
def relative_self_links(relative_path, commit, old_commit, project)
relative_path = relative_path.rstrip
absolute_project_path = "/" + project.full_path
absolute_project_path = "/#{project.full_path}"
# Resolve `relative_path` to target path
# Assuming `absolute_project_path` is `/g1/p1`:

View File

@ -79,7 +79,7 @@ module TodosHelper
IntegrationsHelper.integration_todo_target_type(todo.target_type)
end
target_type + ' ' + todo_target_name(todo)
"#{target_type} #{todo_target_name(todo)}"
end
def todo_target_path(todo)

View File

@ -88,7 +88,7 @@ class ApplicationSetting < ApplicationRecord
validates :grafana_url,
system_hook_url: {
blocked_message: "is blocked: %{exception_message}. " + GRAFANA_URL_ERROR_MESSAGE
blocked_message: "is blocked: %{exception_message}. #{GRAFANA_URL_ERROR_MESSAGE}"
},
if: :grafana_url_absolute?

View File

@ -148,7 +148,7 @@ class CommitRange
def sha_start
return unless sha_from
exclude_start? ? sha_from + '^' : sha_from
exclude_start? ? "#{sha_from}^" : sha_from
end
def commit_start

View File

@ -11,7 +11,7 @@ class ProjectStatistics < ApplicationRecord
attribute :snippets_size, default: 0
counter_attribute :build_artifacts_size
counter_attribute :packages_size, if: -> (statistics) { Feature.enabled?(:packages_size_counter_attribute, statistics.project) }
counter_attribute :packages_size
counter_attribute_after_flush do |project_statistic|
project_statistic.refresh_storage_size!
@ -23,7 +23,6 @@ class ProjectStatistics < ApplicationRecord
COLUMNS_TO_REFRESH = [:repository_size, :wiki_size, :lfs_objects_size, :commit_count, :snippets_size, :uploads_size, :container_registry_size].freeze
INCREMENTABLE_COLUMNS = {
packages_size: %i[storage_size], # remove this along with packages_size_counter_attribute
pipeline_artifacts_size: %i[storage_size],
snippets_size: %i[storage_size]
}.freeze

View File

@ -10,6 +10,7 @@
= f.label :name, class: 'label-bold' do
%span= _("Project name")
= f.text_field :name, placeholder: "My awesome project", class: "form-control gl-form-input input-lg", data: { qa_selector: 'project_name', track_label: "#{track_label}", track_action: "activate_form_input", track_property: "project_name", track_value: "" }, required: true, aria: { required: true }
#project_name_error.gl-field-error.hidden
.form-group.project-path.col-sm-6.gl-pr-0
= f.label :namespace_id, class: 'label-bold' do
%span= _('Project URL')

View File

@ -15,8 +15,6 @@ module ContainerRegistry
BATCH_SIZE = 200
def perform
return unless Feature.enabled?(:container_registry_delete_repository_with_cron_worker)
log_counts
reset_stale_deletes

View File

@ -22,7 +22,6 @@ module ContainerRegistry
}.freeze
def perform_work
return unless Feature.enabled?(:container_registry_delete_repository_with_cron_worker)
return unless next_container_repository
result = delete_tags
@ -40,8 +39,6 @@ module ContainerRegistry
end
def remaining_work_count
return 0 unless Feature.enabled?(:container_registry_delete_repository_with_cron_worker)
::ContainerRepository.delete_scheduled.limit(max_running_jobs + 1).count
end

View File

@ -11,64 +11,5 @@ class DeleteContainerRepositoryWorker # rubocop:disable Scalability/IdempotentWo
queue_namespace :container_repository
feature_category :container_registry
LEASE_TIMEOUT = 1.hour.freeze
FIXED_DELAY = 10.seconds.freeze
attr_reader :container_repository
def perform(current_user_id, container_repository_id)
current_user = User.find_by_id(current_user_id)
@container_repository = ContainerRepository.find_by_id(container_repository_id)
project = container_repository&.project
return unless current_user && container_repository && project
if migration.delete_container_repository_worker_support? && migrating?
delay = migration_duration
self.class.perform_in(delay.from_now)
log_extra_metadata_on_done(:delete_postponed, delay)
return
end
# If a user accidentally attempts to delete the same container registry in quick succession,
# this can lead to orphaned tags.
try_obtain_lease do
Projects::ContainerRepository::DestroyService.new(project, current_user).execute(container_repository)
end
end
private
def migrating?
!(container_repository.default? ||
container_repository.import_done? ||
container_repository.import_skipped?)
end
def migration_duration
duration = migration.import_timeout.seconds + FIXED_DELAY
if container_repository.pre_importing?
duration += migration.dynamic_pre_import_timeout_for(container_repository)
end
duration
end
def migration
ContainerRegistry::Migration
end
# For ExclusiveLeaseGuard concern
def lease_key
@lease_key ||= "container_repository:delete:#{container_repository.id}"
end
# For ExclusiveLeaseGuard concern
def lease_timeout
LEASE_TIMEOUT
end
def perform(current_user_id, container_repository_id); end
end

View File

@ -1,8 +0,0 @@
---
name: container_registry_delete_repository_with_cron_worker
introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/101918
rollout_issue_url: https://gitlab.com/gitlab-org/gitlab/-/issues/378818
milestone: '15.6'
type: development
group: group::container registry
default_enabled: false

View File

@ -1,8 +0,0 @@
---
name: packages_size_counter_attribute
introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/102978
rollout_issue_url: https://gitlab.com/gitlab-org/gitlab/-/issues/381287
milestone: '15.7'
type: development
group: group::package registry
default_enabled: false

View File

@ -0,0 +1,42 @@
# frozen_string_literal: true
def filter_changed_lines(files)
lines = []
files.each do |file|
qa_selector_changed_lines = helper.changed_lines(file).select { |line| line =~ /qa_selector|data-qa-selector/ }
next unless qa_selector_changed_lines.any?
lines += ["file `#{file}`:", qa_selector_changed_lines]
end
lines
end
changed_code_files = helper.changed_files(/\.(vue|haml|js|rb)$/)
return if changed_code_files.empty?
lines_with_qa_selectors = filter_changed_lines(changed_code_files)
return if lines_with_qa_selectors.empty?
markdown(<<~MARKDOWN)
## QA Selectors
MARKDOWN
if lines_with_qa_selectors.any?
markdown(<<~MARKDOWN)
The following changed lines in this MR contain QA selectors:
* #{lines_with_qa_selectors.join("\n* ")}
Please ensure the `e2e:package-and-test` job is run and the tests are passing.
For the list of known failures please refer to [the latest pipeline triage issue](https://gitlab.com/gitlab-org/quality/pipeline-triage/-/issues).
If your changes are under a feature flag, please check our [Testing with feature flags](https://docs.gitlab.com/ee/development/testing_guide/end_to_end/feature_flags.html#automatic-test-execution-when-a-feature-flag-definition-changes) documentation for instructions.
MARKDOWN
warn "This merge request contains lines with QA selectors. Please ensure `e2e:package-and-test` job is run."
end

View File

@ -39,10 +39,6 @@ results in a loop that consistently fails for all objects stored in object stora
For information on how to fix this, see
[Troubleshooting - Failed syncs with GitLab-managed object storage replication](troubleshooting.md#failed-syncs-with-gitlab-managed-object-storage-replication).
## Upgrading to 14.6
[Geo proxying](../secondary_proxy/index.md) was [enabled by default for unified URLs](https://gitlab.com/gitlab-org/gitlab/-/issues/325732) in 14.6. This may be a breaking change. If needed, you may [disable Geo proxying](../secondary_proxy/index.md#disable-geo-proxying).
## Upgrading to 14.4
There is [an issue in GitLab 14.4.0 through 14.4.2](../../../update/index.md#1440) that can affect Geo and other features that rely on cronjobs. We recommend upgrading to GitLab 14.4.3 or later.

View File

@ -41,7 +41,8 @@ full list of reference architectures, see
<!-- markdownlint-disable MD029 -->
1. Can be optionally run on reputable third-party external PaaS PostgreSQL solutions. See [Recommended cloud providers and services](index.md#recommended-cloud-providers-and-services) for more information.
- [Google Cloud SQL](https://cloud.google.com/sql/docs/postgres/high-availability#normal) and [Amazon RDS](https://aws.amazon.com/rds/) are known to work.
- [Google AlloyDB](https://cloud.google.com/alloydb) and [Amazon RDS Multi-AZ DB clusters](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/multi-az-db-clusters-concepts.html) have not been tested and are not recommended. Both solutions are specifically not expected to work with GitLab Geo.
- [Google AlloyDB](https://cloud.google.com/alloydb) and [Amazon RDS Multi-AZ DB cluster](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/multi-az-db-clusters-concepts.html) have not been tested and are not recommended. Both solutions are specifically not expected to work with GitLab Geo.
- Note that [Amazon RDS Multi-AZ DB instance](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.MultiAZSingleStandby.html) is a separate product and is supported.
- [Amazon Aurora](https://aws.amazon.com/rds/aurora/) is **incompatible** with load balancing enabled by default in [14.4.0](../../update/index.md#1440).
- Consul is primarily used for Omnibus PostgreSQL high availability so can be ignored when using a PostgreSQL PaaS setup. However, Consul is also used optionally by Prometheus for Omnibus auto host discovery.
2. Can be optionally run on reputable third-party external PaaS Redis solutions. See [Recommended cloud providers and services](index.md#recommended-cloud-providers-and-services) for more information.
@ -1275,7 +1276,7 @@ in the second step, do not supply the `EXTERNAL_URL` value.
# PostgreSQL configuration
postgresql['listen_address'] = '0.0.0.0'
postgresql['max_connections'] = 200
postgresql['max_connections'] = 500
# Prevent database migrations from running on upgrade automatically
gitlab_rails['auto_migrate'] = false
@ -2319,12 +2320,10 @@ Refer to [epic 6127](https://gitlab.com/groups/gitlab-org/-/epics/6127) for more
The following tables and diagram detail the hybrid environment using the same formats
as the normal environment above.
First are the components that run in Kubernetes. The recommendation at this time is to
use Google Cloud's Kubernetes Engine (GKE) or AWS Elastic Kubernetes Service (EKS) and associated machine types, but the memory
and CPU requirements should translate to most other providers. We hope to update this in the
future with further specific cloud provider details.
First are the components that run in Kubernetes. These run across several node groups, although you can change
the overall makeup as desired as long as the minimum CPU and Memory requirements are observed.
| Service | Nodes | Configuration | GCP | AWS | Min Allocatable CPUs and Memory |
| Service Node Group | Nodes | Configuration | GCP | AWS | Min Allocatable CPUs and Memory |
|---------------------|-------|-------------------------|-----------------|--------------|---------------------------------|
| Webservice | 4 | 32 vCPU, 28.8 GB memory | `n1-highcpu-32` | `c5.9xlarge` | 127.5 vCPU, 118 GB memory |
| Sidekiq | 4 | 4 vCPU, 15 GB memory | `n1-standard-4` | `m5.xlarge` | 15.5 vCPU, 50 GB memory |
@ -2333,7 +2332,7 @@ future with further specific cloud provider details.
- For this setup, we **recommend** and regularly [test](index.md#validation-and-test-results)
[Google Kubernetes Engine (GKE)](https://cloud.google.com/kubernetes-engine) and [Amazon Elastic Kubernetes Service (EKS)](https://aws.amazon.com/eks/). Other Kubernetes services may also work, but your mileage may vary.
- Nodes configuration is shown as it is forced to ensure pod vCPU / memory ratios and avoid scaling during **performance testing**.
- In production deployments, there is no need to assign pods to nodes. A minimum of three nodes in three different availability zones is strongly recommended to align with resilient cloud architecture practices.
- In production deployments, there is no need to assign pods to specific nodes. A minimum of three nodes per node group in three different availability zones is strongly recommended to align with resilient cloud architecture practices.
Next are the backend components that run on static compute VMs via Omnibus (or External PaaS
services where applicable):
@ -2355,7 +2354,8 @@ services where applicable):
<!-- markdownlint-disable MD029 -->
1. Can be optionally run on reputable third-party external PaaS PostgreSQL solutions. See [Recommended cloud providers and services](index.md#recommended-cloud-providers-and-services) for more information.
- [Google Cloud SQL](https://cloud.google.com/sql/docs/postgres/high-availability#normal) and [Amazon RDS](https://aws.amazon.com/rds/) are known to work.
- [Google AlloyDB](https://cloud.google.com/alloydb) and [Amazon RDS Multi-AZ DB clusters](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/multi-az-db-clusters-concepts.html) have not been tested and are not recommended. Both solutions are specifically not expected to work with GitLab Geo.
- [Google AlloyDB](https://cloud.google.com/alloydb) and [Amazon RDS Multi-AZ DB cluster](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/multi-az-db-clusters-concepts.html) have not been tested and are not recommended. Both solutions are specifically not expected to work with GitLab Geo.
- Note that [Amazon RDS Multi-AZ DB instance](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.MultiAZSingleStandby.html) is a separate product and is supported.
- [Amazon Aurora](https://aws.amazon.com/rds/aurora/) is **incompatible** with load balancing enabled by default in [14.4.0](../../update/index.md#1440).
- Consul is primarily used for Omnibus PostgreSQL high availability so can be ignored when using a PostgreSQL PaaS setup. However, Consul is also used optionally by Prometheus for Omnibus auto host discovery.
2. Can be optionally run on reputable third-party external PaaS Redis solutions. See [Recommended cloud providers and services](index.md#recommended-cloud-providers-and-services) for more information.

View File

@ -41,7 +41,8 @@ full list of reference architectures, see
<!-- markdownlint-disable MD029 -->
1. Can be optionally run on reputable third-party external PaaS PostgreSQL solutions. See [Recommended cloud providers and services](index.md#recommended-cloud-providers-and-services) for more information.
- [Google Cloud SQL](https://cloud.google.com/sql/docs/postgres/high-availability#normal) and [Amazon RDS](https://aws.amazon.com/rds/) are known to work.
- [Google AlloyDB](https://cloud.google.com/alloydb) and [Amazon RDS Multi-AZ DB clusters](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/multi-az-db-clusters-concepts.html) have not been tested and are not recommended. Both solutions are specifically not expected to work with GitLab Geo.
- [Google AlloyDB](https://cloud.google.com/alloydb) and [Amazon RDS Multi-AZ DB cluster](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/multi-az-db-clusters-concepts.html) have not been tested and are not recommended. Both solutions are specifically not expected to work with GitLab Geo.
- Note that [Amazon RDS Multi-AZ DB instance](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.MultiAZSingleStandby.html) is a separate product and is supported.
- [Amazon Aurora](https://aws.amazon.com/rds/aurora/) is **incompatible** with load balancing enabled by default in [14.4.0](../../update/index.md#1440).
- Consul is primarily used for Omnibus PostgreSQL high availability so can be ignored when using a PostgreSQL PaaS setup. However, Consul is also used optionally by Prometheus for Omnibus auto host discovery.
2. Can be optionally run on reputable third-party external PaaS Redis solutions. See [Recommended cloud providers and services](index.md#recommended-cloud-providers-and-services) for more information.
@ -1295,7 +1296,7 @@ in the second step, do not supply the `EXTERNAL_URL` value.
# PostgreSQL configuration
postgresql['listen_address'] = '0.0.0.0'
postgresql['max_connections'] = 200
postgresql['max_connections'] = 500
# Prevent database migrations from running on upgrade automatically
gitlab_rails['auto_migrate'] = false
@ -2338,12 +2339,10 @@ Refer to [epic 6127](https://gitlab.com/groups/gitlab-org/-/epics/6127) for more
The following tables and diagram detail the hybrid environment using the same formats
as the normal environment above.
First are the components that run in Kubernetes. The recommendation at this time is to
use Google Cloud's Kubernetes Engine (GKE) or AWS Elastic Kubernetes Service (EKS) and associated machine types, but the memory
and CPU requirements should translate to most other providers. We hope to update this in the
future with further specific cloud provider details.
First are the components that run in Kubernetes. These run across several node groups, although you can change
the overall makeup as desired as long as the minimum CPU and Memory requirements are observed.
| Service | Nodes | Configuration | GCP | AWS | Min Allocatable CPUs and Memory |
| Service Node Group | Nodes | Configuration | GCP | AWS | Min Allocatable CPUs and Memory |
|---------------------|-------|-------------------------|-----------------|--------------|---------------------------------|
| Webservice | 7 | 32 vCPU, 28.8 GB memory | `n1-highcpu-32` | `c5.9xlarge` | 223 vCPU, 206.5 GB memory |
| Sidekiq | 4 | 4 vCPU, 15 GB memory | `n1-standard-4` | `m5.xlarge` | 15.5 vCPU, 50 GB memory |
@ -2352,7 +2351,7 @@ future with further specific cloud provider details.
- For this setup, we **recommend** and regularly [test](index.md#validation-and-test-results)
[Google Kubernetes Engine (GKE)](https://cloud.google.com/kubernetes-engine) and [Amazon Elastic Kubernetes Service (EKS)](https://aws.amazon.com/eks/). Other Kubernetes services may also work, but your mileage may vary.
- Nodes configuration is shown as it is forced to ensure pod vCPU / memory ratios and avoid scaling during **performance testing**.
- In production deployments, there is no need to assign pods to nodes. A minimum of three nodes in three different availability zones is strongly recommended to align with resilient cloud architecture practices.
- In production deployments, there is no need to assign pods to specific nodes. A minimum of three nodes per node group in three different availability zones is strongly recommended to align with resilient cloud architecture practices.
Next are the backend components that run on static compute VMs via Omnibus (or External PaaS
services where applicable):
@ -2374,7 +2373,8 @@ services where applicable):
<!-- markdownlint-disable MD029 -->
1. Can be optionally run on reputable third-party external PaaS PostgreSQL solutions. See [Recommended cloud providers and services](index.md#recommended-cloud-providers-and-services) for more information.
- [Google Cloud SQL](https://cloud.google.com/sql/docs/postgres/high-availability#normal) and [Amazon RDS](https://aws.amazon.com/rds/) are known to work.
- [Google AlloyDB](https://cloud.google.com/alloydb) and [Amazon RDS Multi-AZ DB clusters](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/multi-az-db-clusters-concepts.html) have not been tested and are not recommended. Both solutions are specifically not expected to work with GitLab Geo.
- [Google AlloyDB](https://cloud.google.com/alloydb) and [Amazon RDS Multi-AZ DB cluster](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/multi-az-db-clusters-concepts.html) have not been tested and are not recommended. Both solutions are specifically not expected to work with GitLab Geo.
- Note that [Amazon RDS Multi-AZ DB instance](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.MultiAZSingleStandby.html) is a separate product and is supported.
- [Amazon Aurora](https://aws.amazon.com/rds/aurora/) is **incompatible** with load balancing enabled by default in [14.4.0](../../update/index.md#1440).
- Consul is primarily used for Omnibus PostgreSQL high availability so can be ignored when using a PostgreSQL PaaS setup. However, Consul is also used optionally by Prometheus for Omnibus auto host discovery.
2. Can be optionally run on reputable third-party external PaaS Redis solutions. See [Recommended cloud providers and services](index.md#recommended-cloud-providers-and-services) for more information.

View File

@ -34,7 +34,8 @@ For a full list of reference architectures, see
<!-- markdownlint-disable MD029 -->
1. Can be optionally run on reputable third-party external PaaS PostgreSQL solutions. See [Recommended cloud providers and services](index.md#recommended-cloud-providers-and-services) for more information.
- [Google Cloud SQL](https://cloud.google.com/sql/docs/postgres/high-availability#normal) and [Amazon RDS](https://aws.amazon.com/rds/) are known to work.
- [Google AlloyDB](https://cloud.google.com/alloydb) and [Amazon RDS Multi-AZ DB clusters](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/multi-az-db-clusters-concepts.html) have not been tested and are not recommended. Both solutions are specifically not expected to work with GitLab Geo.
- [Google AlloyDB](https://cloud.google.com/alloydb) and [Amazon RDS Multi-AZ DB cluster](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/multi-az-db-clusters-concepts.html) have not been tested and are not recommended. Both solutions are specifically not expected to work with GitLab Geo.
- Note that [Amazon RDS Multi-AZ DB instance](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.MultiAZSingleStandby.html) is a separate product and is supported.
- [Amazon Aurora](https://aws.amazon.com/rds/aurora/) is **incompatible** with load balancing enabled by default in [14.4.0](../../update/index.md#1440), and [Azure Database for PostgreSQL](https://azure.microsoft.com/en-gb/products/postgresql/#overview) is **not recommended** due to [performance issues](https://gitlab.com/gitlab-org/quality/reference-architectures/-/issues/61).
- Consul is primarily used for Omnibus PostgreSQL high availability so can be ignored when using a PostgreSQL PaaS setup. However, Consul is also used optionally by Prometheus for Omnibus auto host discovery.
2. Can be optionally run on reputable third-party external PaaS Redis solutions. See [Recommended cloud providers and services](index.md#recommended-cloud-providers-and-services) for more information.
@ -1046,12 +1047,10 @@ Refer to [epic 6127](https://gitlab.com/groups/gitlab-org/-/epics/6127) for more
The following tables and diagram detail the hybrid environment using the same formats
as the normal environment above.
First are the components that run in Kubernetes. The recommendation at this time is to
use Google Cloud's Kubernetes Engine (GKE) or AWS Elastic Kubernetes Service (EKS) and associated machine types, but the memory
and CPU requirements should translate to most other providers. We hope to update this in the
future with further specific cloud provider details.
First are the components that run in Kubernetes. These run across several node groups, although you can change
the overall makeup as desired as long as the minimum CPU and Memory requirements are observed.
| Service | Nodes | Configuration | GCP | AWS | Min Allocatable CPUs and Memory |
| Service Node Group | Nodes | Configuration | GCP | AWS | Min Allocatable CPUs and Memory |
|---------------------|-------|------------------------|-----------------|--------------|---------------------------------|
| Webservice | 3 | 8 vCPU, 7.2 GB memory | `n1-highcpu-8` | `c5.2xlarge` | 23.7 vCPU, 16.9 GB memory |
| Sidekiq | 2 | 4 vCPU, 15 GB memory | `n1-standard-4` | `m5.xlarge` | 7.8 vCPU, 25.9 GB memory |
@ -1060,7 +1059,7 @@ future with further specific cloud provider details.
- For this setup, we **recommend** and regularly [test](index.md#validation-and-test-results)
[Google Kubernetes Engine (GKE)](https://cloud.google.com/kubernetes-engine) and [Amazon Elastic Kubernetes Service (EKS)](https://aws.amazon.com/eks/). Other Kubernetes services may also work, but your mileage may vary.
- Nodes configuration is shown as it is forced to ensure pod vCPU / memory ratios and avoid scaling during **performance testing**.
- In production deployments, there is no need to assign pods to nodes. A minimum of three nodes in three different availability zones is strongly recommended to align with resilient cloud architecture practices.
- In production deployments, there is no need to assign pods to specific nodes. A minimum of three nodes per node group in three different availability zones is strongly recommended to align with resilient cloud architecture practices.
Next are the backend components that run on static compute VMs via Omnibus (or External PaaS
services where applicable):
@ -1076,7 +1075,8 @@ services where applicable):
<!-- markdownlint-disable MD029 -->
1. Can be optionally run on reputable third-party external PaaS PostgreSQL solutions. See [Recommended cloud providers and services](index.md#recommended-cloud-providers-and-services) for more information.
- [Google Cloud SQL](https://cloud.google.com/sql/docs/postgres/high-availability#normal) and [Amazon RDS](https://aws.amazon.com/rds/) are known to work.
- [Google AlloyDB](https://cloud.google.com/alloydb) and [Amazon RDS Multi-AZ DB clusters](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/multi-az-db-clusters-concepts.html) have not been tested and are not recommended. Both solutions are specifically not expected to work with GitLab Geo.
- [Google AlloyDB](https://cloud.google.com/alloydb) and [Amazon RDS Multi-AZ DB cluster](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/multi-az-db-clusters-concepts.html) have not been tested and are not recommended. Both solutions are specifically not expected to work with GitLab Geo.
- Note that [Amazon RDS Multi-AZ DB instance](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.MultiAZSingleStandby.html) is a separate product and is supported.
- [Amazon Aurora](https://aws.amazon.com/rds/aurora/) is **incompatible** with load balancing enabled by default in [14.4.0](../../update/index.md#1440), and [Azure Database for PostgreSQL](https://azure.microsoft.com/en-gb/products/postgresql/#overview) is **not recommended** due to [performance issues](https://gitlab.com/gitlab-org/quality/reference-architectures/-/issues/61).
- Consul is primarily used for Omnibus PostgreSQL high availability so can be ignored when using a PostgreSQL PaaS setup. However, Consul is also used optionally by Prometheus for Omnibus auto host discovery.
2. Can be optionally run on reputable third-party external PaaS Redis solutions. See [Recommended cloud providers and services](index.md#recommended-cloud-providers-and-services) for more information.

View File

@ -50,7 +50,8 @@ For a full list of reference architectures, see
<!-- markdownlint-disable MD029 -->
1. Can be optionally run on reputable third-party external PaaS PostgreSQL solutions. See [Recommended cloud providers and services](index.md#recommended-cloud-providers-and-services) for more information.
- [Google Cloud SQL](https://cloud.google.com/sql/docs/postgres/high-availability#normal) and [Amazon RDS](https://aws.amazon.com/rds/) are known to work.
- [Google AlloyDB](https://cloud.google.com/alloydb) and [Amazon RDS Multi-AZ DB clusters](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/multi-az-db-clusters-concepts.html) have not been tested and are not recommended. Both solutions are specifically not expected to work with GitLab Geo.
- [Google AlloyDB](https://cloud.google.com/alloydb) and [Amazon RDS Multi-AZ DB cluster](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/multi-az-db-clusters-concepts.html) have not been tested and are not recommended. Both solutions are specifically not expected to work with GitLab Geo.
- Note that [Amazon RDS Multi-AZ DB instance](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.MultiAZSingleStandby.html) is a separate product and is supported.
- [Amazon Aurora](https://aws.amazon.com/rds/aurora/) is **incompatible** with load balancing, which is enabled by default in [14.4.0](../../update/index.md#1440).
- Consul is primarily used for Omnibus PostgreSQL high availability so can be ignored when using a PostgreSQL PaaS setup. However, Consul is also used optionally by Prometheus for Omnibus auto host discovery.
2. Can be optionally run on reputable third-party external PaaS Redis solutions. See [Recommended cloud providers and services](index.md#recommended-cloud-providers-and-services) for more information.
@ -1230,7 +1231,7 @@ in the second step, do not supply the `EXTERNAL_URL` value.
# PostgreSQL configuration
postgresql['listen_address'] = '0.0.0.0'
postgresql['max_connections'] = 200
postgresql['max_connections'] = 500
# Prevent database migrations from running on upgrade automatically
gitlab_rails['auto_migrate'] = false
@ -2309,12 +2310,10 @@ Refer to [epic 6127](https://gitlab.com/groups/gitlab-org/-/epics/6127) for more
The following tables and diagram detail the hybrid environment using the same formats
as the normal environment above.
First are the components that run in Kubernetes. The recommendation at this time is to
use Google Cloud's Kubernetes Engine (GKE) or AWS Elastic Kubernetes Service (EKS) and associated machine types, but the memory
and CPU requirements should translate to most other providers. We hope to update this in the
future with further specific cloud provider details.
First are the components that run in Kubernetes. These run across several node groups, although you can change
the overall makeup as desired as long as the minimum CPU and memory requirements are observed.
| Service | Nodes | Configuration | GCP | AWS | Min Allocatable CPUs and Memory |
| Service Node Group | Nodes | Configuration | GCP | AWS | Min Allocatable CPUs and Memory |
|---------------------|-------|-------------------------|-----------------|--------------|---------------------------------|
| Webservice | 2 | 16 vCPU, 14.4 GB memory | `n1-highcpu-16` | `c5.4xlarge` | 31.8 vCPU, 24.8 GB memory |
| Sidekiq | 3 | 4 vCPU, 15 GB memory | `n1-standard-4` | `m5.xlarge` | 11.8 vCPU, 38.9 GB memory |
@ -2323,7 +2322,7 @@ future with further specific cloud provider details.
- For this setup, we **recommend** and regularly [test](index.md#validation-and-test-results)
[Google Kubernetes Engine (GKE)](https://cloud.google.com/kubernetes-engine) and [Amazon Elastic Kubernetes Service (EKS)](https://aws.amazon.com/eks/). Other Kubernetes services may also work, but your mileage may vary.
- Node configuration is shown as it is forced to ensure pod vCPU / memory ratios and to avoid scaling during **performance testing**.
- In production deployments, there is no need to assign pods to nodes. A minimum of three nodes in three different availability zones is strongly recommended to align with resilient cloud architecture practices.
- In production deployments, there is no need to assign pods to specific nodes. A minimum of three nodes per node group in three different availability zones is strongly recommended to align with resilient cloud architecture practices.
Next are the backend components that run on static compute VMs via Omnibus (or External PaaS
services where applicable):
@ -2344,7 +2343,8 @@ services where applicable):
<!-- markdownlint-disable MD029 -->
1. Can be optionally run on reputable third-party external PaaS PostgreSQL solutions. See [Recommended cloud providers and services](index.md#recommended-cloud-providers-and-services) for more information.
- [Google Cloud SQL](https://cloud.google.com/sql/docs/postgres/high-availability#normal) and [Amazon RDS](https://aws.amazon.com/rds/) are known to work.
- [Google AlloyDB](https://cloud.google.com/alloydb) and [Amazon RDS Multi-AZ DB clusters](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/multi-az-db-clusters-concepts.html) have not been tested and are not recommended. Both solutions are specifically not expected to work with GitLab Geo.
- [Google AlloyDB](https://cloud.google.com/alloydb) and [Amazon RDS Multi-AZ DB cluster](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/multi-az-db-clusters-concepts.html) have not been tested and are not recommended. Both solutions are specifically not expected to work with GitLab Geo.
- Note that [Amazon RDS Multi-AZ DB instance](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.MultiAZSingleStandby.html) is a separate product and is supported.
- [Amazon Aurora](https://aws.amazon.com/rds/aurora/) is **incompatible** with load balancing, which is enabled by default in [14.4.0](../../update/index.md#1440).
- Consul is primarily used for Omnibus PostgreSQL high availability so can be ignored when using a PostgreSQL PaaS setup. However, Consul is also used optionally by Prometheus for Omnibus auto host discovery.
2. Can be optionally run on reputable third-party external PaaS Redis solutions. See [Recommended cloud providers and services](index.md#recommended-cloud-providers-and-services) for more information.

View File

@ -41,7 +41,8 @@ full list of reference architectures, see
<!-- markdownlint-disable MD029 -->
1. Can be optionally run on reputable third-party external PaaS PostgreSQL solutions. See [Recommended cloud providers and services](index.md#recommended-cloud-providers-and-services) for more information.
- [Google Cloud SQL](https://cloud.google.com/sql/docs/postgres/high-availability#normal) and [Amazon RDS](https://aws.amazon.com/rds/) are known to work.
- [Google AlloyDB](https://cloud.google.com/alloydb) and [Amazon RDS Multi-AZ DB clusters](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/multi-az-db-clusters-concepts.html) have not been tested and are not recommended. Both solutions are specifically not expected to work with GitLab Geo.
- [Google AlloyDB](https://cloud.google.com/alloydb) and [Amazon RDS Multi-AZ DB cluster](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/multi-az-db-clusters-concepts.html) have not been tested and are not recommended. Both solutions are specifically not expected to work with GitLab Geo.
- Note that [Amazon RDS Multi-AZ DB instance](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.MultiAZSingleStandby.html) is a separate product and is supported.
- [Amazon Aurora](https://aws.amazon.com/rds/aurora/) is **incompatible** with load balancing, which is enabled by default in [14.4.0](../../update/index.md#1440).
- Consul is primarily used for Omnibus PostgreSQL high availability so can be ignored when using a PostgreSQL PaaS setup. However, Consul is also used optionally by Prometheus for Omnibus auto host discovery.
2. Can be optionally run on reputable third-party external PaaS Redis solutions. See [Recommended cloud providers and services](index.md#recommended-cloud-providers-and-services) for more information.
@ -1288,7 +1289,7 @@ in the second step, do not supply the `EXTERNAL_URL` value.
# PostgreSQL configuration
postgresql['listen_address'] = '0.0.0.0'
postgresql['max_connections'] = 200
postgresql['max_connections'] = 500
# Prevent database migrations from running on upgrade automatically
gitlab_rails['auto_migrate'] = false
@ -2340,12 +2341,10 @@ Refer to [epic 6127](https://gitlab.com/groups/gitlab-org/-/epics/6127) for more
The following tables and diagram detail the hybrid environment using the same formats
as the normal environment above.
First are the components that run in Kubernetes. The recommendation at this time is to
use Google Cloud's Kubernetes Engine (GKE) or AWS Elastic Kubernetes Service (EKS) and associated machine types, but the memory
and CPU requirements should translate to most other providers. We hope to update this in the
future with further specific cloud provider details.
First are the components that run in Kubernetes. These run across several node groups, although you can change
the overall makeup as desired as long as the minimum CPU and memory requirements are observed.
| Service | Nodes | Configuration | GCP | AWS | Min Allocatable CPUs and Memory |
| Service Node Group | Nodes | Configuration | GCP | AWS | Min Allocatable CPUs and Memory |
|---------------------|-------|-------------------------|-----------------|--------------|---------------------------------|
| Webservice | 16 | 32 vCPU, 28.8 GB memory | `n1-highcpu-32` | `m5.8xlarge` | 510 vCPU, 472 GB memory |
| Sidekiq | 4 | 4 vCPU, 15 GB memory | `n1-standard-4` | `m5.xlarge` | 15.5 vCPU, 50 GB memory |
@ -2354,7 +2353,7 @@ future with further specific cloud provider details.
- For this setup, we **recommend** and regularly [test](index.md#validation-and-test-results)
[Google Kubernetes Engine (GKE)](https://cloud.google.com/kubernetes-engine) and [Amazon Elastic Kubernetes Service (EKS)](https://aws.amazon.com/eks/). Other Kubernetes services may also work, but your mileage may vary.
- Node configuration is shown as it is forced to ensure pod vCPU / memory ratios and to avoid scaling during **performance testing**.
- In production deployments, there is no need to assign pods to nodes. A minimum of three nodes in three different availability zones is strongly recommended to align with resilient cloud architecture practices.
- In production deployments, there is no need to assign pods to specific nodes. A minimum of three nodes per node group in three different availability zones is strongly recommended to align with resilient cloud architecture practices.
Next are the backend components that run on static compute VMs via Omnibus (or External PaaS
services where applicable):
@ -2376,7 +2375,8 @@ services where applicable):
<!-- markdownlint-disable MD029 -->
1. Can be optionally run on reputable third-party external PaaS PostgreSQL solutions. See [Recommended cloud providers and services](index.md#recommended-cloud-providers-and-services) for more information.
- [Google Cloud SQL](https://cloud.google.com/sql/docs/postgres/high-availability#normal) and [Amazon RDS](https://aws.amazon.com/rds/) are known to work.
- [Google AlloyDB](https://cloud.google.com/alloydb) and [Amazon RDS Multi-AZ DB clusters](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/multi-az-db-clusters-concepts.html) have not been tested and are not recommended. Both solutions are specifically not expected to work with GitLab Geo.
- [Google AlloyDB](https://cloud.google.com/alloydb) and [Amazon RDS Multi-AZ DB cluster](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/multi-az-db-clusters-concepts.html) have not been tested and are not recommended. Both solutions are specifically not expected to work with GitLab Geo.
- Note that [Amazon RDS Multi-AZ DB instance](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.MultiAZSingleStandby.html) is a separate product and is supported.
- [Amazon Aurora](https://aws.amazon.com/rds/aurora/) is **incompatible** with load balancing, which is enabled by default in [14.4.0](../../update/index.md#1440).
- Consul is primarily used for Omnibus PostgreSQL high availability so can be ignored when using a PostgreSQL PaaS setup. However, Consul is also used optionally by Prometheus for Omnibus auto host discovery.
2. Can be optionally run on reputable third-party external PaaS Redis solutions. See [Recommended cloud providers and services](index.md#recommended-cloud-providers-and-services) for more information.

View File

@ -47,7 +47,8 @@ costly-to-operate environment by using the
<!-- markdownlint-disable MD029 -->
1. Can be optionally run on reputable third-party external PaaS PostgreSQL solutions. See [Recommended cloud providers and services](index.md#recommended-cloud-providers-and-services) for more information.
- [Google Cloud SQL](https://cloud.google.com/sql/docs/postgres/high-availability#normal) and [Amazon RDS](https://aws.amazon.com/rds/) are known to work.
- [Google AlloyDB](https://cloud.google.com/alloydb) and [Amazon RDS Multi-AZ DB clusters](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/multi-az-db-clusters-concepts.html) have not been tested and are not recommended. Both solutions are specifically not expected to work with GitLab Geo.
- [Google AlloyDB](https://cloud.google.com/alloydb) and [Amazon RDS Multi-AZ DB cluster](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/multi-az-db-clusters-concepts.html) have not been tested and are not recommended. Both solutions are specifically not expected to work with GitLab Geo.
- Note that [Amazon RDS Multi-AZ DB instance](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.MultiAZSingleStandby.html) is a separate product and is supported.
- [Amazon Aurora](https://aws.amazon.com/rds/aurora/) is **incompatible** with load balancing, which is enabled by default in [14.4.0](../../update/index.md#1440).
- Consul is primarily used for Omnibus PostgreSQL high availability so can be ignored when using a PostgreSQL PaaS setup. However, Consul is also used optionally by Prometheus for Omnibus auto host discovery.
2. Can be optionally run on reputable third-party external PaaS Redis solutions. See [Recommended cloud providers and services](index.md#recommended-cloud-providers-and-services) for more information.
@ -1226,7 +1227,7 @@ in the second step, do not supply the `EXTERNAL_URL` value.
# PostgreSQL configuration
postgresql['listen_address'] = '0.0.0.0'
postgresql['max_connections'] = 200
postgresql['max_connections'] = 500
# Prevent database migrations from running on upgrade automatically
gitlab_rails['auto_migrate'] = false
@ -2284,16 +2285,14 @@ Refer to [epic 6127](https://gitlab.com/groups/gitlab-org/-/epics/6127) for more
The following tables and diagram detail the hybrid environment using the same formats
as the normal environment above.
First are the components that run in Kubernetes. The recommendation at this time is to
use Google Cloud's Kubernetes Engine (GKE) or AWS Elastic Kubernetes Service (EKS) and associated machine types, but the memory
and CPU requirements should translate to most other providers. We hope to update this in the
future with further specific cloud provider details.
First are the components that run in Kubernetes. These run across several node groups, although you can change
the overall makeup as desired as long as the minimum CPU and memory requirements are observed.
| Service | Nodes | Configuration | GCP | AWS | Min Allocatable CPUs and Memory |
|-----------------------------------------------|-------|-------------------------|-----------------|--------------|---------------------------------|
| Webservice | 5 | 16 vCPU, 14.4 GB memory | `n1-highcpu-16` | `c5.4xlarge` | 79.5 vCPU, 62 GB memory |
| Sidekiq | 3 | 4 vCPU, 15 GB memory | `n1-standard-4` | `m5.xlarge` | 11.8 vCPU, 38.9 GB memory |
| Supporting services such as NGINX, Prometheus | 2 | 2 vCPU, 7.5 GB memory | `n1-standard-2` | `m5.large` | 3.9 vCPU, 11.8 GB memory |
| Service Node Group | Nodes | Configuration | GCP | AWS | Min Allocatable CPUs and Memory |
|---------------------|-------|-------------------------|-----------------|--------------|---------------------------------|
| Webservice | 5 | 16 vCPU, 14.4 GB memory | `n1-highcpu-16` | `c5.4xlarge` | 79.5 vCPU, 62 GB memory |
| Sidekiq | 3 | 4 vCPU, 15 GB memory | `n1-standard-4` | `m5.xlarge` | 11.8 vCPU, 38.9 GB memory |
| Supporting services | 2 | 2 vCPU, 7.5 GB memory | `n1-standard-2` | `m5.large` | 3.9 vCPU, 11.8 GB memory |
- For this setup, we **recommend** and regularly [test](index.md#validation-and-test-results)
[Google Kubernetes Engine (GKE)](https://cloud.google.com/kubernetes-engine) and [Amazon Elastic Kubernetes Service (EKS)](https://aws.amazon.com/eks/). Other Kubernetes services may also work, but your mileage may vary.
@ -2319,7 +2318,8 @@ services where applicable):
<!-- markdownlint-disable MD029 -->
1. Can be optionally run on reputable third-party external PaaS PostgreSQL solutions. See [Recommended cloud providers and services](index.md#recommended-cloud-providers-and-services) for more information.
- [Google Cloud SQL](https://cloud.google.com/sql/docs/postgres/high-availability#normal) and [Amazon RDS](https://aws.amazon.com/rds/) are known to work.
- [Google AlloyDB](https://cloud.google.com/alloydb) and [Amazon RDS Multi-AZ DB clusters](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/multi-az-db-clusters-concepts.html) have not been tested and are not recommended. Both solutions are specifically not expected to work with GitLab Geo.
- [Google AlloyDB](https://cloud.google.com/alloydb) and [Amazon RDS Multi-AZ DB cluster](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/multi-az-db-clusters-concepts.html) have not been tested and are not recommended. Both solutions are specifically not expected to work with GitLab Geo.
- Note that [Amazon RDS Multi-AZ DB instance](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.MultiAZSingleStandby.html) is a separate product and is supported.
- [Amazon Aurora](https://aws.amazon.com/rds/aurora/) is **incompatible** with load balancing enabled by default in [14.4.0](../../update/index.md#1440).
- Consul is primarily used for Omnibus PostgreSQL high availability so can be ignored when using a PostgreSQL PaaS setup. However, Consul is also used optionally by Prometheus for Omnibus auto host discovery.
2. Can be optionally run on reputable third-party external PaaS Redis solutions. See [Recommended cloud providers and services](index.md#recommended-cloud-providers-and-services) for more information.

View File

@ -207,7 +207,8 @@ Several cloud provider services are known not to support the above or have been
- [Amazon Aurora](https://aws.amazon.com/rds/aurora/) is incompatible and not supported. See [14.4.0](../../update/index.md#1440) for more details.
- [Azure Database for PostgreSQL Single Server](https://azure.microsoft.com/en-gb/products/postgresql/#overview) (Single / Flexible) is **strongly not recommended** for use due to notable performance / stability issues or missing functionality. See [Recommendation Notes for Azure](#recommendation-notes-for-azure) for more details.
- [Google AlloyDB](https://cloud.google.com/alloydb) and [Amazon RDS Multi-AZ DB clusters](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/multi-az-db-clusters-concepts.html) have not been tested and are not recommended. Both solutions are specifically not expected to work with GitLab Geo.
- [Google AlloyDB](https://cloud.google.com/alloydb) and [Amazon RDS Multi-AZ DB cluster](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/multi-az-db-clusters-concepts.html) have not been tested and are not recommended. Both solutions are specifically not expected to work with GitLab Geo.
- Note that [Amazon RDS Multi-AZ DB instance](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.MultiAZSingleStandby.html) is a separate product and is supported.
### Recommendation notes for Azure

View File

@ -1250,6 +1250,10 @@ curl --request POST --header "PRIVATE-TOKEN: <your-token>" \
| `printing_merge_request_link_enabled` | boolean | **{dotted-circle}** No | Show link to create/view merge request when pushing from the command line. |
| `public_builds` | boolean | **{dotted-circle}** No | If `true`, jobs can be viewed by non-project members. |
| `releases_access_level` | string | **{dotted-circle}** No | One of `disabled`, `private`, or `enabled`. |
| `environments_access_level` | string | **{dotted-circle}** No | One of `disabled`, `private`, or `enabled`. |
| `feature_flags_access_level` | string | **{dotted-circle}** No | One of `disabled`, `private`, or `enabled`. |
| `infrastructure_access_level` | string | **{dotted-circle}** No | One of `disabled`, `private`, or `enabled`. |
| `monitor_access_level` | string | **{dotted-circle}** No | One of `disabled`, `private`, or `enabled`. |
| `remove_source_branch_after_merge` | boolean | **{dotted-circle}** No | Enable `Delete source branch` option by default for all new merge requests. |
| `repository_access_level` | string | **{dotted-circle}** No | One of `disabled`, `private`, or `enabled`. |
| `repository_storage` | string | **{dotted-circle}** No | Which storage shard the repository is on. _(administrator only)_ |
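The new `*_access_level` attributes take the same `disabled` / `private` / `enabled` values as the existing ones. As a minimal sketch of setting them on project creation (the token, host, project name, and chosen values below are illustrative, not part of this commit):

```shell
# Sketch: create a project with the new access levels set explicitly.
# <your-token>, the host, and all values are placeholders.
curl --request POST --header "PRIVATE-TOKEN: <your-token>" \
     --data "name=example-project" \
     --data "environments_access_level=private" \
     --data "feature_flags_access_level=disabled" \
     --data "infrastructure_access_level=private" \
     --data "monitor_access_level=enabled" \
     "https://gitlab.example.com/api/v4/projects"
```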
@ -1330,6 +1334,10 @@ POST /projects/user/:user_id
| `printing_merge_request_link_enabled` | boolean | **{dotted-circle}** No | Show link to create/view merge request when pushing from the command line. |
| `public_builds` | boolean | **{dotted-circle}** No | If `true`, jobs can be viewed by non-project members. |
| `releases_access_level` | string | **{dotted-circle}** No | One of `disabled`, `private`, or `enabled`. |
| `environments_access_level` | string | **{dotted-circle}** No | One of `disabled`, `private`, or `enabled`. |
| `feature_flags_access_level` | string | **{dotted-circle}** No | One of `disabled`, `private`, or `enabled`. |
| `infrastructure_access_level` | string | **{dotted-circle}** No | One of `disabled`, `private`, or `enabled`. |
| `monitor_access_level` | string | **{dotted-circle}** No | One of `disabled`, `private`, or `enabled`. |
| `remove_source_branch_after_merge` | boolean | **{dotted-circle}** No | Enable `Delete source branch` option by default for all new merge requests. |
| `repository_access_level` | string | **{dotted-circle}** No | One of `disabled`, `private`, or `enabled`. |
| `repository_storage` | string | **{dotted-circle}** No | Which storage shard the repository is on. _(administrators only)_ |
@ -1434,6 +1442,10 @@ Supported attributes:
| `printing_merge_request_link_enabled` | boolean | **{dotted-circle}** No | Show link to create/view merge request when pushing from the command line. |
| `public_builds` | boolean | **{dotted-circle}** No | If `true`, jobs can be viewed by non-project members. |
| `releases_access_level` | string | **{dotted-circle}** No | One of `disabled`, `private`, or `enabled`. |
| `environments_access_level` | string | **{dotted-circle}** No | One of `disabled`, `private`, or `enabled`. |
| `feature_flags_access_level` | string | **{dotted-circle}** No | One of `disabled`, `private`, or `enabled`. |
| `infrastructure_access_level` | string | **{dotted-circle}** No | One of `disabled`, `private`, or `enabled`. |
| `monitor_access_level` | string | **{dotted-circle}** No | One of `disabled`, `private`, or `enabled`. |
| `remove_source_branch_after_merge` | boolean | **{dotted-circle}** No | Enable `Delete source branch` option by default for all new merge requests. |
| `repository_access_level` | string | **{dotted-circle}** No | One of `disabled`, `private`, or `enabled`. |
| `repository_storage` | string | **{dotted-circle}** No | Which storage shard the repository is on. _(administrators only)_ |
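Because the commit adds these attributes to the shared update parameters as well, they can also be changed later through the edit endpoint (`PUT /projects/:id`); a minimal sketch with a hypothetical project ID:

```shell
# Sketch: update the monitor access level on an existing project.
# The project ID (42), token, and host are placeholders.
curl --request PUT --header "PRIVATE-TOKEN: <your-token>" \
     --data "monitor_access_level=private" \
     "https://gitlab.example.com/api/v4/projects/42"
```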

View File

@ -24,6 +24,40 @@ The analyzer uses the [OWASP Zed Attack Proxy](https://www.zaproxy.org/) (ZAP) t
to attack your application and produce a more extensive security report. It can be very
useful when combined with [Review Apps](../../../ci/review_apps/index.md).
## Templates
> - The DAST latest template was [introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/254325) in GitLab 13.8.
> - All DAST templates were [updated](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/62597) to DAST_VERSION: 2 in GitLab 14.0.
> - All DAST templates were [updated](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/87183) to DAST_VERSION: 3 in GitLab 15.0.
GitLab DAST configuration is defined in CI/CD templates. Updates to the template are provided with
GitLab upgrades, allowing you to benefit from any improvements and additions.
Available templates:
- [`DAST.gitlab-ci.yml`](https://gitlab.com/gitlab-org/gitlab/-/blob/master/lib/gitlab/ci/templates/Security/DAST.gitlab-ci.yml): Stable version of the DAST CI/CD template.
- [`DAST.latest.gitlab-ci.yml`](https://gitlab.com/gitlab-org/gitlab/-/blob/master/lib/gitlab/ci/templates/Security/DAST.latest.gitlab-ci.yml): Latest version of the DAST template.
WARNING:
The latest version of the template may include breaking changes. Use the stable template unless you
need a feature provided only in the latest template.
For more information about template versioning, see the
[CI/CD documentation](../../../development/cicd/templates.md#latest-version).
## DAST versions
By default, the DAST template uses the latest major version of the DAST Docker image. You can choose
how DAST updates, using the `DAST_VERSION` variable:
- Automatically update DAST with new features and fixes by pinning to a major
version (such as `1`).
- Only update fixes by pinning to a minor version (such as `1.6`).
- Prevent all updates by pinning to a specific version (such as `1.6.4`).
Find the latest DAST versions on the [DAST releases](https://gitlab.com/gitlab-org/security-products/dast/-/releases)
page.
## DAST run options
You can use DAST to examine your web application:
@ -46,58 +80,32 @@ To enable DAST to run automatically, either:
- Enable [Auto DAST](../../../topics/autodevops/stages.md#auto-dast) (provided
by [Auto DevOps](../../../topics/autodevops/index.md)).
- [Include the DAST template](#include-the-dast-template) in your existing
`.gitlab-ci.yml` file.
- [Configure DAST using the UI](#configure-dast-using-the-ui).
- [Edit the `.gitlab-ci.yml` file manually](#edit-the-gitlabciyml-file-manually).
- [Use an automatically configured merge request](#configure-dast-using-the-ui).
#### Include the DAST template
#### Edit the `.gitlab-ci.yml` file manually
> - This template was [updated](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/62597) to DAST_VERSION: 2 in GitLab 14.0.
> - This template was [updated](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/87183) to DAST_VERSION: 3 in GitLab 15.0.
If you want to manually add DAST to your application, the DAST job is defined
in a CI/CD template file. Updates to the template are provided with GitLab
upgrades, allowing you to benefit from any improvements and additions.
In this method you manually edit the existing `.gitlab-ci.yml` file. Use this method if your GitLab CI/CD configuration file is complex.
To include the DAST template:
1. Select the CI/CD template you want to use:
1. On the top bar, select **Main menu > Projects** and find your project.
1. On the left sidebar, select **CI/CD > Editor**.
1. Copy and paste the following to the bottom of the `.gitlab-ci.yml` file.
- [`DAST.gitlab-ci.yml`](https://gitlab.com/gitlab-org/gitlab/-/blob/master/lib/gitlab/ci/templates/Security/DAST.gitlab-ci.yml):
Stable version of the DAST CI/CD template.
- [`DAST.latest.gitlab-ci.yml`](https://gitlab.com/gitlab-org/gitlab/-/blob/master/lib/gitlab/ci/templates/Security/DAST.latest.gitlab-ci.yml):
Latest version of the DAST template. ([Introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/254325)
in GitLab 13.8).
To use the DAST stable template:
WARNING:
The latest version of the template may include breaking changes. Use the
stable template unless you need a feature provided only in the latest template.
```yaml
include:
- template: DAST.gitlab-ci.yml
```
For more information about template versioning, see the
[CI/CD documentation](../../../development/cicd/templates.md#latest-version).
To use the DAST latest template:
1. Add a `dast` stage to your GitLab CI stages configuration:
```yaml
stages:
- dast
```
1. Add the template to GitLab, based on your version of GitLab:
- In GitLab 11.9 and later, [include](../../../ci/yaml/index.md#includetemplate)
the template by adding the following to your `.gitlab-ci.yml` file:
```yaml
include:
- template: <template_file.yml>
variables:
DAST_WEBSITE: https://example.com
```
- In GitLab 11.8 and earlier, add the contents of the template to your
`.gitlab_ci.yml` file.
```yaml
include:
- template: DAST.latest.gitlab-ci.yml
```
1. Define the URL to be scanned by DAST by using one of these methods:
@ -125,9 +133,13 @@ To include the DAST template:
You can see an example of this in our
[Auto DevOps CI YAML](https://gitlab.com/gitlab-org/gitlab/-/blob/master/lib/gitlab/ci/templates/Jobs/Deploy.gitlab-ci.yml)
file.
1. Select the **Validate** tab, then select **Validate pipeline**.
The message **Simulation completed successfully** indicates the file is valid.
1. Select the **Edit** tab.
1. Optional. In **Commit message**, customize the commit message.
1. Select **Commit changes**.
The included template creates a `dast` job in your CI/CD pipeline and scans
your project's running application for possible vulnerabilities.
Pipelines now include a DAST job.
The results are saved as a
[DAST report artifact](../../../ci/yaml/artifacts_reports.md#artifactsreportsdast)
@ -137,21 +149,12 @@ always take the latest DAST artifact available. Behind the scenes, the
is used to run the tests on the specified URL and scan it for possible
vulnerabilities.
By default, the DAST template uses the latest major version of the DAST Docker
image. Using the `DAST_VERSION` variable, you can choose how DAST updates:
- Automatically update DAST with new features and fixes by pinning to a major
version (such as `1`).
- Only update fixes by pinning to a minor version (such as `1.6`).
- Prevent all updates by pinning to a specific version (such as `1.6.4`).
Find the latest DAST versions on the [Releases](https://gitlab.com/gitlab-org/security-products/dast/-/releases)
page.
#### Configure DAST using the UI
You can enable or configure DAST settings using the UI. The generated settings are formatted so they
can be conveniently pasted into the `.gitlab-ci.yml` file.
In this method you select options in the UI. Based on your selections, a code
snippet is created that you paste into the `.gitlab-ci.yml` file.
To configure DAST using the UI:
1. On the top bar, select **Main menu > Projects** and find your project.
1. On the left sidebar, select **Security & Compliance > Configuration**.
@ -168,10 +171,13 @@ can be conveniently pasted into the `.gitlab-ci.yml` file.
1. To add the snippet to your project's `.gitlab-ci.yml` file, select
**Copy code and open `.gitlab-ci.yml` file**. The Pipeline Editor opens.
1. Paste the snippet into the `.gitlab-ci.yml` file.
1. Select the **Lint** tab to confirm the edited `.gitlab-ci.yml` file is valid.
1. Select the **Edit** tab, then select **Commit changes**.
1. Select the **Validate** tab, then select **Validate pipeline**.
The message **Simulation completed successfully** indicates the file is valid.
1. Select the **Edit** tab.
1. Optional. In **Commit message**, customize the commit message.
1. Select **Commit changes**.
When the snippet is committed to the `.gitlab-ci.yml` file, pipelines include a DAST job.
Pipelines now include a DAST job.
### API scan

View File

@ -1,6 +1,6 @@
---
stage: none
group: unassigned
stage: Release
group: Release
info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://about.gitlab.com/handbook/product/ux/technical-writing/#assignments
---

View File

@ -19,10 +19,13 @@ under the `TOP_LEVEL_ROUTES`, `PROJECT_WILDCARD_ROUTES` and `GROUP_ROUTES` lists
## Limitations on project and group names
- Special characters are not permitted at the start or end of project or group names. They are permitted anywhere else in the name.
- Project or group names cannot end in `.git` or `.atom`.
- Project or group names must start with a letter, digit, emoji, or '_'.
- Project or group names can only contain letters, digits, emojis, '_', '.', '+', dashes, or spaces.
- Paths can only contain letters, digits, "_", "-", and "."
- Project or group slugs must start with a letter or digit.
- Project or group slugs can only contain letters, digits, '_', '.', '+', or dashes.
- Project or group slugs must not contain consecutive special characters.
- Project or group slugs cannot end with a special character.
- Project or group slugs cannot end in `.git` or `.atom`.
## Reserved project names

View File

@ -81,6 +81,10 @@ module API
expose(:container_registry_access_level, documentation: { type: 'string', example: 'enabled' }) { |project, options| project_feature_string_access_level(project, :container_registry) }
expose(:security_and_compliance_access_level, documentation: { type: 'string', example: 'enabled' }) { |project, options| project_feature_string_access_level(project, :security_and_compliance) }
expose(:releases_access_level, documentation: { type: 'string', example: 'enabled' }) { |project, options| project_feature_string_access_level(project, :releases) }
expose(:environments_access_level, documentation: { type: 'string', example: 'enabled' }) { |project, options| project_feature_string_access_level(project, :environments) }
expose(:feature_flags_access_level, documentation: { type: 'string', example: 'enabled' }) { |project, options| project_feature_string_access_level(project, :feature_flags) }
expose(:infrastructure_access_level, documentation: { type: 'string', example: 'enabled' }) { |project, options| project_feature_string_access_level(project, :infrastructure) }
expose(:monitor_access_level, documentation: { type: 'string', example: 'enabled' }) { |project, options| project_feature_string_access_level(project, :monitor) }
expose :emails_disabled, documentation: { type: 'boolean' }
expose :shared_runners_enabled, documentation: { type: 'boolean' }

View File

@ -37,6 +37,10 @@ module API
optional :container_registry_access_level, type: String, values: %w(disabled private enabled), desc: 'Controls visibility of the container registry. One of `disabled`, `private` or `enabled`. `private` will make the container registry accessible only to project members (reporter role and above). `enabled` will make the container registry accessible to everyone who has access to the project. `disabled` will disable the container registry'
optional :security_and_compliance_access_level, type: String, values: %w(disabled private enabled), desc: 'Security and compliance access level. One of `disabled`, `private` or `enabled`'
optional :releases_access_level, type: String, values: %w(disabled private enabled), desc: 'Releases access level. One of `disabled`, `private` or `enabled`'
optional :environments_access_level, type: String, values: %w(disabled private enabled), desc: 'Environments access level. One of `disabled`, `private` or `enabled`'
optional :feature_flags_access_level, type: String, values: %w(disabled private enabled), desc: 'Feature flags access level. One of `disabled`, `private` or `enabled`'
optional :infrastructure_access_level, type: String, values: %w(disabled private enabled), desc: 'Infrastructure access level. One of `disabled`, `private` or `enabled`'
optional :monitor_access_level, type: String, values: %w(disabled private enabled), desc: 'Monitor access level. One of `disabled`, `private` or `enabled`'
optional :emails_disabled, type: Boolean, desc: 'Disable email notifications'
optional :show_default_award_emojis, type: Boolean, desc: 'Show default award emojis'
@ -183,6 +187,10 @@ module API
:mr_default_target_self,
:enforce_auth_checks_on_uploads,
:releases_access_level,
:environments_access_level,
:feature_flags_access_level,
:infrastructure_access_level,
:monitor_access_level,
# TODO: remove in API v5, replaced by *_access_level
:issues_enabled,

View File

@ -62,10 +62,6 @@ module API
authorize_admin_container_image!
repository.delete_scheduled!
unless Feature.enabled?(:container_registry_delete_repository_with_cron_worker)
DeleteContainerRepositoryWorker.perform_async(current_user.id, repository.id) # rubocop:disable CodeReuse/Worker
end
track_package_event('delete_repository', :container, user: current_user, project: user_project, namespace: user_project.namespace)
status :accepted
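With the feature-flag branch removed, every call to this endpoint (exposed as `DELETE /projects/:id/registry/repositories/:repository_id` in the container registry API) now only marks the repository as `delete_scheduled` and returns `202 Accepted`, leaving the actual deletion to the cron worker. A sketch of invoking it (IDs, token, and host are placeholders):

```shell
# Sketch: schedule deletion of a container registry repository.
# Expect HTTP 202; the deletion itself happens asynchronously.
curl --request DELETE --header "PRIVATE-TOKEN: <your-token>" \
     "https://gitlab.example.com/api/v4/projects/42/registry/repositories/7"
```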

View File

@ -22,8 +22,8 @@ module Gitlab
# rubocop:disable Metrics/ParameterLists
def validate!(
url,
schemes:,
ports: [],
schemes: [],
allow_localhost: false,
allow_local_network: true,
allow_object_storage: false,
@ -35,19 +35,11 @@ module Gitlab
return [nil, nil] if url.nil?
raise ArgumentError, 'The schemes is a required argument' if schemes.blank?
# Param url can be a string, URI or Addressable::URI
uri = parse_url(url)
# TODO: This is a temporary log. It will be removed in
# https://gitlab.com/gitlab-org/gitlab/-/merge_requests/104285
if schemes.blank?
Gitlab::AppJsonLogger.info(
message: 'Blank scheme used in Gitlab::UrlBlocker',
uri_scheme: uri.scheme,
caller: Gitlab::BacktraceCleaner.clean_backtrace(caller)
)
end
validate_uri(
uri: uri,
schemes: schemes,

View File

@ -65,11 +65,7 @@ if ! cd "$app_root" ; then
echo "Failed to cd into $app_root, exiting!"; exit 1
fi
if [ -z "$SIDEKIQ_WORKERS" ]; then
sidekiq_pid_path="$pid_path/sidekiq.pid"
else
sidekiq_pid_path="$pid_path/sidekiq-cluster.pid"
fi
sidekiq_pid_path="$pid_path/sidekiq-cluster.pid"
### Init Script functions

View File

@ -10,13 +10,14 @@ Type=notify
User=git
WorkingDirectory=/home/git/gitlab
Environment=RAILS_ENV=production
ExecStart=/usr/local/bin/bundle exec sidekiq --config /home/git/gitlab/config/sidekiq_queues.yml --environment production
Environment=SIDEKIQ_QUEUES=*
ExecStart=/home/git/gitlab/bin/sidekiq-cluster $SIDEKIQ_QUEUES -P /home/git/gitlab/tmp/pids/sidekiq.pid
NotifyAccess=all
PIDFile=/home/git/gitlab/tmp/pids/sidekiq.pid
Restart=on-failure
RestartSec=1
SyslogIdentifier=gitlab-sidekiq
Slice=gitlab.slice
WatchdogSec=10
[Install]
WantedBy=gitlab.target
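After switching the unit to `sidekiq-cluster` as above, systemd must re-read the unit and restart the service for the new `ExecStart` and watchdog settings to take effect; a typical sequence on a source install would be:

```shell
# Reload unit definitions, restart Sidekiq, then verify the service is healthy.
sudo systemctl daemon-reload
sudo systemctl restart gitlab-sidekiq.service
sudo systemctl status gitlab-sidekiq.service
```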

View File

@ -5,6 +5,8 @@ module SystemCheck
class SidekiqCheck < BaseCheck
set_name 'Sidekiq:'
SYSTEMD_UNIT_PATH = '/run/systemd/units/invocation:gitlab-sidekiq.service'
def multi_check
check_sidekiq_running
only_one_sidekiq_running
@ -37,9 +39,9 @@ module SystemCheck
$stdout.print 'Number of Sidekiq processes (cluster/worker) ... '
if (cluster_count == 1 && worker_count > 0) || (cluster_count == 0 && worker_count == 1)
if cluster_count == 1 && worker_count >= 1
$stdout.puts "#{cluster_count}/#{worker_count}".color(:green)
elsif File.symlink?('/run/systemd/units/invocation:gitlab-sidekiq.service')
elsif File.symlink?(SYSTEMD_UNIT_PATH)
$stdout.puts "#{cluster_count}/#{worker_count}".color(:red)
try_fixing_it(
'sudo systemctl restart gitlab-sidekiq.service'

View File

@ -20302,6 +20302,12 @@ msgstr ""
msgid "Hierarchy|Planning hierarchy"
msgstr ""
msgid "Hierarchy|Something went wrong while fetching children."
msgstr ""
msgid "Hierarchy|Something went wrong while removing a child item."
msgstr ""
msgid "Hierarchy|Take the work items survey"
msgstr ""
@ -23903,9 +23909,6 @@ msgstr ""
msgid "Key (PEM)"
msgstr ""
msgid "Key result"
msgstr ""
msgid "Key:"
msgstr ""
@ -26948,6 +26951,9 @@ msgstr ""
msgid "Name"
msgstr ""
msgid "Name can contain only letters, digits, emojis, '_', '.', '+', dashes, or spaces"
msgstr ""
msgid "Name can't be blank"
msgstr ""
@ -26957,6 +26963,9 @@ msgstr ""
msgid "Name is already taken."
msgstr ""
msgid "Name must start with a letter, digit, emoji, or '_'"
msgstr ""
msgid "Name new label"
msgstr ""
@ -46877,7 +46886,7 @@ msgstr ""
msgid "WorkItem|Iteration"
msgstr ""
msgid "WorkItem|Key result"
msgid "WorkItem|Key Result"
msgstr ""
msgid "WorkItem|Milestone"
@ -46964,6 +46973,12 @@ msgstr ""
msgid "WorkItem|Something went wrong while fetching milestones. Please try again."
msgstr ""
msgid "WorkItem|Something went wrong while removing child."
msgstr ""
msgid "WorkItem|Something went wrong while undoing child removal."
msgstr ""
msgid "WorkItem|Something went wrong while updating the %{workItemType}. Please try again."
msgstr ""

View File

@ -120,22 +120,6 @@ RSpec.describe Projects::Registry::RepositoriesController do
expect_snowplow_event(category: anything, action: 'delete_repository')
end
context 'with container_registry_delete_repository_with_cron_worker disabled' do
before do
stub_feature_flags(container_registry_delete_repository_with_cron_worker: false)
end
it 'schedules a job to delete a repository' do
expect(DeleteContainerRepositoryWorker).to receive(:perform_async).with(user.id, repository.id)
expect { delete_repository(repository) }
.to change { repository.reload.status }.from(nil).to('delete_scheduled')
expect(repository.reload).to be_delete_scheduled
expect(response).to have_gitlab_http_status(:no_content)
end
end
end
end
end

View File

@ -1,14 +0,0 @@
/**
* Returns a clone of the given object with all __typename keys omitted,
* including deeply nested ones.
*
* Only works with JSON-serializable objects.
*
* @param {object} An object with __typename keys (e.g., a GraphQL response)
* @returns {object} A new object with no __typename keys
*/
export const stripTypenames = (object) => {
return JSON.parse(
JSON.stringify(object, (key, value) => (key === '__typename' ? undefined : value)),
);
};

View File

@ -1,23 +0,0 @@
import { stripTypenames } from './graphql_helpers';
describe('stripTypenames', () => {
it.each`
input | expected
${{}} | ${{}}
${{ __typename: 'Foo' }} | ${{}}
${{ bar: 'bar', __typename: 'Foo' }} | ${{ bar: 'bar' }}
${{ bar: { __typename: 'Bar' }, __typename: 'Foo' }} | ${{ bar: {} }}
${{ bar: [{ __typename: 'Bar' }], __typename: 'Foo' }} | ${{ bar: [{}] }}
${[]} | ${[]}
${[{ __typename: 'Foo' }]} | ${[{}]}
${[{ bar: [{ a: 1, __typename: 'Bar' }] }]} | ${[{ bar: [{ a: 1 }] }]}
`('given $input returns $expected, with all __typename keys removed', ({ input, expected }) => {
const actual = stripTypenames(input);
expect(actual).toEqual(expected);
expect(input).not.toBe(actual);
});
it('given null returns null', () => {
expect(stripTypenames(null)).toEqual(null);
});
});

View File

@ -606,6 +606,50 @@ describe('DiffsStoreActions', () => {
params: { commit_id: '123', w: '0' },
});
});
describe('version parameters', () => {
const diffId = '4';
const startSha = 'abc';
const pathRoot = 'a/a/-/merge_requests/1';
let file;
let getters;
beforeAll(() => {
file = { load_collapsed_diff_url: '/load/collapsed/diff/url' };
getters = {};
});
beforeEach(() => {
jest.spyOn(axios, 'get').mockReturnValue(Promise.resolve({ data: {} }));
});
it('fetches the data when there is no mergeRequestDiff', () => {
diffActions.loadCollapsedDiff({ commit() {}, getters, state }, file);
expect(axios.get).toHaveBeenCalledWith(file.load_collapsed_diff_url, {
params: expect.any(Object),
});
});
it.each`
desc | versionPath | start_sha | diff_id
${'no additional version information'} | ${`${pathRoot}?search=terms`} | ${undefined} | ${undefined}
${'the diff_id'} | ${`${pathRoot}?diff_id=${diffId}`} | ${undefined} | ${diffId}
${'the start_sha'} | ${`${pathRoot}?start_sha=${startSha}`} | ${startSha} | ${undefined}
${'all available version information'} | ${`${pathRoot}?diff_id=${diffId}&start_sha=${startSha}`} | ${startSha} | ${diffId}
`('fetches the data and includes $desc', ({ versionPath, start_sha, diff_id }) => {
jest.spyOn(axios, 'get').mockReturnValue(Promise.resolve({ data: {} }));
diffActions.loadCollapsedDiff(
{ commit() {}, getters, state: { mergeRequestDiff: { version_path: versionPath } } },
file,
);
expect(axios.get).toHaveBeenCalledWith(file.load_collapsed_diff_url, {
params: expect.objectContaining({ start_sha, diff_id }),
});
});
});
});
describe('toggleFileDiscussions', () => {

View File

@ -2,30 +2,64 @@ import { getDerivedMergeRequestInformation } from '~/diffs/utils/merge_request';
import { diffMetadata } from '../mock_data/diff_metadata';
describe('Merge Request utilities', () => {
const derivedMrInfo = {
const derivedBaseInfo = {
mrPath: '/gitlab-org/gitlab-test/-/merge_requests/4',
userOrGroup: 'gitlab-org',
project: 'gitlab-test',
id: '4',
};
const derivedVersionInfo = {
diffId: '4',
startSha: 'eb227b3e214624708c474bdab7bde7afc17cefcc',
};
const noVersion = {
diffId: undefined,
startSha: undefined,
};
const unparseableEndpoint = {
mrPath: undefined,
userOrGroup: undefined,
project: undefined,
id: undefined,
...noVersion,
};
describe('getDerivedMergeRequestInformation', () => {
const endpoint = `${diffMetadata.latest_version_path}.json?searchParam=irrelevant`;
let endpoint = `${diffMetadata.latest_version_path}.json?searchParam=irrelevant`;
it.each`
argument | response
${{ endpoint }} | ${derivedMrInfo}
${{ endpoint }} | ${{ ...derivedBaseInfo, ...noVersion }}
${{}} | ${unparseableEndpoint}
${{ endpoint: undefined }} | ${unparseableEndpoint}
${{ endpoint: null }} | ${unparseableEndpoint}
`('generates the correct derived results based on $argument', ({ argument, response }) => {
expect(getDerivedMergeRequestInformation(argument)).toStrictEqual(response);
});
describe('version information', () => {
const bare = diffMetadata.latest_version_path;
endpoint = diffMetadata.merge_request_diffs[0].compare_path;
it('still gets the correct derived information', () => {
expect(getDerivedMergeRequestInformation({ endpoint })).toMatchObject(derivedBaseInfo);
});
it.each`
url | versionPart
${endpoint} | ${derivedVersionInfo}
${`${bare}?diff_id=${derivedVersionInfo.diffId}`} | ${{ ...derivedVersionInfo, startSha: undefined }}
${`${bare}?start_sha=${derivedVersionInfo.startSha}`} | ${{ ...derivedVersionInfo, diffId: undefined }}
`(
'generates the correct derived version information based on $url',
({ url, versionPart }) => {
expect(getDerivedMergeRequestInformation({ endpoint: url })).toMatchObject(versionPart);
},
);
it('extracts nothing if there is no available version-like information in the URL', () => {
expect(getDerivedMergeRequestInformation({ endpoint: bare })).toMatchObject(noVersion);
});
});
});
});

View File

@ -14,7 +14,6 @@ import VueApollo from 'vue-apollo';
import MockAdapter from 'axios-mock-adapter';
import createMockApollo from 'helpers/mock_apollo_helper';
import { shallowMountExtended } from 'helpers/vue_test_utils_helper';
import { stripTypenames } from 'helpers/graphql_helpers';
import waitForPromises from 'helpers/wait_for_promises';
import { GRAPHQL_PAGE_SIZE } from '~/packages_and_registries/dependency_proxy/constants';
import axios from '~/lib/utils/axios_utils';
@ -190,7 +189,7 @@ describe('DependencyProxyApp', () => {
it('shows list', () => {
expect(findManifestList().props()).toMatchObject({
manifests: proxyManifests(),
pagination: stripTypenames(pagination()),
pagination: pagination(),
});
});

View File

@ -1,5 +1,4 @@
import { GlKeysetPagination } from '@gitlab/ui';
import { stripTypenames } from 'helpers/graphql_helpers';
import { shallowMountExtended } from 'helpers/vue_test_utils_helper';
import ManifestRow from '~/packages_and_registries/dependency_proxy/components/manifest_row.vue';
@ -14,7 +13,7 @@ describe('Manifests List', () => {
const defaultProps = {
manifests: proxyManifests(),
pagination: stripTypenames(pagination()),
pagination: pagination(),
};
const createComponent = (propsData = defaultProps) => {
@ -60,9 +59,8 @@ describe('Manifests List', () => {
it('has the correct props', () => {
createComponent();
expect(findPagination().props()).toMatchObject({
...defaultProps.pagination,
});
const { __typename, ...paginationProps } = defaultProps.pagination;
expect(findPagination().props()).toMatchObject(paginationProps);
});
it('emits the next-page event', () => {

View File

@ -1,12 +1,14 @@
import { setHTMLFixture, resetHTMLFixture } from 'helpers/fixtures';
import { TEST_HOST } from 'helpers/test_constants';
import projectNew from '~/projects/project_new';
import { checkRules } from '~/projects/project_name_rules';
import { mockTracking, triggerEvent, unmockTracking } from 'helpers/tracking_helper';
describe('New Project', () => {
let $projectImportUrl;
let $projectPath;
let $projectName;
let $projectNameError;
const mockKeyup = (el) => el.dispatchEvent(new KeyboardEvent('keyup'));
const mockChange = (el) => el.dispatchEvent(new Event('change'));
@ -29,6 +31,7 @@ describe('New Project', () => {
</div>
</div>
<input id="project_name" />
<div class="gl-field-error hidden" id="project_name_error" />
<input id="project_path" />
</div>
<div class="js-user-readme-repo"></div>
@ -41,6 +44,7 @@ describe('New Project', () => {
$projectImportUrl = document.querySelector('#project_import_url');
$projectPath = document.querySelector('#project_path');
$projectName = document.querySelector('#project_name');
$projectNameError = document.querySelector('#project_name_error');
});
afterEach(() => {
@ -84,6 +88,57 @@ describe('New Project', () => {
});
});
describe('tracks manual name input', () => {
beforeEach(() => {
projectNew.bindEvents();
});
afterEach(() => {
unmockTracking();
});
it('shows no error message by default', () => {
expect($projectNameError.classList.contains('hidden')).toBe(true);
});
it('shows an error message if the name is invalid', () => {
$projectName.value = '.validate!Name';
triggerEvent($projectName, 'change');
expect($projectNameError.innerText).toBe(
"Name must start with a letter, digit, emoji, or '_'",
);
expect($projectNameError.classList.contains('hidden')).toBe(false);
});
});
describe('project name rule', () => {
describe("Name must start with a letter, digit, emoji, or '_'", () => {
const errormsg = "Name must start with a letter, digit, emoji, or '_'";
it("'.foo' should error", () => {
const text = '.foo';
expect(checkRules(text)).toBe(errormsg);
});
it("'_foo' should pass", () => {
const text = '_foo';
expect(checkRules(text)).toBe('');
});
});
describe("Name can contain only letters, digits, emojis, '_', '.', '+', dashes, or spaces", () => {
const errormsg =
"Name can contain only letters, digits, emojis, '_', '.', '+', dashes, or spaces";
it("'foo(#^.^#)foo' should error", () => {
const text = 'foo(#^.^#)foo';
expect(checkRules(text)).toBe(errormsg);
});
it("'foo123😊_.+- ' should passed", () => {
const text = 'foo123😊_.+- ';
expect(checkRules(text)).toBe('');
});
});
});
describe('deriveProjectPathFromUrl', () => {
const dummyImportUrl = `${TEST_HOST}/dummy/import/url.git`;

View File

@ -21,6 +21,7 @@ import WorkItemTitle from '~/work_items/components/work_item_title.vue';
import WorkItemAssignees from '~/work_items/components/work_item_assignees.vue';
import WorkItemLabels from '~/work_items/components/work_item_labels.vue';
import WorkItemMilestone from '~/work_items/components/work_item_milestone.vue';
import WorkItemTree from '~/work_items/components/work_item_links/work_item_tree.vue';
import { i18n } from '~/work_items/constants';
import workItemQuery from '~/work_items/graphql/work_item.query.graphql';
import workItemByIidQuery from '~/work_items/graphql/work_item_by_iid.query.graphql';
@ -38,6 +39,7 @@ import {
workItemAssigneesSubscriptionResponse,
workItemMilestoneSubscriptionResponse,
projectWorkItemResponse,
objectiveType,
} from '../mock_data';
describe('WorkItemDetail component', () => {
@ -78,6 +80,7 @@ describe('WorkItemDetail component', () => {
const findParentButton = () => findParent().findComponent(GlButton);
const findCloseButton = () => wrapper.find('[data-testid="work-item-close"]');
const findWorkItemType = () => wrapper.find('[data-testid="work-item-type"]');
const findHierarchyTree = () => wrapper.findComponent(WorkItemTree);
const createComponent = ({
isModal = false,
@ -638,4 +641,24 @@ describe('WorkItemDetail component', () => {
iid: '1',
});
});
describe('hierarchy widget', () => {
it('does not render children tree by default', async () => {
createComponent();
await waitForPromises();
expect(findHierarchyTree().exists()).toBe(false);
});
it('renders children tree when work item is an Objective', async () => {
const objectiveWorkItem = workItemResponseFactory({
workItemType: objectiveType,
});
const handler = jest.fn().mockResolvedValue(objectiveWorkItem);
createComponent({ handler });
await waitForPromises();
expect(findHierarchyTree().exists()).toBe(true);
});
});
});

View File

@ -25,9 +25,13 @@ describe('RelatedItemsTree', () => {
expect(wrapper.findAllComponents(GlDropdownSectionHeader).at(0).text()).toContain(
'Objective',
);
expect(wrapper.findAllComponents(GlDropdownSectionHeader).at(1).text()).toContain(
'Key result',
);
// TODO: Uncomment once following two issues addressed
// https://gitlab.com/gitlab-org/gitlab/-/issues/381833
// https://gitlab.com/gitlab-org/gitlab/-/issues/385084
// expect(wrapper.findAllComponents(GlDropdownSectionHeader).at(1).text()).toContain(
// 'Key result',
// );
});
});
});

View File

@ -1,33 +1,68 @@
import { GlButton, GlIcon } from '@gitlab/ui';
import Vue from 'vue';
import VueApollo from 'vue-apollo';
import createMockApollo from 'helpers/mock_apollo_helper';
import { shallowMountExtended } from 'helpers/vue_test_utils_helper';
import waitForPromises from 'helpers/wait_for_promises';
import { createAlert } from '~/flash';
import RichTimestampTooltip from '~/vue_shared/components/rich_timestamp_tooltip.vue';
import getWorkItemTreeQuery from '~/work_items/graphql/work_item_tree.query.graphql';
import WorkItemLinkChild from '~/work_items/components/work_item_links/work_item_link_child.vue';
import WorkItemLinksMenu from '~/work_items/components/work_item_links/work_item_links_menu.vue';
import WorkItemTreeChildren from '~/work_items/components/work_item_links/work_item_tree_children.vue';
import {
WIDGET_TYPE_HIERARCHY,
TASK_TYPE_NAME,
WORK_ITEM_TYPE_VALUE_OBJECTIVE,
} from '~/work_items/constants';
import { workItemTask, confidentialWorkItemTask, closedWorkItemTask } from '../../mock_data';
import {
workItemTask,
workItemObjectiveWithChild,
confidentialWorkItemTask,
closedWorkItemTask,
workItemHierarchyTreeResponse,
workItemHierarchyTreeFailureResponse,
} from '../../mock_data';
jest.mock('~/flash');
describe('WorkItemLinkChild', () => {
const WORK_ITEM_ID = 'gid://gitlab/WorkItem/2';
let wrapper;
let getWorkItemTreeQueryHandler;
Vue.use(VueApollo);
const createComponent = ({
projectPath = 'gitlab-org/gitlab-test',
canUpdate = true,
issuableGid = WORK_ITEM_ID,
childItem = workItemTask,
workItemType = TASK_TYPE_NAME,
apolloProvider = null,
} = {}) => {
getWorkItemTreeQueryHandler = jest.fn().mockResolvedValue(workItemHierarchyTreeResponse);
wrapper = shallowMountExtended(WorkItemLinkChild, {
apolloProvider:
apolloProvider || createMockApollo([[getWorkItemTreeQuery, getWorkItemTreeQueryHandler]]),
propsData: {
projectPath,
canUpdate,
issuableGid,
childItem,
workItemType,
},
});
};
beforeEach(() => {
createAlert.mockClear();
});
afterEach(() => {
wrapper.destroy();
});
@ -121,7 +156,78 @@ describe('WorkItemLinkChild', () => {
it('removeChild event on menu triggers `click-remove-child` event', () => {
itemMenuEl.vm.$emit('removeChild');
expect(wrapper.emitted('remove')).toEqual([[workItemTask.id]]);
expect(wrapper.emitted('removeChild')).toEqual([[workItemTask.id]]);
});
});
describe('nested children', () => {
const findExpandButton = () => wrapper.findByTestId('expand-child');
const findTreeChildren = () => wrapper.findComponent(WorkItemTreeChildren);
beforeEach(() => {
getWorkItemTreeQueryHandler.mockClear();
createComponent({
childItem: workItemObjectiveWithChild,
workItemType: WORK_ITEM_TYPE_VALUE_OBJECTIVE,
});
});
it('displays expand button when item has children, children are not displayed by default', () => {
expect(findExpandButton().exists()).toBe(true);
expect(findTreeChildren().exists()).toBe(false);
});
it('fetches and displays children of item when clicking on expand button', async () => {
await findExpandButton().vm.$emit('click');
expect(findExpandButton().props('loading')).toBe(true);
await waitForPromises();
expect(getWorkItemTreeQueryHandler).toHaveBeenCalled();
expect(findTreeChildren().exists()).toBe(true);
const widgetHierarchy = workItemHierarchyTreeResponse.data.workItem.widgets.find(
(widget) => widget.type === WIDGET_TYPE_HIERARCHY,
);
expect(findTreeChildren().props('children')).toEqual(widgetHierarchy.children.nodes);
});
it('does not fetch children if already fetched once while clicking expand button', async () => {
findExpandButton().vm.$emit('click'); // Expand for the first time
await waitForPromises();
expect(findTreeChildren().exists()).toBe(true);
await findExpandButton().vm.$emit('click'); // Collapse
findExpandButton().vm.$emit('click'); // Expand again
await waitForPromises();
expect(getWorkItemTreeQueryHandler).toHaveBeenCalledTimes(1); // ensure children were fetched only once.
expect(findTreeChildren().exists()).toBe(true);
});
it('calls createAlert when children fetch request fails on clicking expand button', async () => {
const getWorkItemTreeQueryFailureHandler = jest
.fn()
.mockRejectedValue(workItemHierarchyTreeFailureResponse);
const apolloProvider = createMockApollo([
[getWorkItemTreeQuery, getWorkItemTreeQueryFailureHandler],
]);
createComponent({
childItem: workItemObjectiveWithChild,
workItemType: WORK_ITEM_TYPE_VALUE_OBJECTIVE,
apolloProvider,
});
findExpandButton().vm.$emit('click');
await waitForPromises();
expect(createAlert).toHaveBeenCalledWith({
captureError: true,
error: expect.any(Object),
message: 'Something went wrong while fetching children.',
});
});
});
});

View File

@ -257,7 +257,7 @@ describe('WorkItemLinks', () => {
});
it('calls correct mutation with correct variables', async () => {
firstChild.vm.$emit('remove', firstChild.vm.childItem.id);
firstChild.vm.$emit('removeChild', firstChild.vm.childItem.id);
await waitForPromises();
@ -272,7 +272,7 @@ describe('WorkItemLinks', () => {
});
it('shows toast when mutation succeeds', async () => {
firstChild.vm.$emit('remove', firstChild.vm.childItem.id);
firstChild.vm.$emit('removeChild', firstChild.vm.childItem.id);
await waitForPromises();
@ -284,7 +284,7 @@ describe('WorkItemLinks', () => {
it('renders correct number of children after removal', async () => {
expect(findWorkItemLinkChildItems()).toHaveLength(4);
firstChild.vm.$emit('remove', firstChild.vm.childItem.id);
firstChild.vm.$emit('removeChild', firstChild.vm.childItem.id);
await waitForPromises();
expect(findWorkItemLinkChildItems()).toHaveLength(3);

View File

@ -2,12 +2,14 @@ import { nextTick } from 'vue';
import { shallowMountExtended } from 'helpers/vue_test_utils_helper';
import WorkItemTree from '~/work_items/components/work_item_links/work_item_tree.vue';
import WorkItemLinksForm from '~/work_items/components/work_item_links/work_item_links_form.vue';
import WorkItemLinkChild from '~/work_items/components/work_item_links/work_item_link_child.vue';
import OkrActionsSplitButton from '~/work_items/components/work_item_links/okr_actions_split_button.vue';
import {
FORM_TYPES,
WORK_ITEM_TYPE_ENUM_OBJECTIVE,
WORK_ITEM_TYPE_ENUM_KEY_RESULT,
} from '~/work_items/constants';
import { childrenWorkItems } from '../../mock_data';
describe('WorkItemTree', () => {
let wrapper;
@ -17,10 +19,16 @@ describe('WorkItemTree', () => {
const findEmptyState = () => wrapper.findByTestId('tree-empty');
const findToggleFormSplitButton = () => wrapper.findComponent(OkrActionsSplitButton);
const findForm = () => wrapper.findComponent(WorkItemLinksForm);
const findWorkItemLinkChildItems = () => wrapper.findAllComponents(WorkItemLinkChild);
const createComponent = () => {
const createComponent = ({ children = childrenWorkItems } = {}) => {
wrapper = shallowMountExtended(WorkItemTree, {
propsData: { workItemType: 'Objective', workItemId: 'gid://gitlab/WorkItem/515' },
propsData: {
workItemType: 'Objective',
workItemId: 'gid://gitlab/WorkItem/515',
children,
projectPath: 'test/project',
},
});
};
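// `children` defaults to the shared mock list so individual tests can
// override it, for example with an empty array for the empty state.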
@ -47,9 +55,14 @@ describe('WorkItemTree', () => {
});
it('displays empty state if there are no children', () => {
createComponent({ children: [] });
expect(findEmptyState().exists()).toBe(true);
});
it('renders all hierarchy widget children', () => {
expect(findWorkItemLinkChildItems()).toHaveLength(4);
});
it('does not display form by default', () => {
expect(findForm().exists()).toBe(false);
});
@ -71,4 +84,11 @@ describe('WorkItemTree', () => {
expect(findForm().props('childrenType')).toBe(childType);
},
);
it('removeChild event on child triggers `removeChild` event', () => {
const firstChild = findWorkItemLinkChildItems().at(0);
firstChild.vm.$emit('removeChild', 'gid://gitlab/WorkItem/2');
expect(wrapper.emitted('removeChild')).toEqual([['gid://gitlab/WorkItem/2']]);
});
});

View File

@ -85,6 +85,7 @@ export const workItemQueryResponse = {
{
__typename: 'WorkItemWidgetHierarchy',
type: 'HIERARCHY',
hasChildren: true,
parent: {
id: 'gid://gitlab/Issue/1',
iid: '5',
@ -108,7 +109,15 @@ export const workItemQueryResponse = {
state: 'OPEN',
workItemType: {
id: '1',
name: 'Task',
iconName: 'issue-type-task',
},
widgets: [
{
type: 'HIERARCHY',
hasChildren: false,
},
],
},
],
},
@ -150,6 +159,7 @@ export const updateWorkItemMutationResponse = {
},
widgets: [
{
type: 'HIERARCHY',
children: {
nodes: [
{
@ -161,10 +171,13 @@ export const updateWorkItemMutationResponse = {
state: 'OPEN',
workItemType: {
id: '1',
name: 'Task',
iconName: 'issue-type-task',
},
},
],
},
__typename: 'WorkItemConnection',
},
{
__typename: 'WorkItemWidgetAssignees',
@ -219,6 +232,20 @@ export const descriptionHtmlWithCheckboxes = `
</ul>
`;
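// Shared WorkItemType stubs; `name` and `iconName` match the fields the
// hierarchy widgets render for type icons.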
const taskType = {
__typename: 'WorkItemType',
id: 'gid://gitlab/WorkItems::Type/5',
name: 'Task',
iconName: 'issue-type-task',
};
export const objectiveType = {
__typename: 'WorkItemType',
id: 'gid://gitlab/WorkItems::Type/2411',
name: 'Objective',
iconName: 'issue-type-objective',
};
export const workItemResponseFactory = ({
canUpdate = false,
canDelete = false,
@ -236,6 +263,7 @@ export const workItemResponseFactory = ({
lastEditedBy = null,
withCheckboxes = false,
parent = mockParent.parent,
workItemType = taskType,
} = {}) => ({
data: {
workItem: {
@ -253,12 +281,7 @@ export const workItemResponseFactory = ({
id: '1',
fullPath: 'test-project-path',
},
workItemType: {
__typename: 'WorkItemType',
id: 'gid://gitlab/WorkItems::Type/5',
name: 'Task',
iconName: 'issue-type-task',
},
workItemType,
userPermissions: {
deleteWorkItem: canDelete,
updateWorkItem: canUpdate,
@ -338,6 +361,7 @@ export const workItemResponseFactory = ({
{
__typename: 'WorkItemWidgetHierarchy',
type: 'HIERARCHY',
hasChildren: true,
children: {
nodes: [
{
@ -349,7 +373,15 @@ export const workItemResponseFactory = ({
state: 'OPEN',
workItemType: {
id: '1',
name: 'Task',
iconName: 'issue-type-task',
},
widgets: [
{
type: 'HIERARCHY',
hasChildren: false,
},
],
},
],
},
@ -669,6 +701,8 @@ export const workItemHierarchyEmptyResponse = {
id: 'gid://gitlab/WorkItem/1',
workItemType: {
id: 'gid://gitlab/WorkItems::Type/6',
name: 'Issue',
iconName: 'issue-type-issue',
__typename: 'WorkItemType',
},
title: 'New title',
@ -692,6 +726,7 @@ export const workItemHierarchyEmptyResponse = {
{
type: 'HIERARCHY',
parent: null,
hasChildren: false,
children: {
nodes: [],
__typename: 'WorkItemConnection',
@ -710,6 +745,8 @@ export const workItemHierarchyNoUpdatePermissionResponse = {
id: 'gid://gitlab/WorkItem/1',
workItemType: {
id: 'gid://gitlab/WorkItems::Type/6',
name: 'Issue',
iconName: 'issue-type-issue',
__typename: 'WorkItemType',
},
title: 'New title',
@ -731,6 +768,7 @@ export const workItemHierarchyNoUpdatePermissionResponse = {
{
type: 'HIERARCHY',
parent: null,
hasChildren: true,
children: {
nodes: [
{
@ -738,6 +776,8 @@ export const workItemHierarchyNoUpdatePermissionResponse = {
iid: '2',
workItemType: {
id: 'gid://gitlab/WorkItems::Type/5',
name: 'Task',
iconName: 'issue-type-task',
__typename: 'WorkItemType',
},
title: 'xyz',
@ -745,6 +785,12 @@ export const workItemHierarchyNoUpdatePermissionResponse = {
confidential: false,
createdAt: '2022-08-03T12:41:54Z',
closedAt: null,
widgets: [
{
type: 'HIERARCHY',
hasChildren: false,
},
],
__typename: 'WorkItem',
},
],
@ -763,6 +809,8 @@ export const workItemTask = {
iid: '4',
workItemType: {
id: 'gid://gitlab/WorkItems::Type/5',
name: 'Task',
iconName: 'issue-type-task',
__typename: 'WorkItemType',
},
title: 'bar',
@ -778,6 +826,8 @@ export const confidentialWorkItemTask = {
iid: '2',
workItemType: {
id: 'gid://gitlab/WorkItems::Type/5',
name: 'Task',
iconName: 'issue-type-task',
__typename: 'WorkItemType',
},
title: 'xyz',
@ -793,6 +843,8 @@ export const closedWorkItemTask = {
iid: '3',
workItemType: {
id: 'gid://gitlab/WorkItems::Type/5',
name: 'Task',
iconName: 'issue-type-task',
__typename: 'WorkItemType',
},
title: 'abc',
@ -803,6 +855,28 @@ export const closedWorkItemTask = {
__typename: 'WorkItem',
};
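// Default set of child work items shared by the hierarchy mocks and the
// WorkItemTree specs.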
export const childrenWorkItems = [
confidentialWorkItemTask,
closedWorkItemTask,
workItemTask,
{
id: 'gid://gitlab/WorkItem/5',
iid: '5',
workItemType: {
id: 'gid://gitlab/WorkItems::Type/5',
name: 'Task',
iconName: 'issue-type-task',
__typename: 'WorkItemType',
},
title: 'foobar',
state: 'OPEN',
confidential: false,
createdAt: '2022-08-03T12:41:54Z',
closedAt: null,
__typename: 'WorkItem',
},
];
export const workItemHierarchyResponse = {
data: {
workItem: {
@ -810,6 +884,8 @@ export const workItemHierarchyResponse = {
iid: '1',
workItemType: {
id: 'gid://gitlab/WorkItems::Type/6',
name: 'Objective',
iconName: 'issue-type-objective',
__typename: 'WorkItemType',
},
title: 'New title',
@ -831,23 +907,97 @@ export const workItemHierarchyResponse = {
{
type: 'HIERARCHY',
parent: null,
hasChildren: true,
children: {
nodes: childrenWorkItems,
__typename: 'WorkItemConnection',
},
__typename: 'WorkItemWidgetHierarchy',
},
],
__typename: 'WorkItem',
},
},
};
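// An Objective whose hierarchy widget reports `hasChildren: true`, which is
// what makes WorkItemLinkChild render the expand button.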
export const workItemObjectiveWithChild = {
id: 'gid://gitlab/WorkItem/12',
iid: '12',
workItemType: {
id: 'gid://gitlab/WorkItems::Type/2411',
name: 'Objective',
iconName: 'issue-type-objective',
__typename: 'WorkItemType',
},
title: 'Objective',
state: 'OPEN',
confidential: false,
createdAt: '2022-08-03T12:41:54Z',
closedAt: null,
widgets: [
{
type: 'HIERARCHY',
hasChildren: true,
__typename: 'WorkItemWidgetHierarchy',
},
],
__typename: 'WorkItem',
};
export const workItemHierarchyTreeResponse = {
data: {
workItem: {
id: 'gid://gitlab/WorkItem/2',
iid: '2',
workItemType: {
id: 'gid://gitlab/WorkItems::Type/2411',
name: 'Objective',
iconName: 'issue-type-objective',
__typename: 'WorkItemType',
},
title: 'New title',
userPermissions: {
deleteWorkItem: true,
updateWorkItem: true,
},
confidential: false,
project: {
__typename: 'Project',
id: '1',
fullPath: 'test-project-path',
},
widgets: [
{
type: 'DESCRIPTION',
__typename: 'WorkItemWidgetDescription',
},
{
type: 'HIERARCHY',
parent: null,
hasChildren: true,
children: {
nodes: [
confidentialWorkItemTask,
closedWorkItemTask,
workItemTask,
{
id: 'gid://gitlab/WorkItem/5',
iid: '5',
id: 'gid://gitlab/WorkItem/13',
iid: '13',
workItemType: {
id: 'gid://gitlab/WorkItems::Type/5',
id: 'gid://gitlab/WorkItems::Type/2411',
name: 'Objective',
iconName: 'issue-type-objective',
__typename: 'WorkItemType',
},
title: 'foobar',
title: 'Objective 2',
state: 'OPEN',
confidential: false,
createdAt: '2022-08-03T12:41:54Z',
closedAt: null,
widgets: [
{
type: 'HIERARCHY',
hasChildren: true,
__typename: 'WorkItemWidgetHierarchy',
},
],
__typename: 'WorkItem',
},
],
@ -861,6 +1011,15 @@ export const workItemHierarchyResponse = {
},
};
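// Minimal GraphQL error payload used to exercise the fetch-failure alert path.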
export const workItemHierarchyTreeFailureResponse = {
data: {},
errors: [
{
message: 'Something went wrong',
},
],
};
export const changeWorkItemParentMutationResponse = {
data: {
workItemUpdate: {
@ -894,6 +1053,7 @@ export const changeWorkItemParentMutationResponse = {
__typename: 'WorkItemWidgetHierarchy',
type: 'HIERARCHY',
parent: null,
hasChildren: false,
children: {
nodes: [],
},

View File

@ -55,23 +55,6 @@ RSpec.describe Mutations::ContainerRepositories::Destroy do
it_behaves_like params[:shared_examples_name]
end
context 'with container_registry_delete_repository_with_cron_worker disabled' do
before do
project.add_maintainer(user)
stub_feature_flags(container_registry_delete_repository_with_cron_worker: false)
end
it 'enqueues a removal job' do
expect(::Packages::CreateEventService)
.to receive(:new).with(nil, user, event_name: :delete_repository, scope: :container).and_call_original
expect(DeleteContainerRepositoryWorker)
.to receive(:perform_async).with(user.id, container_repository.id)
expect { subject }.to change { ::Packages::Event.count }.by(1)
expect(container_repository.reload.delete_scheduled?).to be true
end
end
end
end
end

View File

@ -5,8 +5,10 @@ require 'spec_helper'
RSpec.describe Gitlab::UrlBlocker, :stub_invalid_dns_only do
include StubRequests
let(:schemes) { %w[http https] }
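# Default scheme allow-list; every validate!/blocked_url? call in this spec
# now passes `schemes` explicitly.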
describe '#validate!' do
subject { described_class.validate!(import_url) }
subject { described_class.validate!(import_url, schemes: schemes) }
shared_examples 'validates URI and hostname' do
it 'runs the url validations' do
@ -59,7 +61,7 @@ RSpec.describe Gitlab::UrlBlocker, :stub_invalid_dns_only do
end
context 'when allow_object_storage is true' do
subject { described_class.validate!(import_url, allow_object_storage: true) }
subject { described_class.validate!(import_url, allow_object_storage: true, schemes: schemes) }
context 'with a local domain name' do
let(:host) { 'http://review-minio-svc.svc:9000' }
@ -218,7 +220,7 @@ RSpec.describe Gitlab::UrlBlocker, :stub_invalid_dns_only do
end
context 'disabled DNS rebinding protection' do
subject { described_class.validate!(import_url, dns_rebind_protection: false) }
subject { described_class.validate!(import_url, dns_rebind_protection: false, schemes: schemes) }
context 'when URI is internal' do
let(:import_url) { 'http://localhost' }
@ -271,20 +273,6 @@ RSpec.describe Gitlab::UrlBlocker, :stub_invalid_dns_only do
end
end
end
context 'when schemes is blank' do
subject { described_class.validate!('https://example.org', schemes: []) }
it 'logs a warning message' do
expect(Gitlab::AppJsonLogger).to receive(:info).with(
message: 'Blank scheme used in Gitlab::UrlBlocker',
uri_scheme: 'https',
caller: anything
)
subject
end
end
end
describe '#blocked_url?' do
@ -292,115 +280,114 @@ RSpec.describe Gitlab::UrlBlocker, :stub_invalid_dns_only do
it 'allows imports from configured web host and port' do
import_url = "http://#{Gitlab.host_with_port}/t.git"
expect(described_class.blocked_url?(import_url)).to be false
expect(described_class.blocked_url?(import_url, schemes: schemes)).to be false
end
it 'allows mirroring from configured SSH host and port' do
import_url = "ssh://#{Gitlab.config.gitlab_shell.ssh_host}:#{Gitlab.config.gitlab_shell.ssh_port}/t.git"
expect(described_class.blocked_url?(import_url)).to be false
expect(described_class.blocked_url?(import_url, schemes: schemes)).to be false
end
it 'returns true for bad localhost hostname' do
expect(described_class.blocked_url?('https://localhost:65535/foo/foo.git')).to be true
expect(described_class.blocked_url?('https://localhost:65535/foo/foo.git', schemes: schemes)).to be true
end
it 'returns true for bad port' do
expect(described_class.blocked_url?('https://gitlab.com:25/foo/foo.git', ports: ports)).to be true
expect(described_class.blocked_url?('https://gitlab.com:25/foo/foo.git', ports: ports, schemes: schemes)).to be true
end
it 'returns true for bad scheme' do
expect(described_class.blocked_url?('https://gitlab.com/foo/foo.git', schemes: ['https'])).to be false
expect(described_class.blocked_url?('https://gitlab.com/foo/foo.git')).to be false
expect(described_class.blocked_url?('https://gitlab.com/foo/foo.git', schemes: ['http'])).to be true
end
it 'returns true for bad protocol on configured web/SSH host and ports' do
web_url = "javascript://#{Gitlab.host_with_port}/t.git%0aalert(1)"
expect(described_class.blocked_url?(web_url)).to be true
expect(described_class.blocked_url?(web_url, schemes: schemes)).to be true
ssh_url = "javascript://#{Gitlab.config.gitlab_shell.ssh_host}:#{Gitlab.config.gitlab_shell.ssh_port}/t.git%0aalert(1)"
expect(described_class.blocked_url?(ssh_url)).to be true
expect(described_class.blocked_url?(ssh_url, schemes: schemes)).to be true
end
it 'returns true for localhost IPs' do
expect(described_class.blocked_url?('https://[0:0:0:0:0:0:0:0]/foo/foo.git')).to be true
expect(described_class.blocked_url?('https://0.0.0.0/foo/foo.git')).to be true
expect(described_class.blocked_url?('https://[::]/foo/foo.git')).to be true
expect(described_class.blocked_url?('https://[0:0:0:0:0:0:0:0]/foo/foo.git', schemes: schemes)).to be true
expect(described_class.blocked_url?('https://0.0.0.0/foo/foo.git', schemes: schemes)).to be true
expect(described_class.blocked_url?('https://[::]/foo/foo.git', schemes: schemes)).to be true
end
it 'returns true for loopback IP' do
expect(described_class.blocked_url?('https://127.0.0.2/foo/foo.git')).to be true
expect(described_class.blocked_url?('https://127.0.0.1/foo/foo.git')).to be true
expect(described_class.blocked_url?('https://[::1]/foo/foo.git')).to be true
expect(described_class.blocked_url?('https://127.0.0.2/foo/foo.git', schemes: schemes)).to be true
expect(described_class.blocked_url?('https://127.0.0.1/foo/foo.git', schemes: schemes)).to be true
expect(described_class.blocked_url?('https://[::1]/foo/foo.git', schemes: schemes)).to be true
end
it 'returns true for alternative version of 127.0.0.1 (0177.1)' do
expect(described_class.blocked_url?('https://0177.1:65535/foo/foo.git')).to be true
expect(described_class.blocked_url?('https://0177.1:65535/foo/foo.git', schemes: schemes)).to be true
end
it 'returns true for alternative version of 127.0.0.1 (017700000001)' do
expect(described_class.blocked_url?('https://017700000001:65535/foo/foo.git')).to be true
expect(described_class.blocked_url?('https://017700000001:65535/foo/foo.git', schemes: schemes)).to be true
end
it 'returns true for alternative version of 127.0.0.1 (0x7f.1)' do
expect(described_class.blocked_url?('https://0x7f.1:65535/foo/foo.git')).to be true
expect(described_class.blocked_url?('https://0x7f.1:65535/foo/foo.git', schemes: schemes)).to be true
end
it 'returns true for alternative version of 127.0.0.1 (0x7f.0.0.1)' do
expect(described_class.blocked_url?('https://0x7f.0.0.1:65535/foo/foo.git')).to be true
expect(described_class.blocked_url?('https://0x7f.0.0.1:65535/foo/foo.git', schemes: schemes)).to be true
end
it 'returns true for alternative version of 127.0.0.1 (0x7f000001)' do
expect(described_class.blocked_url?('https://0x7f000001:65535/foo/foo.git')).to be true
expect(described_class.blocked_url?('https://0x7f000001:65535/foo/foo.git', schemes: schemes)).to be true
end
it 'returns true for alternative version of 127.0.0.1 (2130706433)' do
expect(described_class.blocked_url?('https://2130706433:65535/foo/foo.git')).to be true
expect(described_class.blocked_url?('https://2130706433:65535/foo/foo.git', schemes: schemes)).to be true
end
it 'returns true for alternative version of 127.0.0.1 (127.000.000.001)' do
expect(described_class.blocked_url?('https://127.000.000.001:65535/foo/foo.git')).to be true
expect(described_class.blocked_url?('https://127.000.000.001:65535/foo/foo.git', schemes: schemes)).to be true
end
it 'returns true for alternative version of 127.0.0.1 (127.0.1)' do
expect(described_class.blocked_url?('https://127.0.1:65535/foo/foo.git')).to be true
expect(described_class.blocked_url?('https://127.0.1:65535/foo/foo.git', schemes: schemes)).to be true
end
context 'with ipv6 mapped address' do
it 'returns true for localhost IPs' do
expect(described_class.blocked_url?('https://[0:0:0:0:0:ffff:0.0.0.0]/foo/foo.git')).to be true
expect(described_class.blocked_url?('https://[::ffff:0.0.0.0]/foo/foo.git')).to be true
expect(described_class.blocked_url?('https://[::ffff:0:0]/foo/foo.git')).to be true
expect(described_class.blocked_url?('https://[0:0:0:0:0:ffff:0.0.0.0]/foo/foo.git', schemes: schemes)).to be true
expect(described_class.blocked_url?('https://[::ffff:0.0.0.0]/foo/foo.git', schemes: schemes)).to be true
expect(described_class.blocked_url?('https://[::ffff:0:0]/foo/foo.git', schemes: schemes)).to be true
end
it 'returns true for loopback IPs' do
expect(described_class.blocked_url?('https://[0:0:0:0:0:ffff:127.0.0.1]/foo/foo.git')).to be true
expect(described_class.blocked_url?('https://[::ffff:127.0.0.1]/foo/foo.git')).to be true
expect(described_class.blocked_url?('https://[::ffff:7f00:1]/foo/foo.git')).to be true
expect(described_class.blocked_url?('https://[0:0:0:0:0:ffff:127.0.0.2]/foo/foo.git')).to be true
expect(described_class.blocked_url?('https://[::ffff:127.0.0.2]/foo/foo.git')).to be true
expect(described_class.blocked_url?('https://[::ffff:7f00:2]/foo/foo.git')).to be true
expect(described_class.blocked_url?('https://[0:0:0:0:0:ffff:127.0.0.1]/foo/foo.git', schemes: schemes)).to be true
expect(described_class.blocked_url?('https://[::ffff:127.0.0.1]/foo/foo.git', schemes: schemes)).to be true
expect(described_class.blocked_url?('https://[::ffff:7f00:1]/foo/foo.git', schemes: schemes)).to be true
expect(described_class.blocked_url?('https://[0:0:0:0:0:ffff:127.0.0.2]/foo/foo.git', schemes: schemes)).to be true
expect(described_class.blocked_url?('https://[::ffff:127.0.0.2]/foo/foo.git', schemes: schemes)).to be true
expect(described_class.blocked_url?('https://[::ffff:7f00:2]/foo/foo.git', schemes: schemes)).to be true
end
end
it 'returns true for a non-alphanumeric hostname' do
aggregate_failures do
expect(described_class).to be_blocked_url('ssh://-oProxyCommand=whoami/a')
expect(described_class).to be_blocked_url('ssh://-oProxyCommand=whoami/a', schemes: ['ssh'])
# The leading character here is a Unicode "soft hyphen"
expect(described_class).to be_blocked_url('ssh://­oProxyCommand=whoami/a')
expect(described_class).to be_blocked_url('ssh://­oProxyCommand=whoami/a', schemes: ['ssh'])
# Unicode alphanumerics are allowed
expect(described_class).not_to be_blocked_url('ssh://ğitlab.com/a')
expect(described_class).not_to be_blocked_url('ssh://ğitlab.com/a', schemes: ['ssh'])
end
end
it 'returns true for invalid URL' do
expect(described_class.blocked_url?('http://:8080')).to be true
expect(described_class.blocked_url?('http://:8080', schemes: schemes)).to be true
end
it 'returns false for legitimate URL' do
expect(described_class.blocked_url?('https://gitlab.com/foo/foo.git')).to be false
expect(described_class.blocked_url?('https://gitlab.com/foo/foo.git', schemes: schemes)).to be false
end
context 'when allow_local_network is' do
@ -485,33 +472,33 @@ RSpec.describe Gitlab::UrlBlocker, :stub_invalid_dns_only do
end
context 'true (default)' do
it_behaves_like 'allows local requests', { allow_localhost: true, allow_local_network: true }
it_behaves_like 'allows local requests', { allow_localhost: true, allow_local_network: true, schemes: %w[http https] }
end
context 'false' do
it 'blocks urls from private networks' do
local_ips.each do |ip|
stub_domain_resolv(fake_domain, ip) do
expect(described_class).to be_blocked_url("http://#{fake_domain}", allow_local_network: false)
expect(described_class).to be_blocked_url("http://#{fake_domain}", allow_local_network: false, schemes: schemes)
end
expect(described_class).to be_blocked_url("http://#{ip}", allow_local_network: false)
expect(described_class).to be_blocked_url("http://#{ip}", allow_local_network: false, schemes: schemes)
end
end
it 'blocks IPv4 link-local endpoints' do
expect(described_class).to be_blocked_url('http://169.254.169.254', allow_local_network: false)
expect(described_class).to be_blocked_url('http://169.254.168.100', allow_local_network: false)
expect(described_class).to be_blocked_url('http://169.254.169.254', allow_local_network: false, schemes: schemes)
expect(described_class).to be_blocked_url('http://169.254.168.100', allow_local_network: false, schemes: schemes)
end
it 'blocks IPv6 link-local endpoints' do
expect(described_class).to be_blocked_url('http://[0:0:0:0:0:ffff:169.254.169.254]', allow_local_network: false)
expect(described_class).to be_blocked_url('http://[::ffff:169.254.169.254]', allow_local_network: false)
expect(described_class).to be_blocked_url('http://[::ffff:a9fe:a9fe]', allow_local_network: false)
expect(described_class).to be_blocked_url('http://[0:0:0:0:0:ffff:169.254.168.100]', allow_local_network: false)
expect(described_class).to be_blocked_url('http://[::ffff:169.254.168.100]', allow_local_network: false)
expect(described_class).to be_blocked_url('http://[::ffff:a9fe:a864]', allow_local_network: false)
expect(described_class).to be_blocked_url('http://[fe80::c800:eff:fe74:8]', allow_local_network: false)
expect(described_class).to be_blocked_url('http://[0:0:0:0:0:ffff:169.254.169.254]', allow_local_network: false, schemes: schemes)
expect(described_class).to be_blocked_url('http://[::ffff:169.254.169.254]', allow_local_network: false, schemes: schemes)
expect(described_class).to be_blocked_url('http://[::ffff:a9fe:a9fe]', allow_local_network: false, schemes: schemes)
expect(described_class).to be_blocked_url('http://[0:0:0:0:0:ffff:169.254.168.100]', allow_local_network: false, schemes: schemes)
expect(described_class).to be_blocked_url('http://[::ffff:169.254.168.100]', allow_local_network: false, schemes: schemes)
expect(described_class).to be_blocked_url('http://[::ffff:a9fe:a864]', allow_local_network: false, schemes: schemes)
expect(described_class).to be_blocked_url('http://[fe80::c800:eff:fe74:8]', allow_local_network: false, schemes: schemes)
end
it 'blocks limited broadcast address 255.255.255.255 and variants' do
@ -521,7 +508,7 @@ RSpec.describe Gitlab::UrlBlocker, :stub_invalid_dns_only do
stub_env('RSPEC_ALLOW_INVALID_URLS', 'false')
limited_broadcast_address_variants.each do |variant|
expect(described_class).to be_blocked_url("https://#{variant}", allow_local_network: false), "Expected #{variant} to be blocked"
expect(described_class).to be_blocked_url("https://#{variant}", allow_local_network: false, schemes: schemes), "Expected #{variant} to be blocked"
end
end
@ -529,7 +516,8 @@ RSpec.describe Gitlab::UrlBlocker, :stub_invalid_dns_only do
let(:url_blocker_attributes) do
{
allow_localhost: false,
allow_local_network: false
allow_local_network: false,
schemes: schemes
}
end
@ -559,7 +547,7 @@ RSpec.describe Gitlab::UrlBlocker, :stub_invalid_dns_only do
]
end
it_behaves_like 'allows local requests', { allow_localhost: false, allow_local_network: false }
it_behaves_like 'allows local requests', { allow_localhost: false, allow_local_network: false, schemes: %w[http https] }
it 'allows IP when dns_rebind_protection is disabled' do
url = "http://example.com"
@ -636,7 +624,7 @@ RSpec.describe Gitlab::UrlBlocker, :stub_invalid_dns_only do
end
it do
expect(described_class).not_to be_blocked_url(url, dns_rebind_protection: dns_rebind_value)
expect(described_class).not_to be_blocked_url(url, dns_rebind_protection: dns_rebind_value, schemes: schemes)
end
end
@ -690,26 +678,26 @@ RSpec.describe Gitlab::UrlBlocker, :stub_invalid_dns_only do
context 'when enforce_user is' do
context 'false (default)' do
it 'does not block urls with a non-alphanumeric username' do
expect(described_class).not_to be_blocked_url('ssh://-oProxyCommand=whoami@example.com/a')
expect(described_class).not_to be_blocked_url('ssh://-oProxyCommand=whoami@example.com/a', schemes: ['ssh'])
# The leading character here is a Unicode "soft hyphen"
expect(described_class).not_to be_blocked_url('ssh://­oProxyCommand=whoami@example.com/a')
expect(described_class).not_to be_blocked_url('ssh://­oProxyCommand=whoami@example.com/a', schemes: ['ssh'])
# Unicode alphanumerics are allowed
expect(described_class).not_to be_blocked_url('ssh://ğitlab@example.com/a')
expect(described_class).not_to be_blocked_url('ssh://ğitlab@example.com/a', schemes: ['ssh'])
end
end
context 'true' do
it 'blocks urls with a non-alphanumeric username' do
aggregate_failures do
expect(described_class).to be_blocked_url('ssh://-oProxyCommand=whoami@example.com/a', enforce_user: true)
expect(described_class).to be_blocked_url('ssh://-oProxyCommand=whoami@example.com/a', enforce_user: true, schemes: ['ssh'])
# The leading character here is a Unicode "soft hyphen"
expect(described_class).to be_blocked_url('ssh://­oProxyCommand=whoami@example.com/a', enforce_user: true)
expect(described_class).to be_blocked_url('ssh://­oProxyCommand=whoami@example.com/a', enforce_user: true, schemes: ['ssh'])
# Unicode alphanumerics are allowed
expect(described_class).not_to be_blocked_url('ssh://ğitlab@example.com/a', enforce_user: true)
expect(described_class).not_to be_blocked_url('ssh://ğitlab@example.com/a', enforce_user: true, schemes: ['ssh'])
end
end
end
@ -717,35 +705,35 @@ RSpec.describe Gitlab::UrlBlocker, :stub_invalid_dns_only do
context 'when ascii_only is true' do
it 'returns true for unicode domain' do
expect(described_class.blocked_url?('https://𝕘itαƄ.com/foo/foo.bar', ascii_only: true)).to be true
expect(described_class.blocked_url?('https://𝕘itαƄ.com/foo/foo.bar', ascii_only: true, schemes: schemes)).to be true
end
it 'returns true for unicode tld' do
expect(described_class.blocked_url?('https://gitlab.ο/foo/foo.bar', ascii_only: true)).to be true
expect(described_class.blocked_url?('https://gitlab.ο/foo/foo.bar', ascii_only: true, schemes: schemes)).to be true
end
it 'returns true for unicode path' do
expect(described_class.blocked_url?('https://gitlab.com/𝒇οο/𝒇οο.Ƅα', ascii_only: true)).to be true
expect(described_class.blocked_url?('https://gitlab.com/𝒇οο/𝒇οο.Ƅα', ascii_only: true, schemes: schemes)).to be true
end
it 'returns true for IDNA deviations' do
expect(described_class.blocked_url?('https://mißile.com/foo/foo.bar', ascii_only: true)).to be true
expect(described_class.blocked_url?('https://miςςile.com/foo/foo.bar', ascii_only: true)).to be true
expect(described_class.blocked_url?('https://gitlab.com/foo/foo.bar', ascii_only: true)).to be true
expect(described_class.blocked_url?('https://gitlab.com/foo/foo.bar', ascii_only: true)).to be true
expect(described_class.blocked_url?('https://mißile.com/foo/foo.bar', ascii_only: true, schemes: schemes)).to be true
expect(described_class.blocked_url?('https://miςςile.com/foo/foo.bar', ascii_only: true, schemes: schemes)).to be true
expect(described_class.blocked_url?('https://gitlab.com/foo/foo.bar', ascii_only: true, schemes: schemes)).to be true
expect(described_class.blocked_url?('https://gitlab.com/foo/foo.bar', ascii_only: true, schemes: schemes)).to be true
end
end
it 'blocks urls with invalid ip address' do
stub_env('RSPEC_ALLOW_INVALID_URLS', 'false')
expect(described_class).to be_blocked_url('http://8.8.8.8.8')
expect(described_class).to be_blocked_url('http://8.8.8.8.8', schemes: schemes)
end
it 'blocks urls whose hostname cannot be resolved' do
stub_env('RSPEC_ALLOW_INVALID_URLS', 'false')
expect(described_class).to be_blocked_url('http://foobar.x')
expect(described_class).to be_blocked_url('http://foobar.x', schemes: schemes)
end
context 'when gitlab is running on a non-default port' do
@ -757,13 +745,13 @@ RSpec.describe Gitlab::UrlBlocker, :stub_invalid_dns_only do
it 'returns true for url targeting the wrong port' do
stub_domain_resolv('gitlab.local', '127.0.0.1') do
expect(described_class).to be_blocked_url("http://gitlab.local/foo")
expect(described_class).to be_blocked_url("http://gitlab.local/foo", schemes: schemes)
end
end
it 'does not block url on gitlab port' do
stub_domain_resolv('gitlab.local', '127.0.0.1') do
expect(described_class).not_to be_blocked_url("http://gitlab.local:#{gitlab_port}/foo")
expect(described_class).not_to be_blocked_url("http://gitlab.local:#{gitlab_port}/foo", schemes: schemes)
end
end
end

View File

@ -37,6 +37,42 @@ RSpec.describe SystemCheck::SidekiqCheck do
)
end
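# A lone worker process (no cluster parent) is treated as a misconfiguration:
# the check fails and prints remediation steps for the detected init system.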
context 'when only a worker process is running' do
before do
stub_ps_output <<~PS
root 2193955 92.2 3.1 4675972 515516 ? Sl 17:34 0:13 sidekiq 5.2.9 ...
PS
end
it 'fails with the right message for systemd' do
allow(File).to receive(:symlink?).with(described_class::SYSTEMD_UNIT_PATH).and_return(true)
expect_check_output <<~OUTPUT
Running? ... yes
Number of Sidekiq processes (cluster/worker) ... 0/1
Try fixing it:
sudo systemctl restart gitlab-sidekiq.service
Please fix the error above and rerun the checks.
OUTPUT
end
it 'fails with the right message for sysvinit' do
allow(File).to receive(:symlink?).with(described_class::SYSTEMD_UNIT_PATH).and_return(false)
allow(subject).to receive(:gitlab_user).and_return('git')
expect_check_output <<~OUTPUT
Running? ... yes
Number of Sidekiq processes (cluster/worker) ... 0/1
Try fixing it:
sudo service gitlab stop
sudo pkill -u git -f sidekiq
sleep 10 && sudo pkill -9 -u git -f sidekiq
sudo service gitlab start
Please fix the error above and rerun the checks.
OUTPUT
end
end
it 'succeeds when one cluster process and one or more worker processes are running' do
stub_ps_output <<~PS
root 2193947 0.9 0.1 146564 18104 ? Ssl 17:34 0:00 ruby bin/sidekiq-cluster * -P ...
@ -49,33 +85,5 @@ RSpec.describe SystemCheck::SidekiqCheck do
Number of Sidekiq processes (cluster/worker) ... 1/2
OUTPUT
end
# TODO: Running without a cluster is deprecated and will be removed in GitLab 14.0
# https://gitlab.com/gitlab-org/gitlab/-/issues/323225
context 'when running without a cluster' do
it 'fails when more than one worker process is running' do
stub_ps_output <<~PS
root 2193955 92.2 3.1 4675972 515516 ? Sl 17:34 0:13 sidekiq 5.2.9 ...
root 2193956 92.2 3.1 4675972 515516 ? Sl 17:34 0:13 sidekiq 5.2.9 ...
PS
expect_check_output include(
'Running? ... yes',
'Number of Sidekiq processes (cluster/worker) ... 0/2',
'Please fix the error above and rerun the checks.'
)
end
it 'succeeds when one worker process is running' do
stub_ps_output <<~PS
root 2193955 92.2 3.1 4675972 515516 ? Sl 17:34 0:13 sidekiq 5.2.9 ...
PS
expect_check_output <<~OUTPUT
Running? ... yes
Number of Sidekiq processes (cluster/worker) ... 0/1
OUTPUT
end
end
end
end

View File

@ -107,18 +107,6 @@ RSpec.describe Packages::PackageFile, type: :model do
subject { build(:package_file, :jar, package: package, size: 42) }
it_behaves_like 'UpdateProjectStatistics', :packages_size
context 'when packages_size_counter_attribute is disabled' do
before do
stub_feature_flags(packages_size_counter_attribute: false)
end
it 'uses the legacy increment function' do
expect(package.project.statistics).to receive(:legacy_increment_statistic)
expect(package.project.statistics).not_to receive(:delayed_increment_counter)
subject.save!
end
end
end
end

View File

@ -717,18 +717,6 @@ RSpec.describe Packages::Package, type: :model do
destroy!
end
context 'when packages_size_counter_attribute is disabled' do
before do
stub_feature_flags(packages_size_counter_attribute: false)
end
it 'affects project statistics' do
expect { destroy! }
.to change { project_statistics.reload.packages_size }
.from(package_file.size).to(0)
end
end
end
describe '.by_name_and_file_name' do

View File

@ -507,14 +507,6 @@ RSpec.describe ProjectStatistics do
let(:stat) { :packages_size }
it_behaves_like 'a statistic that increases storage_size asynchronously'
context 'with packages_size_counter_attribute disabled' do
before do
stub_feature_flags(packages_size_counter_attribute: false)
end
it_behaves_like 'a statistic that increases storage_size'
end
end
context 'when the amount is 0' do

View File

@ -80,25 +80,6 @@ RSpec.describe 'Destroying a container repository', feature_category: :container
it_behaves_like params[:shared_examples_name]
end
context 'with container_registry_delete_repository_with_cron_worker disabled' do
before do
project.add_maintainer(user)
stub_feature_flags(container_registry_delete_repository_with_cron_worker: false)
end
it 'enqueues a removal job' do
expect(::Packages::CreateEventService)
.to receive(:new).with(nil, user, event_name: :delete_repository, scope: :container).and_call_original
expect(DeleteContainerRepositoryWorker)
.to receive(:perform_async).with(user.id, container_repository.id)
expect { subject }.to change { ::Packages::Event.count }.by(1)
expect(container_repository_mutation_response).to match_schema('graphql/container_repository')
expect(container_repository_mutation_response['status']).to eq('DELETE_SCHEDULED')
end
end
end
context 'with invalid id' do

View File

@ -125,10 +125,6 @@ project_feature:
- created_at
- metrics_dashboard_access_level
- package_registry_access_level
- monitor_access_level
- infrastructure_access_level
- feature_flags_access_level
- environments_access_level
- project_id
- updated_at
computed_attributes:

View File

@ -147,20 +147,6 @@ RSpec.describe API::ProjectContainerRepositories, feature_category: :package_reg
expect(response).to have_gitlab_http_status(:accepted)
end
context 'with container_registry_delete_repository_with_cron_worker disabled' do
before do
stub_feature_flags(container_registry_delete_repository_with_cron_worker: false)
end
it 'schedules removal of repository' do
expect(DeleteContainerRepositoryWorker).to receive(:perform_async)
.with(maintainer.id, root_repository.id)
expect { subject }.to change { root_repository.reload.status }.from(nil).to('delete_scheduled')
expect(response).to have_gitlab_http_status(:accepted)
end
end
end
end
end

View File

@ -231,14 +231,16 @@ RSpec.describe API::Projects do
include_examples 'includes container_registry_access_level'
end
it 'includes releases_access_level', :aggregate_failures do
project.project_feature.update!(releases_access_level: ProjectFeature::DISABLED)
it 'includes feature access level fields', :aggregate_failures do
get api('/projects', user)
project_response = json_response.find { |p| p['id'] == project.id }
expect(response).to have_gitlab_http_status(:ok)
expect(project_response['releases_access_level']).to eq('disabled')
expect(project_response['releases_access_level']).to eq('enabled')
expect(project_response['environments_access_level']).to eq('enabled')
expect(project_response['feature_flags_access_level']).to eq('enabled')
expect(project_response['infrastructure_access_level']).to eq('enabled')
expect(project_response['monitor_access_level']).to eq('enabled')
end
context 'when some projects are in a group' do
@ -1192,6 +1194,10 @@ RSpec.describe API::Projects do
attrs[:container_registry_access_level] = 'private'
attrs[:security_and_compliance_access_level] = 'private'
attrs[:releases_access_level] = 'disabled'
attrs[:environments_access_level] = 'disabled'
attrs[:feature_flags_access_level] = 'disabled'
attrs[:infrastructure_access_level] = 'disabled'
attrs[:monitor_access_level] = 'disabled'
end
post api('/projects', user), params: project
@ -1201,7 +1207,8 @@ RSpec.describe API::Projects do
project.each_pair do |k, v|
next if %i[
has_external_issue_tracker has_external_wiki issues_enabled merge_requests_enabled wiki_enabled storage_version
container_registry_access_level releases_access_level
container_registry_access_level releases_access_level environments_access_level feature_flags_access_level
infrastructure_access_level monitor_access_level
].include?(k)
expect(json_response[k.to_s]).to eq(v)
@ -1217,6 +1224,10 @@ RSpec.describe API::Projects do
expect(project.project_feature.container_registry_access_level).to eq(ProjectFeature::PRIVATE)
expect(project.project_feature.security_and_compliance_access_level).to eq(ProjectFeature::PRIVATE)
expect(project.project_feature.releases_access_level).to eq(ProjectFeature::DISABLED)
expect(project.project_feature.environments_access_level).to eq(ProjectFeature::DISABLED)
expect(project.project_feature.feature_flags_access_level).to eq(ProjectFeature::DISABLED)
expect(project.project_feature.infrastructure_access_level).to eq(ProjectFeature::DISABLED)
expect(project.project_feature.monitor_access_level).to eq(ProjectFeature::DISABLED)
end
it 'assigns container_registry_enabled to project', :aggregate_failures do
@ -2356,6 +2367,10 @@ RSpec.describe API::Projects do
expect(json_response['operations_access_level']).to be_present
expect(json_response['security_and_compliance_access_level']).to be_present
expect(json_response['releases_access_level']).to be_present
expect(json_response['environments_access_level']).to be_present
expect(json_response['feature_flags_access_level']).to be_present
expect(json_response['infrastructure_access_level']).to be_present
expect(json_response['monitor_access_level']).to be_present
end
it 'exposes all necessary attributes' do
@ -2426,6 +2441,10 @@ RSpec.describe API::Projects do
expect(json_response['operations_access_level']).to be_present
expect(json_response['security_and_compliance_access_level']).to be_present
expect(json_response['releases_access_level']).to be_present
expect(json_response['environments_access_level']).to be_present
expect(json_response['feature_flags_access_level']).to be_present
expect(json_response['infrastructure_access_level']).to be_present
expect(json_response['monitor_access_level']).to be_present
expect(json_response).to have_key('emails_disabled')
expect(json_response['resolve_outdated_diff_discussions']).to eq(project.resolve_outdated_diff_discussions)
expect(json_response['remove_source_branch_after_merge']).to be_truthy
@ -3410,12 +3429,14 @@ RSpec.describe API::Projects do
expect(Project.find_by(path: project[:path]).analytics_access_level).to eq(ProjectFeature::PRIVATE)
end
it 'sets releases_access_level', :aggregate_failures do
put api("/projects/#{project.id}", user), params: { releases_access_level: 'private' }
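# Each feature access level field should be settable to 'private' through the
# project update API.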
%i[releases_access_level environments_access_level feature_flags_access_level infrastructure_access_level monitor_access_level].each do |field|
it "sets #{field}", :aggregate_failures do
put api("/projects/#{project.id}", user), params: { field => 'private' }
expect(response).to have_gitlab_http_status(:ok)
expect(json_response['releases_access_level']).to eq('private')
expect(Project.find_by(path: project[:path]).releases_access_level).to eq(ProjectFeature::PRIVATE)
expect(response).to have_gitlab_http_status(:ok)
expect(json_response[field.to_s]).to eq('private')
expect(Project.find_by(path: project[:path]).public_send(field)).to eq(ProjectFeature::PRIVATE)
end
end
it 'returns 400 when nothing sent' do

View File

@ -3,8 +3,8 @@
require 'spec_helper'
RSpec.describe PrometheusAlertEntity do
let(:user) { create(:user) }
let(:prometheus_alert) { create(:prometheus_alert) }
let(:user) { build_stubbed(:user) }
let(:prometheus_alert) { build_stubbed(:prometheus_alert) }
let(:request) { double('prometheus_alert', current_user: user) }
let(:entity) { described_class.new(prometheus_alert, request: request) }

View File

@ -3,18 +3,13 @@
require 'spec_helper'
RSpec.describe ReleaseSerializer do
let(:user) { create(:user) }
let(:project) { create :project }
let(:user) { build_stubbed(:user) }
subject { described_class.new.represent(resource, current_user: user) }
before do
project.add_developer(user)
end
describe '#represent' do
context 'when a single object is being serialized' do
let(:resource) { create(:release, project: project) }
let(:resource) { build_stubbed(:release) }
it 'serializes the label object' do
expect(subject[:tag]).to eq resource.tag
@ -26,7 +21,7 @@ RSpec.describe ReleaseSerializer do
end
context 'when multiple objects are being serialized' do
let(:resource) { create_list(:release, 3) }
let(:resource) { build_stubbed_list(:release, 3) }
it 'serializes the array of releases' do
expect(subject.size).to eq(3)

View File

@ -63,19 +63,5 @@ RSpec.describe ContainerRegistry::CleanupWorker, :aggregate_failures do
perform
end
end
context 'with container_registry_delete_repository_with_cron_worker disabled' do
before do
stub_feature_flags(container_registry_delete_repository_with_cron_worker: false)
end
it 'does not run' do
expect(worker).not_to receive(:reset_stale_deletes)
expect(worker).not_to receive(:enqueue_delete_container_repository_jobs)
expect(worker).not_to receive(:log_counts)
subject
end
end
end
end

View File

@ -103,18 +103,6 @@ RSpec.describe ContainerRegistry::DeleteContainerRepositoryWorker, :aggregate_fa
end
end
context 'with container_registry_delete_repository_with_cron_worker disabled' do
before do
stub_feature_flags(container_registry_delete_repository_with_cron_worker: false)
end
it 'will not delete any container repository' do
expect(::Projects::ContainerRepository::CleanupTagsService).not_to receive(:new)
expect { perform_work }.to not_change { ContainerRepository.count }
end
end
def expect_next_pending_destruction_container_repository
original_method = ContainerRepository.method(:next_pending_destruction)
expect(ContainerRepository).to receive(:next_pending_destruction).with(order_by: nil) do
@ -154,13 +142,5 @@ RSpec.describe ContainerRegistry::DeleteContainerRepositoryWorker, :aggregate_fa
subject { worker.remaining_work_count }
it { is_expected.to eq(described_class::MAX_CAPACITY + 1) }
context 'with container_registry_delete_repository_with_cron_worker disabled' do
before do
stub_feature_flags(container_registry_delete_repository_with_cron_worker: false)
end
it { is_expected.to eq(0) }
end
end
end

View File

@ -10,112 +10,10 @@ RSpec.describe DeleteContainerRepositoryWorker do
let(:worker) { described_class.new }
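# This legacy worker is now expected to be a no-op; deletions are presumably
# handled by ContainerRegistry::DeleteContainerRepositoryWorker (specced above).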
describe '#perform' do
let(:user_id) { user.id }
let(:repository_id) { repository.id }
subject(:perform) { worker.perform(user.id, repository.id) }
subject(:perform) { worker.perform(user_id, repository_id) }
it 'executes the destroy service' do
expect_destroy_service_execution
perform
end
context 'with an invalid user id' do
let(:user_id) { -1 }
it { expect { perform }.not_to raise_error }
end
context 'with an invalid repository id' do
let(:repository_id) { -1 }
it { expect { perform }.not_to raise_error }
end
context 'with a repository being migrated', :freeze_time do
before do
stub_application_setting(
container_registry_pre_import_tags_rate: 0.5,
container_registry_import_timeout: 10.minutes.to_i
)
end
shared_examples 'destroying the repository' do
it 'does destroy the repository' do
expect_next_found_instance_of(ContainerRepository) do |container_repository|
expect(container_repository).not_to receive(:tags_count)
end
expect(described_class).not_to receive(:perform_in)
expect_destroy_service_execution
perform
end
end
shared_examples 'not re enqueuing job if feature flag is disabled' do
before do
stub_feature_flags(container_registry_migration_phase2_delete_container_repository_worker_support: false)
end
it_behaves_like 'destroying the repository'
end
context 'with migration state set to pre importing' do
let_it_be(:repository) { create(:container_repository, :pre_importing) }
let(:tags_count) { 60 }
let(:delay) { (tags_count * 0.5).seconds + 10.minutes + described_class::FIXED_DELAY }
it 'does not destroy the repository and re-enqueues the job' do
expect_next_found_instance_of(ContainerRepository) do |container_repository|
expect(container_repository).to receive(:tags_count).and_return(tags_count)
end
expect(described_class).to receive(:perform_in).with(delay.from_now)
expect(worker).to receive(:log_extra_metadata_on_done).with(:delete_postponed, delay)
expect(::Projects::ContainerRepository::DestroyService).not_to receive(:new)
perform
end
it_behaves_like 'not re enqueuing job if feature flag is disabled'
end
%i[pre_import_done importing import_aborted].each do |migration_state|
context "with migration state set to #{migration_state}" do
let_it_be(:repository) { create(:container_repository, migration_state) }
let(:delay) { 10.minutes + described_class::FIXED_DELAY }
it 'does not destroy the repository and re-enqueues the job' do
expect_next_found_instance_of(ContainerRepository) do |container_repository|
expect(container_repository).not_to receive(:tags_count)
end
expect(described_class).to receive(:perform_in).with(delay.from_now)
expect(worker).to receive(:log_extra_metadata_on_done).with(:delete_postponed, delay)
expect(::Projects::ContainerRepository::DestroyService).not_to receive(:new)
perform
end
it_behaves_like 'not re enqueuing job if feature flag is disabled'
end
end
%i[default import_done import_skipped].each do |migration_state|
context "with migration state set to #{migration_state}" do
let_it_be(:repository) { create(:container_repository, migration_state) }
it_behaves_like 'destroying the repository'
it_behaves_like 'not re enqueuing job if feature flag is disabled'
end
end
end
def expect_destroy_service_execution
service = instance_double(Projects::ContainerRepository::DestroyService)
expect(service).to receive(:execute)
expect(Projects::ContainerRepository::DestroyService).to receive(:new).with(project, user).and_return(service)
it 'is a no-op' do
expect { subject }.to not_change { ContainerRepository.count }
end
end
end