Add latest changes from gitlab-org/gitlab@master

GitLab Bot 2021-05-07 12:10:27 +00:00
parent e8a31d8dc2
commit 53f456b167
146 changed files with 3330 additions and 1725 deletions

View File

@ -1 +1 @@
07aa359a7724540bbfa4868407b9a2c9b45bf139
2982990541d6980bf5035987996e976042c8ccc6

View File

@ -0,0 +1,5 @@
import { Blockquote } from '@tiptap/extension-blockquote';
import { defaultMarkdownSerializer } from 'prosemirror-markdown/src/to_markdown';
export const tiptapExtension = Blockquote;
export const serializer = defaultMarkdownSerializer.nodes.blockquote;

View File

@ -0,0 +1,5 @@
import { Bold } from '@tiptap/extension-bold';
import { defaultMarkdownSerializer } from 'prosemirror-markdown/src/to_markdown';
export const tiptapExtension = Bold;
export const serializer = defaultMarkdownSerializer.marks.strong;

View File

@ -0,0 +1,5 @@
import { BulletList } from '@tiptap/extension-bullet-list';
import { defaultMarkdownSerializer } from 'prosemirror-markdown/src/to_markdown';
export const tiptapExtension = BulletList;
export const serializer = defaultMarkdownSerializer.nodes.bullet_list;

View File

@ -0,0 +1,5 @@
import { Code } from '@tiptap/extension-code';
import { defaultMarkdownSerializer } from 'prosemirror-markdown/src/to_markdown';
export const tiptapExtension = Code;
export const serializer = defaultMarkdownSerializer.marks.code;

View File

@ -1,8 +1,9 @@
import { CodeBlockLowlight } from '@tiptap/extension-code-block-lowlight';
import { defaultMarkdownSerializer } from 'prosemirror-markdown/src/to_markdown';
const extractLanguage = (element) => element.firstElementChild?.getAttribute('lang');
export default CodeBlockLowlight.extend({
const ExtendedCodeBlockLowlight = CodeBlockLowlight.extend({
addAttributes() {
return {
...this.parent(),
@ -21,3 +22,6 @@ export default CodeBlockLowlight.extend({
};
},
});
export const tiptapExtension = ExtendedCodeBlockLowlight;
export const serializer = defaultMarkdownSerializer.nodes.code_block;

View File

@ -0,0 +1,3 @@
import Document from '@tiptap/extension-document';
export const tiptapExtension = Document;

View File

@ -0,0 +1,3 @@
import Dropcursor from '@tiptap/extension-dropcursor';
export const tiptapExtension = Dropcursor;

View File

@ -0,0 +1,3 @@
import Gapcursor from '@tiptap/extension-gapcursor';
export const tiptapExtension = Gapcursor;

View File

@ -0,0 +1,5 @@
import { HardBreak } from '@tiptap/extension-hard-break';
import { defaultMarkdownSerializer } from 'prosemirror-markdown/src/to_markdown';
export const tiptapExtension = HardBreak;
export const serializer = defaultMarkdownSerializer.nodes.hard_break;

View File

@ -0,0 +1,5 @@
import { Heading } from '@tiptap/extension-heading';
import { defaultMarkdownSerializer } from 'prosemirror-markdown/src/to_markdown';
export const tiptapExtension = Heading;
export const serializer = defaultMarkdownSerializer.nodes.heading;

View File

@ -0,0 +1,3 @@
import History from '@tiptap/extension-history';
export const tiptapExtension = History;

View File

@ -0,0 +1,5 @@
import { HorizontalRule } from '@tiptap/extension-horizontal-rule';
import { defaultMarkdownSerializer } from 'prosemirror-markdown/src/to_markdown';
export const tiptapExtension = HorizontalRule;
export const serializer = defaultMarkdownSerializer.nodes.horizontal_rule;

View File

@ -0,0 +1,9 @@
import { Image } from '@tiptap/extension-image';
import { defaultMarkdownSerializer } from 'prosemirror-markdown/src/to_markdown';
const ExtendedImage = Image.extend({
defaultOptions: { inline: true },
});
export const tiptapExtension = ExtendedImage;
export const serializer = defaultMarkdownSerializer.nodes.image;

View File

@ -0,0 +1,4 @@
import { Italic } from '@tiptap/extension-italic';
export const tiptapExtension = Italic;
export const serializer = { open: '_', close: '_', mixable: true, expelEnclosingWhitespace: true };

View File

@ -0,0 +1,5 @@
import { Link } from '@tiptap/extension-link';
import { defaultMarkdownSerializer } from 'prosemirror-markdown/src/to_markdown';
export const tiptapExtension = Link;
export const serializer = defaultMarkdownSerializer.marks.link;

View File

@ -0,0 +1,5 @@
import { ListItem } from '@tiptap/extension-list-item';
import { defaultMarkdownSerializer } from 'prosemirror-markdown/src/to_markdown';
export const tiptapExtension = ListItem;
export const serializer = defaultMarkdownSerializer.nodes.list_item;

View File

@ -0,0 +1,5 @@
import { OrderedList } from '@tiptap/extension-ordered-list';
import { defaultMarkdownSerializer } from 'prosemirror-markdown/src/to_markdown';
export const tiptapExtension = OrderedList;
export const serializer = defaultMarkdownSerializer.nodes.ordered_list;

View File

@ -0,0 +1,5 @@
import { Paragraph } from '@tiptap/extension-paragraph';
import { defaultMarkdownSerializer } from 'prosemirror-markdown/src/to_markdown';
export const tiptapExtension = Paragraph;
export const serializer = defaultMarkdownSerializer.nodes.paragraph;

View File

@ -0,0 +1,5 @@
import { Text } from '@tiptap/extension-text';
import { defaultMarkdownSerializer } from 'prosemirror-markdown/src/to_markdown';
export const tiptapExtension = Text;
export const serializer = defaultMarkdownSerializer.nodes.text;

View File

@ -0,0 +1,22 @@
const buildSerializerConfig = (extensions = []) =>
extensions
.filter(({ serializer }) => serializer)
.reduce(
(serializers, { serializer, tiptapExtension: { name, type } }) => {
const collection = `${type}s`;
return {
...serializers,
[collection]: {
...serializers[collection],
[name]: serializer,
},
};
},
{
nodes: {},
marks: {},
},
);
export default buildSerializerConfig;
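For illustration, a minimal usage sketch (not part of this commit): it assumes the extension modules above are imported as namespace objects, as `create_content_editor.js` does below.

```javascript
// Illustrative sketch only. Bold exposes a mark ({ name: 'bold', type: 'mark' }) and
// Blockquote a node ({ name: 'blockquote', type: 'node' }), so the builder groups
// their serializer specs under `marks` and `nodes` respectively.
import * as Bold from '../extensions/bold';
import * as Blockquote from '../extensions/blockquote';
import buildSerializerConfig from './build_serializer_config';

const serializerConfig = buildSerializerConfig([Bold, Blockquote]);
// serializerConfig.marks.bold        -> Bold's serializer spec
// serializerConfig.nodes.blockquote  -> Blockquote's serializer function
```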

View File

@ -1,53 +1,57 @@
import Blockquote from '@tiptap/extension-blockquote';
import Bold from '@tiptap/extension-bold';
import BulletList from '@tiptap/extension-bullet-list';
import Code from '@tiptap/extension-code';
import Document from '@tiptap/extension-document';
import Dropcursor from '@tiptap/extension-dropcursor';
import Gapcursor from '@tiptap/extension-gapcursor';
import HardBreak from '@tiptap/extension-hard-break';
import Heading from '@tiptap/extension-heading';
import History from '@tiptap/extension-history';
import HorizontalRule from '@tiptap/extension-horizontal-rule';
import Image from '@tiptap/extension-image';
import Italic from '@tiptap/extension-italic';
import Link from '@tiptap/extension-link';
import ListItem from '@tiptap/extension-list-item';
import OrderedList from '@tiptap/extension-ordered-list';
import Paragraph from '@tiptap/extension-paragraph';
import Text from '@tiptap/extension-text';
import { Editor } from '@tiptap/vue-2';
import { isFunction } from 'lodash';
import { PROVIDE_SERIALIZER_OR_RENDERER_ERROR } from '../constants';
import CodeBlockHighlight from '../extensions/code_block_highlight';
import * as Blockquote from '../extensions/blockquote';
import * as Bold from '../extensions/bold';
import * as BulletList from '../extensions/bullet_list';
import * as Code from '../extensions/code';
import * as CodeBlockHighlight from '../extensions/code_block_highlight';
import * as Document from '../extensions/document';
import * as Dropcursor from '../extensions/dropcursor';
import * as Gapcursor from '../extensions/gapcursor';
import * as HardBreak from '../extensions/hard_break';
import * as Heading from '../extensions/heading';
import * as History from '../extensions/history';
import * as HorizontalRule from '../extensions/horizontal_rule';
import * as Image from '../extensions/image';
import * as Italic from '../extensions/italic';
import * as Link from '../extensions/link';
import * as ListItem from '../extensions/list_item';
import * as OrderedList from '../extensions/ordered_list';
import * as Paragraph from '../extensions/paragraph';
import * as Text from '../extensions/text';
import buildSerializerConfig from './build_serializer_config';
import { ContentEditor } from './content_editor';
import createMarkdownSerializer from './markdown_serializer';
const createTiptapEditor = ({ extensions = [], options } = {}) =>
const builtInContentEditorExtensions = [
Blockquote,
Bold,
BulletList,
Code,
CodeBlockHighlight,
Document,
Dropcursor,
Gapcursor,
HardBreak,
Heading,
History,
HorizontalRule,
Image,
Italic,
Link,
ListItem,
OrderedList,
Paragraph,
Text,
];
const collectTiptapExtensions = (extensions = []) =>
extensions.map(({ tiptapExtension }) => tiptapExtension);
const createTiptapEditor = ({ extensions = [], ...options } = {}) =>
new Editor({
extensions: [
Dropcursor,
Gapcursor,
History,
Document,
Text,
Paragraph,
Bold,
Italic,
Code,
Link,
Heading,
HardBreak,
Blockquote,
HorizontalRule,
BulletList,
OrderedList,
ListItem,
Image.configure({ inline: true }),
CodeBlockHighlight,
...extensions,
],
extensions: [...extensions],
editorProps: {
attributes: {
class: 'gl-outline-0!',
@ -61,8 +65,11 @@ export const createContentEditor = ({ renderMarkdown, extensions = [], tiptapOpt
throw new Error(PROVIDE_SERIALIZER_OR_RENDERER_ERROR);
}
const tiptapEditor = createTiptapEditor({ extensions, options: tiptapOptions });
const serializer = createMarkdownSerializer({ render: renderMarkdown });
const allExtensions = [...builtInContentEditorExtensions, ...extensions];
const tiptapExtensions = collectTiptapExtensions(allExtensions);
const tiptapEditor = createTiptapEditor({ extensions: tiptapExtensions, ...tiptapOptions });
const serializerConfig = buildSerializerConfig(allExtensions);
const serializer = createMarkdownSerializer({ render: renderMarkdown, serializerConfig });
return new ContentEditor({ tiptapEditor, serializer });
};
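A hedged sketch (not part of this commit) of how a consumer could register a custom extension; the import paths, the `renderMarkdown` stub, and the `MyCustomExtension` module are assumptions for illustration only.

```javascript
// Illustrative only. A custom extension follows the same module shape as the
// built-in ones above: { tiptapExtension, serializer? }.
import { createContentEditor } from '~/content_editor/services/create_content_editor';
import * as MyCustomExtension from './extensions/my_custom_extension'; // hypothetical module

const contentEditor = createContentEditor({
  // Any function resolving a Markdown string to rendered HTML works here;
  // this stub is a placeholder, not the real GitLab Markdown API call.
  renderMarkdown: (markdown) => Promise.resolve(`<p>${markdown}</p>`),
  extensions: [MyCustomExtension],
});
```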

View File

@ -1,7 +1,4 @@
import {
MarkdownSerializer as ProseMirrorMarkdownSerializer,
defaultMarkdownSerializer,
} from 'prosemirror-markdown/src/to_markdown';
import { MarkdownSerializer as ProseMirrorMarkdownSerializer } from 'prosemirror-markdown/src/to_markdown';
import { DOMParser as ProseMirrorDOMParser } from 'prosemirror-model';
const wrapHtmlPayload = (payload) => `<div>${payload}</div>`;
@ -18,66 +15,46 @@ const wrapHtmlPayload = (payload) => `<div>${payload}</div>`;
* that parses the Markdown and converts it into HTML.
* @returns a markdown serializer
*/
const create = ({ render = () => null }) => {
return {
/**
* Converts a Markdown string into a ProseMirror JSONDocument based
* on a ProseMirror schema.
* @param {ProseMirror.Schema} params.schema A ProseMirror schema that defines
* the types of content supported in the document
* @param {String} params.content An arbitrary markdown string
* @returns A ProseMirror JSONDocument
*/
deserialize: async ({ schema, content }) => {
const html = await render(content);
export default ({ render = () => null, serializerConfig }) => ({
/**
* Converts a Markdown string into a ProseMirror JSONDocument based
* on a ProseMirror schema.
* @param {ProseMirror.Schema} params.schema A ProseMirror schema that defines
* the types of content supported in the document
* @param {String} params.content An arbitrary markdown string
* @returns A ProseMirror JSONDocument
*/
deserialize: async ({ schema, content }) => {
const html = await render(content);
if (!html) {
return null;
}
if (!html) {
return null;
}
const parser = new DOMParser();
const {
body: { firstElementChild },
} = parser.parseFromString(wrapHtmlPayload(html), 'text/html');
const state = ProseMirrorDOMParser.fromSchema(schema).parse(firstElementChild);
const parser = new DOMParser();
const {
body: { firstElementChild },
} = parser.parseFromString(wrapHtmlPayload(html), 'text/html');
const state = ProseMirrorDOMParser.fromSchema(schema).parse(firstElementChild);
return state.toJSON();
},
return state.toJSON();
},
/**
* Converts a ProseMirror JSONDocument based
* on a ProseMirror schema into Markdown
* @param {ProseMirror.Schema} params.schema A ProseMirror schema that defines
* the types of content supported in the document
* @param {String} params.content A ProseMirror JSONDocument
* @returns A Markdown string
*/
serialize: ({ schema, content }) => {
const document = schema.nodeFromJSON(content);
const { nodes, marks } = defaultMarkdownSerializer;
/**
* Converts a ProseMirror JSONDocument based
* on a ProseMirror schema into Markdown
* @param {ProseMirror.Schema} params.schema A ProseMirror schema that defines
* the types of content supported in the document
* @param {String} params.content A ProseMirror JSONDocument
* @returns A Markdown string
*/
serialize: ({ schema, content }) => {
const proseMirrorDocument = schema.nodeFromJSON(content);
const { nodes, marks } = serializerConfig;
const serializer = new ProseMirrorMarkdownSerializer(nodes, marks);
const serializer = new ProseMirrorMarkdownSerializer(
{
...defaultMarkdownSerializer.nodes,
horizontalRule: nodes.horizontal_rule,
bulletList: nodes.bullet_list,
listItem: nodes.list_item,
orderedList: nodes.ordered_list,
codeBlock: nodes.code_block,
hardBreak: nodes.hard_break,
},
{
...defaultMarkdownSerializer.marks,
bold: marks.strong,
italic: { open: '_', close: '_', mixable: true, expelEnclosingWhitespace: true },
},
);
return serializer.serialize(document, {
tightLists: true,
});
},
};
};
export default create;
return serializer.serialize(proseMirrorDocument, {
tightLists: true,
});
},
});
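A usage sketch (not part of this diff), assuming a `schema`, a `renderMarkdown` function, and the `serializerConfig` built above are available:

```javascript
// Illustrative round trip: Markdown -> ProseMirror JSON document -> Markdown.
import createMarkdownSerializer from './markdown_serializer';

async function roundTrip({ schema, renderMarkdown, serializerConfig }) {
  const serializer = createMarkdownSerializer({ render: renderMarkdown, serializerConfig });

  // Markdown string to a ProseMirror JSON document.
  const doc = await serializer.deserialize({ schema, content: '> quoted **text**' });

  // ProseMirror JSON document back to a Markdown string.
  return serializer.serialize({ schema, content: doc });
}
```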

View File

@ -1,5 +1,6 @@
<script>
import {
GlButton,
GlEmptyState,
GlDropdown,
GlDropdownItem,
@ -8,10 +9,13 @@ import {
GlLoadingIcon,
GlSearchBoxByClick,
GlSprintf,
GlSafeHtmlDirective as SafeHtml,
GlTooltip,
} from '@gitlab/ui';
import { s__, __ } from '~/locale';
import { s__, __, n__ } from '~/locale';
import PaginationLinks from '~/vue_shared/components/pagination_links.vue';
import importGroupMutation from '../graphql/mutations/import_group.mutation.graphql';
import { STATUSES } from '../../constants';
import importGroupsMutation from '../graphql/mutations/import_groups.mutation.graphql';
import setNewNameMutation from '../graphql/mutations/set_new_name.mutation.graphql';
import setTargetNamespaceMutation from '../graphql/mutations/set_target_namespace.mutation.graphql';
import availableNamespacesQuery from '../graphql/queries/available_namespaces.query.graphql';
@ -23,6 +27,7 @@ const DEFAULT_PAGE_SIZE = PAGE_SIZES[0];
export default {
components: {
GlButton,
GlEmptyState,
GlDropdown,
GlDropdownItem,
@ -31,9 +36,13 @@ export default {
GlLoadingIcon,
GlSearchBoxByClick,
GlSprintf,
GlTooltip,
ImportTableRow,
PaginationLinks,
},
directives: {
SafeHtml,
},
props: {
sourceUrl: {
@ -65,12 +74,28 @@ export default {
},
computed: {
groups() {
return this.bulkImportSourceGroups?.nodes ?? [];
},
hasGroupsWithValidationError() {
return this.groups.some((g) => g.validation_errors.length);
},
availableGroupsForImport() {
return this.groups.filter((g) => g.progress.status === STATUSES.NONE);
},
isImportAllButtonDisabled() {
return this.hasGroupsWithValidationError || this.availableGroupsForImport.length === 0;
},
humanizedTotal() {
return this.paginationInfo.total >= 1000 ? __('1000+') : this.paginationInfo.total;
},
hasGroups() {
return this.bulkImportSourceGroups?.nodes?.length > 0;
return this.groups.length > 0;
},
hasEmptyFilter() {
@ -105,6 +130,10 @@ export default {
},
methods: {
groupsCount(count) {
return n__('%d group', '%d groups', count);
},
setPage(page) {
this.page = page;
},
@ -123,24 +152,57 @@ export default {
});
},
importGroup(sourceGroupId) {
importGroups(sourceGroupIds) {
this.$apollo.mutate({
mutation: importGroupMutation,
variables: { sourceGroupId },
mutation: importGroupsMutation,
variables: { sourceGroupIds },
});
},
importAllGroups() {
this.importGroups(this.availableGroupsForImport.map((g) => g.id));
},
setPageSize(size) {
this.perPage = size;
},
},
gitlabLogo: window.gon.gitlab_logo,
PAGE_SIZES,
};
</script>
<template>
<div>
<h1
class="gl-my-0 gl-py-4 gl-font-size-h1 gl-border-solid gl-border-gray-200 gl-border-0 gl-border-b-1 gl-display-flex"
>
<img :src="$options.gitlabLogo" class="gl-w-6 gl-h-6 gl-mb-2 gl-display-inline gl-mr-2" />
{{ s__('BulkImport|Import groups from GitLab') }}
<div ref="importAllButtonWrapper" class="gl-ml-auto">
<gl-button
v-if="!$apollo.loading && hasGroups"
:disabled="isImportAllButtonDisabled"
variant="confirm"
@click="importAllGroups"
>
<gl-sprintf :message="s__('BulkImport|Import %{groups}')">
<template #groups>
{{ groupsCount(availableGroupsForImport.length) }}
</template>
</gl-sprintf>
</gl-button>
</div>
<gl-tooltip v-if="isImportAllButtonDisabled" :target="() => $refs.importAllButtonWrapper">
<template v-if="hasGroupsWithValidationError">
{{ s__('BulkImport|One or more groups has validation errors') }}
</template>
<template v-else>
{{ s__('BulkImport|No groups on this page are available for import') }}
</template>
</gl-tooltip>
</h1>
<div
class="gl-py-5 gl-border-solid gl-border-gray-200 gl-border-0 gl-border-b-1 gl-display-flex"
>
@ -153,7 +215,7 @@ export default {
<strong>{{ paginationInfo.end }}</strong>
</template>
<template #total>
<strong>{{ n__('%d group', '%d groups', paginationInfo.total) }}</strong>
<strong>{{ groupsCount(paginationInfo.total) }}</strong>
</template>
<template #filter>
<strong>{{ filter }}</strong>
@ -196,7 +258,7 @@ export default {
:group-path-regex="groupPathRegex"
@update-target-namespace="updateTargetNamespace(group.id, $event)"
@update-new-name="updateNewName(group.id, $event)"
@import-group="importGroup(group.id)"
@import-group="importGroups([group.id])"
/>
</template>
</tbody>

View File

@ -10,8 +10,11 @@ import {
GlFormInput,
} from '@gitlab/ui';
import { joinPaths } from '~/lib/utils/url_utility';
import { s__ } from '~/locale';
import ImportStatus from '../../components/import_status.vue';
import { STATUSES } from '../../constants';
import addValidationErrorMutation from '../graphql/mutations/add_validation_error.mutation.graphql';
import removeValidationErrorMutation from '../graphql/mutations/remove_validation_error.mutation.graphql';
import groupQuery from '../graphql/queries/group.query.graphql';
const DEBOUNCE_INTERVAL = 300;
@ -52,6 +55,27 @@ export default {
fullPath: this.fullPath,
};
},
update({ existingGroup }) {
const variables = {
field: 'new_name',
sourceGroupId: this.group.id,
};
if (!existingGroup) {
this.$apollo.mutate({
mutation: removeValidationErrorMutation,
variables,
});
} else {
this.$apollo.mutate({
mutation: addValidationErrorMutation,
variables: {
...variables,
message: s__('BulkImport|Name already exists.'),
},
});
}
},
skip() {
return !this.isNameValid || this.isAlreadyImported;
},
@ -63,8 +87,12 @@ export default {
return this.group.import_target;
},
invalidNameValidationMessage() {
return this.group.validation_errors.find(({ field }) => field === 'new_name')?.message;
},
isInvalid() {
return Boolean(!this.isNameValid || this.existingGroup);
return Boolean(!this.isNameValid || this.invalidNameValidationMessage);
},
isNameValid() {
@ -157,21 +185,21 @@ export default {
<template v-if="!isNameValid">
{{ __('Please choose a group URL with no special characters.') }}
</template>
<template v-else-if="existingGroup">
{{ s__('BulkImport|Name already exists.') }}
<template v-else-if="invalidNameValidationMessage">
{{ invalidNameValidationMessage }}
</template>
</p>
</div>
</div>
</td>
<td class="gl-p-4 gl-white-space-nowrap">
<import-status :status="group.progress.status" />
<import-status :status="group.progress.status" class="gl-mt-2" />
</td>
<td class="gl-p-4">
<gl-button
v-if="!isAlreadyImported"
:disabled="isInvalid"
variant="success"
variant="confirm"
category="secondary"
@click="$emit('import-group')"
>{{ __('Import') }}</gl-button

View File

@ -20,6 +20,7 @@ export const clientTypenames = {
BulkImportPageInfo: 'ClientBulkImportPageInfo',
BulkImportTarget: 'ClientBulkImportTarget',
BulkImportProgress: 'ClientBulkImportProgress',
BulkImportValidationError: 'ClientBulkImportValidationError',
};
function makeGroup(data) {
@ -106,6 +107,7 @@ export function createResolvers({ endpoints, sourceUrl, GroupsManager = SourceGr
return makeGroup({
...group,
validation_errors: [],
progress: {
id: jobId ?? localProgressId(group.id),
status: cachedImportState?.status ?? STATUSES.NONE,
@ -152,7 +154,7 @@ export function createResolvers({ endpoints, sourceUrl, GroupsManager = SourceGr
async setImportProgress(_, { sourceGroupId, status, jobId }) {
if (jobId) {
groupsManager.saveImportState(jobId, { status });
groupsManager.updateImportProgress(jobId, status);
}
return makeGroup({
@ -165,7 +167,7 @@ export function createResolvers({ endpoints, sourceUrl, GroupsManager = SourceGr
},
async updateImportStatus(_, { id, status }) {
groupsManager.saveImportState(id, { status });
groupsManager.updateImportProgress(id, status);
return {
__typename: clientTypenames.BulkImportProgress,
@ -174,39 +176,81 @@ export function createResolvers({ endpoints, sourceUrl, GroupsManager = SourceGr
};
},
async importGroup(_, { sourceGroupId }, { client }) {
async addValidationError(_, { sourceGroupId, field, message }, { client }) {
const {
data: { bulkImportSourceGroup: group },
data: {
bulkImportSourceGroup: { validation_errors: validationErrors, ...group },
},
} = await client.query({
query: bulkImportSourceGroupQuery,
variables: { id: sourceGroupId },
});
const GROUP_BEING_SCHEDULED = makeGroup({
id: sourceGroupId,
progress: {
id: localProgressId(sourceGroupId),
status: STATUSES.SCHEDULING,
return {
...group,
validation_errors: [
...validationErrors.filter(({ field: f }) => f !== field),
{
__typename: clientTypenames.BulkImportValidationError,
field,
message,
},
],
};
},
async removeValidationError(_, { sourceGroupId, field }, { client }) {
const {
data: {
bulkImportSourceGroup: { validation_errors: validationErrors, ...group },
},
} = await client.query({
query: bulkImportSourceGroupQuery,
variables: { id: sourceGroupId },
});
return {
...group,
validation_errors: validationErrors.filter(({ field: f }) => f !== field),
};
},
async importGroups(_, { sourceGroupIds }, { client }) {
const groups = await Promise.all(
sourceGroupIds.map((id) =>
client
.query({
query: bulkImportSourceGroupQuery,
variables: { id },
})
.then(({ data }) => data.bulkImportSourceGroup),
),
);
const GROUPS_BEING_SCHEDULED = sourceGroupIds.map((sourceGroupId) =>
makeGroup({
id: sourceGroupId,
progress: {
id: localProgressId(sourceGroupId),
status: STATUSES.SCHEDULING,
},
}),
);
const defaultErrorMessage = s__('BulkImport|Importing the group failed');
axios
.post(endpoints.createBulkImport, {
bulk_import: [
{
source_type: 'group_entity',
source_full_path: group.full_path,
destination_namespace: group.import_target.target_namespace,
destination_name: group.import_target.new_name,
},
],
bulk_import: groups.map((group) => ({
source_type: 'group_entity',
source_full_path: group.full_path,
destination_namespace: group.import_target.target_namespace,
destination_name: group.import_target.new_name,
})),
})
.then(({ data: { id: jobId } }) => {
groupsManager.saveImportState(jobId, {
id: group.id,
importTarget: group.import_target,
groupsManager.createImportState(jobId, {
status: STATUSES.CREATED,
groups,
});
return { status: STATUSES.CREATED, jobId };
@ -217,14 +261,16 @@ export function createResolvers({ endpoints, sourceUrl, GroupsManager = SourceGr
return { status: STATUSES.NONE };
})
.then((newStatus) =>
client.mutate({
mutation: setImportProgressMutation,
variables: { sourceGroupId, ...newStatus },
}),
sourceGroupIds.forEach((sourceGroupId) =>
client.mutate({
mutation: setImportProgressMutation,
variables: { sourceGroupId, ...newStatus },
}),
),
)
.catch(() => createFlash({ message: defaultErrorMessage }));
return GROUP_BEING_SCHEDULED;
return GROUPS_BEING_SCHEDULED;
},
},
};

View File

@ -12,4 +12,8 @@ fragment BulkImportSourceGroupItem on ClientBulkImportSourceGroup {
target_namespace
new_name
}
validation_errors {
field
message
}
}

View File

@ -0,0 +1,9 @@
mutation addValidationError($sourceGroupId: String!, $field: String!, $message: String!) {
addValidationError(sourceGroupId: $sourceGroupId, field: $field, message: $message) @client {
id
validation_errors {
field
message
}
}
}

View File

@ -1,9 +0,0 @@
mutation importGroup($sourceGroupId: String!) {
importGroup(sourceGroupId: $sourceGroupId) @client {
id
progress {
id
status
}
}
}

View File

@ -0,0 +1,9 @@
mutation importGroups($sourceGroupIds: [String!]!) {
importGroups(sourceGroupIds: $sourceGroupIds) @client {
id
progress {
id
status
}
}
}

View File

@ -0,0 +1,9 @@
mutation removeValidationError($sourceGroupId: String!, $field: String!) {
removeValidationError(sourceGroupId: $sourceGroupId, field: $field) @client {
id
validation_errors {
field
message
}
}
}

View File

@ -13,25 +13,42 @@ export class SourceGroupsManager {
loadImportStatesFromStorage() {
try {
return JSON.parse(this.storage.getItem(KEY)) ?? {};
return Object.fromEntries(
Object.entries(JSON.parse(this.storage.getItem(KEY)) ?? {}).map(([jobId, config]) => {
// new format of storage
if (config.groups) {
return [jobId, config];
}
return [
jobId,
{
status: config.status,
groups: [{ id: config.id, importTarget: config.importTarget }],
},
];
}),
);
} catch {
return {};
}
}
saveImportState(importId, group) {
const key = this.getStorageKey(importId);
const oldState = this.importStates[key] ?? {};
createImportState(importId, jobConfig) {
this.importStates[this.getStorageKey(importId)] = {
status: jobConfig.status,
groups: jobConfig.groups.map((g) => ({ importTarget: g.import_target, id: g.id })),
};
this.saveImportStatesToStorage();
}
if (!oldState.id && !group.id) {
updateImportProgress(importId, status) {
const currentState = this.importStates[this.getStorageKey(importId)];
if (!currentState) {
return;
}
this.importStates[key] = {
...oldState,
...group,
status: group.status,
};
currentState.status = status;
this.saveImportStatesToStorage();
}
@ -39,10 +56,15 @@ export class SourceGroupsManager {
const PREFIX = this.getStorageKey('');
const [jobId, importState] =
Object.entries(this.importStates).find(
([key, group]) => key.startsWith(PREFIX) && group.id === groupId,
([key, state]) => key.startsWith(PREFIX) && state.groups.some((g) => g.id === groupId),
) ?? [];
return { jobId, importState };
if (!jobId) {
return null;
}
const group = importState.groups.find((g) => g.id === groupId);
return { jobId, importState: { ...group, status: importState.status } };
}
getStorageKey(importId) {
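For clarity, a sketch of the two localStorage shapes that `loadImportStatesFromStorage` converts between; the job key and group fields are illustrative, not taken from this diff.

```javascript
// Legacy format: one group tracked per import job.
const legacyImportState = {
  'some-job-id': { status: 'created', id: 42, importTarget: { new_name: 'my-group' } },
};

// Current format: a job may cover several groups, so they live in a `groups` array.
const currentImportState = {
  'some-job-id': {
    status: 'created',
    groups: [{ id: 42, importTarget: { new_name: 'my-group' } }],
  },
};
```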

View File

@ -18,6 +18,11 @@ type ClientBulkImportProgress {
status: String!
}
type ClientBulkImportValidationError {
field: String!
message: String!
}
type ClientBulkImportSourceGroup {
id: ID!
web_url: String!
@ -25,6 +30,7 @@ type ClientBulkImportSourceGroup {
full_name: String!
progress: ClientBulkImportProgress!
import_target: ClientBulkImportTarget!
validation_errors: [ClientBulkImportValidationError!]!
}
type ClientBulkImportPageInfo {
@ -45,9 +51,15 @@ extend type Query {
}
extend type Mutation {
setNewName(newName: String, sourceGroupId: ID!): ClientTargetNamespace!
setTargetNamespace(targetNamespace: String, sourceGroupId: ID!): ClientTargetNamespace!
importGroup(id: ID!): ClientBulkImportSourceGroup!
setNewName(newName: String, sourceGroupId: ID!): ClientBulkImportSourceGroup!
setTargetNamespace(targetNamespace: String, sourceGroupId: ID!): ClientBulkImportSourceGroup!
importGroups(sourceGroupIds: [ID!]!): [ClientBulkImportSourceGroup!]!
setImportProgress(id: ID, status: String!): ClientBulkImportSourceGroup!
updateImportProgress(id: ID, status: String!): ClientBulkImportProgress
addValidationError(
sourceGroupId: ID!
field: String!
message: String!
): ClientBulkImportSourceGroup!
removeValidationError(sourceGroupId: ID!, field: String!): ClientBulkImportSourceGroup!
}

View File

@ -168,8 +168,8 @@ export default {
v-if="showOutdatedDescriptionWarning"
class="gl-mb-5"
variant="warning"
primary-button-text="__('Keep')"
secondary-button-text="__('Discard')"
:primary-button-text="__('Keep')"
:secondary-button-text="__('Discard')"
:dismissible="false"
@primaryAction="keepAutosave"
@secondaryAction="discardAutosave"

View File

@ -34,6 +34,7 @@ import {
import axios from '~/lib/utils/axios_utils';
import { convertObjectPropsToCamelCase, getParameterByName } from '~/lib/utils/common_utils';
import { __ } from '~/locale';
import { DEFAULT_NONE_ANY } from '~/vue_shared/components/filtered_search_bar/constants';
import AuthorToken from '~/vue_shared/components/filtered_search_bar/tokens/author_token.vue';
import EmojiToken from '~/vue_shared/components/filtered_search_bar/tokens/emoji_token.vue';
import IterationToken from '~/vue_shared/components/filtered_search_bar/tokens/iteration_token.vue';
@ -186,7 +187,7 @@ export default {
token: AuthorToken,
dataType: 'user',
unique: true,
defaultAuthors: [],
defaultAuthors: DEFAULT_NONE_ANY,
fetchAuthors: this.fetchUsers,
},
{
@ -213,7 +214,6 @@ export default {
token: EmojiToken,
unique: true,
operators: [{ value: '=', description: __('is') }],
defaultEmojis: [],
fetchEmojis: this.fetchEmojis,
},
{
@ -237,7 +237,6 @@ export default {
icon: 'iteration',
token: IterationToken,
unique: true,
defaultIterations: [],
fetchIterations: this.fetchIterations,
});
}

View File

@ -1,4 +1,9 @@
import { __, s__ } from '~/locale';
import {
FILTER_ANY,
FILTER_CURRENT,
FILTER_NONE,
} from '~/vue_shared/components/filtered_search_bar/constants';
// Maps sort order as it appears in the URL query to API `order_by` and `sort` params.
const PRIORITY = 'priority';
@ -194,81 +199,149 @@ export const FILTERED_SEARCH_TERM = 'filtered-search-term';
export const OPERATOR_IS = '=';
export const OPERATOR_IS_NOT = '!=';
export const NORMAL_FILTER = 'normalFilter';
export const SPECIAL_FILTER = 'specialFilter';
export const SPECIAL_FILTER_VALUES = [FILTER_NONE, FILTER_ANY, FILTER_CURRENT];
export const filters = {
author_username: {
apiParam: {
[OPERATOR_IS]: 'author_username',
[OPERATOR_IS_NOT]: 'not[author_username]',
[OPERATOR_IS]: {
[NORMAL_FILTER]: 'author_username',
},
[OPERATOR_IS_NOT]: {
[NORMAL_FILTER]: 'not[author_username]',
},
},
urlParam: {
[OPERATOR_IS]: 'author_username',
[OPERATOR_IS_NOT]: 'not[author_username]',
[OPERATOR_IS]: {
[NORMAL_FILTER]: 'author_username',
},
[OPERATOR_IS_NOT]: {
[NORMAL_FILTER]: 'not[author_username]',
},
},
},
assignee_username: {
apiParam: {
[OPERATOR_IS]: 'assignee_username',
[OPERATOR_IS_NOT]: 'not[assignee_username]',
[OPERATOR_IS]: {
[NORMAL_FILTER]: 'assignee_username',
[SPECIAL_FILTER]: 'assignee_id',
},
[OPERATOR_IS_NOT]: {
[NORMAL_FILTER]: 'not[assignee_username]',
},
},
urlParam: {
[OPERATOR_IS]: 'assignee_username[]',
[OPERATOR_IS_NOT]: 'not[assignee_username][]',
[OPERATOR_IS]: {
[NORMAL_FILTER]: 'assignee_username[]',
[SPECIAL_FILTER]: 'assignee_id',
},
[OPERATOR_IS_NOT]: {
[NORMAL_FILTER]: 'not[assignee_username][]',
},
},
},
milestone: {
apiParam: {
[OPERATOR_IS]: 'milestone',
[OPERATOR_IS_NOT]: 'not[milestone]',
[OPERATOR_IS]: {
[NORMAL_FILTER]: 'milestone',
},
[OPERATOR_IS_NOT]: {
[NORMAL_FILTER]: 'not[milestone]',
},
},
urlParam: {
[OPERATOR_IS]: 'milestone_title',
[OPERATOR_IS_NOT]: 'not[milestone_title]',
[OPERATOR_IS]: {
[NORMAL_FILTER]: 'milestone_title',
},
[OPERATOR_IS_NOT]: {
[NORMAL_FILTER]: 'not[milestone_title]',
},
},
},
labels: {
apiParam: {
[OPERATOR_IS]: 'labels',
[OPERATOR_IS_NOT]: 'not[labels]',
[OPERATOR_IS]: {
[NORMAL_FILTER]: 'labels',
},
[OPERATOR_IS_NOT]: {
[NORMAL_FILTER]: 'not[labels]',
},
},
urlParam: {
[OPERATOR_IS]: 'label_name[]',
[OPERATOR_IS_NOT]: 'not[label_name][]',
[OPERATOR_IS]: {
[NORMAL_FILTER]: 'label_name[]',
},
[OPERATOR_IS_NOT]: {
[NORMAL_FILTER]: 'not[label_name][]',
},
},
},
my_reaction_emoji: {
apiParam: {
[OPERATOR_IS]: 'my_reaction_emoji',
[OPERATOR_IS]: {
[NORMAL_FILTER]: 'my_reaction_emoji',
[SPECIAL_FILTER]: 'my_reaction_emoji',
},
},
urlParam: {
[OPERATOR_IS]: 'my_reaction_emoji',
[OPERATOR_IS]: {
[NORMAL_FILTER]: 'my_reaction_emoji',
[SPECIAL_FILTER]: 'my_reaction_emoji',
},
},
},
confidential: {
apiParam: {
[OPERATOR_IS]: 'confidential',
[OPERATOR_IS]: {
[NORMAL_FILTER]: 'confidential',
},
},
urlParam: {
[OPERATOR_IS]: 'confidential',
[OPERATOR_IS]: {
[NORMAL_FILTER]: 'confidential',
},
},
},
iteration: {
apiParam: {
[OPERATOR_IS]: 'iteration_title',
[OPERATOR_IS_NOT]: 'not[iteration_title]',
[OPERATOR_IS]: {
[NORMAL_FILTER]: 'iteration_title',
[SPECIAL_FILTER]: 'iteration_id',
},
[OPERATOR_IS_NOT]: {
[NORMAL_FILTER]: 'not[iteration_title]',
},
},
urlParam: {
[OPERATOR_IS]: 'iteration_title',
[OPERATOR_IS_NOT]: 'not[iteration_title]',
[OPERATOR_IS]: {
[NORMAL_FILTER]: 'iteration_title',
[SPECIAL_FILTER]: 'iteration_id',
},
[OPERATOR_IS_NOT]: {
[NORMAL_FILTER]: 'not[iteration_title]',
},
},
},
weight: {
apiParam: {
[OPERATOR_IS]: 'weight',
[OPERATOR_IS_NOT]: 'not[weight]',
[OPERATOR_IS]: {
[NORMAL_FILTER]: 'weight',
[SPECIAL_FILTER]: 'weight',
},
[OPERATOR_IS_NOT]: {
[NORMAL_FILTER]: 'not[weight]',
},
},
urlParam: {
[OPERATOR_IS]: 'weight',
[OPERATOR_IS_NOT]: 'not[weight]',
[OPERATOR_IS]: {
[NORMAL_FILTER]: 'weight',
[SPECIAL_FILTER]: 'weight',
},
[OPERATOR_IS_NOT]: {
[NORMAL_FILTER]: 'not[weight]',
},
},
},
};

View File

@ -11,12 +11,15 @@ import {
LABEL_PRIORITY_DESC,
MILESTONE_DUE_ASC,
MILESTONE_DUE_DESC,
NORMAL_FILTER,
POPULARITY_ASC,
POPULARITY_DESC,
PRIORITY_ASC,
PRIORITY_DESC,
RELATIVE_POSITION_ASC,
sortParams,
SPECIAL_FILTER,
SPECIAL_FILTER_VALUES,
UPDATED_ASC,
UPDATED_DESC,
WEIGHT_ASC,
@ -124,13 +127,18 @@ export const getSortOptions = (hasIssueWeightsFeature, hasBlockedIssuesFeature)
const tokenTypes = Object.keys(filters);
const urlParamKeys = tokenTypes.flatMap((key) => Object.values(filters[key].urlParam));
const getUrlParams = (tokenType) =>
Object.values(filters[tokenType].urlParam).flatMap((filterObj) => Object.values(filterObj));
const urlParamKeys = tokenTypes.flatMap(getUrlParams);
const getTokenTypeFromUrlParamKey = (urlParamKey) =>
tokenTypes.find((key) => Object.values(filters[key].urlParam).includes(urlParamKey));
tokenTypes.find((tokenType) => getUrlParams(tokenType).includes(urlParamKey));
const getOperatorFromUrlParamKey = (tokenType, urlParamKey) =>
Object.entries(filters[tokenType].urlParam).find(([, urlParam]) => urlParam === urlParamKey)[0];
Object.entries(filters[tokenType].urlParam).find(([, filterObj]) =>
Object.values(filterObj).includes(urlParamKey),
)[0];
const convertToFilteredTokens = (locationSearch) =>
Array.from(new URLSearchParams(locationSearch).entries())
@ -164,11 +172,15 @@ export const getFilterTokens = (locationSearch) => {
return filterTokens.concat(searchTokens);
};
const getFilterType = (data) =>
SPECIAL_FILTER_VALUES.includes(data) ? SPECIAL_FILTER : NORMAL_FILTER;
export const convertToApiParams = (filterTokens) =>
filterTokens
.filter((token) => token.type !== FILTERED_SEARCH_TERM)
.reduce((acc, token) => {
const apiParam = filters[token.type].apiParam[token.value.operator];
const filterType = getFilterType(token.value.data);
const apiParam = filters[token.type].apiParam[token.value.operator][filterType];
return Object.assign(acc, {
[apiParam]: acc[apiParam] ? `${acc[apiParam]},${token.value.data}` : token.value.data,
});
@ -178,7 +190,8 @@ export const convertToUrlParams = (filterTokens) =>
filterTokens
.filter((token) => token.type !== FILTERED_SEARCH_TERM)
.reduce((acc, token) => {
const urlParam = filters[token.type].urlParam[token.value.operator];
const filterType = getFilterType(token.value.data);
const urlParam = filters[token.type].urlParam[token.value.operator]?.[filterType];
return Object.assign(acc, {
[urlParam]: acc[urlParam] ? acc[urlParam].concat(token.value.data) : [token.value.data],
});
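To illustrate the new normal/special split, a hedged example of how two tokens could convert; the token values are hypothetical.

```javascript
// Illustrative only: 'None' is one of SPECIAL_FILTER_VALUES, so it maps to the
// special param (assignee_id); a plain username maps to the normal param.
const tokens = [
  { type: 'assignee_username', value: { operator: '=', data: 'bob' } },
  { type: 'assignee_username', value: { operator: '=', data: 'None' } },
];

convertToApiParams(tokens);
// => { assignee_username: 'bob', assignee_id: 'None' }

convertToUrlParams(tokens);
// => { 'assignee_username[]': ['bob'], assignee_id: ['None'] }
```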

View File

@ -1,40 +1,107 @@
<script>
import { GlDropdown, GlDropdownItem, GlSprintf, GlTooltipDirective } from '@gitlab/ui';
import { __ } from '~/locale';
import {
GlAlert,
GlDropdown,
GlDropdownItem,
GlLoadingIcon,
GlSprintf,
GlTooltipDirective,
} from '@gitlab/ui';
import axios from '~/lib/utils/axios_utils';
import { __, s__ } from '~/locale';
export const i18n = {
artifacts: __('Artifacts'),
downloadArtifact: __('Download %{name} artifact'),
artifactSectionHeader: __('Download artifacts'),
artifactsFetchErrorMessage: s__('Pipelines|Could not load artifacts.'),
noArtifacts: s__('Pipelines|No artifacts available'),
};
export default {
i18n,
directives: {
GlTooltip: GlTooltipDirective,
},
components: {
GlAlert,
GlDropdown,
GlDropdownItem,
GlLoadingIcon,
GlSprintf,
},
translations: {
artifacts: __('Artifacts'),
downloadArtifact: __('Download %{name} artifact'),
inject: {
artifactsEndpoint: {
default: '',
},
artifactsEndpointPlaceholder: {
default: '',
},
},
props: {
artifacts: {
type: Array,
pipelineId: {
type: Number,
required: true,
},
},
data() {
return {
artifacts: [],
hasError: false,
isLoading: false,
};
},
computed: {
hasArtifacts() {
return Boolean(this.artifacts.length);
},
},
methods: {
fetchArtifacts() {
this.isLoading = true;
// Replace the placeholder with the ID of the pipeline we are viewing
const endpoint = this.artifactsEndpoint.replace(
this.artifactsEndpointPlaceholder,
this.pipelineId,
);
return axios
.get(endpoint)
.then(({ data }) => {
this.artifacts = data.artifacts;
})
.catch(() => {
this.hasError = true;
})
.finally(() => {
this.isLoading = false;
});
},
},
};
</script>
<template>
<gl-dropdown
v-gl-tooltip
class="build-artifacts js-pipeline-dropdown-download"
:title="$options.translations.artifacts"
:text="$options.translations.artifacts"
:aria-label="$options.translations.artifacts"
:title="$options.i18n.artifacts"
:text="$options.i18n.artifacts"
:aria-label="$options.i18n.artifacts"
icon="download"
right
lazy
text-sr-only
@show.once="fetchArtifacts"
>
<gl-alert v-if="hasError" variant="danger" :dismissible="false">
{{ $options.i18n.artifactsFetchErrorMessage }}
</gl-alert>
<gl-loading-icon v-if="isLoading" />
<gl-alert v-else-if="!hasArtifacts" variant="info" :dismissible="false">
{{ $options.i18n.noArtifacts }}
</gl-alert>
<gl-dropdown-item
v-for="(artifact, i) in artifacts"
:key="i"
@ -42,7 +109,7 @@ export default {
rel="nofollow"
download
>
<gl-sprintf :message="$options.translations.downloadArtifact">
<gl-sprintf :message="$options.i18n.downloadArtifact">
<template #name>{{ artifact.name }}</template>
</gl-sprintf>
</gl-dropdown-item>
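The dropdown expects the endpoint to return an `artifacts` array; a hedged sketch of the assumed payload follows. Only `name` is referenced in this hunk; the download URL field is an assumption.

```javascript
// Illustrative response from the downloadable artifacts JSON endpoint.
const exampleResponse = {
  artifacts: [
    // `name` feeds the "Download %{name} artifact" label; a download URL field
    // is assumed to exist for the dropdown item's href.
    { name: 'coverage-report', path: '/group/project/-/jobs/123/artifacts/download' },
  ],
};
```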

View File

@ -107,9 +107,6 @@ export default {
hasCommitInfo() {
return this.pipeline.commit && Object.keys(this.pipeline.commit).length > 0;
},
hasArtifacts() {
return this.pipeline?.details?.artifacts?.length > 0;
},
isMergeRequestPipeline() {
return Boolean(this.pipeline.flags && this.pipeline.flags.merge_request_pipeline);
},
@ -288,11 +285,7 @@ export default {
/>
</span>
<linked-pipelines-mini-list v-if="triggered.length" :triggered="triggered" />
<pipeline-artifacts
v-if="hasArtifacts"
:artifacts="pipeline.details.artifacts"
class="gl-ml-3"
/>
<pipeline-artifacts :pipeline-id="pipeline.id" class="gl-ml-3" />
</span>
</div>
</div>

View File

@ -32,6 +32,10 @@ export default () => {
const vm = new Vue({
el: '#js-vue-mr-widget',
provide: {
artifactsEndpoint: gl.mrWidgetData.artifacts_endpoint,
artifactsEndpointPlaceholder: gl.mrWidgetData.artifacts_endpoint_placeholder,
},
...MrWidgetOptions,
apolloProvider,
});

View File

@ -3,21 +3,24 @@ import { __ } from '~/locale';
export const DEBOUNCE_DELAY = 200;
const DEFAULT_LABEL_NO_LABEL = { value: 'No label', text: __('No label') };
export const DEFAULT_LABEL_NONE = { value: 'None', text: __('None') };
export const DEFAULT_LABEL_ANY = { value: 'Any', text: __('Any') };
export const DEFAULT_LABEL_CURRENT = { value: 'Current', text: __('Current') };
export const FILTER_NONE = 'None';
export const FILTER_ANY = 'Any';
export const FILTER_CURRENT = 'Current';
export const DEFAULT_ITERATIONS = [DEFAULT_LABEL_NONE, DEFAULT_LABEL_ANY, DEFAULT_LABEL_CURRENT];
export const DEFAULT_LABEL_NONE = { value: FILTER_NONE, text: __(FILTER_NONE) };
export const DEFAULT_LABEL_ANY = { value: FILTER_ANY, text: __(FILTER_ANY) };
export const DEFAULT_NONE_ANY = [DEFAULT_LABEL_NONE, DEFAULT_LABEL_ANY];
export const DEFAULT_LABELS = [DEFAULT_LABEL_NO_LABEL];
export const DEFAULT_ITERATIONS = DEFAULT_NONE_ANY.concat([
{ value: FILTER_CURRENT, text: __(FILTER_CURRENT) },
]);
export const DEFAULT_MILESTONES = [
DEFAULT_LABEL_NONE,
DEFAULT_LABEL_ANY,
export const DEFAULT_LABELS = [{ value: 'No label', text: __('No label') }];
export const DEFAULT_MILESTONES = DEFAULT_NONE_ANY.concat([
{ value: 'Upcoming', text: __('Upcoming') },
{ value: 'Started', text: __('Started') },
];
]);
export const SortDirection = {
descending: 'descending',

View File

@ -10,7 +10,7 @@ import { debounce } from 'lodash';
import { deprecatedCreateFlash as createFlash } from '~/flash';
import { __ } from '~/locale';
import { DEFAULT_LABEL_NONE, DEFAULT_LABEL_ANY, DEBOUNCE_DELAY } from '../constants';
import { DEBOUNCE_DELAY, DEFAULT_NONE_ANY } from '../constants';
import { stripQuotes } from '../filtered_search_utils';
export default {
@ -33,7 +33,7 @@ export default {
data() {
return {
emojis: this.config.initialEmojis || [],
defaultEmojis: this.config.defaultEmojis || [DEFAULT_LABEL_NONE, DEFAULT_LABEL_ANY],
defaultEmojis: this.config.defaultEmojis || DEFAULT_NONE_ANY,
loading: true,
};
},

View File

@ -1,6 +1,6 @@
<script>
import { GlDropdownDivider, GlFilteredSearchSuggestion, GlFilteredSearchToken } from '@gitlab/ui';
import { DEFAULT_LABEL_ANY, DEFAULT_LABEL_NONE } from '../constants';
import { DEFAULT_NONE_ANY } from '../constants';
export default {
baseWeights: ['0', '1', '2', '3', '4', '5'],
@ -22,7 +22,7 @@ export default {
data() {
return {
weights: this.$options.baseWeights,
defaultWeights: this.config.defaultWeights || [DEFAULT_LABEL_NONE, DEFAULT_LABEL_ANY],
defaultWeights: this.config.defaultWeights || DEFAULT_NONE_ANY,
};
},
methods: {

View File

@ -108,10 +108,12 @@ export default {
error({ graphQLErrors }) {
// TODO This error suppression is temporary (BE fix required)
// https://gitlab.com/gitlab-org/gitlab/-/issues/329750
if (
graphQLErrors.length === 1 &&
graphQLErrors[0]?.message === 'Cannot return null for non-nullable field GroupMember.user'
) {
const isNullError = ({ message }) => {
return message === 'Cannot return null for non-nullable field GroupMember.user';
};
if (graphQLErrors?.length > 0 && graphQLErrors.every(isNullError)) {
// only null-related errors exist, suppress them.
// eslint-disable-next-line no-console
console.error(
"Suppressing the error 'Cannot return null for non-nullable field GroupMember.user'. Please see https://gitlab.com/gitlab-org/gitlab/-/issues/329750",

View File

@ -67,6 +67,11 @@ module Types
description: 'Blob content rich viewer.',
null: true
field :plain_data, GraphQL::STRING_TYPE,
description: 'Blob plain highlighted data.',
null: true,
calls_gitaly: true
def raw_text_blob
object.data unless object.binary?
end

View File

@ -285,10 +285,6 @@ module ProjectsHelper
!disabled && !compact_mode
end
def settings_operations_available?
!@project.archived? && can?(current_user, :admin_operations, @project)
end
def error_tracking_setting_project_json
setting = @project.error_tracking_setting
@ -666,26 +662,6 @@ module ProjectsHelper
"#{request.path}?#{options.to_param}"
end
def sidebar_settings_paths
%w[
projects#edit
integrations#show
services#edit
hooks#index
hooks#edit
access_tokens#index
hook_logs#show
repository#show
ci_cd#show
operations#show
badges#index
pages#show
packages_and_registries#show
projects/runners#show
projects/runners#edit
]
end
def sidebar_operations_paths
%w[
environments

View File

@ -34,20 +34,23 @@ class Namespace
sql = """
UPDATE namespaces
SET traversal_ids = cte.traversal_ids
FROM (#{recursive_traversal_ids(lock: true)}) as cte
FROM (#{recursive_traversal_ids}) as cte
WHERE namespaces.id = cte.id
AND namespaces.traversal_ids <> cte.traversal_ids
"""
Namespace.connection.exec_query(sql)
Namespace.transaction do
@root.lock!
Namespace.connection.exec_query(sql)
end
rescue ActiveRecord::Deadlocked
db_deadlock_counter.increment(source: 'Namespace#sync_traversal_ids!')
raise
end
# Identify all incorrect traversal_ids in the current namespace hierarchy.
def incorrect_traversal_ids(lock: false)
def incorrect_traversal_ids
Namespace
.joins("INNER JOIN (#{recursive_traversal_ids(lock: lock)}) as cte ON namespaces.id = cte.id")
.joins("INNER JOIN (#{recursive_traversal_ids}) as cte ON namespaces.id = cte.id")
.where('namespaces.traversal_ids <> cte.traversal_ids')
end
@ -58,13 +61,10 @@ class Namespace
#
# Note that the traversal_ids represent a calculated traversal path for the
# namespace and not the value stored within the traversal_ids attribute.
#
# Optionally locked with FOR UPDATE to ensure isolation between concurrent
# updates of the heirarchy.
def recursive_traversal_ids(lock: false)
def recursive_traversal_ids
root_id = Integer(@root.id)
sql = <<~SQL
<<~SQL
WITH RECURSIVE cte(id, traversal_ids, cycle) AS (
VALUES(#{root_id}, ARRAY[#{root_id}], false)
UNION ALL
@ -74,10 +74,6 @@ class Namespace
)
SELECT id, traversal_ids FROM cte
SQL
sql += ' FOR UPDATE' if lock
sql
end
# This is essentially Namespace#root_ancestor which will soon be rewritten

View File

@ -41,6 +41,7 @@ module Namespaces
UnboundedSearch = Class.new(StandardError)
included do
before_update :lock_both_roots, if: -> { sync_traversal_ids? && parent_id_changed? }
after_create :sync_traversal_ids, if: -> { sync_traversal_ids? }
after_update :sync_traversal_ids, if: -> { sync_traversal_ids? && saved_change_to_parent_id? }
@ -90,6 +91,23 @@ module Namespaces
Namespace::TraversalHierarchy.for_namespace(root_ancestor).sync_traversal_ids!
end
# Lock the root of the hierarchy we just left, and lock the root of the hierarchy
# we just joined. In most cases the two hierarchies will be the same.
def lock_both_roots
parent_ids = [
parent_id_was || self.id,
parent_id || self.id
].compact
roots = Gitlab::ObjectHierarchy
.new(Namespace.where(id: parent_ids))
.base_and_ancestors
.reorder(nil)
.where(parent_id: nil)
Namespace.lock.select(:id).where(id: roots).order(id: :asc).load
end
# Make sure we drop the STI `type = 'Group'` condition for better performance.
# Logically equivalent so long as hierarchies remain homogeneous.
def without_sti_condition

View File

@ -14,6 +14,12 @@ class BlobPresenter < Gitlab::View::Presenter::Delegated
)
end
def plain_data
return if blob.binary?
highlight(plain: false)
end
def web_url
url_helpers.project_blob_url(project, ref_qualified_path)
end

View File

@ -9,12 +9,6 @@ class SnippetBlobPresenter < BlobPresenter
render_rich_partial
end
def plain_data
return if blob.binary?
highlight(plain: false)
end
def raw_path
snippet_blob_raw_route(only_path: true)
end

View File

@ -20,6 +20,6 @@ class TestCaseEntity < Grape::Entity
alias_method :test_case, :object
def can_read_screenshots?
Feature.enabled?(:junit_pipeline_screenshots_view, options[:project]) && test_case.has_attachment?
test_case.has_attachment?
end
end

View File

@ -62,12 +62,12 @@ module SystemNotes
if time_spent == :reset
body = "removed time spent"
else
spent_at = noteable.spent_at
spent_at = noteable.spent_at&.to_date
parsed_time = Gitlab::TimeTrackingFormatter.output(time_spent.abs)
action = time_spent > 0 ? 'added' : 'subtracted'
text_parts = ["#{action} #{parsed_time} of time spent"]
text_parts << "at #{spent_at}" if spent_at
text_parts << "at #{spent_at}" if spent_at && spent_at != DateTime.current.to_date
body = text_parts.join(' ')
end

View File

@ -2,9 +2,6 @@
- add_page_specific_style 'page_bundles/import'
- breadcrumb_title _('Import groups')
%h1.gl-my-0.gl-py-4.gl-font-size-h1.gl-border-solid.gl-border-gray-200.gl-border-0.gl-border-b-1
= s_('BulkImport|Import groups from GitLab')
#import-groups-mount-element{ data: { status_path: status_import_bulk_imports_path(format: :json),
available_namespaces_path: import_available_namespaces_path(format: :json),
create_bulk_import_path: import_bulk_imports_path(format: :json),

View File

@ -1,61 +1,3 @@
- if project_nav_tab? :settings
= nav_link(path: sidebar_settings_paths) do
= link_to edit_project_path(@project) do
.nav-icon-container
= sprite_icon('settings')
%span.nav-item-name.qa-settings-item#js-onboarding-settings-link
= _('Settings')
%ul.sidebar-sub-level-items
- can_edit = can?(current_user, :admin_project, @project)
- if can_edit
= nav_link(path: sidebar_settings_paths, html_options: { class: "fly-out-top-item" } ) do
= link_to edit_project_path(@project) do
%strong.fly-out-top-item-name
= _('Settings')
%li.divider.fly-out-top-item
= nav_link(path: %w[projects#edit]) do
= link_to edit_project_path(@project), title: _('General'), class: 'qa-general-settings-link' do
%span
= _('General')
- if can_edit
= nav_link(controller: [:integrations, :services]) do
= link_to project_settings_integrations_path(@project), title: _('Integrations'), data: { qa_selector: 'integrations_settings_link' } do
%span
= _('Integrations')
= nav_link(controller: [:hooks, :hook_logs]) do
= link_to project_hooks_path(@project), title: _('Webhooks'), data: { qa_selector: 'webhooks_settings_link' } do
%span
= _('Webhooks')
- if can?(current_user, :read_resource_access_tokens, @project)
= nav_link(controller: [:access_tokens]) do
= link_to project_settings_access_tokens_path(@project), title: _('Access Tokens'), data: { qa_selector: 'access_tokens_settings_link' } do
%span
= _('Access Tokens')
= nav_link(controller: :repository) do
= link_to project_settings_repository_path(@project), title: _('Repository') do
%span
= _('Repository')
- if !@project.archived? && @project.feature_available?(:builds, current_user)
= nav_link(controller: [:ci_cd, 'projects/runners']) do
= link_to project_settings_ci_cd_path(@project), title: _('CI/CD') do
%span
= _('CI/CD')
- if settings_operations_available?
= nav_link(controller: [:operations]) do
= link_to project_settings_operations_path(@project), title: _('Operations'), data: { qa_selector: 'operations_settings_link' } do
= _('Operations')
- if @project.pages_available?
= nav_link(controller: :pages) do
= link_to project_pages_path(@project), title: _('Pages') do
%span
= _('Pages')
- if settings_packages_and_registries_enabled?(@project)
= nav_link(controller: :packages_and_registries) do
= link_to project_settings_packages_and_registries_path(@project), title: _('Packages & Registries'), data: { qa_selector: 'project_package_settings_link' } do
%span
= _('Packages & Registries')
-# Shortcut to Project > Activity
%li.hidden
= link_to activity_project_path(@project), title: _('Activity'), class: 'shortcuts-project-activity' do

View File

@ -1,8 +1,12 @@
- artifacts_endpoint_placeholder = ':pipeline_artifacts_id'
= javascript_tag do
:plain
window.gl = window.gl || {};
window.gl.mrWidgetData = #{serialize_issuable(@merge_request, serializer: 'widget', issues_links: true)}
window.gl.mrWidgetData.artifacts_endpoint = '#{downloadable_artifacts_project_pipeline_path(@project, artifacts_endpoint_placeholder, format: :json)}';
window.gl.mrWidgetData.artifacts_endpoint_placeholder = '#{artifacts_endpoint_placeholder}';
window.gl.mrWidgetData.squash_before_merge_help_path = '#{help_page_path("user/project/merge_requests/squash_and_merge")}';
window.gl.mrWidgetData.ci_troubleshooting_docs_path = '#{help_page_path('ci/troubleshooting.md')}';
window.gl.mrWidgetData.mr_troubleshooting_docs_path = '#{help_page_path('user/project/merge_requests/reviews/index.md', anchor: 'troubleshooting')}';

View File

@ -0,0 +1,5 @@
---
title: Expose blob plain data in GraphQL
merge_request: 61016
author:
type: added

View File

@ -0,0 +1,5 @@
---
title: Removes multiple_cache_per_job feature flag and associated code
merge_request:
author: Laura Montemayor
type: removed

View File

@ -0,0 +1,5 @@
---
title: Remove mobsf service for mobsf SAST job
merge_request: 60770
author:
type: changed

View File

@ -0,0 +1,5 @@
---
title: Retain timelog spent_at time
merge_request: 60191
author: Lee Tickett @leetickett
type: changed

View File

@ -0,0 +1,5 @@
---
title: Lazy load artifacts dropdown in pipelines merge request widget
merge_request: 61055
author:
type: added

View File

@ -0,0 +1,5 @@
---
title: Show unit report attachments in the pipeline test report
merge_request: 61075
author:
type: added

View File

@ -0,0 +1,6 @@
---
title: Suppress all non-nullable field errors for assignee widget graphql queries
to remove assignee fetching error messages in boards
merge_request: 61091
author:
type: fixed

View File

@ -0,0 +1,5 @@
---
title: Implement bulk import for all groups on the page
merge_request: 61097
author:
type: added

View File

@ -1,8 +0,0 @@
---
name: junit_pipeline_screenshots_view
introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/31029
rollout_issue_url: https://gitlab.com/gitlab-org/gitlab/-/issues/202114
milestone: '13.0'
type: development
group: group::verify testing
default_enabled: false

View File

@ -1,8 +0,0 @@
---
name: multiple_cache_per_job
introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/53410
rollout_issue_url: https://gitlab.com/gitlab-org/gitlab/-/issues/321877
milestone: '13.10'
type: development
group: group::pipeline authoring
default_enabled: true

View File

@ -11,6 +11,7 @@ milestone: "9.1"
introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/1521
time_frame: none
data_source: database
instrumentation_class: 'Gitlab::Usage::Metrics::Instrumentations::UuidMetric'
distribution:
- ee
- ce

View File

@ -51,6 +51,10 @@
"type": "string",
"enum": ["database", "redis", "redis_hll", "prometheus", "ruby"]
},
"instrumentation_class": {
"type": "string",
"pattern": "^(Gitlab::Usage::Metrics::Instrumentations::)(([A-Z][a-z]+)+::)*(([A-Z][a-z]+)+)$"
},
"distribution": {
"type": "array",
"items": {

View File

@ -11790,6 +11790,7 @@ Returns [`Tree`](#tree).
| <a id="repositoryblobname"></a>`name` | [`String`](#string) | Blob name. |
| <a id="repositorybloboid"></a>`oid` | [`String!`](#string) | OID of the blob. |
| <a id="repositoryblobpath"></a>`path` | [`String!`](#string) | Path of the blob. |
| <a id="repositoryblobplaindata"></a>`plainData` | [`String`](#string) | Blob plain highlighted data. |
| <a id="repositoryblobrawblob"></a>`rawBlob` | [`String`](#string) | The raw content of the blob. |
| <a id="repositoryblobrawpath"></a>`rawPath` | [`String`](#string) | Web path to download the raw blob. |
| <a id="repositoryblobrawsize"></a>`rawSize` | [`Int`](#int) | Size (in bytes) of the blob, or the blob target if stored externally. |

View File

@ -333,32 +333,17 @@ If parsing JUnit report XML results in an error, an indicator is shown next to t
## Viewing JUnit screenshots on GitLab
> - [Introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/202114) in GitLab 13.0.
> - It's deployed behind a feature flag, disabled by default.
> - To use it in GitLab self-managed instances, ask a GitLab administrator to [enable it](#enabling-the-junit-screenshots-feature). **(FREE SELF)**
WARNING:
This feature might not be available to you. Check the **version history** note above for details.
When [this issue](https://gitlab.com/gitlab-org/gitlab/-/issues/6061) is complete, the attached file is visible on the pipeline details page.
> - [Introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/202114) in GitLab 13.0 behind the `:junit_pipeline_screenshots_view` feature flag, disabled by default.
> - The feature flag was removed and was [made generally available](https://gitlab.com/gitlab-org/gitlab/-/issues/216979) in GitLab 13.12.
If JUnit report format XML files contain an `attachment` tag, GitLab parses the attachment.
Upload your screenshots as [artifacts](yaml/README.md#artifactsreportsjunit) to GitLab. The `attachment` tag **must** contain the absolute path to the screenshots you uploaded.
```xml
<testcase time="1.00" name="Test">
<system-out>[[ATTACHMENT|/absolute/path/to/some/file]]</system-out>
</testcase>
```
### Enabling the JUnit screenshots feature **(FREE SELF)**
Upload your screenshots as [artifacts](yaml/README.md#artifactsreportsjunit) to GitLab. The `attachment` tag **must** contain the absolute path to the screenshots you uploaded.
This feature comes with the `:junit_pipeline_screenshots_view` feature flag disabled by default.
To enable this feature, ask a GitLab administrator with [Rails console access](../administration/feature_flags.md#how-to-enable-and-disable-features-behind-flags) to run the
following command:
```ruby
Feature.enable(:junit_pipeline_screenshots_view)
```
A link to the test case attachment will appear in the test case details in [the pipeline test report](#viewing-unit-test-reports-on-gitlab).

View File

@ -2970,11 +2970,7 @@ You can specify a [fallback cache key](#fallback-cache-key) to use if the specif
##### Multiple caches
> - [Introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/32814) in GitLab 13.10.
> - [Deployed behind a feature flag](../../user/feature_flags.md), disabled by default.
> - [Enabled by default](https://gitlab.com/gitlab-org/gitlab/-/issues/321877) in GitLab 13.11.
> - Enabled on GitLab.com.
> - Recommended for production use.
> - For GitLab self-managed instances, GitLab administrators can opt to [disable it](#enable-or-disable-multiple-caches). **(FREE SELF)**
> - [Feature Flag removed](https://gitlab.com/gitlab-org/gitlab/-/issues/321877), in GitLab 13.12.
You can have a maximum of four caches:
@ -3001,25 +2997,6 @@ test-job:
If multiple caches are combined with a [Fallback cache key](#fallback-cache-key),
the fallback is fetched multiple times if multiple caches are not found.
##### Enable or disable multiple caches **(FREE SELF)**
The multiple caches feature is under development but ready for production use.
It is deployed behind a feature flag that is **enabled by default**.
[GitLab administrators with access to the GitLab Rails console](../../administration/feature_flags.md)
can opt to disable it.
To enable it:
```ruby
Feature.enable(:multiple_cache_per_job)
```
To disable it:
```ruby
Feature.disable(:multiple_cache_per_job)
```
#### Fallback cache key
> [Introduced](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/1534) in GitLab Runner 13.4.

View File

@ -60,6 +60,8 @@ info: To determine the technical writer assigned to the Stage/Group associated w
- [Updating multiple values](setting_multiple_values.md)
- [Constraints naming conventions](constraint_naming_convention.md)
- [Query performance guidelines](../query_performance.md)
- [Pagination guidelines](pagination_guidelines.md)
- [Pagination performance guidelines](pagination_performance_guidelines.md)
## Case studies

View File

@ -0,0 +1,315 @@
---
stage: Enablement
group: Database
info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://about.gitlab.com/handbook/engineering/ux/technical-writing/#assignments
---
# Pagination guidelines
This document gives an overview of the current capabilities and provides best practices for paginating over data in GitLab, and in particular for PostgreSQL.
## Why do we need pagination?
Pagination is a popular technique to avoid loading too much data in one web request. This usually happens when we render a list of records. A common scenario is visualizing parent-child (has many) relations on the UI.
Example: listing issues within a project
As the number of issues grows within the project, the list gets longer. To render the list, the backend does the following:
1. Loads the records from the database, usually in a particular order.
1. Serializes the records in Ruby: builds Ruby (ActiveRecord) objects and then builds a JSON or HTML string.
1. Sends the response back to the browser.
1. The browser renders the content.
We have two options for rendering the content:
- HTML: backend deals with the rendering (HAML template).
- JSON: the client (client-side JavaScript) transforms the payload into HTML.
Rendering long lists can significantly affect both the frontend and backend performance:
- The database needs to read a lot of data from the disk.
- The query result (records) is eventually transformed into Ruby objects, which increases memory allocation.
- Large responses take more time to send over the wire to the user's browser.
- Rendering long lists might freeze the browser (bad user experience).
With pagination, the data is split into equal pieces (pages). On the first visit, the user receives only a limited number of items (page size). The user can see more items by paginating forward which results in a new HTTP request and a new database query.
![Project issues page with pagination](../img/project_issues_pagination_v13_11.jpg)
## General guidelines for paginating
### Pick the right approach
Let the database handle the pagination, filtering, and data retrieval. Implementing in-memory pagination on the backend (`paginate_array` from kaminari) or on the frontend (JavaScript) might work for a few hundred records. If application limits are not defined, things can get out of control quickly.
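The difference is easiest to see in code. A minimal sketch, assuming an `Issue` ActiveRecord model and the kaminari gem:

```ruby
# In-memory pagination: every matching row is loaded into Ruby before slicing,
# so memory use grows with the size of the table.
issues = Kaminari.paginate_array(Issue.where(project_id: 1).to_a).page(1).per(20)

# Database pagination: the LIMIT/OFFSET is pushed down to PostgreSQL,
# so only one page of records is instantiated in Ruby.
issues = Issue.where(project_id: 1).page(1).per(20)
```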
### Reduce complexity
When we list records on the page we often provide additional filters and different sort options. This can complicate things on the backend side significantly.
For the MVC version, consider the following:
- Reduce the number of sort options to the minimum.
- Reduce the number of filters (dropdown, search bar) to the minimum.
To make sorting and pagination efficient, for each sort option we need at least two database indexes (ascending and descending order). If we add filter options (by state or by author), we might need more indexes to maintain good performance. Note that indexes are not free: they can significantly affect `UPDATE` query timings.
It's not possible to make all filter and sort combinations performant, so we should optimize for the most common usage patterns.
### Prepare for scaling
Offset-based pagination is the easiest way to paginate over records; however, it does not scale well for large tables. As a long-term solution, keyset pagination is preferred. The tooling around keyset pagination is not as mature as for offset pagination, so currently it's easier to start with offset pagination and switch to keyset pagination later.
To avoid losing functionality and to maintain backward compatibility when switching pagination methods, consider the following approach in the design phase:
- Avoid presenting total counts, prefer limit counts.
- Example: count a maximum of 1001 records, then show 1000+ on the UI if the count is 1001, and show the actual number otherwise (see the sketch after this list).
- See the [badge counters approach](../merge_request_performance_guidelines.md#badge-counters) for more information.
- Avoid using page numbers, use next and previous page buttons.
- Keyset pagination doesn't support page numbers.
- For APIs, advise against building URLs for the next page by "hand".
- Promote the usage of the [`Link` header](../../api/README.md#pagination-link-header) where the URLs for the next and previous page are provided by the backend.
- This way changing the URL structure is possible without breaking backward compatibility.
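For the limit count idea above, a minimal sketch (assuming a Rails version where `count` respects `limit` by wrapping the query in a subquery):

```ruby
# Count at most 1001 rows instead of the whole table.
count = project.issues.limit(1001).count

# Show "1000+" when the limit is hit, otherwise show the exact number.
badge = count > 1000 ? '1000+' : count.to_s
```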
NOTE:
Infinite scroll can use keyset pagination without affecting the user experience since there are no exposed page numbers.
## Options for pagination
### Offset pagination
The most common way to paginate lists is using offset-based pagination (UI and REST API). It's backed by the popular [kaminari](https://github.com/kaminari/kaminari) Ruby gem, which provides convenient helper methods to implement pagination on ActiveRecord queries.
Offset-based pagination leverages the `LIMIT` and `OFFSET` SQL clauses to take a specific slice from the table.
Example database query when looking for the 2nd page of the issues within our project:
```sql
SELECT issues.* FROM issues WHERE project_id = 1 ORDER BY id LIMIT 20 OFFSET 20
```
To produce the result, the database does the following:
1. Moves an imaginary pointer over the table rows and skips 20 rows.
1. Takes the next 20 rows.
Notice that the query also orders the rows by the primary key (`id`). When paginating data, specifying the order is very important. Without it, the returned rows are non-deterministic and can confuse the end-user.
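The equivalent kaminari call looks like the following sketch (the explicit `order` is an assumption, added to match the SQL above):

```ruby
# Page 2 with a page size of 20 produces LIMIT 20 OFFSET 20.
Issue.where(project_id: 1).order(:id).page(2).per(20)
```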
#### Page numbers
Example pagination bar:
![Page selector rendered by kaminari](../img/offset_pagination_ui_v13_11.jpg)
The kaminari gem renders a nice pagination bar on the UI with page numbers and, optionally, quick shortcut buttons for the next, previous, first, and last pages. To render these buttons, kaminari needs to know the number of rows, and for that a count query is executed.
```sql
SELECT COUNT(*) FROM issues WHERE project_id = 1
```
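In application code, this count is typically triggered when the total is requested, for example (a sketch):

```ruby
# Calling total_count on a kaminari-paginated relation executes the COUNT query.
Issue.where(project_id: 1).page(1).per(20).total_count
```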
#### Performance
##### Index coverage
To achieve good performance, the `ORDER BY` clause needs to be covered by an index.
Assuming that we have the following index:
```sql
CREATE INDEX index_on_issues_project_id ON issues (project_id);
```
Let's try to request the first page:
```sql
SELECT issues.* FROM issues WHERE project_id = 1 ORDER BY id LIMIT 20;
```
We can produce the same query in Rails:
```ruby
Issue.where(project_id: 1).page(1).per(20)
```
The SQL query returns a maximum of 20 rows from the database. However, this doesn't mean that the database only reads 20 rows from the disk to produce the result.
This is what will happen:
1. The database will try to plan the execution in the most efficient way possible based on the table statistics and the available indexes.
1. The planner knows that we have an index covering the `project_id` column.
1. The database will read all rows using the index on `project_id`.
1. The rows at this point are not sorted, so the database will need to sort the rows.
1. The database returns the first 20 rows.
If the project has 10_000 rows, the database reads all 10_000 rows and sorts them in memory (or on disk). This is not going to scale well in the long term.
To fix this we need the following index:
```sql
CREATE INDEX index_on_issues_project_id ON issues (project_id, id);
```
By making the `id` column part of the index, the previous query reads a maximum of 20 rows. The query performs well regardless of the number of issues within a project. So with this change, we've also improved the initial page load (when the user loads the issue page).
NOTE:
Here we're leveraging the ordered property of the b-tree database index. Values in the index are sorted so reading 20 rows will not require further sorting.
#### Limitations
##### `COUNT(*)` on a large dataset
Kaminari by default executes a count query to determine the number of pages for rendering the page links. Count queries can be quite expensive for a large table; in an unfortunate scenario the queries simply time out.
To work around this, we can run kaminari without invoking the count SQL query.
```ruby
Issue.where(project_id: 1).page(1).per(20).without_count
```
In this case, the count query will not be executed and the pagination will no longer render the page numbers. We'll see only the next and previous links.
##### `OFFSET` on a large dataset
When we paginate over a large dataset, we might notice that the response time will get slower and slower. This is due to the `OFFSET` clause that seeks through the rows and skips N rows.
From the user's point of view, this might not always be noticeable. As the user paginates forward, the previous rows might still be in the buffer cache of the database. If the user shares the link with someone else and it's opened after a few minutes or hours, the response time might be significantly higher, or the request might even time out.
When requesting a large page number, the database needs to read `PAGE * PAGE_SIZE` rows. This makes offset pagination **unsuitable for large database tables**.
Example: listing users on the Admin page
Listing users with a very simple SQL query:
```sql
SELECT "users".* FROM "users" ORDER BY "users"."id" DESC LIMIT 20 OFFSET 0
```
The query execution plan shows that this query is efficient: the database only reads 20 rows (`rows=20`):
```plaintext
Limit (cost=0.43..3.19 rows=20 width=1309) (actual time=0.098..2.093 rows=20 loops=1)
Buffers: shared hit=103
-> Index Scan Backward using users_pkey on users (cost=0.43..X rows=X width=1309) (actual time=0.097..2.087 rows=20 loops=1)
Buffers: shared hit=103
Planning Time: 0.333 ms
Execution Time: 2.145 ms
(6 rows)
```
See [Understanding EXPLAIN plans](../understanding_explain_plans.md) for more information about reading execution plans.
Let's visit the 50_000th page:
```sql
SELECT "users".* FROM "users" ORDER BY "users"."id" DESC LIMIT 20 OFFSET 999980;
```
The plan shows that the database reads 1_000_000 rows to return 20 rows, with a very high execution time (5.5 seconds):
```plaintext
Limit (cost=137878.89..137881.65 rows=20 width=1309) (actual time=5523.588..5523.667 rows=20 loops=1)
Buffers: shared hit=1007901 read=14774 written=609
I/O Timings: read=420.591 write=57.344
-> Index Scan Backward using users_pkey on users (cost=0.43..X rows=X width=1309) (actual time=0.060..5459.353 rows=1000000 loops=1)
Buffers: shared hit=1007901 read=14774 written=609
I/O Timings: read=420.591 write=57.344
Planning Time: 0.821 ms
Execution Time: 5523.745 ms
(8 rows)
```
We can argue that a normal user is not going to visit these pages; however, API users could easily navigate to very high page numbers (scraping, collecting data).
### Keyset pagination
Keyset pagination addresses the performance concerns of "skipping" previous rows when requesting a large page; however, it's not a drop-in replacement for offset-based pagination. Keyset pagination is currently used only in the [GraphQL API](../graphql_guide/pagination.md).
Consider the following `issues` table:
|`id`|`project_id`|
|-|-|
|1|1|
|2|1|
|3|2|
|4|1|
|5|1|
|6|2|
|7|2|
|8|1|
|9|1|
|10|2|
Let's paginate over the whole table ordered by the primary key (`id`). The query for the first page is the same as the offset pagination query; for simplicity, we use 5 as the page size:
```sql
SELECT "issues".* FROM "issues" ORDER BY "issues"."id" ASC LIMIT 5
```
Notice that we didn't add the `OFFSET` clause.
To get to the next page, we need to extract values that are part of the `ORDER BY` clause from the last row. In this case, we just need the `id`, which is 5. Now we construct the query for the next page:
```sql
SELECT "issues".* FROM "issues" WHERE "issues"."id" > 5 ORDER BY "issues"."id" ASC LIMIT 5
```
Looking at the query execution plan, we can see that this query read only 5 rows (offset-based pagination would read 10 rows):
```plaintext
Limit (cost=0.56..2.08 rows=5 width=1301) (actual time=0.093..0.137 rows=5 loops=1)
-> Index Scan using issues_pkey on issues (cost=0.56..X rows=X width=1301) (actual time=0.092..0.136 rows=5 loops=1)
Index Cond: (id > 5)
Planning Time: 7.710 ms
Execution Time: 0.224 ms
(5 rows)
```
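One way to express the same two keyset queries with ActiveRecord, as a sketch that assumes `id` is the only `ORDER BY` column:

```ruby
page_size = 5

# First page: no cursor yet.
first_page = Issue.order(id: :asc).limit(page_size).to_a

# The cursor is the ORDER BY column value of the last row on the page.
cursor = first_page.last.id

# Next page: filter by the cursor instead of using OFFSET.
next_page = Issue.where('id > ?', cursor).order(id: :asc).limit(page_size).to_a
```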
#### Limitations
##### No page numbers
Offset pagination provides an easy way to request a specific page. We can simply edit the URL and modify the `page=` URL parameter. Keyset pagination cannot provide page numbers because the paging logic might depend on different columns.
In the previous example, the column is `id`, so we might see something like this in the URL:
```plaintext
id_after=5
```
In GraphQL, the parameters are serialized to JSON and then encoded:
```plaintext
eyJpZCI6Ijk0NzMzNTk0IiwidXBkYXRlZF9hdCI6IjIwMjEtMDQtMDkgMDg6NTA6MDUuODA1ODg0MDAwIFVUQyJ9
```
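A rough sketch of how such a cursor could be produced and read back. The field names and the Base64-encoded JSON format are assumptions for illustration only, not the exact implementation used by the GraphQL API:

```ruby
require 'base64'
require 'json'

# Encode the ORDER BY column values of the last row into an opaque cursor.
cursor = Base64.urlsafe_encode64(
  { id: '94733594', updated_at: '2021-04-09 08:50:05.805884000 UTC' }.to_json
)

# Decode the cursor to rebuild the WHERE condition for the next page.
attributes = JSON.parse(Base64.urlsafe_decode64(cursor))
```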
NOTE:
Pagination parameters will be visible to the user, so we need to be careful about which columns we order by.
Keyset pagination can only provide the next, previous, first, and last pages.
##### Complexity
Building queries when we order by a single column is very easy; however, things get more complex if a tie-breaker or multi-column ordering is used. The complexity increases further if any of the columns are nullable.
Example: ordering by `id` and `created_at` where `created_at` is nullable; the query for getting the second page:
```sql
SELECT "issues".*
FROM "issues"
WHERE (("issues"."id" > 99
AND "issues"."created_at" = '2021-02-16 11:26:17.408466')
OR ("issues"."created_at" > '2021-02-16 11:26:17.408466')
OR ("issues"."created_at" IS NULL))
ORDER BY "issues"."created_at" DESC NULLS LAST, "issues"."id" DESC
LIMIT 20
```
##### Tooling
Using keyset pagination outside of GraphQL is not straightforward. We have the low-level building blocks for keyset pagination database queries; however, their usage in application code is not yet streamlined.
#### Performance
Keyset pagination provides stable performance regardless of how many pages we move forward. To achieve this performance, the paginated query needs an index that covers all the columns in the `ORDER BY` clause, similar to offset pagination.
### General performance guidelines
See the [pagination general performance guidelines page](pagination_performance_guidelines.md).

View File

@ -0,0 +1,325 @@
---
stage: Enablement
group: Database
info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://about.gitlab.com/handbook/engineering/ux/technical-writing/#assignments
---
# Pagination performance guidelines
The following document gives a few ideas for improving pagination (sorting) performance. These apply to both [offset](pagination_guidelines.md#offset-pagination) and [keyset](pagination_guidelines.md#keyset-pagination) pagination.
## Tie-breaker column
When ordering by columns, it's advised to order by distinct columns only. Consider the following example:
|`id`|`created_at`|
|-|-|
|1|2021-01-04 14:13:43|
|2|2021-01-05 19:03:12|
|3|2021-01-05 19:03:12|
If we order by `created_at`, the result would likely depend on how the records are located on the disk.
Using a tie-breaker column is advised when the data is exposed via a well-defined interface and is consumed
by an automated process, such as an API. Without the tie-breaker column, the order of the rows could change
(for example, when the data is re-imported), which could cause problems that are hard to debug, such as:
- An integration comparing the rows to determine changes breaks.
- E-tag cache values change, which requires a complete re-download.
```sql
SELECT issues.* FROM issues ORDER BY created_at;
```
We can fix this by adding a second column to `ORDER BY`:
```sql
SELECT issues.* FROM issues ORDER BY created_at, id;
```
This change makes the order distinct so we have "stable" sorting.
NOTE:
To make the query efficient, we need an index covering both columns: `(created_at, id)`. The order of the columns in the index **should match** the order of the columns in the `ORDER BY` clause.
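As a sketch, the stable ordering in ActiveRecord and a hypothetical migration for the covering index (the class name is illustrative):

```ruby
# Stable ordering: created_at with id as the tie-breaker.
Issue.order(created_at: :asc, id: :asc)

class AddIndexOnIssuesCreatedAtAndId < ActiveRecord::Migration[6.0]
  disable_ddl_transaction!

  def change
    # The column order matches the ORDER BY clause: (created_at, id).
    add_index :issues, %i[created_at id], algorithm: :concurrently
  end
end
```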
## Ordering by joined table column
Oftentimes, we want to order the data by a column on a joined database table. The following example orders `issues` records by the `first_mentioned_in_commit_at` metric column:
```sql
SELECT issues.* FROM issues
INNER JOIN issue_metrics on issue_metrics.issue_id=issues.id
WHERE issues.project_id = 2
ORDER BY issue_metrics.first_mentioned_in_commit_at DESC, issues.id DESC
LIMIT 20
OFFSET 0
```
With PostgreSQL version 11, the planner first looks up all issues matching the `project_id` filter and then joins all `issue_metrics` rows. The ordering of rows happens in memory. If the joined relation is always present (1:1 relationship), the database reads `N * 2` rows, where `N` is the number of rows matching the `project_id` filter.
For performance reasons, we should avoid mixing columns from different tables when specifying the `ORDER BY` clause.
In this particular case, there is no simple way (like index creation) to improve the query. We might think that changing the `issues.id` column to `issue_metrics.issue_id` would help; however, this will likely make the query perform worse because it might force the database to process all rows in the `issue_metrics` table.
One idea to address this problem is denormalization. Adding the `project_id` column to the `issue_metrics` table will make the filtering and sorting efficient:
```sql
SELECT issues.* FROM issues
INNER JOIN issue_metrics on issue_metrics.issue_id=issues.id
WHERE issue_metrics.project_id = 2
ORDER BY issue_metrics.first_mentioned_in_commit_at DESC, issue_metrics.issue_id DESC
LIMIT 20
OFFSET 0
```
NOTE:
The query will require an index on `issue_metrics` table with the following column configuration: `(project_id, first_mentioned_in_commit_at DESC, issue_id DESC)`.
## Filtering
### By project
Filtering by a project is a very common use case since we have many features on the project level. Examples: merge requests, issues, boards, iterations.
These features will have a filter on `project_id` in their base query. Loading issues for a project:
```ruby
project = Project.find(5)
# order by internal id
issues = project.issues.order(:iid).page(1).per(20)
```
To make the base query efficient, there is usually a database index covering the `project_id` column. This significantly reduces the number of rows the database needs to scan. Without the index, the whole `issues` table would be read (full table scan) by the database.
Since `project_id` is a foreign key, we might have the following index available:
```sql
"index_issues_on_project_id" btree (project_id)
```
GitLab 13.11 has the following index definition on the `issues` table:
```sql
"index_issues_on_project_id_and_iid" UNIQUE, btree (project_id, iid)
```
This index fully covers the database query and the pagination.
### By group
Unfortunately, there is no efficient way to sort and paginate on the group level. The database query execution time will increase based on the number of records in the group.
Things get worse when group level actually means group and its subgroups. To load the first page, the database needs to look up the group hierarchy, find all projects and then look up all issues.
The main reason behind the inefficient queries on the group level is the way our database schema is designed; our core domain models are associated with a project, and projects are associated with groups. This doesn't mean that the database structure is bad; it's just in a well-normalized form that is not optimized for efficient group-level queries. We might need to look into denormalization in the long term.
Example: List issues in a group
```ruby
group = Group.find(9970)
Issue.where(project_id: group.projects).order(:iid).page(1).per(20)
```
The generated SQL query:
```sql
SELECT "issues".*
FROM "issues"
WHERE "issues"."project_id" IN
(SELECT "projects"."id"
FROM "projects"
WHERE "projects"."namespace_id" = 5)
ORDER BY "issues"."iid" ASC
LIMIT 20
OFFSET 0
```
The execution plan shows that we read significantly more rows than requested (20), and the rows are sorted in memory:
```plaintext
Limit (cost=10716.87..10716.92 rows=20 width=1300) (actual time=1472.305..1472.308 rows=20 loops=1)
-> Sort (cost=10716.87..10717.03 rows=61 width=1300) (actual time=1472.303..1472.305 rows=20 loops=1)
Sort Key: issues.iid
Sort Method: top-N heapsort Memory: 41kB
-> Nested Loop (cost=1.00..10715.25 rows=61 width=1300) (actual time=0.215..1331.647 rows=177267 loops=1)
-> Index Only Scan using index_projects_on_namespace_id_and_id on projects (cost=0.44..3.77 rows=19 width=4) (actual time=0.077..1.057 rows=270 loops=1)
Index Cond: (namespace_id = 9970)
Heap Fetches: 25
-> Index Scan using index_issues_on_project_id_and_iid on issues (cost=0.56..559.28 rows=448 width=1300) (actual time=0.101..4.781 rows=657 loops=270)
Index Cond: (project_id = projects.id)
Planning Time: 12.281 ms
Execution Time: 1472.391 ms
(12 rows)
```
#### Columns in the same database table
Filtering by columns located in the same database table can be improved with an index. If we want to support filtering by the `state_id` column, we can add the following index:
```sql
"index_issues_on_project_id_and_state_id_and_iid" UNIQUE, btree (project_id, state_id, iid)
```
Example query in Rails:
```ruby
project = Project.find(5)
# order by internal id
issues = project.issues.opened.order(:iid).page(1).per(20)
```
SQL query:
```sql
SELECT "issues".*
FROM "issues"
WHERE
"issues"."project_id" = 5
AND ("issues"."state_id" IN (1))
ORDER BY "issues"."iid" ASC
LIMIT 20
OFFSET 0
```
Keep in mind that the index above will not support the following project level query:
```sql
SELECT "issues".*
FROM "issues"
WHERE "issues"."project_id" = 5
ORDER BY "issues"."iid" ASC
LIMIT 20
OFFSET 0
```
#### Special case: confidential flag
In the `issues` table, we have a boolean field (`confidential`) that marks an issue confidential. This makes the issue invisible (filtered out) for non-member users.
Example SQL query:
```sql
SELECT "issues".*
FROM "issues"
WHERE "issues"."project_id" = 5
AND "issues"."confidential" = FALSE
ORDER BY "issues"."iid" ASC
LIMIT 20
OFFSET 0
```
We might be tempted to add an index on `project_id`, `confidential`, and `iid` to improve the database query; however, in this case it's probably unnecessary. Based on the data distribution in the table, confidential issues are rare. Filtering them out does not make the database query significantly slower: the database might read a few extra rows, and the performance difference might not even be visible to the end-user.
On the other hand, if we implemented a special filter that shows only confidential issues, we would surely need the index. Finding 20 confidential issues might require the database to scan hundreds of rows or, in the worst case, all issues in the project.
NOTE:
Be aware of the data distribution and the table access patterns (how features work) when introducing a new database index. Sampling production data might be necessary to make the right decision.
#### Columns in a different database table
Example: filtering issues in a project by an assignee
```ruby
project = Project.find(5)
project
.issues
.joins(:issue_assignees)
.where(issue_assignees: { user_id: 10 })
.order(:iid)
.page(1)
.per(20)
```
```sql
SELECT "issues".*
FROM "issues"
INNER JOIN "issue_assignees" ON "issue_assignees"."issue_id" = "issues"."id"
WHERE "issues"."project_id" = 5
AND "issue_assignees"."user_id" = 10
ORDER BY "issues"."iid" ASC
LIMIT 20
OFFSET 0
```
Example database (oversimplified) execution plan:
1. The database parses the SQL query and detects the `JOIN`.
1. The database splits the query into two subqueries.
- `SELECT "issue_assignees".* FROM "issue_assignees" WHERE "issue_assignees"."user_id" = 10`
- `SELECT "issues".* FROM "issues" WHERE "issues"."project_id" = 5`
1. The database estimates the number of rows and the costs to run these queries.
1. The database executes the cheapest query first.
1. Using the result of the cheapest query, the database loads the rows from the other table (from the other query) via the `JOIN` column and filters the rows further.
In this particular example, the `issue_assignees` query would likely be executed first.
Running the query in production for the GitLab project produces the following execution plan:
```plaintext
Limit (cost=411.20..411.21 rows=1 width=1300) (actual time=24.071..24.077 rows=20 loops=1)
-> Sort (cost=411.20..411.21 rows=1 width=1300) (actual time=24.070..24.073 rows=20 loops=1)
Sort Key: issues.iid
Sort Method: top-N heapsort Memory: 91kB
-> Nested Loop (cost=1.00..411.19 rows=1 width=1300) (actual time=0.826..23.705 rows=190 loops=1)
-> Index Scan using index_issue_assignees_on_user_id on issue_assignees (cost=0.44..81.37 rows=92 width=4) (actual time=0.741..13.202 rows=215 loops=1)
Index Cond: (user_id = 4156052)
-> Index Scan using issues_pkey on issues (cost=0.56..3.58 rows=1 width=1300) (actual time=0.048..0.048 rows=1 loops=215)
Index Cond: (id = issue_assignees.issue_id)
Filter: (project_id = 278964)
Rows Removed by Filter: 0
Planning Time: 1.141 ms
Execution Time: 24.170 ms
(13 rows)
```
The query looks up the `issue_assignees` rows first, filtered by `user_id` (`user_id = 4156052`), and finds 215 rows. Using those 215 rows, the database looks up the 215 associated issue rows by the primary key. Notice that the filter on the `project_id` column is not backed by an index.
In most cases, we are lucky that the joined relation does not return too many rows, so we end up with a relatively efficient database query that accesses a low number of rows. As the database grows, these queries might start to behave differently. If the number of `issue_assignees` records for a particular user is very high (millions), this join query will not perform well, and it will likely time out.
A similar problem could be a double join, where the filter exists in the second `JOIN` query. Example: `Issue -> LabelLink -> Label(name=bug)`.
There is no easy way to fix these problems. Denormalization of data could help significantly; however, it also has negative effects (data duplication and keeping the data up to date).
Ideas for improving the `issue_assignees` filter:
- Add a `project_id` column to the `issue_assignees` table so that, when joining, the extra `project_id` filter further narrows down the rows. The sorting will likely happen in memory:
```sql
SELECT "issues".*
FROM "issues"
INNER JOIN "issue_assignees" ON "issue_assignees"."issue_id" = "issues"."id"
WHERE "issues"."project_id" = 5
AND "issue_assignees"."user_id" = 10
AND "issue_assignees"."project_id" = 5
ORDER BY "issues"."iid" ASC
LIMIT 20
OFFSET 0
```
- Add the `iid` column to the `issue_assignees` table. Notice that the `ORDER BY` column is different and the `project_id` filter is gone from the `issues` table:
```sql
SELECT "issues".*
FROM "issues"
INNER JOIN "issue_assignees" ON "issue_assignees"."issue_id" = "issues"."id"
WHERE "issue_assignees"."user_id" = 10
AND "issue_assignees"."project_id" = 5
ORDER BY "issue_assignees"."iid" ASC
LIMIT 20
OFFSET 0
```
The query now performs well for any number of `issue_assignees` records; however, we pay a very high price for it:
- Two columns are duplicated which increases the database size.
- We need to keep the two columns in sync.
- We need more indexes on the `issue_assignees` table to support the query.
- The new database query is very specific to the assignee search and needs complex backend code to build it.
- When the user filters by assignee, the query must order by a different column and drop the `project_id` filter from the `issues` table, and so on.
NOTE:
Currently, we don't do this kind of denormalization at GitLab.

View File

@ -285,7 +285,7 @@ defer it to another release if there is risk of important data loss.
Follow these best practices for best results:
- When working in batches, keep the batch size under 9,000 documents
and `throttle_delay` over 3 minutes. The bulk indexer is set to run
and `throttle_delay` for at least 3 minutes. The bulk indexer is set to run
every 1 minute and process a batch of 10,000 documents. These limits
allow the bulk indexer time to process records before another migration
batch is attempted.

View File

@ -12,6 +12,10 @@ GitLab uses two primary types of pagination: **offset** and **keyset**
(sometimes called cursor-based) pagination.
The GraphQL API mainly uses keyset pagination, falling back to offset pagination when needed.
### Performance considerations
See the [general pagination guidelines section](../database/pagination_guidelines.md) for more information.
### Offset pagination
This is the traditional, page-by-page pagination, that is most common,

View File

@ -491,6 +491,48 @@ To avoid this error, use the applicable HTML entity code (`&lt;` or `&gt;`) inst
// => 'In < 1 hour'
```
### Numbers
Different locales may use different number formats. To support localization of numbers, we use `formatNumber`,
which leverages [`toLocaleString()`](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Number/toLocaleString).
`formatNumber` formats numbers as strings using the current user locale by default.
- In JavaScript
```javascript
import { formatNumber } from '~/locale';
// Assuming "User Preferences > Language" is set to "English":
const tenThousand = formatNumber(10000); // "10,000" (uses comma as thousands separator in English locale)
const fiftyPercent = formatNumber(0.5, { style: 'percent' }) // "50%" (other options are passed to toLocaleString)
```
- In Vue templates
```html
<script>
import { formatNumber } from '~/locale';
export default {
//...
methods: {
// ...
formatNumber,
},
}
</script>
<template>
<div class="my-number">
{{ formatNumber(10000) }} <!-- 10,000 -->
</div>
<div class="my-percent">
{{ formatNumber(0.5, { style: 'percent' }) }} <!-- 50% -->
</div>
</template>
```
### Dates / times
- In JavaScript:

Binary file not shown.

After

Width:  |  Height:  |  Size: 4.9 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 50 KiB

View File

@ -426,6 +426,8 @@ Take into consideration the following when choosing a pagination strategy:
The database has to sort and iterate all previous items, and this operation usually
can result in substantial load put on database.
You can find useful tips related to pagination in the [pagination guidelines](database/pagination_guidelines.md).
## Badge counters
Counters should always be truncated. It means that we don't want to present

View File

@ -7,39 +7,22 @@ module Gitlab
include ::Gitlab::Utils::StrongMemoize
def initialize(cache, pipeline)
if multiple_cache_per_job?
cache = Array.wrap(cache)
@cache = cache.map do |cache|
Gitlab::Ci::Pipeline::Seed::Build::Cache
.new(pipeline, cache)
end
else
@cache = Gitlab::Ci::Pipeline::Seed::Build::Cache
.new(pipeline, cache)
cache = Array.wrap(cache)
@cache = cache.map do |cache|
Gitlab::Ci::Pipeline::Seed::Build::Cache
.new(pipeline, cache)
end
end
def cache_attributes
strong_memoize(:cache_attributes) do
if multiple_cache_per_job?
if @cache.empty?
{}
else
{ options: { cache: @cache.map(&:attributes) } }
end
if @cache.empty?
{}
else
@cache.build_attributes
{ options: { cache: @cache.map(&:attributes) } }
end
end
end
private
def multiple_cache_per_job?
strong_memoize(:multiple_cache_per_job) do
::Gitlab::Ci::Features.multiple_cache_per_job?
end
end
end
end
end

View File

@ -4,88 +4,52 @@ module Gitlab
module Ci
class Config
module Entry
##
# Entry that represents a cache configuration
#
class Cache < ::Gitlab::Config::Entry::Simplifiable
strategy :Caches, if: -> (config) { Feature.enabled?(:multiple_cache_per_job, default_enabled: :yaml) }
strategy :Cache, if: -> (config) { Feature.disabled?(:multiple_cache_per_job, default_enabled: :yaml) }
class Cache < ::Gitlab::Config::Entry::Node
include ::Gitlab::Config::Entry::Configurable
include ::Gitlab::Config::Entry::Validatable
include ::Gitlab::Config::Entry::Attributable
class Caches < ::Gitlab::Config::Entry::ComposableArray
include ::Gitlab::Config::Entry::Validatable
ALLOWED_KEYS = %i[key untracked paths when policy].freeze
ALLOWED_POLICY = %w[pull-push push pull].freeze
DEFAULT_POLICY = 'pull-push'
ALLOWED_WHEN = %w[on_success on_failure always].freeze
DEFAULT_WHEN = 'on_success'
MULTIPLE_CACHE_LIMIT = 4
validations do
validates :config, type: Hash, allowed_keys: ALLOWED_KEYS
validates :policy,
inclusion: { in: ALLOWED_POLICY, message: 'should be pull-push, push, or pull' },
allow_blank: true
validations do
validate do
unless config.is_a?(Hash) || config.is_a?(Array)
errors.add(:config, 'can only be a Hash or an Array')
end
if config.is_a?(Array) && config.count > MULTIPLE_CACHE_LIMIT
errors.add(:config, "no more than #{MULTIPLE_CACHE_LIMIT} caches can be created")
end
end
end
def initialize(*args)
super
@key = nil
end
def composable_class
Entry::Cache::Cache
with_options allow_nil: true do
validates :when,
inclusion: {
in: ALLOWED_WHEN,
message: 'should be on_success, on_failure or always'
}
end
end
class Cache < ::Gitlab::Config::Entry::Node
include ::Gitlab::Config::Entry::Configurable
include ::Gitlab::Config::Entry::Validatable
include ::Gitlab::Config::Entry::Attributable
entry :key, Entry::Key,
description: 'Cache key used to define a cache affinity.'
ALLOWED_KEYS = %i[key untracked paths when policy].freeze
ALLOWED_POLICY = %w[pull-push push pull].freeze
DEFAULT_POLICY = 'pull-push'
ALLOWED_WHEN = %w[on_success on_failure always].freeze
DEFAULT_WHEN = 'on_success'
entry :untracked, ::Gitlab::Config::Entry::Boolean,
description: 'Cache all untracked files.'
validations do
validates :config, type: Hash, allowed_keys: ALLOWED_KEYS
validates :policy,
inclusion: { in: ALLOWED_POLICY, message: 'should be pull-push, push, or pull' },
allow_blank: true
entry :paths, Entry::Paths,
description: 'Specify which paths should be cached across builds.'
with_options allow_nil: true do
validates :when,
inclusion: {
in: ALLOWED_WHEN,
message: 'should be on_success, on_failure or always'
}
end
end
attributes :policy, :when
entry :key, Entry::Key,
description: 'Cache key used to define a cache affinity.'
def value
result = super
entry :untracked, ::Gitlab::Config::Entry::Boolean,
description: 'Cache all untracked files.'
result[:key] = key_value
result[:policy] = policy || DEFAULT_POLICY
# Use self.when to avoid conflict with reserved word
result[:when] = self.when || DEFAULT_WHEN
entry :paths, Entry::Paths,
description: 'Specify which paths should be cached across builds.'
attributes :policy, :when
def value
result = super
result[:key] = key_value
result[:policy] = policy || DEFAULT_POLICY
# Use self.when to avoid conflict with reserved word
result[:when] = self.when || DEFAULT_WHEN
result
end
result
end
class UnknownStrategy < ::Gitlab::Config::Entry::Node

View File

@ -0,0 +1,40 @@
# frozen_string_literal: true
module Gitlab
module Ci
class Config
module Entry
##
# Entry that represents caches configuration
#
class Caches < ::Gitlab::Config::Entry::ComposableArray
include ::Gitlab::Config::Entry::Validatable
MULTIPLE_CACHE_LIMIT = 4
validations do
validate do
unless config.is_a?(Hash) || config.is_a?(Array)
errors.add(:config, 'can only be a Hash or an Array')
end
if config.is_a?(Array) && config.count > MULTIPLE_CACHE_LIMIT
errors.add(:config, "no more than #{MULTIPLE_CACHE_LIMIT} caches can be created")
end
end
end
def initialize(*args)
super
@key = nil
end
def composable_class
Entry::Cache
end
end
end
end
end
end

View File

@ -37,7 +37,7 @@ module Gitlab
description: 'Script that will be executed after each job.',
inherit: true
entry :cache, Entry::Cache,
entry :cache, Entry::Caches,
description: 'Configure caching between build jobs.',
inherit: true

View File

@ -64,7 +64,7 @@ module Gitlab
description: 'Commands that will be executed when finishing job.',
inherit: true
entry :cache, Entry::Cache,
entry :cache, Entry::Caches,
description: 'Cache definition for this job.',
inherit: true

View File

@ -61,7 +61,7 @@ module Gitlab
description: 'Deprecated: stages for this pipeline.',
reserved: true
entry :cache, Entry::Cache,
entry :cache, Entry::Caches,
description: 'Configure caching between build jobs.',
reserved: true

View File

@ -56,8 +56,8 @@ module Gitlab
::Feature.enabled?(:codequality_mr_diff, project, default_enabled: false)
end
def self.multiple_cache_per_job?
::Feature.enabled?(:multiple_cache_per_job, default_enabled: :yaml)
def self.gldropdown_tags_enabled?
::Feature.enabled?(:gldropdown_tags, default_enabled: :yaml)
end
end
end

View File

@ -157,11 +157,6 @@ gosec-sast:
mobsf-android-sast:
extends: .sast-analyzer
services:
# this version must match with analyzer version mentioned in: https://gitlab.com/gitlab-org/security-products/analyzers/mobsf/-/blob/master/Dockerfile
# Unfortunately, we need to keep track of mobsf version in 2 different places for now.
- name: opensecurity/mobile-security-framework-mobsf:v3.4.0
alias: mobsf
image:
name: "$SAST_ANALYZER_IMAGE"
variables:
@ -169,7 +164,6 @@ mobsf-android-sast:
# override the analyzer image with a custom value. This may be subject to change or
# breakage across GitLab releases.
SAST_ANALYZER_IMAGE: "$SECURE_ANALYZERS_PREFIX/mobsf:$SAST_ANALYZER_IMAGE_TAG"
MOBSF_API_KEY: key
rules:
- if: $SAST_DISABLED
when: never
@ -183,11 +177,6 @@ mobsf-android-sast:
mobsf-ios-sast:
extends: .sast-analyzer
services:
# this version must match with analyzer version mentioned in: https://gitlab.com/gitlab-org/security-products/analyzers/mobsf/-/blob/master/Dockerfile
# Unfortunately, we need to keep track of mobsf version in 2 different places for now.
- name: opensecurity/mobile-security-framework-mobsf:v3.4.0
alias: mobsf
image:
name: "$SAST_ANALYZER_IMAGE"
variables:
@ -195,7 +184,6 @@ mobsf-ios-sast:
# override the analyzer image with a custom value. This may be subject to change or
# breakage across GitLab releases.
SAST_ANALYZER_IMAGE: "$SECURE_ANALYZERS_PREFIX/mobsf:$SAST_ANALYZER_IMAGE_TAG"
MOBSF_API_KEY: key
rules:
- if: $SAST_DISABLED
when: never

View File

@ -19,7 +19,7 @@ module Gitlab
def execute
return if @spend_arg.blank?
return [get_time, DateTime.now.to_date] unless date_present?
return [get_time, DateTime.current] unless date_present?
return unless valid_date?
[get_time, get_date]

View File

@ -9,8 +9,13 @@ module Gitlab
def self.subscriptions_url
ENV.fetch('CUSTOMER_PORTAL_URL', default_subscriptions_url)
end
def self.payment_form_url
"#{self.subscriptions_url}/payment_forms/cc_validation"
end
end
end
Gitlab::SubscriptionPortal.prepend_mod
Gitlab::SubscriptionPortal::SUBSCRIPTIONS_URL = Gitlab::SubscriptionPortal.subscriptions_url.freeze
Gitlab::SubscriptionPortal::PAYMENT_FORM_URL = Gitlab::SubscriptionPortal.payment_form_url.freeze

View File

@ -65,6 +65,10 @@ module Gitlab
@definitions ||= load_all!
end
def all
@all ||= definitions.map { |_key_path, definition| definition }
end
def schemer
@schemer ||= ::JSONSchemer.schema(Pathname.new(METRIC_SCHEMA_PATH))
end

View File

@ -0,0 +1,19 @@
# frozen_string_literal: true
module Gitlab
module Usage
module Metrics
module Instrumentations
class BaseMetric
include Gitlab::Utils::UsageData
attr_reader :time_frame
def initialize(time_frame:)
@time_frame = time_frame
end
end
end
end
end
end

View File

@ -0,0 +1,68 @@
# frozen_string_literal: true
module Gitlab
module Usage
module Metrics
module Instrumentations
class DatabaseMetric < BaseMetric
# Usage Example
#
# class CountUsersCreatingIssuesMetric < DatabaseMetric
# operation :distinct_count, column: :author_id
#
# relation do |database_time_constraints|
# ::Issue.where(database_time_constraints)
# end
# end
class << self
def start(&block)
@metric_start = block
end
def finish(&block)
@metric_finish = block
end
def relation(&block)
@metric_relation = block
end
def operation(symbol, column: nil)
@metric_operation = symbol
@column = column
end
attr_reader :metric_operation, :metric_relation, :metric_start, :metric_finish, :column
end
def value
method(self.class.metric_operation)
.call(relation,
self.class.column,
start: self.class.metric_start&.call,
finish: self.class.metric_finish&.call)
end
def relation
self.class.metric_relation.call.where(time_constraints)
end
private
def time_constraints
case time_frame
when '28d'
{ created_at: 30.days.ago..2.days.ago }
when 'all'
{}
when 'none'
nil
else
raise "Unknown time frame: #{time_frame} for DatabaseMetric"
end
end
end
end
end
end
end

View File

@ -0,0 +1,32 @@
# frozen_string_literal: true
module Gitlab
module Usage
module Metrics
module Instrumentations
class GenericMetric < BaseMetric
# Usage example
#
# class UuidMetric < GenericMetric
# value do
# Gitlab::CurrentSettings.uuid
# end
# end
class << self
def value(&block)
@metric_value = block
end
attr_reader :metric_value
end
def value
alt_usage_data do
self.class.metric_value.call
end
end
end
end
end
end
end

View File

@ -0,0 +1,45 @@
# frozen_string_literal: true
module Gitlab
module Usage
module Metrics
module Instrumentations
class RedisHLLMetric < BaseMetric
# Usage example
#
# class CountUsersVisitingAnalyticsValuestreamMetric < RedisHLLMetric
# event_names :g_analytics_valuestream
# end
class << self
def event_names(events = nil)
@metric_events = events
end
attr_reader :metric_events
end
def value
redis_usage_data do
event_params = time_constraints.merge(event_names: self.class.metric_events)
Gitlab::UsageDataCounters::HLLRedisCounter.unique_events(**event_params)
end
end
private
def time_constraints
case time_frame
when '28d'
{ start_date: 4.weeks.ago.to_date, end_date: Date.current }
when '7d'
{ start_date: 7.days.ago.to_date, end_date: Date.current }
else
raise "Unknown time frame: #{time_frame} for TimeConstraint"
end
end
end
end
end
end
end

View File

@ -0,0 +1,15 @@
# frozen_string_literal: true
module Gitlab
module Usage
module Metrics
module Instrumentations
class UuidMetric < GenericMetric
value do
Gitlab::CurrentSettings.uuid
end
end
end
end
end
end

View File

@ -0,0 +1,27 @@
# frozen_string_literal: true
module Gitlab
module Usage
module Metrics
class KeyPathProcessor
class << self
def process(key_path, value)
unflatten(key_path.split('.'), value)
end
private
def unflatten(keys, value)
loop do
value = { keys.pop.to_sym => value }
break if keys.blank?
end
value
end
end
end
end
end
end

View File

@ -0,0 +1,28 @@
# frozen_string_literal: true
module Gitlab
class UsageDataMetrics
class << self
# Build the Usage Ping JSON payload from metrics YAML definitions which have instrumentation class set
def uncached_data
::Gitlab::Usage::MetricDefinition.all.map do |definition|
instrumentation_class = definition.attributes[:instrumentation_class]
if instrumentation_class.present?
metric_value = instrumentation_class.constantize.new(time_frame: definition.attributes[:time_frame]).value
metric_payload(definition.key_path, metric_value)
else
{}
end
end.reduce({}, :deep_merge)
end
private
def metric_payload(key_path, value)
::Gitlab::Usage::Metrics::KeyPathProcessor.process(key_path, value)
end
end
end
end

View File

@ -0,0 +1,145 @@
# frozen_string_literal: true
module Sidebars
module Projects
module Menus
class SettingsMenu < ::Sidebars::Menu
override :configure_menu_items
def configure_menu_items
return false unless can?(context.current_user, :admin_project, context.project)
add_item(general_menu_item)
add_item(integrations_menu_item)
add_item(webhooks_menu_item)
add_item(access_tokens_menu_item)
add_item(repository_menu_item)
add_item(ci_cd_menu_item)
add_item(operations_menu_item)
add_item(pages_menu_item)
add_item(packages_and_registries_menu_item)
true
end
override :link
def link
edit_project_path(context.project)
end
override :title
def title
_('Settings')
end
override :title_html_options
def title_html_options
{
id: 'js-onboarding-settings-link'
}
end
override :sprite_icon
def sprite_icon
'settings'
end
private
def general_menu_item
::Sidebars::MenuItem.new(
title: _('General'),
link: edit_project_path(context.project),
active_routes: { path: 'projects#edit' },
item_id: :general
)
end
def integrations_menu_item
::Sidebars::MenuItem.new(
title: _('Integrations'),
link: project_settings_integrations_path(context.project),
active_routes: { path: %w[integrations#show services#edit] },
item_id: :integrations
)
end
def webhooks_menu_item
::Sidebars::MenuItem.new(
title: _('Webhooks'),
link: project_hooks_path(context.project),
active_routes: { path: %w[hooks#index hooks#edit hook_logs#show] },
item_id: :webhooks
)
end
def access_tokens_menu_item
return unless can?(context.current_user, :read_resource_access_tokens, context.project)
::Sidebars::MenuItem.new(
title: _('Access Tokens'),
link: project_settings_access_tokens_path(context.project),
active_routes: { path: 'access_tokens#index' },
item_id: :access_tokens
)
end
def repository_menu_item
::Sidebars::MenuItem.new(
title: _('Repository'),
link: project_settings_repository_path(context.project),
active_routes: { path: 'repository#show' },
item_id: :repository
)
end
def ci_cd_menu_item
return if context.project.archived?
return unless context.project.feature_available?(:builds, context.current_user)
::Sidebars::MenuItem.new(
title: _('CI/CD'),
link: project_settings_ci_cd_path(context.project),
active_routes: { path: 'ci_cd#show' },
item_id: :ci_cd
)
end
def operations_menu_item
return if context.project.archived?
return unless can?(context.current_user, :admin_operations, context.project)
::Sidebars::MenuItem.new(
title: _('Operations'),
link: project_settings_operations_path(context.project),
active_routes: { path: 'operations#show' },
item_id: :operations
)
end
def pages_menu_item
return unless context.project.pages_available?
::Sidebars::MenuItem.new(
title: _('Pages'),
link: project_pages_path(context.project),
active_routes: { path: 'pages#show' },
item_id: :pages
)
end
def packages_and_registries_menu_item
return unless Gitlab.config.registry.enabled
return if Feature.disabled?(:sidebar_refactor, context.current_user)
return unless can?(context.current_user, :destroy_container_image, context.project)
::Sidebars::MenuItem.new(
title: _('Packages & Registries'),
link: project_settings_packages_and_registries_path(context.project),
active_routes: { path: 'packages_and_registries#index' },
item_id: :packages_and_registries
)
end
end
end
end
end

View File

@ -24,6 +24,7 @@ module Sidebars
add_menu(Sidebars::Projects::Menus::ExternalWikiMenu.new(context))
add_menu(Sidebars::Projects::Menus::SnippetsMenu.new(context))
add_menu(Sidebars::Projects::Menus::MembersMenu.new(context))
add_menu(Sidebars::Projects::Menus::SettingsMenu.new(context))
end
override :render_raw_menus_partial

View File

@ -29,5 +29,10 @@ namespace :gitlab do
items = Gitlab::Usage::MetricDefinition.definitions
Gitlab::Usage::Docs::Renderer.new(items).write
end
desc 'GitLab | UsageDataMetrics | Generate usage ping from metrics definition YAML files in JSON'
task generate_from_yaml: :environment do
puts Gitlab::Json.pretty_generate(Gitlab::UsageDataMetrics.uncached_data)
end
end
end

Some files were not shown because too many files have changed in this diff