Add latest changes from gitlab-org/gitlab@master
This commit is contained in:
parent
8f2f35ad2e
commit
84b4743475
|
|
@ -37,6 +37,7 @@ const LineHighlighter = function (options = {}) {
|
|||
options.fileHolderSelector = options.fileHolderSelector || '.file-holder';
|
||||
options.scrollFileHolder = options.scrollFileHolder || false;
|
||||
options.hash = options.hash || window.location.hash;
|
||||
options.scrollBehavior = options.scrollBehavior || 'smooth';
|
||||
|
||||
this.options = options;
|
||||
this._hash = options.hash;
|
||||
|
|
@ -74,6 +75,7 @@ LineHighlighter.prototype.highlightHash = function (newHash) {
|
|||
// Scroll to the first highlighted line on initial load
|
||||
// Add an offset of -100 for some context
|
||||
offset: -100,
|
||||
behavior: this.options.scrollBehavior,
|
||||
});
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -24,10 +24,6 @@ export default {
|
|||
prop: 'selectedGroup',
|
||||
},
|
||||
props: {
|
||||
accessLevels: {
|
||||
type: Object,
|
||||
required: true,
|
||||
},
|
||||
groupsFilter: {
|
||||
type: String,
|
||||
required: false,
|
||||
|
|
@ -58,13 +54,6 @@ export default {
|
|||
isFetchResultEmpty() {
|
||||
return this.groups.length === 0;
|
||||
},
|
||||
defaultFetchOptions() {
|
||||
return {
|
||||
exclude_internal: true,
|
||||
active: true,
|
||||
min_access_level: this.accessLevels.Guest,
|
||||
};
|
||||
},
|
||||
},
|
||||
watch: {
|
||||
searchTerm() {
|
||||
|
|
@ -107,9 +96,13 @@ export default {
|
|||
fetchGroups() {
|
||||
switch (this.groupsFilter) {
|
||||
case GROUP_FILTERS.DESCENDANT_GROUPS:
|
||||
return getDescendentGroups(this.parentGroupId, this.searchTerm, this.defaultFetchOptions);
|
||||
return getDescendentGroups(
|
||||
this.parentGroupId,
|
||||
this.searchTerm,
|
||||
this.$options.defaultFetchOptions,
|
||||
);
|
||||
default:
|
||||
return getGroups(this.searchTerm, this.defaultFetchOptions);
|
||||
return getGroups(this.searchTerm, this.$options.defaultFetchOptions);
|
||||
}
|
||||
},
|
||||
},
|
||||
|
|
@ -118,6 +111,10 @@ export default {
|
|||
searchPlaceholder: s__('GroupSelect|Search groups'),
|
||||
emptySearchResult: s__('GroupSelect|No matching results'),
|
||||
},
|
||||
defaultFetchOptions: {
|
||||
exclude_internal: true,
|
||||
active: true,
|
||||
},
|
||||
};
|
||||
</script>
|
||||
<template>
|
||||
|
|
|
|||
|
|
@ -155,7 +155,6 @@ export default {
|
|||
<template #select>
|
||||
<group-select
|
||||
v-model="groupToBeSharedWith"
|
||||
:access-levels="accessLevels"
|
||||
:groups-filter="groupSelectFilter"
|
||||
:parent-group-id="groupSelectParentId"
|
||||
:invalid-groups="invalidGroups"
|
||||
|
|
|
|||
|
|
@ -1,31 +0,0 @@
|
|||
<script>
|
||||
import { GlIcon, GlLink } from '@gitlab/ui';
|
||||
|
||||
export default {
|
||||
components: {
|
||||
GlIcon,
|
||||
GlLink,
|
||||
},
|
||||
props: {
|
||||
lines: {
|
||||
type: Number,
|
||||
required: true,
|
||||
},
|
||||
},
|
||||
};
|
||||
</script>
|
||||
<template>
|
||||
<div class="line-numbers">
|
||||
<gl-link
|
||||
v-for="line in lines"
|
||||
:id="`L${line}`"
|
||||
:key="line"
|
||||
class="diff-line-num gl-shadow-none!"
|
||||
:to="`#LC${line}`"
|
||||
:data-line-number="line"
|
||||
>
|
||||
<gl-icon :size="12" name="link" />
|
||||
{{ line }}
|
||||
</gl-link>
|
||||
</div>
|
||||
</template>
|
||||
|
|
@ -0,0 +1,103 @@
|
|||
<script>
|
||||
import { GlIntersectionObserver, GlSafeHtmlDirective } from '@gitlab/ui';
|
||||
import ChunkLine from './chunk_line.vue';
|
||||
|
||||
/*
|
||||
* We only highlight the chunk that is currently visible to the user.
|
||||
* By making use of the Intersection Observer API we can determine when a chunk becomes visible and highlight it accordingly.
|
||||
*
|
||||
* Content that is not visible to the user (i.e. not highlighted) do not need to look nice,
|
||||
* so by making text transparent and rendering raw (non-highlighted) text,
|
||||
* the browser spends less resources on painting content that is not immediately relevant.
|
||||
*
|
||||
* Why use transparent text as opposed to hiding content entirely?
|
||||
* 1. If content is hidden entirely, native find text (⌘ + F) won't work.
|
||||
* 2. When URL contains line numbers, the browser needs to be able to jump to the correct line.
|
||||
*/
|
||||
export default {
|
||||
components: {
|
||||
ChunkLine,
|
||||
GlIntersectionObserver,
|
||||
},
|
||||
directives: {
|
||||
SafeHtml: GlSafeHtmlDirective,
|
||||
},
|
||||
props: {
|
||||
chunkIndex: {
|
||||
type: Number,
|
||||
required: false,
|
||||
default: 0,
|
||||
},
|
||||
isHighlighted: {
|
||||
type: Boolean,
|
||||
required: true,
|
||||
},
|
||||
content: {
|
||||
type: String,
|
||||
required: true,
|
||||
},
|
||||
startingFrom: {
|
||||
type: Number,
|
||||
required: false,
|
||||
default: 0,
|
||||
},
|
||||
totalLines: {
|
||||
type: Number,
|
||||
required: false,
|
||||
default: 0,
|
||||
},
|
||||
language: {
|
||||
type: String,
|
||||
required: false,
|
||||
default: null,
|
||||
},
|
||||
},
|
||||
computed: {
|
||||
lines() {
|
||||
return this.content.split('\n');
|
||||
},
|
||||
},
|
||||
methods: {
|
||||
handleChunkAppear() {
|
||||
if (!this.isHighlighted) {
|
||||
this.$emit('appear', this.chunkIndex);
|
||||
}
|
||||
},
|
||||
},
|
||||
};
|
||||
</script>
|
||||
<template>
|
||||
<div>
|
||||
<gl-intersection-observer @appear="handleChunkAppear">
|
||||
<div v-if="isHighlighted">
|
||||
<chunk-line
|
||||
v-for="(line, index) in lines"
|
||||
:key="index"
|
||||
:number="startingFrom + index + 1"
|
||||
:content="line"
|
||||
:language="language"
|
||||
/>
|
||||
</div>
|
||||
<div v-else class="gl-display-flex">
|
||||
<div class="gl-display-flex gl-flex-direction-column">
|
||||
<a
|
||||
v-for="(n, index) in totalLines"
|
||||
:id="`L${startingFrom + index + 1}`"
|
||||
:key="index"
|
||||
class="gl-ml-5 gl-text-transparent"
|
||||
:href="`#L${startingFrom + index + 1}`"
|
||||
:data-line-number="startingFrom + index + 1"
|
||||
data-testid="line-number"
|
||||
>
|
||||
{{ startingFrom + index + 1 }}
|
||||
</a>
|
||||
</div>
|
||||
<div
|
||||
v-safe-html="content"
|
||||
class="gl-white-space-pre-wrap! gl-text-transparent"
|
||||
data-testid="content"
|
||||
></div>
|
||||
</div>
|
||||
</gl-intersection-observer>
|
||||
</div>
|
||||
</template>
|
||||
|
|
@ -0,0 +1,44 @@
|
|||
<script>
|
||||
import { GlLink, GlSafeHtmlDirective } from '@gitlab/ui';
|
||||
|
||||
export default {
|
||||
components: {
|
||||
GlLink,
|
||||
},
|
||||
directives: {
|
||||
SafeHtml: GlSafeHtmlDirective,
|
||||
},
|
||||
props: {
|
||||
number: {
|
||||
type: Number,
|
||||
required: true,
|
||||
},
|
||||
content: {
|
||||
type: String,
|
||||
required: true,
|
||||
},
|
||||
language: {
|
||||
type: String,
|
||||
required: true,
|
||||
},
|
||||
},
|
||||
};
|
||||
</script>
|
||||
<template>
|
||||
<div class="gl-display-flex">
|
||||
<div class="line-numbers gl-pt-0! gl-pb-0! gl-absolute gl-z-index-3">
|
||||
<gl-link
|
||||
:id="`L${number}`"
|
||||
class="file-line-num diff-line-num gl-user-select-none"
|
||||
:to="`#L${number}`"
|
||||
:data-line-number="number"
|
||||
>
|
||||
{{ number }}
|
||||
</gl-link>
|
||||
</div>
|
||||
|
||||
<pre
|
||||
class="code highlight gl-p-0! gl-w-full gl-overflow-visible! gl-ml-11!"
|
||||
><code><span :id="`LC${number}`" v-safe-html="content" :lang="language" class="line" data-testid="content"></span></code></pre>
|
||||
</div>
|
||||
</template>
|
||||
|
|
@ -109,3 +109,5 @@ export const ROUGE_TO_HLJS_LANGUAGE_MAP = {
|
|||
xquery: 'xquery',
|
||||
yaml: 'yaml',
|
||||
};
|
||||
|
||||
export const LINES_PER_CHUNK = 70;
|
||||
|
|
|
|||
|
|
@ -1,16 +1,21 @@
|
|||
<script>
|
||||
import { GlSafeHtmlDirective, GlLoadingIcon } from '@gitlab/ui';
|
||||
import LineNumbers from '~/vue_shared/components/line_numbers.vue';
|
||||
import { sanitize } from '~/lib/dompurify';
|
||||
import { ROUGE_TO_HLJS_LANGUAGE_MAP } from './constants';
|
||||
import { wrapLines } from './utils';
|
||||
|
||||
const LINE_SELECT_CLASS_NAME = 'hll';
|
||||
import LineHighlighter from '~/blob/line_highlighter';
|
||||
import { ROUGE_TO_HLJS_LANGUAGE_MAP, LINES_PER_CHUNK } from './constants';
|
||||
import Chunk from './components/chunk.vue';
|
||||
|
||||
/*
|
||||
* This component is optimized to handle source code with many lines of code by splitting source code into chunks of 70 lines of code,
|
||||
* we highlight and display the 1st chunk (L1-70) to the user as quickly as possible.
|
||||
*
|
||||
* The rest of the lines (L71+) is rendered once the browser goes into an idle state (requestIdleCallback).
|
||||
* Each chunk is self-contained, this ensures when for example the width of a container on line 1000 changes,
|
||||
* it does not trigger a repaint on a parent element that wraps all 1000 lines.
|
||||
*/
|
||||
export default {
|
||||
components: {
|
||||
LineNumbers,
|
||||
GlLoadingIcon,
|
||||
Chunk,
|
||||
},
|
||||
directives: {
|
||||
SafeHtml: GlSafeHtmlDirective,
|
||||
|
|
@ -27,46 +32,92 @@ export default {
|
|||
content: this.blob.rawTextBlob,
|
||||
language: ROUGE_TO_HLJS_LANGUAGE_MAP[this.blob.language],
|
||||
hljs: null,
|
||||
firstChunk: null,
|
||||
chunks: {},
|
||||
isLoading: true,
|
||||
isLineSelected: false,
|
||||
lineHighlighter: null,
|
||||
};
|
||||
},
|
||||
computed: {
|
||||
splitContent() {
|
||||
return this.content.split('\n');
|
||||
},
|
||||
lineNumbers() {
|
||||
return this.content.split('\n').length;
|
||||
},
|
||||
highlightedContent() {
|
||||
let highlightedContent;
|
||||
let { language } = this;
|
||||
|
||||
if (this.hljs) {
|
||||
if (!language) {
|
||||
const hljsHighlightAuto = this.hljs.highlightAuto(this.content);
|
||||
|
||||
highlightedContent = hljsHighlightAuto.value;
|
||||
language = hljsHighlightAuto.language;
|
||||
} else if (this.languageDefinition) {
|
||||
highlightedContent = this.hljs.highlight(this.content, { language: this.language }).value;
|
||||
}
|
||||
}
|
||||
|
||||
return wrapLines(highlightedContent, language);
|
||||
return this.splitContent.length;
|
||||
},
|
||||
},
|
||||
watch: {
|
||||
highlightedContent() {
|
||||
this.$nextTick(() => this.selectLine());
|
||||
},
|
||||
$route() {
|
||||
this.selectLine();
|
||||
},
|
||||
},
|
||||
async mounted() {
|
||||
async created() {
|
||||
this.generateFirstChunk();
|
||||
this.hljs = await this.loadHighlightJS();
|
||||
|
||||
if (this.language) {
|
||||
this.languageDefinition = await this.loadLanguage();
|
||||
}
|
||||
|
||||
// Highlight the first chunk as soon as highlight.js is available
|
||||
this.highlightChunk(null, true);
|
||||
|
||||
window.requestIdleCallback(async () => {
|
||||
// Generate the remaining chunks once the browser idles to ensure the browser resources are spent on the most important things first
|
||||
this.generateRemainingChunks();
|
||||
this.isLoading = false;
|
||||
await this.$nextTick();
|
||||
this.lineHighlighter = new LineHighlighter({ scrollBehavior: 'auto' });
|
||||
});
|
||||
},
|
||||
methods: {
|
||||
generateFirstChunk() {
|
||||
const lines = this.splitContent.splice(0, LINES_PER_CHUNK);
|
||||
this.firstChunk = this.createChunk(lines);
|
||||
},
|
||||
generateRemainingChunks() {
|
||||
const result = {};
|
||||
for (let i = 0; i < this.splitContent.length; i += LINES_PER_CHUNK) {
|
||||
const chunkIndex = Math.floor(i / LINES_PER_CHUNK);
|
||||
const lines = this.splitContent.slice(i, i + LINES_PER_CHUNK);
|
||||
result[chunkIndex] = this.createChunk(lines, i + LINES_PER_CHUNK);
|
||||
}
|
||||
|
||||
this.chunks = result;
|
||||
},
|
||||
createChunk(lines, startingFrom = 0) {
|
||||
return {
|
||||
content: lines.join('\n'),
|
||||
startingFrom,
|
||||
totalLines: lines.length,
|
||||
language: this.language,
|
||||
isHighlighted: false,
|
||||
};
|
||||
},
|
||||
highlightChunk(index, isFirstChunk) {
|
||||
const chunk = isFirstChunk ? this.firstChunk : this.chunks[index];
|
||||
|
||||
if (chunk.isHighlighted) {
|
||||
return;
|
||||
}
|
||||
|
||||
const { highlightedContent, language } = this.highlight(chunk.content, this.language);
|
||||
|
||||
Object.assign(chunk, { language, content: highlightedContent, isHighlighted: true });
|
||||
|
||||
this.selectLine();
|
||||
},
|
||||
highlight(content, language) {
|
||||
let detectedLanguage = language;
|
||||
let highlightedContent;
|
||||
if (this.hljs) {
|
||||
if (!detectedLanguage) {
|
||||
const hljsHighlightAuto = this.hljs.highlightAuto(content);
|
||||
highlightedContent = hljsHighlightAuto.value;
|
||||
detectedLanguage = hljsHighlightAuto.language;
|
||||
} else if (this.languageDefinition) {
|
||||
highlightedContent = this.hljs.highlight(content, { language: this.language }).value;
|
||||
}
|
||||
}
|
||||
|
||||
return { highlightedContent, language: detectedLanguage };
|
||||
},
|
||||
loadHighlightJS() {
|
||||
// If no language can be mapped to highlight.js we load all common languages else we load only the core (smallest footprint)
|
||||
return !this.language ? import('highlight.js/lib/common') : import('highlight.js/lib/core');
|
||||
|
|
@ -83,21 +134,14 @@ export default {
|
|||
|
||||
return languageDefinition;
|
||||
},
|
||||
selectLine() {
|
||||
const hash = sanitize(this.$route.hash);
|
||||
const lineToSelect = hash && this.$el.querySelector(hash);
|
||||
|
||||
if (!lineToSelect) {
|
||||
async selectLine() {
|
||||
if (this.isLineSelected || !this.lineHighlighter) {
|
||||
return;
|
||||
}
|
||||
|
||||
if (this.$options.currentlySelectedLine) {
|
||||
this.$options.currentlySelectedLine.classList.remove(LINE_SELECT_CLASS_NAME);
|
||||
}
|
||||
|
||||
lineToSelect.classList.add(LINE_SELECT_CLASS_NAME);
|
||||
this.$options.currentlySelectedLine = lineToSelect;
|
||||
lineToSelect.scrollIntoView({ behavior: 'smooth', block: 'center' });
|
||||
this.isLineSelected = true;
|
||||
await this.$nextTick();
|
||||
this.lineHighlighter.highlightHash(this.$route.hash);
|
||||
},
|
||||
},
|
||||
userColorScheme: window.gon.user_color_scheme,
|
||||
|
|
@ -105,16 +149,35 @@ export default {
|
|||
};
|
||||
</script>
|
||||
<template>
|
||||
<gl-loading-icon v-if="!highlightedContent" size="sm" class="gl-my-5" />
|
||||
<div
|
||||
v-else
|
||||
class="file-content code js-syntax-highlight blob-content gl-display-flex"
|
||||
class="file-content code js-syntax-highlight blob-content gl-display-flex gl-flex-direction-column gl-overflow-auto"
|
||||
:class="$options.userColorScheme"
|
||||
data-type="simple"
|
||||
data-qa-selector="blob_viewer_file_content"
|
||||
>
|
||||
<line-numbers :lines="lineNumbers" />
|
||||
<pre class="code highlight gl-pb-0!"><code v-safe-html="highlightedContent"></code>
|
||||
</pre>
|
||||
<chunk
|
||||
v-if="firstChunk"
|
||||
:lines="firstChunk.lines"
|
||||
:total-lines="firstChunk.totalLines"
|
||||
:content="firstChunk.content"
|
||||
:starting-from="firstChunk.startingFrom"
|
||||
:is-highlighted="firstChunk.isHighlighted"
|
||||
:language="firstChunk.language"
|
||||
/>
|
||||
|
||||
<gl-loading-icon v-if="isLoading" size="sm" class="gl-my-5" />
|
||||
<chunk
|
||||
v-for="(chunk, key, index) in chunks"
|
||||
v-else
|
||||
:key="key"
|
||||
:lines="chunk.lines"
|
||||
:content="chunk.content"
|
||||
:total-lines="chunk.totalLines"
|
||||
:starting-from="chunk.startingFrom"
|
||||
:is-highlighted="chunk.isHighlighted"
|
||||
:chunk-index="index"
|
||||
:language="chunk.language"
|
||||
@appear="highlightChunk"
|
||||
/>
|
||||
</div>
|
||||
</template>
|
||||
|
|
|
|||
|
|
@ -1,28 +0,0 @@
|
|||
export const wrapLines = (content, language) => {
|
||||
const isValidLanguage = /^[a-z\d\-_]+$/.test(language); // To prevent the possibility of a vulnerability we only allow languages that contain alphanumeric characters ([a-z\d), dashes (-) or underscores (_).
|
||||
|
||||
return (
|
||||
content &&
|
||||
content
|
||||
.split('\n')
|
||||
.map((line, i) => {
|
||||
let formattedLine;
|
||||
const attributes = `id="LC${i + 1}" lang="${isValidLanguage ? language : ''}"`;
|
||||
|
||||
if (line.includes('<span class="hljs') && !line.includes('</span>')) {
|
||||
/**
|
||||
* In some cases highlight.js will wrap multiple lines in a span, in these cases we want to append the line number to the existing span
|
||||
*
|
||||
* example (before): <span class="hljs-code">```bash
|
||||
* example (after): <span id="LC67" class="hljs-code">```bash
|
||||
*/
|
||||
formattedLine = line.replace(/(?=class="hljs)/, `${attributes} `);
|
||||
} else {
|
||||
formattedLine = `<span ${attributes} class="line">${line}</span>`;
|
||||
}
|
||||
|
||||
return formattedLine;
|
||||
})
|
||||
.join('\n')
|
||||
);
|
||||
};
|
||||
|
|
@ -0,0 +1,15 @@
|
|||
# frozen_string_literal: true
|
||||
|
||||
class RemoveTempIndexOnIdFromVulnerabilityOccurrences < Gitlab::Database::Migration[1.0]
|
||||
INDEX_NAME = 'tmp_idx_vulnerability_occurrences_on_id_where_report_type_7_99'
|
||||
|
||||
disable_ddl_transaction!
|
||||
|
||||
def up
|
||||
remove_concurrent_index_by_name :vulnerability_occurrences, name: INDEX_NAME
|
||||
end
|
||||
|
||||
def down
|
||||
add_concurrent_index :vulnerability_occurrences, :id, where: 'report_type IN (7, 99)', name: INDEX_NAME
|
||||
end
|
||||
end
|
||||
|
|
@ -0,0 +1 @@
|
|||
92485ceb25d5733efe9a35a5bd64c9f33253bb10f815590518c6fc8d1c9e5f39
|
||||
|
|
@ -29567,8 +29567,6 @@ CREATE INDEX tmp_gitlab_subscriptions_max_seats_used_migration ON gitlab_subscri
|
|||
|
||||
CREATE INDEX tmp_gitlab_subscriptions_max_seats_used_migration_2 ON gitlab_subscriptions USING btree (id) WHERE ((start_date < '2021-08-02'::date) AND (max_seats_used <> 0) AND (max_seats_used > seats_in_use) AND (max_seats_used > seats));
|
||||
|
||||
CREATE INDEX tmp_idx_vulnerability_occurrences_on_id_where_report_type_7_99 ON vulnerability_occurrences USING btree (id) WHERE (report_type = ANY (ARRAY[7, 99]));
|
||||
|
||||
CREATE INDEX tmp_index_ci_job_artifacts_on_id_where_trace_and_expire_at ON ci_job_artifacts USING btree (id) WHERE ((file_type = 3) AND (expire_at = ANY (ARRAY['2021-04-22 00:00:00+00'::timestamp with time zone, '2021-05-22 00:00:00+00'::timestamp with time zone, '2021-06-22 00:00:00+00'::timestamp with time zone, '2022-01-22 00:00:00+00'::timestamp with time zone, '2022-02-22 00:00:00+00'::timestamp with time zone, '2022-03-22 00:00:00+00'::timestamp with time zone, '2022-04-22 00:00:00+00'::timestamp with time zone])));
|
||||
|
||||
CREATE INDEX tmp_index_container_repositories_on_id_migration_state ON container_repositories USING btree (id, migration_state);
|
||||
|
|
|
|||
|
|
@ -43,23 +43,26 @@ repository storage is either:
|
|||
- Read requests are distributed between multiple Gitaly nodes, which can improve performance.
|
||||
- Write requests are broadcast to repository replicas.
|
||||
|
||||
## Guidance regarding Gitaly Cluster
|
||||
## Before deploying Gitaly Cluster
|
||||
|
||||
Gitaly Cluster provides the benefits of fault tolerance, but comes with additional complexity of setup and management. Please review existing technical limitations and considerations prior to deploying Gitaly Cluster.
|
||||
Gitaly Cluster provides the benefits of fault tolerance, but comes with additional complexity of setup and management.
|
||||
Before deploying Gitaly Cluster, please review:
|
||||
|
||||
- [Known issues](#known-issues)
|
||||
- Existing [known issues](#known-issues).
|
||||
- [Snapshot limitations](#snapshot-backup-and-recovery-limitations).
|
||||
- [Configuration guidance](configure_gitaly.md) and [Repository storage options](../repository_storage_paths.md) to make
|
||||
sure that Gitaly Cluster is the best setup for you.
|
||||
|
||||
Please also review the [configuration guidance](configure_gitaly.md) and [Repository storage options](../repository_storage_paths.md) to make sure that Gitaly Cluster is the best set-up for you. Finally, refer to the following guidance:
|
||||
If you have:
|
||||
|
||||
- If you have not yet migrated to Gitaly Cluster and want to continue using NFS, remain on the
|
||||
service you are using. NFS is supported in 14.x releases.
|
||||
- If you have not yet migrated to Gitaly Cluster but want to migrate away from NFS, you have two options - a sharded Gitaly instance or Gitaly Cluster.
|
||||
- If you have migrated to Gitaly Cluster and the limitations and tradeoffs are not suitable for your environment, your options are:
|
||||
1. [Migrate off Gitaly Cluster](#migrate-off-gitaly-cluster) back to your NFS solution
|
||||
1. [Migrate off Gitaly Cluster](#migrate-off-gitaly-cluster) to NFS solution or to a sharded Gitaly instance.
|
||||
- Not yet migrated to Gitaly Cluster and want to continue using NFS, remain on the service you are using. NFS is
|
||||
supported in 14.x releases but is [deprecated](../../update/deprecations.md#reminder-support-for-nfs-repository-storage).
|
||||
Support for storing Git repository data on NFS will end for all versions of GitLab with the release of 15.0.
|
||||
- Not yet migrated to Gitaly Cluster but want to migrate away from NFS, you have two options:
|
||||
- A sharded Gitaly instance.
|
||||
- Gitaly Cluster.
|
||||
|
||||
Reach out to your Technical Account Manager or customer support if you have any questions.
|
||||
Contact your Technical Account Manager or customer support if you have any questions.
|
||||
|
||||
### Known issues
|
||||
|
||||
|
|
@ -69,19 +72,20 @@ the current status of these issues, please refer to the referenced issues and ep
|
|||
| Issue | Summary | How to avoid |
|
||||
|:--------------------------------------------------------------------------------------|:------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|:--------------------------------|
|
||||
| Gitaly Cluster + Geo - Issues retrying failed syncs | If Gitaly Cluster is used on a Geo secondary site, repositories that have failed to sync could continue to fail when Geo tries to resync them. Recovering from this state requires assistance from support to run manual steps. Work is in-progress to update Gitaly Cluster to [identify repositories with a unique and persistent identifier](https://gitlab.com/gitlab-org/gitaly/-/issues/3485), which is expected to resolve the issue. | No known solution at this time. |
|
||||
| Database inconsistencies due to repository access outside of Gitaly Cluster's control | Operations that write to the repository storage that do not go through normal Gitaly Cluster methods can cause database inconsistencies. These can include (but are not limited to) snapshot restoration for cluster node disks, node upgrades which modify files under Git control, or any other disk operation that may touch repository storage external to GitLab. The Gitaly team is actively working to provide manual commands to [reconcile the Praefect database with the repository storage](https://gitlab.com/groups/gitlab-org/-/epics/6723). | Don't directly change repositories on any Gitaly Cluster node at this time. |
|
||||
| Praefect unable to insert data into the database due to migrations not being applied after an upgrade | If the database is not kept up to date with completed migrations, then the Praefect node is unable to perform normal operation. | Make sure the Praefect database is up and running with all migrations completed (For example: `/opt/gitlab/embedded/bin/praefect -config /var/opt/gitlab/praefect/config.toml sql-migrate-status` should show a list of all applied migrations). Consider [requesting live upgrade assistance](https://about.gitlab.com/support/scheduling-live-upgrade-assistance.html) so your upgrade plan can be reviewed by support. |
|
||||
| Restoring a Gitaly Cluster node from a snapshot in a running cluster | Because the Gitaly Cluster runs with consistent state, introducing a single node that is behind will result in the cluster not being able to reconcile the nodes data and other nodes data | Don't restore a single Gitaly Cluster node from a backup snapshot. If you must restore from backup, it's best to snapshot all Gitaly Cluster nodes at the same time and take a database dump of the Praefect database. |
|
||||
|
||||
### Snapshot backup and recovery limitations
|
||||
|
||||
Gitaly Cluster does not support snapshot backups because these can cause issues where the Praefect
|
||||
database becomes out of sync with the disk storage. Because of how Praefect rebuilds the replication
|
||||
metadata of Gitaly disk information during a restore, we recommend using the
|
||||
[official backup and restore Rake tasks](../../raketasks/backup_restore.md). If you are unable to use this method, please contact customer support for restoration help.
|
||||
Gitaly Cluster does not support snapshot backups. Snapshot backups can cause issues where the Praefect database becomes
|
||||
out of sync with the disk storage. Because of how Praefect rebuilds the replication metadata of Gitaly disk information
|
||||
during a restore, we recommend using the [official backup and restore Rake tasks](../../raketasks/backup_restore.md).
|
||||
|
||||
To track progress on work on a solution for manually re-synchronizing the Praefect database with
|
||||
disk storage, see [this epic](https://gitlab.com/groups/gitlab-org/-/epics/6575).
|
||||
If you are unable to use this method, please contact customer support for restoration help.
|
||||
|
||||
We are tracking in [this issue](https://gitlab.com/gitlab-org/gitlab/-/issues/351383) improvements to the
|
||||
[official backup and restore Rake tasks](../../raketasks/backup_restore.md) to add support for incremental backups. For
|
||||
more information, see [this epic](https://gitlab.com/groups/gitlab-org/-/epics/2094).
|
||||
|
||||
### What to do if you are on Gitaly Cluster experiencing an issue or limitation
|
||||
|
||||
|
|
@ -361,7 +365,8 @@ For more information on configuring Gitaly Cluster, see [Configure Gitaly Cluste
|
|||
|
||||
## Migrating to Gitaly Cluster
|
||||
|
||||
Please see [current guidance on Gitaly Cluster](#guidance-regarding-gitaly-cluster). The basic process for migrating to Gitaly Cluster involves:
|
||||
See the [Before deploying Gitaly Cluster](#before-deploying-gitaly-cluster) section before continuing. The basic process
|
||||
for migrating to Gitaly Cluster involves:
|
||||
|
||||
1. Create the required storage. Refer to
|
||||
[repository storage recommendations](faq.md#what-are-some-repository-storage-recommendations).
|
||||
|
|
@ -378,16 +383,14 @@ NOTE:
|
|||
GitLab requires a `default` repository storage to be configured.
|
||||
[Read more about this limitation](configure_gitaly.md#gitlab-requires-a-default-repository-storage).
|
||||
|
||||
### Migrate off Gitaly Cluster
|
||||
## Migrate off Gitaly Cluster
|
||||
|
||||
If you have repositories stored on a Gitaly Cluster, but you'd like to migrate
|
||||
them back to direct Gitaly storage:
|
||||
If the limitations and tradeoffs of Gitaly Cluster are found to be not suitable for your environment, you can Migrate
|
||||
off Gitaly Cluster to a sharded Gitaly instance:
|
||||
|
||||
1. Create and configure a new
|
||||
[Gitaly server](configure_gitaly.md#run-gitaly-on-its-own-server).
|
||||
1. [Move the repositories](../operations/moving_repositories.md#move-repositories)
|
||||
to the newly created storage. You can move them by shard or by group, which gives you the opportunity to spread them over
|
||||
multiple Gitaly servers.
|
||||
1. Create and configure a new [Gitaly server](configure_gitaly.md#run-gitaly-on-its-own-server).
|
||||
1. [Move the repositories](../operations/moving_repositories.md#move-repositories) to the newly created storage. You can
|
||||
move them by shard or by group, which gives you the opportunity to spread them over multiple Gitaly servers.
|
||||
|
||||
## Monitor Gitaly and Gitaly Cluster
|
||||
|
||||
|
|
|
|||
|
|
@ -42,7 +42,7 @@ full list of reference architectures, see
|
|||
2. Can be optionally run on reputable third-party external PaaS Redis solutions. Google Memorystore and AWS Elasticache are known to work.
|
||||
3. Can be optionally run on reputable third-party load balancing services (LB PaaS). AWS ELB is known to work.
|
||||
4. Should be run on reputable third-party object storage (storage PaaS) for cloud implementations. Google Cloud Storage and AWS S3 are known to work.
|
||||
5. Gitaly Cluster provides the benefits of fault tolerance, but comes with additional complexity of setup and management. Please [review the existing technical limitations and considerations prior to deploying Gitaly Cluster](../gitaly/index.md#guidance-regarding-gitaly-cluster). If Gitaly Sharded is desired, the same specs listed above for `Gitaly` should be used.
|
||||
5. Gitaly Cluster provides the benefits of fault tolerance, but comes with additional complexity of setup and management. Review the existing [technical limitations and considerations before deploying Gitaly Cluster](../gitaly/index.md#before-deploying-gitaly-cluster). If you want sharded Gitaly, use the same specs listed above for `Gitaly`.
|
||||
<!-- markdownlint-enable MD029 -->
|
||||
|
||||
NOTE:
|
||||
|
|
@ -1157,8 +1157,8 @@ are supported and can be added if needed.
|
|||
In this configuration, every Git repository is stored on every Gitaly node in the cluster, with one being designated the primary, and failover occurs automatically if the primary node goes down.
|
||||
|
||||
NOTE:
|
||||
Gitaly Cluster provides the benefits of fault tolerance, but comes with additional complexity of setup and management. Please [review the existing technical limitations and considerations prior to deploying Gitaly Cluster](../gitaly/index.md#guidance-regarding-gitaly-cluster).
|
||||
For implementations with Gitaly Sharded, the same Gitaly specs should be used. Follow the [separate Gitaly documentation](../gitaly/configure_gitaly.md) instead of this section.
|
||||
Gitaly Cluster provides the benefits of fault tolerance, but comes with additional complexity of setup and management. Review the existing [technical limitations and considerations before deploying Gitaly Cluster](../gitaly/index.md#before-deploying-gitaly-cluster).
|
||||
For implementations with sharded Gitaly, use the same Gitaly specs. Follow the [separate Gitaly documentation](../gitaly/configure_gitaly.md) instead of this section.
|
||||
|
||||
The recommended cluster setup includes the following components:
|
||||
|
||||
|
|
@ -2269,7 +2269,7 @@ services where applicable):
|
|||
2. Can be optionally run on reputable third-party external PaaS Redis solutions. Google Memorystore and AWS Elasticache are known to work.
|
||||
3. Can be optionally run on reputable third-party load balancing services (LB PaaS). AWS ELB is known to work.
|
||||
4. Should be run on reputable third-party object storage (storage PaaS) for cloud implementations. Google Cloud Storage and AWS S3 are known to work.
|
||||
5. Gitaly Cluster provides the benefits of fault tolerance, but comes with additional complexity of setup and management. Please [review the existing technical limitations and considerations prior to deploying Gitaly Cluster](../gitaly/index.md#guidance-regarding-gitaly-cluster). If Gitaly Sharded is desired, the same specs listed above for `Gitaly` should be used.
|
||||
5. Gitaly Cluster provides the benefits of fault tolerance, but comes with additional complexity of setup and management. Review the existing [technical limitations and considerations before deploying Gitaly Cluster](../gitaly/index.md#before-deploying-gitaly-cluster). If you want sharded Gitaly, use the same specs listed above for `Gitaly`.
|
||||
<!-- markdownlint-enable MD029 -->
|
||||
|
||||
NOTE:
|
||||
|
|
|
|||
|
|
@ -42,7 +42,7 @@ full list of reference architectures, see
|
|||
2. Can be optionally run on reputable third-party external PaaS Redis solutions. Google Memorystore and AWS Elasticache are known to work.
|
||||
3. Can be optionally run on reputable third-party load balancing services (LB PaaS). AWS ELB is known to work.
|
||||
4. Should be run on reputable third-party object storage (storage PaaS) for cloud implementations. Google Cloud Storage and AWS S3 are known to work.
|
||||
5. Gitaly Cluster provides the benefits of fault tolerance, but comes with additional complexity of setup and management. Please [review the existing technical limitations and considerations prior to deploying Gitaly Cluster](../gitaly/index.md#guidance-regarding-gitaly-cluster). If Gitaly Sharded is desired, the same specs listed above for `Gitaly` should be used.
|
||||
5. Gitaly Cluster provides the benefits of fault tolerance, but comes with additional complexity of setup and management. Review the existing [technical limitations and considerations before deploying Gitaly Cluster](../gitaly/index.md#before-deploying-gitaly-cluster). If you want sharded Gitaly, use the same specs listed above for `Gitaly`.
|
||||
<!-- markdownlint-enable MD029 -->
|
||||
|
||||
NOTE:
|
||||
|
|
@ -1163,8 +1163,8 @@ fault tolerant solution for storing Git repositories.
|
|||
In this configuration, every Git repository is stored on every Gitaly node in the cluster, with one being designated the primary, and failover occurs automatically if the primary node goes down.
|
||||
|
||||
NOTE:
|
||||
Gitaly Cluster provides the benefits of fault tolerance, but comes with additional complexity of setup and management. Please [review the existing technical limitations and considerations prior to deploying Gitaly Cluster](../gitaly/index.md#guidance-regarding-gitaly-cluster).
|
||||
For implementations with Gitaly Sharded, the same Gitaly specs should be used. Follow the [separate Gitaly documentation](../gitaly/configure_gitaly.md) instead of this section.
|
||||
Gitaly Cluster provides the benefits of fault tolerance, but comes with additional complexity of setup and management. Review the existing [technical limitations and considerations before deploying Gitaly Cluster](../gitaly/index.md#before-deploying-gitaly-cluster).
|
||||
For implementations with sharded Gitaly, use the same Gitaly specs. Follow the [separate Gitaly documentation](../gitaly/configure_gitaly.md) instead of this section.
|
||||
|
||||
The recommended cluster setup includes the following components:
|
||||
|
||||
|
|
@ -2267,7 +2267,7 @@ services where applicable):
|
|||
2. Can be optionally run on reputable third-party external PaaS Redis solutions. Google Memorystore and AWS Elasticache are known to work.
|
||||
3. Can be optionally run on reputable third-party load balancing services (LB PaaS). AWS ELB is known to work.
|
||||
4. Should be run on reputable third-party object storage (storage PaaS) for cloud implementations. Google Cloud Storage and AWS S3 are known to work.
|
||||
5. Gitaly Cluster provides the benefits of fault tolerance, but comes with additional complexity of setup and management. Please [review the existing technical limitations and considerations prior to deploying Gitaly Cluster](../gitaly/index.md#guidance-regarding-gitaly-cluster). If Gitaly Sharded is desired, the same specs listed above for `Gitaly` should be used.
|
||||
5. Gitaly Cluster provides the benefits of fault tolerance, but comes with additional complexity of setup and management. Review the existing [technical limitations and considerations before deploying Gitaly Cluster](../gitaly/index.md#before-deploying-gitaly-cluster). If you want sharded Gitaly, use the same specs listed above for `Gitaly`.
|
||||
<!-- markdownlint-enable MD029 -->
|
||||
|
||||
NOTE:
|
||||
|
|
|
|||
|
|
@ -51,7 +51,7 @@ For a full list of reference architectures, see
|
|||
2. Can be optionally run on reputable third-party external PaaS Redis solutions. Google Memorystore and AWS Elasticache are known to work.
|
||||
3. Can be optionally run on reputable third-party load balancing services (LB PaaS). AWS ELB is known to work.
|
||||
4. Should be run on reputable third-party object storage (storage PaaS) for cloud implementations. Google Cloud Storage and AWS S3 are known to work.
|
||||
5. Gitaly Cluster provides the benefits of fault tolerance, but comes with additional complexity of setup and management. Please [review the existing technical limitations and considerations prior to deploying Gitaly Cluster](../gitaly/index.md#guidance-regarding-gitaly-cluster). If Gitaly Sharded is desired, the same specs listed above for `Gitaly` should be used.
|
||||
5. Gitaly Cluster provides the benefits of fault tolerance, but comes with additional complexity of setup and management. Review the existing [technical limitations and considerations before deploying Gitaly Cluster](../gitaly/index.md#before-deploying-gitaly-cluster). If you want sharded Gitaly, use the same specs listed above for `Gitaly`.
|
||||
<!-- markdownlint-enable MD029 -->
|
||||
|
||||
NOTE:
|
||||
|
|
@ -1104,8 +1104,8 @@ The following IPs will be used as an example:
|
|||
In this configuration, every Git repository is stored on every Gitaly node in the cluster, with one being designated the primary, and failover occurs automatically if the primary node goes down.
|
||||
|
||||
NOTE:
|
||||
Gitaly Cluster provides the benefits of fault tolerance, but comes with additional complexity of setup and management. Please [review the existing technical limitations and considerations prior to deploying Gitaly Cluster](../gitaly/index.md#guidance-regarding-gitaly-cluster).
|
||||
For implementations with Gitaly Sharded, the same Gitaly specs should be used. Follow the [separate Gitaly documentation](../gitaly/configure_gitaly.md) instead of this section.
|
||||
Gitaly Cluster provides the benefits of fault tolerance, but comes with additional complexity of setup and management. Review the existing [technical limitations and considerations before deploying Gitaly Cluster](../gitaly/index.md#before-deploying-gitaly-cluster).
|
||||
For implementations with sharded Gitaly, use the same Gitaly specs. Follow the [separate Gitaly documentation](../gitaly/configure_gitaly.md) instead of this section.
|
||||
|
||||
The recommended cluster setup includes the following components:
|
||||
|
||||
|
|
@ -2225,7 +2225,7 @@ services where applicable):
|
|||
2. Can be optionally run on reputable third-party external PaaS Redis solutions. Google Memorystore and AWS Elasticache are known to work.
|
||||
3. Can be optionally run on reputable third-party load balancing services (LB PaaS). AWS ELB is known to work.
|
||||
4. Should be run on reputable third-party object storage (storage PaaS) for cloud implementations. Google Cloud Storage and AWS S3 are known to work.
|
||||
5. Gitaly Cluster provides the benefits of fault tolerance, but comes with additional complexity of setup and management. Please [review the existing technical limitations and considerations prior to deploying Gitaly Cluster](../gitaly/index.md#guidance-regarding-gitaly-cluster). If Gitaly Sharded is desired, the same specs listed above for `Gitaly` should be used.
|
||||
5. Gitaly Cluster provides the benefits of fault tolerance, but comes with additional complexity of setup and management. Review the existing [technical limitations and considerations before deploying Gitaly Cluster](../gitaly/index.md#before-deploying-gitaly-cluster). If you want sharded Gitaly, use the same specs listed above for `Gitaly`.
|
||||
<!-- markdownlint-enable MD029 -->
|
||||
|
||||
NOTE:
|
||||
|
|
|
|||
|
|
@ -42,7 +42,7 @@ full list of reference architectures, see
|
|||
2. Can be optionally run on reputable third-party external PaaS Redis solutions. Google Memorystore and AWS Elasticache are known to work.
|
||||
3. Can be optionally run on reputable third-party load balancing services (LB PaaS). AWS ELB is known to work.
|
||||
4. Should be run on reputable third-party object storage (storage PaaS) for cloud implementations. Google Cloud Storage and AWS S3 are known to work.
|
||||
5. Gitaly Cluster provides the benefits of fault tolerance, but comes with additional complexity of setup and management. Please [review the existing technical limitations and considerations prior to deploying Gitaly Cluster](../gitaly/index.md#guidance-regarding-gitaly-cluster). If Gitaly Sharded is desired, the same specs listed above for `Gitaly` should be used.
|
||||
5. Gitaly Cluster provides the benefits of fault tolerance, but comes with additional complexity of setup and management. Review the existing [technical limitations and considerations before deploying Gitaly Cluster](../gitaly/index.md#before-deploying-gitaly-cluster). If you want sharded Gitaly, use the same specs listed above for `Gitaly`.
|
||||
<!-- markdownlint-enable MD029 -->
|
||||
|
||||
NOTE:
|
||||
|
|
@ -1170,8 +1170,8 @@ Advanced [configuration options](https://docs.gitlab.com/omnibus/settings/redis.
|
|||
In this configuration, every Git repository is stored on every Gitaly node in the cluster, with one being designated the primary, and failover occurs automatically if the primary node goes down.
|
||||
|
||||
NOTE:
|
||||
Gitaly Cluster provides the benefits of fault tolerance, but comes with additional complexity of setup and management. Please [review the existing technical limitations and considerations prior to deploying Gitaly Cluster](../gitaly/index.md#guidance-regarding-gitaly-cluster).
|
||||
For implementations with Gitaly Sharded, the same Gitaly specs should be used. Follow the [separate Gitaly documentation](../gitaly/configure_gitaly.md) instead of this section.
|
||||
Gitaly Cluster provides the benefits of fault tolerance, but comes with additional complexity of setup and management. Review the existing [technical limitations and considerations before deploying Gitaly Cluster](../gitaly/index.md#before-deploying-gitaly-cluster).
|
||||
For implementations with sharded Gitaly, use the same Gitaly specs. Follow the [separate Gitaly documentation](../gitaly/configure_gitaly.md) instead of this section.
|
||||
|
||||
The recommended cluster setup includes the following components:
|
||||
|
||||
|
|
@ -2283,7 +2283,7 @@ services where applicable):
|
|||
2. Can be optionally run on reputable third-party external PaaS Redis solutions. Google Memorystore and AWS Elasticache are known to work.
|
||||
3. Can be optionally run on reputable third-party load balancing services (LB PaaS). AWS ELB is known to work.
|
||||
4. Should be run on reputable third-party object storage (storage PaaS) for cloud implementations. Google Cloud Storage and AWS S3 are known to work.
|
||||
5. Gitaly Cluster provides the benefits of fault tolerance, but comes with additional complexity of setup and management. Please [review the existing technical limitations and considerations prior to deploying Gitaly Cluster](../gitaly/index.md#guidance-regarding-gitaly-cluster). If Gitaly Sharded is desired, the same specs listed above for `Gitaly` should be used.
|
||||
5. Gitaly Cluster provides the benefits of fault tolerance, but comes with additional complexity of setup and management. Review the existing [technical limitations and considerations before deploying Gitaly Cluster](../gitaly/index.md#before-deploying-gitaly-cluster). If you want sharded Gitaly, use the same specs listed above for `Gitaly`.
|
||||
<!-- markdownlint-enable MD029 -->
|
||||
|
||||
NOTE:
|
||||
|
|
|
|||
|
|
@ -48,7 +48,7 @@ costly-to-operate environment by using the
|
|||
2. Can be optionally run on reputable third-party external PaaS Redis solutions. Google Memorystore and AWS Elasticache are known to work.
|
||||
3. Can be optionally run on reputable third-party load balancing services (LB PaaS). AWS ELB is known to work.
|
||||
4. Should be run on reputable third-party object storage (storage PaaS) for cloud implementations. Google Cloud Storage and AWS S3 are known to work.
|
||||
5. Gitaly Cluster provides the benefits of fault tolerance, but comes with additional complexity of setup and management. Please [review the existing technical limitations and considerations prior to deploying Gitaly Cluster](../gitaly/index.md#guidance-regarding-gitaly-cluster). If Gitaly Sharded is desired, the same specs listed above for `Gitaly` should be used.
|
||||
5. Gitaly Cluster provides the benefits of fault tolerance, but comes with additional complexity of setup and management. Review the existing [technical limitations and considerations before deploying Gitaly Cluster](../gitaly/index.md#before-deploying-gitaly-cluster). If you want sharded Gitaly, use the same specs listed above for `Gitaly`.
|
||||
<!-- markdownlint-enable MD029 -->
|
||||
|
||||
NOTE:
|
||||
|
|
@ -1101,8 +1101,8 @@ The following IPs will be used as an example:
|
|||
In this configuration, every Git repository is stored on every Gitaly node in the cluster, with one being designated the primary, and failover occurs automatically if the primary node goes down.
|
||||
|
||||
NOTE:
|
||||
Gitaly Cluster provides the benefits of fault tolerance, but comes with additional complexity of setup and management. Please [review the existing technical limitations and considerations prior to deploying Gitaly Cluster](../gitaly/index.md#guidance-regarding-gitaly-cluster).
|
||||
For implementations with Gitaly Sharded, the same Gitaly specs should be used. Follow the [separate Gitaly documentation](../gitaly/configure_gitaly.md) instead of this section.
|
||||
Gitaly Cluster provides the benefits of fault tolerance, but comes with additional complexity of setup and management. Review the existing [technical limitations and considerations before deploying Gitaly Cluster](../gitaly/index.md#before-deploying-gitaly-cluster).
|
||||
For implementations with sharded Gitaly, use the same Gitaly specs. Follow the [separate Gitaly documentation](../gitaly/configure_gitaly.md) instead of this section.
|
||||
|
||||
The recommended cluster setup includes the following components:
|
||||
|
||||
|
|
@ -2201,7 +2201,7 @@ services where applicable):
|
|||
2. Can be optionally run on reputable third-party external PaaS Redis solutions. Google Memorystore and AWS Elasticache are known to work.
|
||||
3. Can be optionally run on reputable third-party load balancing services (LB PaaS). AWS ELB is known to work.
|
||||
4. Should be run on reputable third-party object storage (storage PaaS) for cloud implementations. Google Cloud Storage and AWS S3 are known to work.
|
||||
5. Gitaly Cluster provides the benefits of fault tolerance, but comes with additional complexity of setup and management. Please [review the existing technical limitations and considerations prior to deploying Gitaly Cluster](../gitaly/index.md#guidance-regarding-gitaly-cluster). If Gitaly Sharded is desired, the same specs listed above for `Gitaly` should be used.
|
||||
5. Gitaly Cluster provides the benefits of fault tolerance, but comes with additional complexity of setup and management. Review the existing [technical limitations and considerations before deploying Gitaly Cluster](../gitaly/index.md#before-deploying-gitaly-cluster). If you want sharded Gitaly, use the same specs listed above for `Gitaly`.
|
||||
<!-- markdownlint-enable MD029 -->
|
||||
|
||||
NOTE:
|
||||
|
|
|
|||
|
|
@ -0,0 +1,101 @@
|
|||
---
|
||||
stage: Enablement
|
||||
group: Database
|
||||
info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://about.gitlab.com/handbook/engineering/ux/technical-writing/#assignments
|
||||
---
|
||||
|
||||
# Database Lab and Postgres.ai
|
||||
|
||||
Internal users at GitLab have access to the Database Lab Engine (DLE) and
|
||||
[postgres.ai](https://console.postgres.ai/) for testing performance of database queries
|
||||
on replicated production data. Unlike a typical read-only production replica, in the DLE you can
|
||||
also create, update, and delete rows. You can also test the performance of
|
||||
schema changes, like additional indexes or columns, in an isolated copy of production data.
|
||||
|
||||
## Access Database Lab Engine
|
||||
|
||||
Access to the DLE is helpful for:
|
||||
|
||||
- Database reviewers and maintainers.
|
||||
- Engineers who work on merge requests that have large effects on databases.
|
||||
|
||||
To access the DLE's services, you can:
|
||||
|
||||
- Perform query testing in the `#database_lab` Slack channel, or in the Postgres.ai web console.
|
||||
Employees access both services with their GitLab Google account. Query testing
|
||||
provides `EXPLAIN` (analyze, buffers) plans for queries executed there.
|
||||
- Migration testing by triggering a job as a part of a merge request.
|
||||
- Direct `psql` access to DLE instead of a production replica. Available to authorized users only.
|
||||
To request `psql` access, file an [access request](https://about.gitlab.com/handbook/business-technology/team-member-enablement/onboarding-access-requests/access-requests/#individual-or-bulk-access-request).
|
||||
|
||||
For more assistance, use the `#database` Slack channel.
|
||||
|
||||
NOTE:
|
||||
If you need only temporary access to a production replica, instead of a Database Lab
|
||||
clone, follow the runbook procedure for connecting to the
|
||||
[database console with Teleport](https://gitlab.com/gitlab-com/runbooks/-/blob/master/docs/Teleport/Connect_to_Database_Console_via_Teleport.md).
|
||||
This procedure is similar to [Rails console access with Teleport](https://gitlab.com/gitlab-com/runbooks/-/blob/master/docs/Teleport/Connect_to_Rails_Console_via_Teleport.md#how-to-use-teleport-to-connect-to-rails-console).
|
||||
|
||||
### Query testing
|
||||
|
||||
You can access Database Lab's query analysis features either:
|
||||
|
||||
- In the `#database_lab` Slack channel. Shows everyone's commands and results, but
|
||||
your own commands are still isolated in their own clone.
|
||||
- In [the Postgres.ai web console](https://console.postgres.ai/GitLab/joe-instances).
|
||||
Shows only the commands you run.
|
||||
|
||||
#### Generate query plans
|
||||
|
||||
Query plans are an essential part of the database review process. These plans
|
||||
enable us to decide quickly if a given query can be performant on GitLab.com.
|
||||
Running the `explain` command generates an `explain` plan and a link to the Postgres.ai
|
||||
console with more query analysis. For example, running `EXPLAIN SELECT * FROM application_settings`
|
||||
does the following:
|
||||
|
||||
1. Runs `explain (analyze, buffers) select * from application_settings;` against a database clone.
|
||||
1. Responds with timing and buffer details from the run.
|
||||
1. Provides a [detailed, shareable report on the results](https://console.postgres.ai/shared/24d543c9-893b-4ff6-8deb-a8f902f85a53).
|
||||
|
||||
#### Making schema changes
|
||||
|
||||
Sometimes when testing queries, a contributor may realize that the query needs an index
|
||||
or other schema change to make added queries more performant. To test the query, run the `exec` command.
|
||||
For example, running this command:
|
||||
|
||||
```sql
|
||||
exec CREATE INDEX on application_settings USING btree (instance_administration_project_id)
|
||||
```
|
||||
|
||||
creates the specified index on the table. You can [test queries](#generate-query-plans) leveraging
|
||||
the new index. `exec` does not return any results, only the time required to execute the query.
|
||||
|
||||
#### Reset the clone
|
||||
|
||||
After many changes, such as after a destructive query or an ineffective index,
|
||||
you must start over. To reset your designated clone, run `reset`.
|
||||
|
||||
### Migration testing
|
||||
|
||||
For information on testing migrations, review our
|
||||
[database migration testing documentation](database_migration_pipeline.md).
|
||||
|
||||
### Access the console with `psql`
|
||||
|
||||
Team members with [`psql` access](#access-database-lab-engine), can gain direct access
|
||||
to a clone via `psql`. Access to `psql` enables you to see data, not just metadata.
|
||||
|
||||
To connect to a clone using `psql`:
|
||||
|
||||
1. Create a clone from the [desired instance](https://console.postgres.ai/gitlab/instances/).
|
||||
1. Provide a **Clone ID**: Something that uniquely identifies your clone, such as `yourname-testing-gitlabissue`.
|
||||
1. Provide a **Database username** and **Database password**: Connects `psql` to your clone.
|
||||
1. Select **Enable deletion protection** if you want to preserve your clone. Avoid selecting this option.
|
||||
Clones are removed after 12 hours.
|
||||
1. In the **Clone details** page of the Postgres.ai web interface, copy and run
|
||||
the command to start SSH port forwarding for the clone.
|
||||
1. In the **Clone details** page of the Postgres.ai web interface, copy and run the `psql` connection string.
|
||||
Use the password provided at setup.
|
||||
|
||||
After you connect, use clone like you would any `psql` console in production, but with
|
||||
the added benefit and safety of an isolated writeable environment.
|
||||
|
|
@ -290,3 +290,43 @@ executed synchronously every time the given event is published.
|
|||
|
||||
For complex conditions it's best to subscribe to all the events and then handle the logic
|
||||
in the `handle_event` method of the subscriber worker.
|
||||
|
||||
## Testing
|
||||
|
||||
A publisher doesn't must care what subscribed to the event being published. The publisher's
|
||||
responsibility is to ensure that the event is published correctly.
|
||||
|
||||
To test that an event has been published correctly, we can use the RSpec matcher `:publish_event`:
|
||||
|
||||
```ruby
|
||||
it 'publishes a ProjectDeleted event with project id and namespace id' do
|
||||
expected_data = { project_id: project.id, namespace_id: project.namespace_id }
|
||||
|
||||
# The matcher verifies that when the block is called, the block publishes the expected event and data.
|
||||
expect { destroy_project(project, user, {}) }
|
||||
.to publish_event(Projects::ProjectDeletedEvent)
|
||||
.with(expected_data)
|
||||
end
|
||||
```
|
||||
|
||||
The subscriber must ensure that a published event can be consumed correctly. For this purpose
|
||||
we have added helpers and shared examples to standardize the way we test subscribers:
|
||||
|
||||
```ruby
|
||||
RSpec.describe MergeRequests::UpdateHeadPipelineWorker do
|
||||
let(:event) { Ci::PipelineCreatedEvent.new(data: ({ pipeline_id: pipeline.id })) }
|
||||
|
||||
# This shared example ensures that an event is published and correctly processed by
|
||||
# the current subscriber (`described_class`).
|
||||
it_behaves_like 'consumes the published event' do
|
||||
let(:event) { event }
|
||||
end
|
||||
|
||||
it 'does something' do
|
||||
# This helper directly executes `perform` ensuring that `handle_event` is called correctly.
|
||||
consume_event(subscriber: described_class, event: event)
|
||||
|
||||
# run expectations
|
||||
end
|
||||
end
|
||||
```
|
||||
|
|
|
|||
|
|
@ -112,12 +112,27 @@ you can reply to comments by sending an email.
|
|||
|
||||
You can use [Markdown](../markdown.md) and [quick actions](../project/quick_actions.md) in your email replies.
|
||||
|
||||
## Who can edit comments
|
||||
## Edit a comment
|
||||
|
||||
You can edit your own comment at any time.
|
||||
|
||||
Anyone with at least the Maintainer role can also edit a comment made by someone else.
|
||||
|
||||
To edit a comment:
|
||||
|
||||
1. On the comment, select **Edit comment** (**{pencil}**).
|
||||
1. Make your edits.
|
||||
1. Select **Save changes**.
|
||||
|
||||
### Editing a comment to add a mention
|
||||
|
||||
By default, when you mention a user, GitLab [creates a to-do item](../todos.md#actions-that-create-to-do-items)
|
||||
for them, and sends them a [notification email](../profile/notifications.md).
|
||||
|
||||
If you edit an existing comment to add a user mention that wasn't there before, GitLab:
|
||||
|
||||
- Creates a to-do item for the mentioned user.
|
||||
- Does not send a notification email.
|
||||
|
||||
## Prevent comments by locking an issue
|
||||
|
||||
You can prevent public comments in an issue or merge request.
|
||||
|
|
|
|||
|
|
@@ -21,11 +21,14 @@ that happen there.

You might receive notifications for one of the following reasons:

- You participate in an issue, merge request, epic, or design. You become a participant when you comment
  or edit, or someone mentions you.
  or edit, or someone mentions <sup>1</sup> you.
- You've [enabled notifications in an issue, merge request, or epic](#notifications-on-issues-merge-requests-and-epics).
- You've configured notifications for the [project](#change-level-of-project-notifications) or [group](#group-notifications).
- You're subscribed to group or project pipeline notifications via the pipeline emails [integration](../project/integrations/overview.md).

1. GitLab doesn't send a notification when
   [a comment is edited to include a user mention](../discussions/index.md#editing-a-comment-to-add-a-mention).

NOTE:
Administrators can block notifications, preventing them from being sent.
@ -88,34 +88,33 @@ module Backup
|
|||
remove_old
|
||||
end
|
||||
|
||||
progress.puts "Warning: Your gitlab.rb and gitlab-secrets.json files contain sensitive data \n" \
|
||||
puts_time "Warning: Your gitlab.rb and gitlab-secrets.json files contain sensitive data \n" \
|
||||
"and are not included in this backup. You will need these files to restore a backup.\n" \
|
||||
"Please back them up manually.".color(:red)
|
||||
progress.puts "Backup task is done."
|
||||
puts_time "Backup task is done."
|
||||
end
|
||||
|
||||
def run_create_task(task_name)
|
||||
definition = @definitions[task_name]
|
||||
|
||||
build_backup_information
|
||||
puts_time "Dumping #{definition.task.human_name} ... ".color(:blue)
|
||||
|
||||
unless definition.task.enabled
|
||||
puts_time "[DISABLED]".color(:cyan)
|
||||
puts_time "Dumping #{definition.task.human_name} ... ".color(:blue) + "[DISABLED]".color(:cyan)
|
||||
return
|
||||
end
|
||||
|
||||
if skipped?(task_name)
|
||||
puts_time "[SKIPPED]".color(:cyan)
|
||||
puts_time "Dumping #{definition.task.human_name} ... ".color(:blue) + "[SKIPPED]".color(:cyan)
|
||||
return
|
||||
end
|
||||
|
||||
puts_time "Dumping #{definition.task.human_name} ... ".color(:blue)
|
||||
definition.task.dump(File.join(Gitlab.config.backup.path, definition.destination_path))
|
||||
|
||||
puts_time "done".color(:green)
|
||||
puts_time "Dumping #{definition.task.human_name} ... ".color(:blue) + "done".color(:green)
|
||||
|
||||
rescue Backup::DatabaseBackupError, Backup::FileBackupError => e
|
||||
progress.puts "#{e.message}"
|
||||
puts_time "Dumping #{definition.task.human_name} failed: #{e.message}".color(:red)
|
||||
end
|
||||
|
||||
def restore
|
||||
|
|
@ -136,21 +135,21 @@ module Backup
|
|||
|
||||
remove_tmp
|
||||
|
||||
puts "Warning: Your gitlab.rb and gitlab-secrets.json files contain sensitive data \n" \
|
||||
"and are not included in this backup. You will need to restore these files manually.".color(:red)
|
||||
puts "Restore task is done."
|
||||
puts_time "Warning: Your gitlab.rb and gitlab-secrets.json files contain sensitive data \n" \
|
||||
"and are not included in this backup. You will need to restore these files manually.".color(:red)
|
||||
puts_time "Restore task is done."
|
||||
end
|
||||
|
||||
def run_restore_task(task_name)
|
||||
definition = @definitions[task_name]
|
||||
|
||||
puts_time "Restoring #{definition.task.human_name} ... ".color(:blue)
|
||||
|
||||
unless definition.task.enabled
|
||||
puts_time "[DISABLED]".color(:cyan)
|
||||
puts_time "Restoring #{definition.task.human_name} ... ".color(:blue) + "[DISABLED]".color(:cyan)
|
||||
return
|
||||
end
|
||||
|
||||
puts_time "Restoring #{definition.task.human_name} ... ".color(:blue)
|
||||
|
||||
warning = definition.task.pre_restore_warning
|
||||
if warning.present?
|
||||
puts_time warning.color(:red)
|
||||
|
|
@ -159,7 +158,7 @@ module Backup
|
|||
|
||||
definition.task.restore(File.join(Gitlab.config.backup.path, definition.destination_path))
|
||||
|
||||
puts_time "done".color(:green)
|
||||
puts_time "Restoring #{definition.task.human_name} ... ".color(:blue) + "done".color(:green)
|
||||
|
||||
warning = definition.task.post_restore_warning
|
||||
if warning.present?
|
||||
|
|
@ -209,103 +208,104 @@ module Backup
|
|||
def pack
|
||||
Dir.chdir(backup_path) do
|
||||
# create archive
|
||||
progress.print "Creating backup archive: #{tar_file} ... "
|
||||
puts_time "Creating backup archive: #{tar_file} ... ".color(:blue)
|
||||
# Set file permissions on open to prevent chmod races.
|
||||
tar_system_options = { out: [tar_file, 'w', Gitlab.config.backup.archive_permissions] }
|
||||
if Kernel.system('tar', '-cf', '-', *backup_contents, tar_system_options)
|
||||
progress.puts "done".color(:green)
|
||||
puts_time "Creating backup archive: #{tar_file} ... ".color(:blue) + 'done'.color(:green)
|
||||
else
|
||||
puts "creating archive #{tar_file} failed".color(:red)
|
||||
puts_time "Creating archive #{tar_file} failed".color(:red)
|
||||
raise Backup::Error, 'Backup failed'
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
def upload
|
||||
progress.print "Uploading backup archive to remote storage #{remote_directory} ... "
|
||||
|
||||
connection_settings = Gitlab.config.backup.upload.connection
|
||||
if connection_settings.blank?
|
||||
progress.puts "skipped".color(:yellow)
|
||||
puts_time "Uploading backup archive to remote storage #{remote_directory} ... ".color(:blue) + "[SKIPPED]".color(:cyan)
|
||||
return
|
||||
end
|
||||
|
||||
puts_time "Uploading backup archive to remote storage #{remote_directory} ... ".color(:blue)
|
||||
|
||||
directory = connect_to_remote_directory
|
||||
upload = directory.files.create(create_attributes)
|
||||
|
||||
if upload
|
||||
if upload.respond_to?(:encryption) && upload.encryption
|
||||
progress.puts "done (encrypted with #{upload.encryption})".color(:green)
|
||||
puts_time "Uploading backup archive to remote storage #{remote_directory} ... ".color(:blue) + "done (encrypted with #{upload.encryption})".color(:green)
|
||||
else
|
||||
progress.puts "done".color(:green)
|
||||
puts_time "Uploading backup archive to remote storage #{remote_directory} ... ".color(:blue) + "done".color(:green)
|
||||
end
|
||||
else
|
||||
puts "uploading backup to #{remote_directory} failed".color(:red)
|
||||
puts_time "Uploading backup to #{remote_directory} failed".color(:red)
|
||||
raise Backup::Error, 'Backup failed'
|
||||
end
|
||||
end
|
||||
|
||||
def cleanup
|
||||
progress.print "Deleting tmp directories ... "
|
||||
puts_time "Deleting tar staging files ... ".color(:blue)
|
||||
|
||||
remove_backup_path(MANIFEST_NAME)
|
||||
@definitions.each do |_, definition|
|
||||
remove_backup_path(definition.cleanup_path || definition.destination_path)
|
||||
end
|
||||
|
||||
puts_time "Deleting tar staging files ... ".color(:blue) + 'done'.color(:green)
|
||||
end
|
||||
|
||||
def remove_backup_path(path)
|
||||
return unless File.exist?(File.join(backup_path, path))
|
||||
absolute_path = File.join(backup_path, path)
|
||||
return unless File.exist?(absolute_path)
|
||||
|
||||
FileUtils.rm_rf(File.join(backup_path, path))
|
||||
progress.puts "done".color(:green)
|
||||
puts_time "Cleaning up #{absolute_path}"
|
||||
FileUtils.rm_rf(absolute_path)
|
||||
end
|
||||
|
||||
def remove_tmp
|
||||
# delete tmp inside backups
|
||||
progress.print "Deleting backups/tmp ... "
|
||||
puts_time "Deleting backups/tmp ... ".color(:blue)
|
||||
|
||||
if FileUtils.rm_rf(File.join(backup_path, "tmp"))
|
||||
progress.puts "done".color(:green)
|
||||
else
|
||||
puts "deleting backups/tmp failed".color(:red)
|
||||
end
|
||||
FileUtils.rm_rf(File.join(backup_path, "tmp"))
|
||||
puts_time "Deleting backups/tmp ... ".color(:blue) + "done".color(:green)
|
||||
end
|
||||
|
||||
def remove_old
|
||||
# delete backups
|
||||
progress.print "Deleting old backups ... "
|
||||
keep_time = Gitlab.config.backup.keep_time.to_i
|
||||
|
||||
if keep_time > 0
|
||||
removed = 0
|
||||
if keep_time <= 0
|
||||
puts_time "Deleting old backups ... ".color(:blue) + "[SKIPPED]".color(:cyan)
|
||||
return
|
||||
end
|
||||
|
||||
Dir.chdir(backup_path) do
|
||||
backup_file_list.each do |file|
|
||||
# For backward compatibility, there are 3 names the backups can have:
|
||||
# - 1495527122_gitlab_backup.tar
|
||||
# - 1495527068_2017_05_23_gitlab_backup.tar
|
||||
# - 1495527097_2017_05_23_9.3.0-pre_gitlab_backup.tar
|
||||
matched = backup_file?(file)
|
||||
next unless matched
|
||||
puts_time "Deleting old backups ... ".color(:blue)
|
||||
removed = 0
|
||||
|
||||
timestamp = matched[1].to_i
|
||||
Dir.chdir(backup_path) do
|
||||
backup_file_list.each do |file|
|
||||
# For backward compatibility, there are 3 names the backups can have:
|
||||
# - 1495527122_gitlab_backup.tar
|
||||
# - 1495527068_2017_05_23_gitlab_backup.tar
|
||||
# - 1495527097_2017_05_23_9.3.0-pre_gitlab_backup.tar
|
||||
matched = backup_file?(file)
|
||||
next unless matched
|
||||
|
||||
if Time.at(timestamp) < (Time.now - keep_time)
|
||||
begin
|
||||
FileUtils.rm(file)
|
||||
removed += 1
|
||||
rescue StandardError => e
|
||||
progress.puts "Deleting #{file} failed: #{e.message}".color(:red)
|
||||
end
|
||||
timestamp = matched[1].to_i
|
||||
|
||||
if Time.at(timestamp) < (Time.now - keep_time)
|
||||
begin
|
||||
FileUtils.rm(file)
|
||||
removed += 1
|
||||
rescue StandardError => e
|
||||
puts_time "Deleting #{file} failed: #{e.message}".color(:red)
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
progress.puts "done. (#{removed} removed)".color(:green)
|
||||
else
|
||||
progress.puts "skipping".color(:yellow)
|
||||
end
|
||||
|
||||
puts_time "Deleting old backups ... ".color(:blue) + "done. (#{removed} removed)".color(:green)
|
||||
end
|
||||
|
||||
def verify_backup_version
|
||||
|
|
@ -327,7 +327,7 @@ module Backup
|
|||
|
||||
def unpack
|
||||
if ENV['BACKUP'].blank? && non_tarred_backup?
|
||||
progress.puts "Non tarred backup found in #{backup_path}, using that"
|
||||
puts_time "Non tarred backup found in #{backup_path}, using that"
|
||||
|
||||
return false
|
||||
end
|
||||
|
|
@ -335,15 +335,15 @@ module Backup
|
|||
Dir.chdir(backup_path) do
|
||||
# check for existing backups in the backup dir
|
||||
if backup_file_list.empty?
|
||||
progress.puts "No backups found in #{backup_path}"
|
||||
progress.puts "Please make sure that file name ends with #{FILE_NAME_SUFFIX}"
|
||||
puts_time "No backups found in #{backup_path}"
|
||||
puts_time "Please make sure that file name ends with #{FILE_NAME_SUFFIX}"
|
||||
exit 1
|
||||
elsif backup_file_list.many? && ENV["BACKUP"].nil?
|
||||
progress.puts 'Found more than one backup:'
|
||||
puts_time 'Found more than one backup:'
|
||||
# print list of available backups
|
||||
progress.puts " " + available_timestamps.join("\n ")
|
||||
progress.puts 'Please specify which one you want to restore:'
|
||||
progress.puts 'rake gitlab:backup:restore BACKUP=timestamp_of_backup'
|
||||
puts_time " " + available_timestamps.join("\n ")
|
||||
puts_time 'Please specify which one you want to restore:'
|
||||
puts_time 'rake gitlab:backup:restore BACKUP=timestamp_of_backup'
|
||||
exit 1
|
||||
end
|
||||
|
||||
|
|
@ -354,16 +354,16 @@ module Backup
|
|||
end
|
||||
|
||||
unless File.exist?(tar_file)
|
||||
progress.puts "The backup file #{tar_file} does not exist!"
|
||||
puts_time "The backup file #{tar_file} does not exist!"
|
||||
exit 1
|
||||
end
|
||||
|
||||
progress.print 'Unpacking backup ... '
|
||||
puts_time 'Unpacking backup ... '.color(:blue)
|
||||
|
||||
if Kernel.system(*%W(tar -xf #{tar_file}))
|
||||
progress.puts 'done'.color(:green)
|
||||
puts_time 'Unpacking backup ... '.color(:blue) + 'done'.color(:green)
|
||||
else
|
||||
progress.puts 'unpacking backup failed'.color(:red)
|
||||
puts_time 'Unpacking backup failed'.color(:red)
|
||||
exit 1
|
||||
end
|
||||
end
|
||||
|
|
|
|||
|
|
@ -3,7 +3,6 @@
|
|||
require 'spec_helper'
|
||||
|
||||
RSpec.describe 'Groups > Members > Manage groups', :js do
|
||||
include Select2Helper
|
||||
include Spec::Support::Helpers::Features::MembersHelpers
|
||||
include Spec::Support::Helpers::Features::InviteMembersModalHelper
|
||||
include Spec::Support::Helpers::ModalHelpers
|
||||
|
|
@ -119,16 +118,92 @@ RSpec.describe 'Groups > Members > Manage groups', :js do
|
|||
|
||||
describe 'group search results' do
|
||||
let_it_be(:group, refind: true) { create(:group) }
|
||||
let_it_be(:group_within_hierarchy) { create(:group, parent: group) }
|
||||
let_it_be(:group_outside_hierarchy) { create(:group) }
|
||||
|
||||
before_all do
|
||||
group.add_owner(user)
|
||||
group_within_hierarchy.add_owner(user)
|
||||
group_outside_hierarchy.add_owner(user)
|
||||
context 'with instance admin considerations' do
|
||||
let_it_be(:group_to_share) { create(:group) }
|
||||
|
||||
context 'when user is an admin' do
|
||||
let_it_be(:admin) { create(:admin) }
|
||||
|
||||
before do
|
||||
sign_in(admin)
|
||||
gitlab_enable_admin_mode_sign_in(admin)
|
||||
end
|
||||
|
||||
it 'shows groups where the admin has no direct membership' do
|
||||
visit group_group_members_path(group)
|
||||
|
||||
click_on 'Invite a group'
|
||||
click_on 'Select a group'
|
||||
wait_for_requests
|
||||
|
||||
page.within(group_dropdown_selector) do
|
||||
expect_to_have_group(group_to_share)
|
||||
expect_not_to_have_group(group)
|
||||
end
|
||||
end
|
||||
|
||||
it 'shows groups where the admin has at least guest level membership' do
|
||||
group_to_share.add_guest(admin)
|
||||
|
||||
visit group_group_members_path(group)
|
||||
|
||||
click_on 'Invite a group'
|
||||
click_on 'Select a group'
|
||||
wait_for_requests
|
||||
|
||||
page.within(group_dropdown_selector) do
|
||||
expect_to_have_group(group_to_share)
|
||||
expect_not_to_have_group(group)
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
context 'when user is not an admin' do
|
||||
before do
|
||||
group.add_owner(user)
|
||||
end
|
||||
|
||||
it 'shows groups where the user has no direct membership' do
|
||||
visit group_group_members_path(group)
|
||||
|
||||
click_on 'Invite a group'
|
||||
click_on 'Select a group'
|
||||
wait_for_requests
|
||||
|
||||
page.within(group_dropdown_selector) do
|
||||
expect_not_to_have_group(group_to_share)
|
||||
expect_not_to_have_group(group)
|
||||
end
|
||||
end
|
||||
|
||||
it 'shows groups where the user has at least guest level membership' do
|
||||
group_to_share.add_guest(user)
|
||||
|
||||
visit group_group_members_path(group)
|
||||
|
||||
click_on 'Invite a group'
|
||||
click_on 'Select a group'
|
||||
wait_for_requests
|
||||
|
||||
page.within(group_dropdown_selector) do
|
||||
expect_to_have_group(group_to_share)
|
||||
expect_not_to_have_group(group)
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
context 'when the invite members group modal is enabled' do
|
||||
context 'when user is not an admin and there are hierarchy considerations' do
|
||||
let_it_be(:group_within_hierarchy) { create(:group, parent: group) }
|
||||
let_it_be(:group_outside_hierarchy) { create(:group) }
|
||||
|
||||
before_all do
|
||||
group.add_owner(user)
|
||||
group_within_hierarchy.add_owner(user)
|
||||
group_outside_hierarchy.add_owner(user)
|
||||
end
|
||||
|
||||
it 'does not show self or ancestors', :aggregate_failures do
|
||||
group_sibbling = create(:group, parent: group)
|
||||
group_sibbling.add_owner(user)
|
||||
|
|
@ -139,40 +214,45 @@ RSpec.describe 'Groups > Members > Manage groups', :js do
|
|||
click_on 'Select a group'
|
||||
wait_for_requests
|
||||
|
||||
page.within('[data-testid="group-select-dropdown"]') do
|
||||
expect(page).to have_selector("[entity-id='#{group_outside_hierarchy.id}']")
|
||||
expect(page).to have_selector("[entity-id='#{group_sibbling.id}']")
|
||||
expect(page).not_to have_selector("[entity-id='#{group.id}']")
|
||||
expect(page).not_to have_selector("[entity-id='#{group_within_hierarchy.id}']")
|
||||
page.within(group_dropdown_selector) do
|
||||
expect_to_have_group(group_outside_hierarchy)
|
||||
expect_to_have_group(group_sibbling)
|
||||
expect_not_to_have_group(group)
|
||||
expect_not_to_have_group(group_within_hierarchy)
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
context 'when sharing with groups outside the hierarchy is enabled' do
|
||||
it 'shows groups within and outside the hierarchy in search results' do
|
||||
visit group_group_members_path(group)
|
||||
context 'when sharing with groups outside the hierarchy is enabled' do
|
||||
it 'shows groups within and outside the hierarchy in search results' do
|
||||
visit group_group_members_path(group)
|
||||
|
||||
click_on 'Invite a group'
|
||||
click_on 'Select a group'
|
||||
click_on 'Invite a group'
|
||||
click_on 'Select a group'
|
||||
wait_for_requests
|
||||
|
||||
expect(page).to have_text group_within_hierarchy.name
|
||||
expect(page).to have_text group_outside_hierarchy.name
|
||||
end
|
||||
end
|
||||
|
||||
context 'when sharing with groups outside the hierarchy is disabled' do
|
||||
before do
|
||||
group.namespace_settings.update!(prevent_sharing_groups_outside_hierarchy: true)
|
||||
page.within(group_dropdown_selector) do
|
||||
expect_to_have_group(group_within_hierarchy)
|
||||
expect_to_have_group(group_outside_hierarchy)
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
it 'shows only groups within the hierarchy in search results' do
|
||||
visit group_group_members_path(group)
|
||||
context 'when sharing with groups outside the hierarchy is disabled' do
|
||||
before do
|
||||
group.namespace_settings.update!(prevent_sharing_groups_outside_hierarchy: true)
|
||||
end
|
||||
|
||||
click_on 'Invite a group'
|
||||
click_on 'Select a group'
|
||||
it 'shows only groups within the hierarchy in search results' do
|
||||
visit group_group_members_path(group)
|
||||
|
||||
expect(page).to have_text group_within_hierarchy.name
|
||||
expect(page).not_to have_text group_outside_hierarchy.name
|
||||
click_on 'Invite a group'
|
||||
click_on 'Select a group'
|
||||
|
||||
page.within(group_dropdown_selector) do
|
||||
expect_to_have_group(group_within_hierarchy)
|
||||
expect_not_to_have_group(group_outside_hierarchy)
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
||||
|
|
|
|||
|
|
@ -39,7 +39,7 @@ RSpec.describe 'Blob button line permalinks (BlobLinePermalinkUpdater)', :js do
|
|||
find('#L3').click
|
||||
find("#L5").click
|
||||
|
||||
expect(find('.js-data-file-blob-permalink-url')['href']).to eq(get_absolute_url(project_blob_path(project, tree_join(sha, path), anchor: "LC5")))
|
||||
expect(find('.js-data-file-blob-permalink-url')['href']).to eq(get_absolute_url(project_blob_path(project, tree_join(sha, path), anchor: "L5")))
|
||||
end
|
||||
|
||||
it 'with initial fragment hash, changes fragment hash if line number clicked' do
|
||||
|
|
@ -50,7 +50,7 @@ RSpec.describe 'Blob button line permalinks (BlobLinePermalinkUpdater)', :js do
|
|||
find('#L3').click
|
||||
find("#L5").click
|
||||
|
||||
expect(find('.js-data-file-blob-permalink-url')['href']).to eq(get_absolute_url(project_blob_path(project, tree_join(sha, path), anchor: "LC5")))
|
||||
expect(find('.js-data-file-blob-permalink-url')['href']).to eq(get_absolute_url(project_blob_path(project, tree_join(sha, path), anchor: "L5")))
|
||||
end
|
||||
end
|
||||
|
||||
|
|
@ -75,7 +75,7 @@ RSpec.describe 'Blob button line permalinks (BlobLinePermalinkUpdater)', :js do
|
|||
find('#L3').click
|
||||
find("#L5").click
|
||||
|
||||
expect(find('.js-blob-blame-link')['href']).to eq(get_absolute_url(project_blame_path(project, tree_join('master', path), anchor: "LC5")))
|
||||
expect(find('.js-blob-blame-link')['href']).to eq(get_absolute_url(project_blame_path(project, tree_join('master', path), anchor: "L5")))
|
||||
end
|
||||
|
||||
it 'with initial fragment hash, changes fragment hash if line number clicked' do
|
||||
|
|
@ -86,7 +86,7 @@ RSpec.describe 'Blob button line permalinks (BlobLinePermalinkUpdater)', :js do
|
|||
find('#L3').click
|
||||
find("#L5").click
|
||||
|
||||
expect(find('.js-blob-blame-link')['href']).to eq(get_absolute_url(project_blame_path(project, tree_join('master', path), anchor: "LC5")))
|
||||
expect(find('.js-blob-blame-link')['href']).to eq(get_absolute_url(project_blame_path(project, tree_join('master', path), anchor: "L5")))
|
||||
end
|
||||
end
|
||||
end
|
||||
|
|
|
|||
|
|
@ -17,20 +17,18 @@ RSpec.describe 'Project > Members > Invite group', :js do
|
|||
|
||||
visit project_project_members_path(project)
|
||||
|
||||
expect(page).to have_selector('button[data-test-id="invite-group-button"]')
|
||||
expect(page).to have_selector(invite_group_selector)
|
||||
end
|
||||
|
||||
it 'does not display the button when visiting the page not signed in' do
|
||||
it 'does not display the button when visiting the page not signed in' do
|
||||
project = create(:project, namespace: create(:group))
|
||||
|
||||
visit project_project_members_path(project)
|
||||
|
||||
expect(page).not_to have_selector('button[data-test-id="invite-group-button"]')
|
||||
expect(page).not_to have_selector(invite_group_selector)
|
||||
end
|
||||
|
||||
describe 'Share with group lock' do
|
||||
let(:invite_group_selector) { 'button[data-test-id="invite-group-button"]' }
|
||||
|
||||
shared_examples 'the project can be shared with groups' do
|
||||
it 'the "Invite a group" button exists' do
|
||||
visit project_project_members_path(project)
|
||||
|
|
@ -158,21 +156,95 @@ RSpec.describe 'Project > Members > Invite group', :js do
|
|||
describe 'the groups dropdown' do
|
||||
let_it_be(:parent_group) { create(:group, :public) }
|
||||
let_it_be(:project_group) { create(:group, :public, parent: parent_group) }
|
||||
let_it_be(:public_sub_subgroup) { create(:group, :public, parent: project_group) }
|
||||
let_it_be(:public_sibbling_group) { create(:group, :public, parent: parent_group) }
|
||||
let_it_be(:private_sibbling_group) { create(:group, :private, parent: parent_group) }
|
||||
let_it_be(:private_membership_group) { create(:group, :private) }
|
||||
let_it_be(:public_membership_group) { create(:group, :public) }
|
||||
let_it_be(:project) { create(:project, group: project_group) }
|
||||
|
||||
before do
|
||||
private_membership_group.add_guest(maintainer)
|
||||
public_membership_group.add_maintainer(maintainer)
|
||||
context 'with instance admin considerations' do
|
||||
let_it_be(:group_to_share) { create(:group) }
|
||||
|
||||
sign_in(maintainer)
|
||||
context 'when user is an admin' do
|
||||
let_it_be(:admin) { create(:admin) }
|
||||
|
||||
before do
|
||||
sign_in(admin)
|
||||
gitlab_enable_admin_mode_sign_in(admin)
|
||||
end
|
||||
|
||||
it 'shows groups where the admin has no direct membership' do
|
||||
visit project_project_members_path(project)
|
||||
|
||||
click_on 'Invite a group'
|
||||
click_on 'Select a group'
|
||||
wait_for_requests
|
||||
|
||||
page.within(group_dropdown_selector) do
|
||||
expect_to_have_group(group_to_share)
|
||||
end
|
||||
end
|
||||
|
||||
it 'shows groups where the admin has at least guest level membership' do
|
||||
group_to_share.add_guest(admin)
|
||||
|
||||
visit project_project_members_path(project)
|
||||
|
||||
click_on 'Invite a group'
|
||||
click_on 'Select a group'
|
||||
wait_for_requests
|
||||
|
||||
page.within(group_dropdown_selector) do
|
||||
expect_to_have_group(group_to_share)
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
context 'when user is not an admin' do
|
||||
before do
|
||||
project.add_maintainer(maintainer)
|
||||
sign_in(maintainer)
|
||||
end
|
||||
|
||||
it 'does not show groups where the user has no direct membership' do
|
||||
visit project_project_members_path(project)
|
||||
|
||||
click_on 'Invite a group'
|
||||
click_on 'Select a group'
|
||||
wait_for_requests
|
||||
|
||||
page.within(group_dropdown_selector) do
|
||||
expect_not_to_have_group(group_to_share)
|
||||
end
|
||||
end
|
||||
|
||||
it 'shows groups where the user has at least guest level membership' do
|
||||
group_to_share.add_guest(maintainer)
|
||||
|
||||
visit project_project_members_path(project)
|
||||
|
||||
click_on 'Invite a group'
|
||||
click_on 'Select a group'
|
||||
wait_for_requests
|
||||
|
||||
page.within(group_dropdown_selector) do
|
||||
expect_to_have_group(group_to_share)
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
context 'for a project in a nested group' do
|
||||
let_it_be(:public_sub_subgroup) { create(:group, :public, parent: project_group) }
|
||||
let_it_be(:public_sibbling_group) { create(:group, :public, parent: parent_group) }
|
||||
let_it_be(:private_sibbling_group) { create(:group, :private, parent: parent_group) }
|
||||
let_it_be(:private_membership_group) { create(:group, :private) }
|
||||
let_it_be(:public_membership_group) { create(:group, :public) }
|
||||
let_it_be(:project) { create(:project, group: project_group) }
|
||||
|
||||
before do
|
||||
private_membership_group.add_guest(maintainer)
|
||||
public_membership_group.add_maintainer(maintainer)
|
||||
|
||||
sign_in(maintainer)
|
||||
end
|
||||
|
||||
it 'does not show the groups inherited from projects' do
|
||||
project.add_maintainer(maintainer)
|
||||
public_sibbling_group.add_maintainer(maintainer)
|
||||
|
|
@ -183,7 +255,7 @@ RSpec.describe 'Project > Members > Invite group', :js do
|
|||
click_on 'Select a group'
|
||||
wait_for_requests
|
||||
|
||||
page.within('[data-testid="group-select-dropdown"]') do
|
||||
page.within(group_dropdown_selector) do
|
||||
expect_to_have_group(public_membership_group)
|
||||
expect_to_have_group(public_sibbling_group)
|
||||
expect_to_have_group(private_membership_group)
|
||||
|
|
@ -204,7 +276,7 @@ RSpec.describe 'Project > Members > Invite group', :js do
|
|||
click_on 'Select a group'
|
||||
wait_for_requests
|
||||
|
||||
page.within('[data-testid="group-select-dropdown"]') do
|
||||
page.within(group_dropdown_selector) do
|
||||
expect_to_have_group(public_membership_group)
|
||||
expect_to_have_group(public_sibbling_group)
|
||||
expect_to_have_group(private_membership_group)
|
||||
|
|
@ -215,14 +287,10 @@ RSpec.describe 'Project > Members > Invite group', :js do
|
|||
expect_not_to_have_group(project_group)
|
||||
end
|
||||
end
|
||||
|
||||
def expect_to_have_group(group)
|
||||
expect(page).to have_selector("[entity-id='#{group.id}']")
|
||||
end
|
||||
|
||||
def expect_not_to_have_group(group)
|
||||
expect(page).not_to have_selector("[entity-id='#{group.id}']")
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
def invite_group_selector
|
||||
'button[data-test-id="invite-group-button"]'
|
||||
end
|
||||
end
|
||||
|
|
|
|||
|
|
@@ -4,7 +4,6 @@ import waitForPromises from 'helpers/wait_for_promises';
import * as groupsApi from '~/api/groups_api';
import GroupSelect from '~/invite_members/components/group_select.vue';

const accessLevels = { Guest: 10, Reporter: 20, Developer: 30, Maintainer: 40, Owner: 50 };
const group1 = { id: 1, full_name: 'Group One', avatar_url: 'test' };
const group2 = { id: 2, full_name: 'Group Two', avatar_url: 'test' };
const allGroups = [group1, group2];

@@ -13,7 +12,6 @@ const createComponent = (props = {}) => {
  return mount(GroupSelect, {
    propsData: {
      invalidGroups: [],
      accessLevels,
      ...props,
    },
  });

@@ -66,9 +64,8 @@ describe('GroupSelect', () => {
    resolveApiRequest({ data: allGroups });

    expect(groupsApi.getGroups).toHaveBeenCalledWith(group1.name, {
      active: true,
      exclude_internal: true,
      min_access_level: accessLevels.Guest,
      active: true,
    });
  });
@ -1,37 +0,0 @@
|
|||
import { shallowMount } from '@vue/test-utils';
|
||||
import { GlIcon, GlLink } from '@gitlab/ui';
|
||||
import LineNumbers from '~/vue_shared/components/line_numbers.vue';
|
||||
|
||||
describe('Line Numbers component', () => {
|
||||
let wrapper;
|
||||
const lines = 10;
|
||||
|
||||
const createComponent = () => {
|
||||
wrapper = shallowMount(LineNumbers, { propsData: { lines } });
|
||||
};
|
||||
|
||||
const findGlIcon = () => wrapper.findComponent(GlIcon);
|
||||
const findLineNumbers = () => wrapper.findAllComponents(GlLink);
|
||||
const findFirstLineNumber = () => findLineNumbers().at(0);
|
||||
|
||||
beforeEach(() => createComponent());
|
||||
|
||||
afterEach(() => wrapper.destroy());
|
||||
|
||||
describe('rendering', () => {
|
||||
it('renders Line Numbers', () => {
|
||||
expect(findLineNumbers().length).toBe(lines);
|
||||
expect(findFirstLineNumber().attributes()).toMatchObject({
|
||||
id: 'L1',
|
||||
to: '#LC1',
|
||||
});
|
||||
});
|
||||
|
||||
it('renders a link icon', () => {
|
||||
expect(findGlIcon().props()).toMatchObject({
|
||||
size: 12,
|
||||
name: 'link',
|
||||
});
|
||||
});
|
||||
});
|
||||
});
|
||||
|
|
@ -0,0 +1,47 @@
|
|||
import { GlLink } from '@gitlab/ui';
|
||||
import { shallowMountExtended } from 'helpers/vue_test_utils_helper';
|
||||
import ChunkLine from '~/vue_shared/components/source_viewer/components/chunk_line.vue';
|
||||
|
||||
const DEFAULT_PROPS = {
|
||||
number: 2,
|
||||
content: '// Line content',
|
||||
language: 'javascript',
|
||||
};
|
||||
|
||||
describe('Chunk Line component', () => {
|
||||
let wrapper;
|
||||
|
||||
const createComponent = (props = {}) => {
|
||||
wrapper = shallowMountExtended(ChunkLine, { propsData: { ...DEFAULT_PROPS, ...props } });
|
||||
};
|
||||
|
||||
const findLink = () => wrapper.findComponent(GlLink);
|
||||
const findContent = () => wrapper.findByTestId('content');
|
||||
|
||||
beforeEach(() => {
|
||||
createComponent();
|
||||
});
|
||||
|
||||
afterEach(() => wrapper.destroy());
|
||||
|
||||
describe('rendering', () => {
|
||||
it('renders a line number', () => {
|
||||
expect(findLink().attributes()).toMatchObject({
|
||||
'data-line-number': `${DEFAULT_PROPS.number}`,
|
||||
to: `#L${DEFAULT_PROPS.number}`,
|
||||
id: `L${DEFAULT_PROPS.number}`,
|
||||
});
|
||||
|
||||
expect(findLink().text()).toBe(DEFAULT_PROPS.number.toString());
|
||||
});
|
||||
|
||||
it('renders content', () => {
|
||||
expect(findContent().attributes()).toMatchObject({
|
||||
id: `LC${DEFAULT_PROPS.number}`,
|
||||
lang: DEFAULT_PROPS.language,
|
||||
});
|
||||
|
||||
expect(findContent().text()).toBe(DEFAULT_PROPS.content);
|
||||
});
|
||||
});
|
||||
});
|
||||
|
|
@ -0,0 +1,82 @@
|
|||
import { GlIntersectionObserver } from '@gitlab/ui';
|
||||
import { shallowMountExtended } from 'helpers/vue_test_utils_helper';
|
||||
import Chunk from '~/vue_shared/components/source_viewer/components/chunk.vue';
|
||||
import ChunkLine from '~/vue_shared/components/source_viewer/components/chunk_line.vue';
|
||||
|
||||
const DEFAULT_PROPS = {
|
||||
chunkIndex: 2,
|
||||
isHighlighted: false,
|
||||
content: '// Line 1 content \n // Line 2 content',
|
||||
startingFrom: 140,
|
||||
totalLines: 50,
|
||||
language: 'javascript',
|
||||
};
|
||||
|
||||
describe('Chunk component', () => {
|
||||
let wrapper;
|
||||
|
||||
const createComponent = (props = {}) => {
|
||||
wrapper = shallowMountExtended(Chunk, { propsData: { ...DEFAULT_PROPS, ...props } });
|
||||
};
|
||||
|
||||
const findIntersectionObserver = () => wrapper.findComponent(GlIntersectionObserver);
|
||||
const findChunkLines = () => wrapper.findAllComponents(ChunkLine);
|
||||
const findLineNumbers = () => wrapper.findAllByTestId('line-number');
|
||||
const findContent = () => wrapper.findByTestId('content');
|
||||
|
||||
beforeEach(() => {
|
||||
createComponent();
|
||||
});
|
||||
|
||||
afterEach(() => wrapper.destroy());
|
||||
|
||||
describe('Intersection observer', () => {
|
||||
it('renders an Intersection observer component', () => {
|
||||
expect(findIntersectionObserver().exists()).toBe(true);
|
||||
});
|
||||
|
||||
it('emits an appear event when intersection-observer appears', () => {
|
||||
findIntersectionObserver().vm.$emit('appear');
|
||||
|
||||
expect(wrapper.emitted('appear')).toEqual([[DEFAULT_PROPS.chunkIndex]]);
|
||||
});
|
||||
|
||||
it('does not emit an appear event is isHighlighted is true', () => {
|
||||
createComponent({ isHighlighted: true });
|
||||
findIntersectionObserver().vm.$emit('appear');
|
||||
|
||||
expect(wrapper.emitted('appear')).toEqual(undefined);
|
||||
});
|
||||
});
|
||||
|
||||
describe('rendering', () => {
|
||||
it('does not render a Chunk Line component if isHighlighted is false', () => {
|
||||
expect(findChunkLines().length).toBe(0);
|
||||
});
|
||||
|
||||
it('renders simplified line numbers and content if isHighlighted is false', () => {
|
||||
expect(findLineNumbers().length).toBe(DEFAULT_PROPS.totalLines);
|
||||
|
||||
expect(findLineNumbers().at(0).attributes()).toMatchObject({
|
||||
'data-line-number': `${DEFAULT_PROPS.startingFrom + 1}`,
|
||||
href: `#L${DEFAULT_PROPS.startingFrom + 1}`,
|
||||
id: `L${DEFAULT_PROPS.startingFrom + 1}`,
|
||||
});
|
||||
|
||||
expect(findContent().text()).toBe(DEFAULT_PROPS.content);
|
||||
});
|
||||
|
||||
it('renders Chunk Line components if isHighlighted is true', () => {
|
||||
const splitContent = DEFAULT_PROPS.content.split('\n');
|
||||
createComponent({ isHighlighted: true });
|
||||
|
||||
expect(findChunkLines().length).toBe(splitContent.length);
|
||||
|
||||
expect(findChunkLines().at(0).props()).toMatchObject({
|
||||
number: DEFAULT_PROPS.startingFrom + 1,
|
||||
content: splitContent[0],
|
||||
language: DEFAULT_PROPS.language,
|
||||
});
|
||||
});
|
||||
});
|
||||
});
|
||||
|
|
@ -1,23 +1,35 @@
|
|||
import hljs from 'highlight.js/lib/core';
|
||||
import { GlLoadingIcon } from '@gitlab/ui';
|
||||
import Vue, { nextTick } from 'vue';
|
||||
import Vue from 'vue';
|
||||
import VueRouter from 'vue-router';
|
||||
import { shallowMountExtended } from 'helpers/vue_test_utils_helper';
|
||||
import SourceViewer from '~/vue_shared/components/source_viewer/source_viewer.vue';
|
||||
import Chunk from '~/vue_shared/components/source_viewer/components/chunk.vue';
|
||||
import { ROUGE_TO_HLJS_LANGUAGE_MAP } from '~/vue_shared/components/source_viewer/constants';
|
||||
import LineNumbers from '~/vue_shared/components/line_numbers.vue';
|
||||
import waitForPromises from 'helpers/wait_for_promises';
|
||||
import * as sourceViewerUtils from '~/vue_shared/components/source_viewer/utils';
|
||||
import LineHighlighter from '~/blob/line_highlighter';
|
||||
|
||||
jest.mock('~/blob/line_highlighter');
|
||||
jest.mock('highlight.js/lib/core');
|
||||
Vue.use(VueRouter);
|
||||
const router = new VueRouter();
|
||||
|
||||
const generateContent = (content, totalLines = 1) => {
|
||||
let generatedContent = '';
|
||||
for (let i = 0; i < totalLines; i += 1) {
|
||||
generatedContent += `Line: ${i + 1} = ${content}\n`;
|
||||
}
|
||||
return generatedContent;
|
||||
};
|
||||
|
||||
const execImmediately = (callback) => callback();
|
||||
|
||||
describe('Source Viewer component', () => {
|
||||
let wrapper;
|
||||
const language = 'docker';
|
||||
const mappedLanguage = ROUGE_TO_HLJS_LANGUAGE_MAP[language];
|
||||
const content = `// Some source code`;
|
||||
const chunk1 = generateContent('// Some source code 1', 70);
|
||||
const chunk2 = generateContent('// Some source code 2', 70);
|
||||
const content = chunk1 + chunk2;
|
||||
const DEFAULT_BLOB_DATA = { language, rawTextBlob: content };
|
||||
const highlightedContent = `<span data-testid='test-highlighted' id='LC1'>${content}</span><span id='LC2'></span>`;
|
||||
|
||||
|
|
@ -29,15 +41,12 @@ describe('Source Viewer component', () => {
|
|||
await waitForPromises();
|
||||
};
|
||||
|
||||
const findLoadingIcon = () => wrapper.findComponent(GlLoadingIcon);
|
||||
const findLineNumbers = () => wrapper.findComponent(LineNumbers);
|
||||
const findHighlightedContent = () => wrapper.findByTestId('test-highlighted');
|
||||
const findFirstLine = () => wrapper.find('#LC1');
|
||||
const findChunks = () => wrapper.findAllComponents(Chunk);
|
||||
|
||||
beforeEach(() => {
|
||||
hljs.highlight.mockImplementation(() => ({ value: highlightedContent }));
|
||||
hljs.highlightAuto.mockImplementation(() => ({ value: highlightedContent }));
|
||||
jest.spyOn(sourceViewerUtils, 'wrapLines');
|
||||
jest.spyOn(window, 'requestIdleCallback').mockImplementation(execImmediately);
|
||||
|
||||
return createComponent();
|
||||
});
|
||||
|
|
@ -45,6 +54,8 @@ describe('Source Viewer component', () => {
|
|||
afterEach(() => wrapper.destroy());
|
||||
|
||||
describe('highlight.js', () => {
|
||||
beforeEach(() => createComponent({ language: mappedLanguage }));
|
||||
|
||||
it('registers the language definition', async () => {
|
||||
const languageDefinition = await import(`highlight.js/lib/languages/${mappedLanguage}`);
|
||||
|
||||
|
|
@ -54,72 +65,46 @@ describe('Source Viewer component', () => {
|
|||
);
|
||||
});
|
||||
|
||||
it('highlights the content', () => {
|
||||
expect(hljs.highlight).toHaveBeenCalledWith(content, { language: mappedLanguage });
|
||||
it('highlights the first chunk', () => {
|
||||
expect(hljs.highlight).toHaveBeenCalledWith(chunk1.trim(), { language: mappedLanguage });
|
||||
});
|
||||
|
||||
describe('auto-detects if a language cannot be loaded', () => {
|
||||
beforeEach(() => createComponent({ language: 'some_unknown_language' }));
|
||||
|
||||
it('highlights the content with auto-detection', () => {
|
||||
expect(hljs.highlightAuto).toHaveBeenCalledWith(content);
|
||||
expect(hljs.highlightAuto).toHaveBeenCalledWith(chunk1.trim());
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
describe('rendering', () => {
|
||||
it('renders a loading icon if no highlighted content is available yet', async () => {
|
||||
hljs.highlight.mockImplementation(() => ({ value: null }));
|
||||
await createComponent();
|
||||
it('renders the first chunk', async () => {
|
||||
const firstChunk = findChunks().at(0);
|
||||
|
||||
expect(findLoadingIcon().exists()).toBe(true);
|
||||
expect(firstChunk.props('content')).toContain(chunk1);
|
||||
|
||||
expect(firstChunk.props()).toMatchObject({
|
||||
totalLines: 70,
|
||||
startingFrom: 0,
|
||||
});
|
||||
});
|
||||
|
||||
it('calls the wrapLines helper method with highlightedContent and mappedLanguage', () => {
|
||||
expect(sourceViewerUtils.wrapLines).toHaveBeenCalledWith(highlightedContent, mappedLanguage);
|
||||
});
|
||||
it('renders the second chunk', async () => {
|
||||
const secondChunk = findChunks().at(1);
|
||||
|
||||
it('renders Line Numbers', () => {
|
||||
expect(findLineNumbers().props('lines')).toBe(1);
|
||||
});
|
||||
expect(secondChunk.props('content')).toContain(chunk2.trim());
|
||||
|
||||
it('renders the highlighted content', () => {
|
||||
expect(findHighlightedContent().exists()).toBe(true);
|
||||
});
|
||||
});
|
||||
|
||||
describe('selecting a line', () => {
|
||||
let firstLine;
|
||||
let firstLineElement;
|
||||
|
||||
beforeEach(() => {
|
||||
firstLine = findFirstLine();
|
||||
firstLineElement = firstLine.element;
|
||||
|
||||
jest.spyOn(firstLineElement, 'scrollIntoView');
|
||||
jest.spyOn(firstLineElement.classList, 'add');
|
||||
jest.spyOn(firstLineElement.classList, 'remove');
|
||||
});
|
||||
|
||||
it('adds the highlight (hll) class', async () => {
|
||||
wrapper.vm.$router.push('#LC1');
|
||||
await nextTick();
|
||||
|
||||
expect(firstLineElement.classList.add).toHaveBeenCalledWith('hll');
|
||||
});
|
||||
|
||||
it('removes the highlight (hll) class from a previously highlighted line', async () => {
|
||||
wrapper.vm.$router.push('#LC2');
|
||||
await nextTick();
|
||||
|
||||
expect(firstLineElement.classList.remove).toHaveBeenCalledWith('hll');
|
||||
});
|
||||
|
||||
it('scrolls the line into view', () => {
|
||||
expect(firstLineElement.scrollIntoView).toHaveBeenCalledWith({
|
||||
behavior: 'smooth',
|
||||
block: 'center',
|
||||
expect(secondChunk.props()).toMatchObject({
|
||||
totalLines: 70,
|
||||
startingFrom: 70,
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
describe('LineHighlighter', () => {
|
||||
it('instantiates the lineHighlighter class', async () => {
|
||||
expect(LineHighlighter).toHaveBeenCalledWith({ scrollBehavior: 'auto' });
|
||||
});
|
||||
});
|
||||
});
|
||||
|
|
|
|||
|
|
@ -1,26 +0,0 @@
|
|||
import { wrapLines } from '~/vue_shared/components/source_viewer/utils';
|
||||
|
||||
describe('Wrap lines', () => {
|
||||
it.each`
|
||||
content | language | output
|
||||
${'line 1'} | ${'javascript'} | ${'<span id="LC1" lang="javascript" class="line">line 1</span>'}
|
||||
${'line 1\nline 2'} | ${'html'} | ${`<span id="LC1" lang="html" class="line">line 1</span>\n<span id="LC2" lang="html" class="line">line 2</span>`}
|
||||
${'<span class="hljs-code">line 1\nline 2</span>'} | ${'html'} | ${`<span id="LC1" lang="html" class="hljs-code">line 1\n<span id="LC2" lang="html" class="line">line 2</span></span>`}
|
||||
${'<span class="hljs-code">```bash'} | ${'bash'} | ${'<span id="LC1" lang="bash" class="hljs-code">```bash'}
|
||||
${'<span class="hljs-code">```bash'} | ${'valid-language1'} | ${'<span id="LC1" lang="valid-language1" class="hljs-code">```bash'}
|
||||
${'<span class="hljs-code">```bash'} | ${'valid_language2'} | ${'<span id="LC1" lang="valid_language2" class="hljs-code">```bash'}
|
||||
`('returns lines wrapped in spans containing line numbers', ({ content, language, output }) => {
|
||||
expect(wrapLines(content, language)).toBe(output);
|
||||
});
|
||||
|
||||
it.each`
|
||||
language
|
||||
${'invalidLanguage>'}
|
||||
${'"invalidLanguage"'}
|
||||
${'<invalidLanguage'}
|
||||
`('returns lines safely without XSS language is not valid', ({ language }) => {
|
||||
expect(wrapLines('<span class="hljs-code">```bash', language)).toBe(
|
||||
'<span id="LC1" lang="" class="hljs-code">```bash',
|
||||
);
|
||||
});
|
||||
});
|
||||
|
|
@ -28,7 +28,7 @@ RSpec.describe Backup::Manager do
|
|||
it 'calls the named task' do
|
||||
expect(task).to receive(:dump)
|
||||
expect(Gitlab::BackupLogger).to receive(:info).with(message: 'Dumping my task ... ')
|
||||
expect(Gitlab::BackupLogger).to receive(:info).with(message: 'done')
|
||||
expect(Gitlab::BackupLogger).to receive(:info).with(message: 'Dumping my task ... done')
|
||||
|
||||
subject.run_create_task('my_task')
|
||||
end
|
||||
|
|
@ -37,8 +37,7 @@ RSpec.describe Backup::Manager do
|
|||
let(:enabled) { false }
|
||||
|
||||
it 'informs the user' do
|
||||
expect(Gitlab::BackupLogger).to receive(:info).with(message: 'Dumping my task ... ')
|
||||
expect(Gitlab::BackupLogger).to receive(:info).with(message: '[DISABLED]')
|
||||
expect(Gitlab::BackupLogger).to receive(:info).with(message: 'Dumping my task ... [DISABLED]')
|
||||
|
||||
subject.run_create_task('my_task')
|
||||
end
|
||||
|
|
@ -48,8 +47,7 @@ RSpec.describe Backup::Manager do
|
|||
it 'informs the user' do
|
||||
stub_env('SKIP', 'my_task')
|
||||
|
||||
expect(Gitlab::BackupLogger).to receive(:info).with(message: 'Dumping my task ... ')
|
||||
expect(Gitlab::BackupLogger).to receive(:info).with(message: '[SKIPPED]')
|
||||
expect(Gitlab::BackupLogger).to receive(:info).with(message: 'Dumping my task ... [SKIPPED]')
|
||||
|
||||
subject.run_create_task('my_task')
|
||||
end
|
||||
|
|
@ -78,7 +76,7 @@ RSpec.describe Backup::Manager do
|
|||
it 'calls the named task' do
|
||||
expect(task).to receive(:restore)
|
||||
expect(Gitlab::BackupLogger).to receive(:info).with(message: 'Restoring my task ... ').ordered
|
||||
expect(Gitlab::BackupLogger).to receive(:info).with(message: 'done').ordered
|
||||
expect(Gitlab::BackupLogger).to receive(:info).with(message: 'Restoring my task ... done').ordered
|
||||
|
||||
subject.run_restore_task('my_task')
|
||||
end
|
||||
|
|
@ -87,8 +85,7 @@ RSpec.describe Backup::Manager do
|
|||
let(:enabled) { false }
|
||||
|
||||
it 'informs the user' do
|
||||
expect(Gitlab::BackupLogger).to receive(:info).with(message: 'Restoring my task ... ').ordered
|
||||
expect(Gitlab::BackupLogger).to receive(:info).with(message: '[DISABLED]').ordered
|
||||
expect(Gitlab::BackupLogger).to receive(:info).with(message: 'Restoring my task ... [DISABLED]').ordered
|
||||
|
||||
subject.run_restore_task('my_task')
|
||||
end
|
||||
|
|
@ -100,7 +97,7 @@ RSpec.describe Backup::Manager do
|
|||
it 'displays and waits for the user' do
|
||||
expect(Gitlab::BackupLogger).to receive(:info).with(message: 'Restoring my task ... ').ordered
|
||||
expect(Gitlab::BackupLogger).to receive(:info).with(message: 'Watch out!').ordered
|
||||
expect(Gitlab::BackupLogger).to receive(:info).with(message: 'done').ordered
|
||||
expect(Gitlab::BackupLogger).to receive(:info).with(message: 'Restoring my task ... done').ordered
|
||||
expect(Gitlab::TaskHelpers).to receive(:ask_to_continue)
|
||||
expect(task).to receive(:restore)
|
||||
|
||||
|
|
@ -124,7 +121,7 @@ RSpec.describe Backup::Manager do
|
|||
|
||||
it 'displays and waits for the user' do
|
||||
expect(Gitlab::BackupLogger).to receive(:info).with(message: 'Restoring my task ... ').ordered
|
||||
expect(Gitlab::BackupLogger).to receive(:info).with(message: 'done').ordered
|
||||
expect(Gitlab::BackupLogger).to receive(:info).with(message: 'Restoring my task ... done').ordered
|
||||
expect(Gitlab::BackupLogger).to receive(:info).with(message: 'Watch out!').ordered
|
||||
expect(Gitlab::TaskHelpers).to receive(:ask_to_continue)
|
||||
expect(task).to receive(:restore)
|
||||
|
|
@ -134,7 +131,7 @@ RSpec.describe Backup::Manager do
|
|||
|
||||
it 'does not continue when the user quits' do
|
||||
expect(Gitlab::BackupLogger).to receive(:info).with(message: 'Restoring my task ... ').ordered
|
||||
expect(Gitlab::BackupLogger).to receive(:info).with(message: 'done').ordered
|
||||
expect(Gitlab::BackupLogger).to receive(:info).with(message: 'Restoring my task ... done').ordered
|
||||
expect(Gitlab::BackupLogger).to receive(:info).with(message: 'Watch out!').ordered
|
||||
expect(Gitlab::BackupLogger).to receive(:info).with(message: 'Quitting...').ordered
|
||||
expect(task).to receive(:restore)
|
||||
|
|
@ -170,7 +167,9 @@ RSpec.describe Backup::Manager do
|
|||
|
||||
before do
|
||||
allow(ActiveRecord::Base.connection).to receive(:reconnect!)
|
||||
allow(Gitlab::BackupLogger).to receive(:info)
|
||||
allow(Kernel).to receive(:system).and_return(true)
|
||||
allow(YAML).to receive(:load_file).and_call_original
|
||||
allow(YAML).to receive(:load_file).with(File.join(Gitlab.config.backup.path, 'backup_information.yml'))
|
||||
.and_return(backup_information)
|
||||
|
||||
|
|
@ -185,6 +184,20 @@ RSpec.describe Backup::Manager do
|
|||
expect(Kernel).to have_received(:system).with(*tar_cmdline)
|
||||
end
|
||||
|
||||
context 'tar fails' do
|
||||
before do
|
||||
expect(Kernel).to receive(:system).with(*tar_cmdline).and_return(false)
|
||||
end
|
||||
|
||||
it 'logs a failure' do
|
||||
expect do
|
||||
subject.create # rubocop:disable Rails/SaveBang
|
||||
end.to raise_error(Backup::Error, 'Backup failed')
|
||||
|
||||
expect(Gitlab::BackupLogger).to have_received(:info).with(message: "Creating archive #{tar_file} failed")
|
||||
end
|
||||
end
|
||||
|
||||
context 'when BACKUP is set' do
|
||||
let(:tar_file) { 'custom_gitlab_backup.tar' }
|
||||
|
||||
|
|
@ -248,6 +261,7 @@ RSpec.describe Backup::Manager do
|
|||
end
|
||||
|
||||
before do
|
||||
allow(Gitlab::BackupLogger).to receive(:info)
|
||||
allow(Dir).to receive(:chdir).and_yield
|
||||
allow(Dir).to receive(:glob).and_return(files)
|
||||
allow(FileUtils).to receive(:rm)
|
||||
|
|
@ -266,7 +280,7 @@ RSpec.describe Backup::Manager do
|
|||
end
|
||||
|
||||
it 'prints a skipped message' do
|
||||
expect(progress).to have_received(:puts).with('skipping')
|
||||
expect(Gitlab::BackupLogger).to have_received(:info).with(message: 'Deleting old backups ... [SKIPPED]')
|
||||
end
|
||||
end
|
||||
|
||||
|
|
@ -290,7 +304,7 @@ RSpec.describe Backup::Manager do
|
|||
end
|
||||
|
||||
it 'prints a done message' do
|
||||
expect(progress).to have_received(:puts).with('done. (0 removed)')
|
||||
expect(Gitlab::BackupLogger).to have_received(:info).with(message: 'Deleting old backups ... done. (0 removed)')
|
||||
end
|
||||
end
|
||||
|
||||
|
|
@ -307,7 +321,7 @@ RSpec.describe Backup::Manager do
|
|||
end
|
||||
|
||||
it 'prints a done message' do
|
||||
expect(progress).to have_received(:puts).with('done. (0 removed)')
|
||||
expect(Gitlab::BackupLogger).to have_received(:info).with(message: 'Deleting old backups ... done. (0 removed)')
|
||||
end
|
||||
end
|
||||
|
||||
|
|
@ -348,7 +362,7 @@ RSpec.describe Backup::Manager do
|
|||
end
|
||||
|
||||
it 'prints a done message' do
|
||||
expect(progress).to have_received(:puts).with('done. (8 removed)')
|
||||
expect(Gitlab::BackupLogger).to have_received(:info).with(message: 'Deleting old backups ... done. (8 removed)')
|
||||
end
|
||||
end
|
||||
|
||||
|
|
@ -372,11 +386,11 @@ RSpec.describe Backup::Manager do
|
|||
end
|
||||
|
||||
it 'sets the correct removed count' do
|
||||
expect(progress).to have_received(:puts).with('done. (7 removed)')
|
||||
expect(Gitlab::BackupLogger).to have_received(:info).with(message: 'Deleting old backups ... done. (7 removed)')
|
||||
end
|
||||
|
||||
it 'prints the error from file that could not be removed' do
|
||||
expect(progress).to have_received(:puts).with(a_string_matching(message))
|
||||
expect(Gitlab::BackupLogger).to have_received(:info).with(message: a_string_matching(message))
|
||||
end
|
||||
end
|
||||
end
|
||||
|
|
@ -386,6 +400,7 @@ RSpec.describe Backup::Manager do
|
|||
let(:backup_filename) { File.basename(backup_file.path) }
|
||||
|
||||
before do
|
||||
allow(Gitlab::BackupLogger).to receive(:info)
|
||||
allow(subject).to receive(:tar_file).and_return(backup_filename)
|
||||
|
||||
stub_backup_setting(
|
||||
|
|
@ -462,7 +477,7 @@ RSpec.describe Backup::Manager do
|
|||
it 'sets encryption attributes' do
|
||||
subject.create # rubocop:disable Rails/SaveBang
|
||||
|
||||
expect(progress).to have_received(:puts).with("done (encrypted with AES256)")
|
||||
expect(Gitlab::BackupLogger).to have_received(:info).with(message: 'Uploading backup archive to remote storage directory ... done (encrypted with AES256)')
|
||||
end
|
||||
end
|
||||
|
||||
|
|
@ -473,7 +488,7 @@ RSpec.describe Backup::Manager do
|
|||
it 'sets encryption attributes' do
|
||||
subject.create # rubocop:disable Rails/SaveBang
|
||||
|
||||
expect(progress).to have_received(:puts).with("done (encrypted with AES256)")
|
||||
expect(Gitlab::BackupLogger).to have_received(:info).with(message: 'Uploading backup archive to remote storage directory ... done (encrypted with AES256)')
|
||||
end
|
||||
end
|
||||
|
||||
|
|
@ -488,7 +503,7 @@ RSpec.describe Backup::Manager do
|
|||
it 'sets encryption attributes' do
|
||||
subject.create # rubocop:disable Rails/SaveBang
|
||||
|
||||
expect(progress).to have_received(:puts).with("done (encrypted with aws:kms)")
|
||||
expect(Gitlab::BackupLogger).to have_received(:info).with(message: 'Uploading backup archive to remote storage directory ... done (encrypted with aws:kms)')
|
||||
end
|
||||
end
|
||||
end
|
||||
|
|
@ -570,6 +585,7 @@ RSpec.describe Backup::Manager do
|
|||
Rake.application.rake_require 'tasks/gitlab/shell'
|
||||
Rake.application.rake_require 'tasks/cache'
|
||||
|
||||
allow(Gitlab::BackupLogger).to receive(:info)
|
||||
allow(task1).to receive(:restore).with(File.join(Gitlab.config.backup.path, 'task1.tar.gz'))
|
||||
allow(task2).to receive(:restore).with(File.join(Gitlab.config.backup.path, 'task2.tar.gz'))
|
||||
allow(YAML).to receive(:load_file).with(File.join(Gitlab.config.backup.path, 'backup_information.yml'))
|
||||
|
|
@ -634,7 +650,10 @@ RSpec.describe Backup::Manager do
|
|||
end
|
||||
|
||||
context 'when BACKUP variable is set to a correct file' do
|
||||
let(:tar_cmdline) { %w{tar -xf 1451606400_2016_01_01_1.2.3_gitlab_backup.tar} }
|
||||
|
||||
before do
|
||||
allow(Gitlab::BackupLogger).to receive(:info)
|
||||
allow(Dir).to receive(:glob).and_return(
|
||||
[
|
||||
'1451606400_2016_01_01_1.2.3_gitlab_backup.tar'
|
||||
|
|
@ -649,8 +668,21 @@ RSpec.describe Backup::Manager do
|
|||
it 'unpacks the file' do
|
||||
subject.restore
|
||||
|
||||
expect(Kernel).to have_received(:system)
|
||||
.with("tar", "-xf", "1451606400_2016_01_01_1.2.3_gitlab_backup.tar")
|
||||
expect(Kernel).to have_received(:system).with(*tar_cmdline)
|
||||
end
|
||||
|
||||
context 'tar fails' do
|
||||
before do
|
||||
expect(Kernel).to receive(:system).with(*tar_cmdline).and_return(false)
|
||||
end
|
||||
|
||||
it 'logs a failure' do
|
||||
expect do
|
||||
subject.restore
|
||||
end.to raise_error(SystemExit)
|
||||
|
||||
expect(Gitlab::BackupLogger).to have_received(:info).with(message: 'Unpacking backup failed')
|
||||
end
|
||||
end
|
||||
|
||||
context 'on version mismatch' do
|
||||
|
|
@ -680,7 +712,7 @@ RSpec.describe Backup::Manager do
|
|||
|
||||
subject.restore
|
||||
|
||||
expect(progress).to have_received(:print).with('Deleting backups/tmp ... ')
|
||||
expect(Gitlab::BackupLogger).to have_received(:info).with(message: 'Deleting backups/tmp ... ')
|
||||
end
|
||||
end
|
||||
end
|
||||
|
|
@ -731,7 +763,7 @@ RSpec.describe Backup::Manager do
|
|||
|
||||
subject.restore
|
||||
|
||||
expect(progress).to have_received(:print).with('Deleting backups/tmp ... ')
|
||||
expect(Gitlab::BackupLogger).to have_received(:info).with(message: 'Deleting backups/tmp ... ')
|
||||
end
|
||||
end
|
||||
end
|
||||
|
|
|
|||
|
|
@@ -43,6 +43,18 @@ module Spec

          fill_in 'YYYY-MM-DD', with: expires_at.strftime('%Y-%m-%d') if expires_at
        end

        def group_dropdown_selector
          '[data-testid="group-select-dropdown"]'
        end

        def expect_to_have_group(group)
          expect(page).to have_selector("[entity-id='#{group.id}']")
        end

        def expect_not_to_have_group(group)
          expect(page).not_to have_selector("[entity-id='#{group.id}']")
        end
      end
    end
  end
@@ -0,0 +1,18 @@
# frozen_string_literal: true

RSpec.shared_examples 'subscribes to event' do
  include AfterNextHelpers

  it 'consumes the published event', :sidekiq_inline do
    expect_next(described_class)
      .to receive(:handle_event)
      .with(instance_of(event.class))
      .and_call_original

    ::Gitlab::EventStore.publish(event)
  end
end

def consume_event(subscriber:, event:)
  subscriber.new.perform(event.class.name, event.data)
end
@@ -199,18 +199,25 @@ RSpec.describe 'gitlab:app namespace rake task', :delete do
    end

    it 'logs the progress to log file' do
      expect(Gitlab::BackupLogger).to receive(:info).with(message: "Dumping database ... ")
      expect(Gitlab::BackupLogger).to receive(:info).with(message: "[SKIPPED]")
      expect(Gitlab::BackupLogger).to receive(:info).with(message: "Dumping database ... [SKIPPED]")
      expect(Gitlab::BackupLogger).to receive(:info).with(message: "Dumping repositories ... ")
      expect(Gitlab::BackupLogger).to receive(:info).with(message: "Dumping repositories ... done")
      expect(Gitlab::BackupLogger).to receive(:info).with(message: "Dumping uploads ... ")
      expect(Gitlab::BackupLogger).to receive(:info).with(message: "Dumping uploads ... done")
      expect(Gitlab::BackupLogger).to receive(:info).with(message: "Dumping builds ... ")
      expect(Gitlab::BackupLogger).to receive(:info).with(message: "Dumping builds ... done")
      expect(Gitlab::BackupLogger).to receive(:info).with(message: "Dumping artifacts ... ")
      expect(Gitlab::BackupLogger).to receive(:info).with(message: "Dumping artifacts ... done")
      expect(Gitlab::BackupLogger).to receive(:info).with(message: "Dumping pages ... ")
      expect(Gitlab::BackupLogger).to receive(:info).with(message: "Dumping pages ... done")
      expect(Gitlab::BackupLogger).to receive(:info).with(message: "Dumping lfs objects ... ")
      expect(Gitlab::BackupLogger).to receive(:info).with(message: "Dumping lfs objects ... done")
      expect(Gitlab::BackupLogger).to receive(:info).with(message: "Dumping terraform states ... ")
      expect(Gitlab::BackupLogger).to receive(:info).with(message: "Dumping terraform states ... done")
      expect(Gitlab::BackupLogger).to receive(:info).with(message: "Dumping container registry images ... ")
      expect(Gitlab::BackupLogger).to receive(:info).with(message: "Dumping container registry images ... done")
      expect(Gitlab::BackupLogger).to receive(:info).with(message: "Dumping packages ... ")
      expect(Gitlab::BackupLogger).to receive(:info).with(message: "done").exactly(9).times
      expect(Gitlab::BackupLogger).to receive(:info).with(message: "Dumping packages ... done")

      backup_tasks.each do |task|
        run_rake_task("gitlab:backup:#{task}:create")
@@ -11,11 +11,9 @@ RSpec.describe MergeRequests::UpdateHeadPipelineWorker do
  let(:pipeline) { create(:ci_pipeline, project: project, ref: ref) }
  let(:event) { Ci::PipelineCreatedEvent.new(data: { pipeline_id: pipeline.id }) }

  subject { consume_event(event) }
  subject { consume_event(subscriber: described_class, event: event) }

  def consume_event(event)
    described_class.new.perform(event.class.name, event.data)
  end
  it_behaves_like 'subscribes to event'

  context 'when merge requests already exist for this source branch', :sidekiq_inline do
    let(:merge_request_1) do

@@ -9,11 +9,9 @@ RSpec.describe Namespaces::UpdateRootStatisticsWorker do
    Projects::ProjectDeletedEvent.new(data: { project_id: 1, namespace_id: namespace_id })
  end

  subject { consume_event(event) }
  subject { consume_event(subscriber: described_class, event: event) }

  def consume_event(event)
    described_class.new.perform(event.class.name, event.data)
  end
  it_behaves_like 'subscribes to event'

  it 'enqueues ScheduleAggregationWorker' do
    expect(Namespaces::ScheduleAggregationWorker).to receive(:perform_async).with(namespace_id)