Add latest changes from gitlab-org/gitlab@master

GitLab Bot 2024-01-24 12:06:49 +00:00
parent 4dcdd5bebb
commit af5193aa3c
104 changed files with 1500 additions and 2493 deletions


@ -1215,7 +1215,6 @@ Gitlab/NamespacedClass:
- 'lib/gitlab/untrusted_regexp.rb'
- 'lib/gitlab/untrusted_regexp/ruby_syntax.rb'
- 'lib/gitlab/uploads_transfer.rb'
- 'lib/gitlab/url_blocker.rb'
- 'lib/gitlab/url_builder.rb'
- 'lib/gitlab/url_helpers.rb'
- 'lib/gitlab/url_sanitizer.rb'


@ -2671,7 +2671,6 @@ Layout/LineLength:
- 'lib/gitlab/tracking.rb'
- 'lib/gitlab/tracking/destinations/snowplow.rb'
- 'lib/gitlab/tracking/event_definition.rb'
- 'lib/gitlab/url_blocker.rb'
- 'lib/gitlab/usage/metric_definition.rb'
- 'lib/gitlab/usage/metrics/aggregates/aggregate.rb'
- 'lib/gitlab/usage/metrics/aggregates/sources/calculations/intersection.rb'
@ -3801,7 +3800,6 @@ Layout/LineLength:
- 'spec/lib/gitlab/tracking/event_definition_spec.rb'
- 'spec/lib/gitlab/tracking/standard_context_spec.rb'
- 'spec/lib/gitlab/tracking_spec.rb'
- 'spec/lib/gitlab/url_blocker_spec.rb'
- 'spec/lib/gitlab/url_builder_spec.rb'
- 'spec/lib/gitlab/usage/metric_definition_spec.rb'
- 'spec/lib/gitlab/usage/metric_spec.rb'
@ -4024,7 +4022,6 @@ Layout/LineLength:
- 'spec/models/todo_spec.rb'
- 'spec/models/upload_spec.rb'
- 'spec/models/uploads/fog_spec.rb'
- 'spec/models/user_detail_spec.rb'
- 'spec/models/user_spec.rb'
- 'spec/models/wiki_page_spec.rb'
- 'spec/models/x509_certificate_spec.rb'


@ -103,7 +103,6 @@ Performance/MapCompact:
- 'lib/gitlab/git/commit.rb'
- 'lib/gitlab/git/conflict/file.rb'
- 'lib/gitlab/sql/pattern.rb'
- 'lib/gitlab/url_blocker.rb'
- 'qa/qa/page/component/issuable/sidebar.rb'
- 'qa/qa/support/formatters/test_metrics_formatter.rb'
- 'qa/qa/tools/ci/ff_changes.rb'


@ -4679,7 +4679,6 @@ RSpec/FeatureCategory:
- 'spec/models/user_agent_detail_spec.rb'
- 'spec/models/user_canonical_email_spec.rb'
- 'spec/models/user_custom_attribute_spec.rb'
- 'spec/models/user_detail_spec.rb'
- 'spec/models/user_highest_role_spec.rb'
- 'spec/models/user_mentions/commit_user_mention_spec.rb'
- 'spec/models/user_mentions/issue_user_mention_spec.rb'


@ -2416,7 +2416,6 @@ RSpec/NamedSubject:
- 'spec/lib/gitlab/tree_summary_spec.rb'
- 'spec/lib/gitlab/unicode_spec.rb'
- 'spec/lib/gitlab/untrusted_regexp_spec.rb'
- 'spec/lib/gitlab/url_blocker_spec.rb'
- 'spec/lib/gitlab/url_builder_spec.rb'
- 'spec/lib/gitlab/usage/metric_definition_spec.rb'
- 'spec/lib/gitlab/usage/metrics/instrumentations/database_metric_spec.rb'


@ -115,7 +115,6 @@ Style/ArgumentsForwarding:
- 'lib/gitlab/redis/multi_store.rb'
- 'lib/gitlab/repository_cache.rb'
- 'lib/gitlab/tracking.rb'
- 'lib/gitlab/url_blocker.rb'
- 'lib/gitlab/url_builder.rb'
- 'lib/gitlab/usage/metrics/query.rb'
- 'lib/gitlab_settings/settings.rb'


@ -538,7 +538,6 @@ Style/GuardClause:
- 'lib/gitlab/sidekiq_config/cli_methods.rb'
- 'lib/gitlab/sidekiq_middleware/size_limiter/compressor.rb'
- 'lib/gitlab/sql/set_operator.rb'
- 'lib/gitlab/url_blocker.rb'
- 'lib/gitlab/usage/metrics/instrumentations/count_bulk_imports_entities_metric.rb'
- 'lib/gitlab/utils/override.rb'
- 'lib/gitlab/webpack/manifest.rb'


@ -881,7 +881,6 @@ Style/IfUnlessModifier:
- 'lib/gitlab/suggestions/suggestion_set.rb'
- 'lib/gitlab/template_parser/eval_state.rb'
- 'lib/gitlab/untrusted_regexp.rb'
- 'lib/gitlab/url_blocker.rb'
- 'lib/gitlab/usage_data_counters/base_counter.rb'
- 'lib/gitlab/usage_data_counters/hll_redis_counter.rb'
- 'lib/gitlab/utils/delegator_override.rb'


@ -2620,7 +2620,6 @@ Style/InlineDisableAnnotation:
- 'lib/gitlab/throttle.rb'
- 'lib/gitlab/tracking.rb'
- 'lib/gitlab/uploads/migration_helper.rb'
- 'lib/gitlab/url_blocker.rb'
- 'lib/gitlab/url_builder.rb'
- 'lib/gitlab/usage/metrics/instrumentations/count_imported_projects_metric.rb'
- 'lib/gitlab/usage/metrics/instrumentations/database_metric.rb'


@ -1 +1 @@
7eb79ebcb084d4e881777f44ca5055cce6e60ccf
4261fb1dccde53037494d42745c59583f6644553


@ -71,8 +71,8 @@
{"name":"capybara-screenshot","version":"1.0.26","platform":"ruby","checksum":"816b9370a07752097c82a05f568aaf5d3b7f45c3db5d3aab2014071e1b3c0c77"},
{"name":"carrierwave","version":"1.3.4","platform":"ruby","checksum":"81772dabd1830edbd7f4526d2ae2c79f974f1d48900c3f03f7ecb7c657463a21"},
{"name":"cbor","version":"0.5.9.6","platform":"ruby","checksum":"434a147658dd1df24ec9e7b3297c1fd4f8a691c97d0e688b3049df8e728b2114"},
{"name":"character_set","version":"1.4.1","platform":"java","checksum":"38b632136b40e02fecba2898497b07ac640cc121f17ac536eaf19873d50053d0"},
{"name":"character_set","version":"1.4.1","platform":"ruby","checksum":"f71b1ac35b21c4c6f9f26b8a67c7eec8e10bdf0da17488ac7f8fae756d9f8062"},
{"name":"character_set","version":"1.8.0","platform":"java","checksum":"2d94ac33d6569434cf1ba464012b5e98010f5dafbd7b750e8d7db79f4c8eb8f7"},
{"name":"character_set","version":"1.8.0","platform":"ruby","checksum":"2b7317462adaedff0bd1576ae86d71bc5efe133a5d0b7c257021b00fe3153f51"},
{"name":"charlock_holmes","version":"0.7.7","platform":"ruby","checksum":"1790eca3f661ffa6bbf5866c53c7191e4b8472626fc4997ff9dbe7c425e2cb43"},
{"name":"chef-config","version":"18.3.0","platform":"ruby","checksum":"c183a2ff41da8d63b1e4a60853c9c701a053ab9afe13df767a578db5f07072df"},
{"name":"chef-utils","version":"18.3.0","platform":"ruby","checksum":"827f7aace26ba9f5f8aca45059644205cc715baded80229f1fd5518d21970701"},
@ -502,7 +502,6 @@
{"name":"rb-inotify","version":"0.10.1","platform":"ruby","checksum":"050062d4f31d307cca52c3f6a7f4b946df8de25fc4bd373e1a5142e41034a7ca"},
{"name":"rb_sys","version":"0.9.86","platform":"ruby","checksum":"65d35ad5f2f2e7257607310186d6a178f34d0fee807d3b1af5611db6a5503a8c"},
{"name":"rbtrace","version":"0.4.14","platform":"ruby","checksum":"162bbf89cecabfc4f09c869b655f6f3a679c4870ebb7cbdcadf7393a81cc1769"},
{"name":"rbtree","version":"0.4.6","platform":"ruby","checksum":"14eea4469b24fd2472542e5f3eb105d6344c8ccf36f0b56d55fdcfeb4e0f10fc"},
{"name":"rchardet","version":"1.8.0","platform":"ruby","checksum":"693acd5253d5ade81a51940697955f6dd4bb2f0d245bda76a8e23deec70a52c7"},
{"name":"re2","version":"2.7.0","platform":"aarch64-linux","checksum":"778921298b6e8aba26a6230dd298c9b361b92e45024f81fa6aee788060fa307c"},
{"name":"re2","version":"2.7.0","platform":"arm-linux","checksum":"d328b5286d83ae265e13b855da8e348a976f80f91b748045b52073a570577954"},
@ -592,7 +591,6 @@
{"name":"sentry-raven","version":"3.1.2","platform":"ruby","checksum":"103d3b122958810d34898ce2e705bcf549ddb9d855a70ce9a3970ee2484f364a"},
{"name":"sentry-ruby","version":"5.10.0","platform":"ruby","checksum":"115c24c0aee1309210f3a2988fb118e2bec1f11609feeda90e694388b1183619"},
{"name":"sentry-sidekiq","version":"5.10.0","platform":"ruby","checksum":"cc81018d0733fb1be3fb5641c9e0b61030bbeaa1d0b23ca64797d70def7aea1a"},
{"name":"set","version":"1.0.2","platform":"ruby","checksum":"02ffa4de1f2621495e05b72326040dd014d7abbcb02fea698bc600a389992c02"},
{"name":"sexp_processor","version":"4.17.1","platform":"ruby","checksum":"91110946720307f30bf1d549e90d9a529fef40d1fc471c069c8cca7667015da0"},
{"name":"shellany","version":"0.0.1","platform":"ruby","checksum":"0e127a9132698766d7e752e82cdac8250b6adbd09e6c0a7fbbb6f61964fedee7"},
{"name":"shoulda-matchers","version":"5.1.0","platform":"ruby","checksum":"a01d20589989e9653ab4a28c67d9db2b82bcf0a2496cf01d5e1a95a4aaaf5b07"},
@ -613,8 +611,6 @@
{"name":"snowplow-tracker","version":"0.8.0","platform":"ruby","checksum":"7ba6f4f1443a829845fd28e63eda72d9d3d247f485310ddcccaebbc52b734a38"},
{"name":"solargraph","version":"0.47.2","platform":"ruby","checksum":"87ca4b799b9155c2c31c15954c483e952fdacd800f52d6709b901dd447bcac6a"},
{"name":"sorbet-runtime","version":"0.5.11144","platform":"ruby","checksum":"cb36dfc4ede6d206fa6f7587d4be7c8b4fcd3cc9fd5792614fb9b6c7030548a0"},
{"name":"sorted_set","version":"1.0.3","platform":"java","checksum":"996283f2e5c6e838825bcdcee31d6306515ae5f24bcb0ee4ce09dfff32919b8c"},
{"name":"sorted_set","version":"1.0.3","platform":"ruby","checksum":"4f2b8bee6e8c59cbd296228c0f1f81679357177a8b6859dcc2a99e86cce6372f"},
{"name":"spamcheck","version":"1.3.0","platform":"ruby","checksum":"a46082752257838d8484c844736e309ec499f85dcc51283a5f973b33f1c994f5"},
{"name":"spring","version":"4.1.0","platform":"ruby","checksum":"f17f080fb0df558d663c897a6229ed3d5cc54819ab51876ea6eef49a67f0a3cb"},
{"name":"spring-commands-rspec","version":"1.0.4","platform":"ruby","checksum":"6202e54fa4767452e3641461a83347645af478bf45dddcca9737b43af0dd1a2c"},


@ -383,8 +383,7 @@ GEM
mime-types (>= 1.16)
ssrf_filter (~> 1.0, < 1.1.0)
cbor (0.5.9.6)
character_set (1.4.1)
sorted_set (~> 1.0)
character_set (1.8.0)
charlock_holmes (0.7.7)
chef-config (18.3.0)
addressable
@ -1350,7 +1349,6 @@ GEM
ffi (>= 1.0.6)
msgpack (>= 0.4.3)
optimist (>= 3.0.0)
rbtree (0.4.6)
rchardet (1.8.0)
re2 (2.7.0)
mini_portile2 (~> 2.8.5)
@ -1547,7 +1545,6 @@ GEM
sentry-sidekiq (5.10.0)
sentry-ruby (~> 5.10.0)
sidekiq (>= 3.0)
set (1.0.2)
sexp_processor (4.17.1)
shellany (0.0.1)
shoulda-matchers (5.1.0)
@ -1601,9 +1598,6 @@ GEM
tilt (~> 2.0)
yard (~> 0.9, >= 0.9.24)
sorbet-runtime (0.5.11144)
sorted_set (1.0.3)
rbtree
set (~> 1.0)
spamcheck (1.3.0)
grpc (~> 1.0)
spring (4.1.0)


@ -3,5 +3,3 @@ export const getModifierKey = (removeSuffix = false) => {
const winKey = `Ctrl${removeSuffix ? '' : '+'}`;
return window.gl?.client?.isMac ? '⌘' : winKey;
};
export const PRELOAD_THROTTLE_TIMEOUT_MS = 4000;


@ -5,7 +5,6 @@ import GfmAutoComplete, { defaultAutocompleteConfig } from 'ee_else_ce/gfm_auto_
import { disableButtonIfEmptyField } from '~/lib/utils/common_utils';
import dropzoneInput from './dropzone_input';
import { addMarkdownListeners, removeMarkdownListeners } from './lib/utils/text_markdown';
import { PRELOAD_THROTTLE_TIMEOUT_MS } from './constants';
export default class GLForm {
/**
@ -69,21 +68,6 @@ export default class GLForm {
);
this.autoComplete = new GfmAutoComplete(dataSources);
this.autoComplete.setup(this.form.find('.js-gfm-input'), this.enableGFM);
if (this.preloadMembers && dataSources?.members) {
// for now the preload is only implemented for the members
// timeout helping to trottle the preloads in the case content_editor
// is set as main comment editor and support for rspec tests
// https://gitlab.com/gitlab-org/gitlab/-/issues/427437
requestIdleCallback(() =>
setTimeout(
() => this.autoComplete?.fetchData($('.js-gfm-input'), '@'),
PRELOAD_THROTTLE_TIMEOUT_MS,
),
);
}
this.formDropzone = dropzoneInput(this.form, { parallelUploads: 1 });
if (this.form.is(':not(.js-no-autosize)')) {


@ -1,3 +1,5 @@
import { __ } from '~/locale';
export const COMPONENTS = {
conflict: () => import('./conflicts.vue'),
discussions_not_resolved: () => import('./unresolved_discussions.vue'),
@ -5,3 +7,18 @@ export const COMPONENTS = {
need_rebase: () => import('./rebase.vue'),
default: () => import('./message.vue'),
};
export const FAILURE_REASONS = {
broken_status: __('Cannot merge the source into the target branch, due to a conflict.'),
ci_must_pass: __('Pipeline must succeed.'),
conflict: __('Merge conflicts must be resolved.'),
discussions_not_resolved: __('Unresolved discussions must be resolved.'),
draft_status: __('Merge request must not be draft.'),
not_open: __('Merge request must be open.'),
need_rebase: __('Merge request must be rebased, because a fast-forward merge is not possible.'),
not_approved: __('All required approvals must be given.'),
policies_denied: __('Denied licenses must be removed or approved.'),
merge_request_blocked: __('Merge request is blocked by another merge request.'),
status_checks_must_pass: __('Status checks must pass.'),
jira_association_missing: __('Either the title or description must reference a Jira issue.'),
};


@ -1,6 +1,6 @@
<script>
import { __ } from '~/locale';
import StatusIcon from '../widget/status_icon.vue';
import { FAILURE_REASONS } from './constants';
const ICON_NAMES = {
failed: 'failed',
@ -8,21 +8,6 @@ const ICON_NAMES = {
success: 'success',
};
export const FAILURE_REASONS = {
broken_status: __('Cannot merge the source into the target branch, due to a conflict.'),
ci_must_pass: __('Pipeline must succeed.'),
conflict: __('Merge conflicts must be resolved.'),
discussions_not_resolved: __('Unresolved discussions must be resolved.'),
draft_status: __('Merge request must not be draft.'),
not_open: __('Merge request must be open.'),
need_rebase: __('Merge request must be rebased, because a fast-forward merge is not possible.'),
not_approved: __('All required approvals must be given.'),
policies_denied: __('Denied licenses must be removed or approved.'),
merge_request_blocked: __('Merge request is blocked by another merge request.'),
status_checks_must_pass: __('Status checks must pass.'),
jira_association_missing: __('Either the title or description must reference a Jira issue.'),
};
export default {
name: 'MergeChecksMessage',
components: {


@ -3,7 +3,10 @@ import { GlSkeletonLoader } from '@gitlab/ui';
import { __, n__, sprintf } from '~/locale';
import { TYPENAME_MERGE_REQUEST } from '~/graphql_shared/constants';
import { convertToGraphQLId } from '~/graphql_shared/utils';
import { COMPONENTS } from '~/vue_merge_request_widget/components/checks/constants';
import {
COMPONENTS,
FAILURE_REASONS,
} from '~/vue_merge_request_widget/components/checks/constants';
import mergeRequestQueryVariablesMixin from '../mixins/merge_request_query_variables';
import mergeChecksQuery from '../queries/merge_checks.query.graphql';
import mergeChecksSubscription from '../queries/merge_checks.subscription.graphql';
@ -102,7 +105,7 @@ export default {
const order = ['FAILED', 'SUCCESS'];
return [...this.checks]
.filter((s) => s.status !== 'INACTIVE')
.filter((s) => s.status !== 'INACTIVE' && FAILURE_REASONS[s.identifier.toLowerCase()])
.sort((a, b) => order.indexOf(a.status) - order.indexOf(b.status));
},
failedChecks() {


@ -78,16 +78,12 @@ export default {
required: false,
default: undefined,
},
multiSelectValues: {
type: Array,
required: false,
default: () => [],
},
},
data() {
return {
hasFetched: false, // use this to avoid flash of `No suggestions found` before fetching
searchKey: '',
selectedTokens: [],
recentSuggestions: this.config.recentSuggestionsStorageKey
? getRecentlyUsedSuggestions(this.config.recentSuggestionsStorageKey) ?? []
: [],
@ -197,6 +193,30 @@ export default {
}
},
},
value: {
deep: true,
immediate: true,
handler(newValue) {
const { data } = newValue;
if (!this.multiSelectEnabled) {
return;
}
// don't add empty values to selectedTokens
if (!data) {
return;
}
if (Array.isArray(data)) {
this.selectedTokens = data;
// !active so we don't add strings while searching, e.g. r, ro, roo
// !includes so we don't add the same value twice (if @input is emitted twice)
} else if (!this.active && !this.selectedTokens.includes(data)) {
this.selectedTokens = this.selectedTokens.concat(data);
}
},
},
},
methods: {
handleInput: debounce(function debouncedSearch({ data, operator }) {
@ -222,7 +242,15 @@ export default {
}, DEBOUNCE_DELAY),
handleTokenValueSelected(selectedValue) {
if (this.multiSelectEnabled) {
this.$emit('token-selected', selectedValue);
const index = this.selectedTokens.indexOf(selectedValue);
if (index > -1) {
this.selectedTokens.splice(index, 1);
} else {
this.selectedTokens.push(selectedValue);
}
// need to clear search
this.$emit('input', { ...this.value, data: '' });
}
const activeTokenValue = this.getActiveTokenValue(this.suggestions, selectedValue);
@ -253,7 +281,7 @@ export default {
:config="validatedConfig"
:value="value"
:active="active"
:multi-select-values="multiSelectValues"
:multi-select-values="selectedTokens"
v-bind="$attrs"
v-on="$listeners"
@input="handleInput"
@ -265,6 +293,7 @@ export default {
:view-token-props="/* eslint-disable @gitlab/vue-no-new-non-primitive-in-template */ {
...viewTokenProps,
activeTokenValue,
selectedTokens,
} /* eslint-enable @gitlab/vue-no-new-non-primitive-in-template */"
></slot>
</template>
@ -274,6 +303,7 @@ export default {
:view-token-props="/* eslint-disable @gitlab/vue-no-new-non-primitive-in-template */ {
...viewTokenProps,
activeTokenValue,
selectedTokens,
} /* eslint-enable @gitlab/vue-no-new-non-primitive-in-template */"
></slot>
</template>
@ -290,17 +320,26 @@ export default {
</template>
<template v-if="showRecentSuggestions">
<gl-dropdown-section-header>{{ __('Recently used') }}</gl-dropdown-section-header>
<slot name="suggestions-list" :suggestions="recentSuggestions"></slot>
<slot
name="suggestions-list"
:suggestions="recentSuggestions"
:selections="selectedTokens"
></slot>
<gl-dropdown-divider />
</template>
<slot
v-if="showPreloadedSuggestions"
name="suggestions-list"
:suggestions="preloadedSuggestions"
:selections="selectedTokens"
></slot>
<gl-loading-icon v-if="suggestionsLoading" size="sm" />
<template v-else-if="showAvailableSuggestions">
<slot name="suggestions-list" :suggestions="availableSuggestions"></slot>
<slot
name="suggestions-list"
:suggestions="availableSuggestions"
:selections="selectedTokens"
></slot>
</template>
<gl-dropdown-text v-else-if="showNoMatchesText">
{{ __('No matches found') }}


@ -6,8 +6,7 @@ import { __ } from '~/locale';
import { WORKSPACE_GROUP, WORKSPACE_PROJECT } from '~/issues/constants';
import usersAutocompleteQuery from '~/graphql_shared/queries/users_autocomplete.query.graphql';
import glFeatureFlagMixin from '~/vue_shared/mixins/gl_feature_flags_mixin';
import { OPERATORS_TO_GROUP, OPTIONS_NONE_ANY } from '../constants';
import { OPTIONS_NONE_ANY } from '../constants';
import BaseToken from './base_token.vue';
@ -19,7 +18,6 @@ export default {
GlIntersperse,
GlFilteredSearchSuggestion,
},
mixins: [glFeatureFlagMixin()],
props: {
config: {
type: Object,
@ -40,7 +38,6 @@ export default {
users: this.config.initialUsers || [],
allUsers: this.config.initialUsers || [],
loading: false,
selectedUsernames: [],
};
},
computed: {
@ -56,39 +53,6 @@ export default {
fetchUsersQuery() {
return this.config.fetchUsers ? this.config.fetchUsers : this.fetchUsersBySearchTerm;
},
multiSelectEnabled() {
return (
this.config.multiSelect &&
this.glFeatures.groupMultiSelectTokens &&
OPERATORS_TO_GROUP.includes(this.value.operator)
);
},
},
watch: {
value: {
deep: true,
immediate: true,
handler(newValue) {
const { data } = newValue;
if (!this.multiSelectEnabled) {
return;
}
// don't add empty values to selectedUsernames
if (!data) {
return;
}
if (Array.isArray(data)) {
this.selectedUsernames = data;
// !active so we don't add strings while searching, e.g. r, ro, roo
// !includes so we don't add the same usernames (if @input is emitted twice)
} else if (!this.active && !this.selectedUsernames.includes(data)) {
this.selectedUsernames = this.selectedUsernames.concat(data);
}
},
},
},
methods: {
getActiveUser(users, data) {
@ -104,26 +68,6 @@ export default {
const user = this.getActiveUser(this.allUsers, username);
return this.getAvatarUrl(user);
},
addCheckIcon(username) {
return this.multiSelectEnabled && this.selectedUsernames.includes(username);
},
addPadding(username) {
return this.multiSelectEnabled && !this.selectedUsernames.includes(username);
},
handleSelected(username) {
if (!this.multiSelectEnabled) {
return;
}
const index = this.selectedUsernames.indexOf(username);
if (index > -1) {
this.selectedUsernames.splice(index, 1);
} else {
this.selectedUsernames.push(username);
}
this.$emit('input', { ...this.value, data: '' });
},
fetchUsersBySearchTerm(search) {
return this.$apollo
.query({
@ -171,16 +115,14 @@ export default {
:get-active-token-value="getActiveUser"
:default-suggestions="defaultUsers"
:preloaded-suggestions="preloadedUsers"
:multi-select-values="selectedUsernames"
v-bind="$attrs"
@fetch-suggestions="fetchUsers"
@token-selected="handleSelected"
v-on="$listeners"
>
<template #view="{ viewTokenProps: { inputValue, activeTokenValue } }">
<gl-intersperse v-if="multiSelectEnabled" separator=",">
<template #view="{ viewTokenProps: { inputValue, activeTokenValue, selectedTokens } }">
<gl-intersperse v-if="selectedTokens.length > 0" separator=",">
<span
v-for="(username, index) in selectedUsernames"
v-for="(username, index) in selectedTokens"
:key="username"
:class="{ 'gl-ml-2': index > 0 }"
><gl-avatar :size="16" :src="avatarFor(username)" class="gl-mr-1" />{{
@ -198,7 +140,7 @@ export default {
{{ activeTokenValue ? activeTokenValue.name : inputValue }}
</template>
</template>
<template #suggestions-list="{ suggestions }">
<template #suggestions-list="{ suggestions, selections = [] }">
<gl-filtered-search-suggestion
v-for="user in suggestions"
:key="user.username"
@ -206,10 +148,10 @@ export default {
>
<div
class="gl-display-flex gl-align-items-center"
:class="{ 'gl-pl-6': addPadding(user.username) }"
:class="{ 'gl-pl-6': !selections.includes(user.username) }"
>
<gl-icon
v-if="addCheckIcon(user.username)"
v-if="selections.includes(user.username)"
name="check"
class="gl-mr-3 gl-text-secondary gl-flex-shrink-0"
/>


@ -1,43 +0,0 @@
# frozen_string_literal: true
module Projects
module Gcp
module ArtifactRegistry
class BaseController < ::Projects::ApplicationController
before_action :ensure_feature_flag
before_action :ensure_saas
before_action :authorize_read_container_image!
before_action :ensure_private_project
feature_category :container_registry
urgency :low
private
def ensure_feature_flag
return if Feature.enabled?(:gcp_technical_demo, project)
@error = 'Feature flag disabled'
render
end
def ensure_saas
return if Gitlab.com_except_jh? # rubocop: disable Gitlab/AvoidGitlabInstanceChecks -- demo requirement
@error = "Can't run here"
render
end
def ensure_private_project
return if project.private?
@error = 'Can only run on private projects'
render
end
end
end
end
end


@ -1,135 +0,0 @@
# frozen_string_literal: true
module Projects
module Gcp
module ArtifactRegistry
class DockerImagesController < Projects::Gcp::ArtifactRegistry::BaseController
before_action :require_gcp_params
before_action :handle_pagination
REPO_NAME_REGEX = %r{/repositories/(.*)/dockerImages/}
def index
result = service.execute(page_token: params[:page_token])
if result.success?
@docker_images = process_docker_images(result.payload[:images] || [])
@next_page_token = result.payload[:next_page_token]
@artifact_repository_name = artifact_repository_name
@error = @docker_images.blank? ? 'No docker images' : false
else
@error = result.message
end
end
private
def service
::GoogleCloudPlatform::ArtifactRegistry::ListDockerImagesService.new(
project: @project,
current_user: current_user,
params: {
gcp_project_id: gcp_project_id,
gcp_location: gcp_location,
gcp_repository: gcp_ar_repository,
gcp_wlif: gcp_wlif_url
}
)
end
def process_docker_images(raw_images)
raw_images.map { |r| process_docker_image(r) }
end
def process_docker_image(raw_image)
DockerImage.new(
name: raw_image[:name],
uri: raw_image[:uri],
tags: raw_image[:tags],
image_size_bytes: raw_image[:size_bytes],
media_type: raw_image[:media_type],
upload_time: raw_image[:uploaded_at],
build_time: raw_image[:built_at],
update_time: raw_image[:updated_at]
)
end
def artifact_repository_name
return unless @docker_images.present?
(@docker_images.first.name || '')[REPO_NAME_REGEX, 1]
end
def handle_pagination
@page = Integer(params[:page] || 1)
@page_tokens = {}
@previous_page_token = nil
if params[:page_tokens]
@page_tokens = ::Gitlab::Json.parse(Base64.decode64(params[:page_tokens]))
@previous_page_token = @page_tokens[(@page - 1).to_s]
end
@page_tokens[@page.to_s] = params[:page_token]
@page_tokens = Base64.encode64(::Gitlab::Json.dump(@page_tokens.compact))
end
def require_gcp_params
return unless gcp_project_id.blank? || gcp_location.blank? || gcp_ar_repository.blank? || gcp_wlif_url.blank?
redirect_to new_namespace_project_gcp_artifact_registry_setup_path
end
def gcp_project_id
params[:gcp_project_id]
end
def gcp_location
params[:gcp_location]
end
def gcp_ar_repository
params[:gcp_ar_repository]
end
def gcp_wlif_url
params[:gcp_wlif_url]
end
class DockerImage
include ActiveModel::API
attr_accessor :name, :uri, :tags, :image_size_bytes, :upload_time, :media_type, :build_time, :update_time
SHORT_NAME_REGEX = %r{dockerImages/(.*)$}
def short_name
(name || '')[SHORT_NAME_REGEX, 1]
end
def updated_at
return unless update_time
Time.zone.parse(update_time)
end
def built_at
return unless build_time
Time.zone.parse(build_time)
end
def uploaded_at
return unless upload_time
Time.zone.parse(upload_time)
end
def details_url
"https://#{uri}"
end
end
end
end
end
end


@ -1,11 +0,0 @@
# frozen_string_literal: true
module Projects
module Gcp
module ArtifactRegistry
class SetupController < ::Projects::Gcp::ArtifactRegistry::BaseController
def new; end
end
end
end
end


@ -7,6 +7,11 @@ module Resolvers
default_value: false,
description: 'Include also subgroup projects.'
argument :include_archived, GraphQL::Types::Boolean,
required: false,
default_value: true,
description: 'Include also archived projects.'
argument :not_aimed_for_deletion, GraphQL::Types::Boolean,
required: false,
default_value: false,
@ -65,6 +70,7 @@ module Resolvers
def finder_params(args)
{
include_subgroups: args.dig(:include_subgroups),
include_archived: args.dig(:include_archived),
not_aimed_for_deletion: args.dig(:not_aimed_for_deletion),
sort: args.dig(:sort),
search: args.dig(:search),
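
Note that the new `includeArchived` argument defaults to `true`, so existing group and namespace project queries keep returning archived projects unless callers opt out. A minimal sketch of a query exercising it (hypothetical Ruby snippet; the field names match the GraphQL docs further below):

# Hypothetical snippet building a GraphQL query that uses the new argument;
# includeArchived: false filters archived projects out of the connection.
query = <<~GRAPHQL
  {
    group(fullPath: "gitlab-org") {
      projects(includeArchived: false, includeSubgroups: true) {
        nodes { name }
      }
    }
  }
GRAPHQL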

View File

@ -33,6 +33,10 @@ module DashboardHelper
end
end
end
def user_groups_requiring_reauth
[]
end
end
DashboardHelper.prepend_mod_with('DashboardHelper')


@ -254,6 +254,10 @@ module TodosHelper
!todo.build_failed? && !todo.unmergeable?
end
def todo_groups_requiring_saml_reauth(_todos)
[]
end
private
def todos_design_path(todo, path_options)


@ -381,7 +381,7 @@ class NotifyPreview < ActionMailer::Preview
def custom_email_credential
@custom_email_credential ||= project.service_desk_custom_email_credential || ServiceDesk::CustomEmailCredential.create!(
project: project,
smtp_address: 'smtp.gmail.com', # Use gmail, because Gitlab::UrlBlocker resolves DNS
smtp_address: 'smtp.gmail.com', # Use gmail, because Gitlab::HTTP_V2::UrlBlocker resolves DNS
smtp_port: 587,
smtp_username: 'user@gmail.com',
smtp_password: 'supersecret'


@ -472,12 +472,8 @@ class Namespace < ApplicationRecord
false
end
def all_project_ids
all_projects.pluck(:id)
end
def all_project_ids_except(ids)
all_projects.where.not(id: ids).pluck(:id)
all_project_ids.where.not(id: ids)
end
# Deprecated, use #licensed_feature_available? instead. Remove once Namespace#feature_available? isn't used anymore.
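
With this refactor, `all_project_ids_except` returns a relation that selects ids rather than a plucked Ruby array, so it composes lazily into further SQL. A sketch of the difference (the `namespace` receiver is hypothetical):

# Before: all_projects.where.not(id: ids).pluck(:id)  # => [1, 5, 9], materialised in Ruby
# After:  all_project_ids.where.not(id: ids)          # => a relation selecting ids
ids = namespace.all_project_ids_except([1, 2])
Project.where(id: ids) # nests the relation as an IN (SELECT id ...) subquery, no pluck round-trip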


@ -10,8 +10,62 @@ module Namespaces
after_destroy :invalidate_descendants_cache
end
override :self_and_descendant_ids
def self_and_descendant_ids
return super unless attempt_to_use_cached_data?
scope_with_cached_ids(
super,
self.class,
Namespaces::Descendants.arel_table[:self_and_descendant_group_ids]
)
end
override :all_project_ids
def all_project_ids
return super unless attempt_to_use_cached_data?
scope_with_cached_ids(
all_projects.select(:id),
Project,
Namespaces::Descendants.arel_table[:all_project_ids]
)
end
private
# This method implements an OR-based cache lookup using COALESCE, similar to what you would do in Ruby:
# return cheap_cached_data || expensive_uncached_data
def scope_with_cached_ids(consistent_ids_scope, model, cached_ids_column)
# Look up the cached ids and unnest them into rows if the cache is up to date.
cache_lookup_query = Namespaces::Descendants
.where(outdated_at: nil, namespace_id: id)
.select(cached_ids_column.as('ids'))
# Invoke the consistent lookup query and collect the ids as a single array value
consistent_descendant_ids_scope = model
.from(consistent_ids_scope.arel.as(model.table_name))
.reselect(Arel::Nodes::NamedFunction.new('ARRAY_AGG', [model.arel_table[:id]]).as('ids'))
.unscope(where: :type)
from = <<~SQL
UNNEST(
COALESCE(
(SELECT ids FROM (#{cache_lookup_query.to_sql}) cached_query),
(SELECT ids FROM (#{consistent_descendant_ids_scope.to_sql}) consistent_query))
) AS #{model.table_name}(id)
SQL
model
.from(from)
.unscope(where: :type)
.select(:id)
end
def attempt_to_use_cached_data?
Feature.enabled?(:group_hierarchy_optimization, self, type: :beta)
end
override :sync_traversal_ids
def sync_traversal_ids
super


@ -106,6 +106,10 @@ module Namespaces
end
end
def all_project_ids
all_projects.select(:id)
end
def self_and_descendants
return super unless use_traversal_ids?


@ -19,6 +19,12 @@ module Namespaces
end
alias_method :recursive_root_ancestor, :root_ancestor
def all_project_ids
namespace = user_namespace? ? self : recursive_self_and_descendant_ids
Project.where(namespace: namespace).select(:id)
end
alias_method :recursive_all_project_ids, :all_project_ids
# Returns all ancestors, self, and descendants of the current namespace.
def self_and_hierarchy
object_hierarchy(self.class.where(id: id))


@ -39,6 +39,7 @@ class UserDetail < MainClusterwide::ApplicationRecord
validates :skype, length: { maximum: DEFAULT_FIELD_LENGTH }, allow_blank: true
validates :twitter, length: { maximum: DEFAULT_FIELD_LENGTH }, allow_blank: true
validates :website_url, length: { maximum: DEFAULT_FIELD_LENGTH }, url: true, allow_blank: true, if: :website_url_changed?
validates :onboarding_status, json_schema: { filename: 'user_detail_onboarding_status' }
before_validation :sanitize_attrs
before_save :prevent_nil_fields


@ -61,7 +61,8 @@ module Groups
params[:namespace_descendants_attributes] = {
traversal_ids: group.traversal_ids,
all_project_ids: [],
self_and_descendant_group_ids: []
self_and_descendant_group_ids: [],
outdated_at: Time.current
}
else
return unless group.namespace_descendants


@ -0,0 +1,17 @@
{
"$schema": "http://json-schema.org/draft-07/schema#",
"title": "Onboarding Status",
"description": "Onboarding Status items recorded during onboarding/registration",
"type": "object",
"properties": {
"step_url": {
"description": "Onboarding step the user is currently on or last step before finishing",
"type": "string"
},
"email_opt_in": {
"description": "Setting to guide marketing email opt-ins outside of the product. See https://gitlab.com/gitlab-org/gitlab/-/issues/435741",
"type": "boolean"
}
},
"additionalProperties": false
}
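
Combined with the `json_schema` validation added to `UserDetail` above, this schema accepts only the two documented keys. A minimal sketch of how values are checked (the `user` record and the example values are assumed):

detail = UserDetail.new(user: user)
detail.onboarding_status = { 'step_url' => '/users/sign_up/welcome', 'email_opt_in' => true }
detail.valid? # => true, both properties match their declared types
detail.onboarding_status = { 'email_opt_in' => 'yes' }
detail.valid? # => false, "email_opt_in" must be a boolean
detail.onboarding_status = { 'favourite_color' => 'red' }
detail.valid? # => false, "additionalProperties": false rejects unknown keys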


@ -6,6 +6,8 @@
= auto_discovery_link_tag(:atom, safe_params.merge(rss_url_options).to_h, title: "#{current_user.name} issues")
= render_dashboard_ultimate_trial(current_user)
= render_if_exists 'shared/dashboard/saml_reauth_notice',
groups_requiring_saml_reauth: user_groups_requiring_reauth
.page-title-holder.gl-display-flex.gl-align-items-center
%h1.page-title.gl-font-size-h-display= _('Issues')


@ -11,6 +11,8 @@
add_page_specific_style 'page_bundles/issuable_list'
= render_dashboard_ultimate_trial(current_user)
= render_if_exists 'shared/dashboard/saml_reauth_notice',
groups_requiring_saml_reauth: user_groups_requiring_reauth
.page-title-holder.d-flex.align-items-start.flex-column.flex-sm-row.align-items-sm-center
%h1.page-title.gl-font-size-h-display= title


@ -2,7 +2,10 @@
= render_two_factor_auth_recovery_settings_check
= render_dashboard_ultimate_trial(current_user)
= render_if_exists 'dashboard/todos/saml_reauth_notice'
= render_if_exists 'shared/dashboard/saml_reauth_notice',
groups_requiring_saml_reauth: todo_groups_requiring_saml_reauth(@todos)
- add_page_specific_style 'page_bundles/todos'
- add_page_specific_style 'page_bundles/issuable'
- filter_by_done = params[:state] == 'done'


@ -2,7 +2,7 @@
- custom_text = custom_sign_in_description
!!! 5
%html.html-devise-layout{ class: user_application_theme, lang: I18n.locale }
= render "layouts/head", { startup_filename: 'signin' }
= render "layouts/head"
%body.gl-h-full.login-page.navless{ class: "#{system_message_class} #{client_class_list}", data: { page: body_data_page, testid: 'login-page' } }
= header_message
= render "layouts/init_client_detection_flags"
@ -15,12 +15,12 @@
.row.gl-mt-5.gl-row-gap-5
.col-md.order-12.sm-bg-gray
.col-sm-12
%h1.mb-3.gl-font-size-h2
%h1.gl-mb-5.gl-font-size-h2
= brand_title
= custom_text
.col-md.order-md-12
.col-sm-12.bar
.gl-text-center
.gl-text-center.gl-mb-5
= brand_image
= yield
- else


@ -1,33 +0,0 @@
.gl-display-flex.gl-flex-direction-column
.gl-display-flex.gl-flex-direction-column.gl-border-b-solid.gl-border-t-solid.gl-border-t-1.gl-border-b-1.gl-border-t-transparent.gl-border-b-gray-100
.gl-display-flex.gl-align-items-center.gl-py-3
.gl-display-flex.gl-flex-direction-column.gl-sm-flex-direction-row.gl-justify-content-space-between.gl-align-items-stretch.gl-flex-grow-1
.gl-display-flex.gl-flex-direction-column.gl-mb-3.gl-sm-mb-0.gl-min-w-0.gl-flex-grow-1
.gl-display-flex.gl-align-items-center.gl-text-body.gl-font-weight-bold.gl-font-size-h2
%span.gl-text-body.gl-font-weight-bold= docker_image.short_name
.gl-bg-gray-50.gl-inset-border-1-gray-100.gl-rounded-base.gl-pt-6
.gl-display-flex.gl-align-items-top.gl-font-monospace.gl-font-sm.gl-word-break-all.gl-p-4.gl-border-b-solid.gl-border-gray-100.gl-border-b-1
= sprite_icon('information-o', css_class: 'gl-text-gray-500 gl-mr-3 gl-icon s16')
Full name: #{docker_image.name}
.gl-display-flex.gl-align-items-top.gl-font-monospace.gl-font-sm.gl-word-break-all.gl-p-4.gl-border-b-solid.gl-border-gray-100.gl-border-b-1
= sprite_icon('earth', css_class: 'gl-text-gray-500 gl-mr-3 gl-icon s16')
%a{ href: docker_image.details_url, target: 'blank', rel: 'noopener noreferrer' }
Artifact Registry details page
.gl-display-flex.gl-align-items-top.gl-font-monospace.gl-font-sm.gl-word-break-all.gl-p-4.gl-border-b-solid.gl-border-gray-100.gl-border-b-1
= sprite_icon('doc-code', css_class: 'gl-text-gray-500 gl-mr-3 gl-icon s16')
Media Type: #{docker_image.media_type}
.gl-display-flex.gl-align-items-top.gl-font-monospace.gl-font-sm.gl-word-break-all.gl-p-4.gl-border-b-solid.gl-border-gray-100.gl-border-b-1
= sprite_icon('archive', css_class: 'gl-text-gray-500 gl-mr-3 gl-icon s16')
Size: #{number_to_human_size(docker_image.image_size_bytes)}
.gl-display-flex.gl-align-items-top.gl-font-monospace.gl-font-sm.gl-word-break-all.gl-p-4.gl-border-b-solid.gl-border-gray-100.gl-border-b-1
= sprite_icon('calendar', css_class: 'gl-text-gray-500 gl-mr-3 gl-icon s16')
Built at: #{docker_image.built_at&.to_fs}
.gl-display-flex.gl-align-items-top.gl-font-monospace.gl-font-sm.gl-word-break-all.gl-p-4.gl-border-b-solid.gl-border-gray-100.gl-border-b-1
= sprite_icon('calendar', css_class: 'gl-text-gray-500 gl-mr-3 gl-icon s16')
Uploaded at: #{docker_image.uploaded_at&.to_fs}
.gl-display-flex.gl-align-items-top.gl-font-monospace.gl-font-sm.gl-word-break-all.gl-p-4.gl-border-b-solid.gl-border-gray-100.gl-border-b-1
= sprite_icon('calendar', css_class: 'gl-text-gray-500 gl-mr-3 gl-icon s16')
Updated at: #{docker_image.updated_at&.to_fs}
- if docker_image.tags.present?
.gl-display-flex.gl-align-items-center.gl-text-gray-500.gl-min-h-6.gl-min-w-0.gl-flex-grow-1.gl-pt-4
= render partial: 'docker_image_tag', collection: docker_image.tags


@ -1 +0,0 @@
%a.gl-button.btn.btn-md.btn-default.gl-mr-3!= docker_image_tag


@ -1,13 +0,0 @@
.gl-display-flex.gl-justify-content-center
%nav.gl-pagination.gl-mt-3
.gl-keyset-pagination.btn-group
- if @page > 1
= link_to 'Prev', namespace_project_gcp_artifact_registry_docker_images_path(params[:namespace_id], params[:project_id], page_token: @previous_page_token, page_tokens: @page_tokens, page: @page - 1, gcp_project_id: params[:gcp_project_id], gcp_location: params[:gcp_location], gcp_ar_repository: params[:gcp_ar_repository], gcp_wlif_url: params[:gcp_wlif_url]), class: 'btn btn-default btn-md gl-button'
- else
%span.btn.btn-default.btn-md.gl-button.disabled= 'Prev'
- if @next_page_token.present?
= link_to 'Next', namespace_project_gcp_artifact_registry_docker_images_path(params[:namespace_id], params[:project_id], page_token: @next_page_token, page_tokens: @page_tokens, page: @page + 1, gcp_project_id: params[:gcp_project_id], gcp_location: params[:gcp_location], gcp_ar_repository: params[:gcp_ar_repository], gcp_wlif_url: params[:gcp_wlif_url]), class: 'btn btn-default btn-md gl-button'
- else
%span.btn.btn-default.btn-md.gl-button.disabled= 'Next'


@ -1,23 +0,0 @@
- page_title 'Artifact Registry Docker Images'
- unless @error
.gl-display-flex.gl-flex-direction-column
.gl-display-flex.gl-justify-content-space-between.gl-py-3
.gl-flex-direction-column.gl-flex-grow-1
.gl-display-flex
.gl-display-flex.gl-flex-direction-column
%h2.gl-font-size-h1.gl-mt-3.gl-mb-0 Docker Images of #{@artifact_repository_name}
= render partial: 'pagination'
= render partial: 'docker_image', collection: @docker_images
= render partial: 'pagination'
- else
.flash-container.flash-container-page.sticky
.gl-alert.flash-notice.gl-alert-info
.gl-alert-icon-container
= sprite_icon('information-o', css_class: 's16 gl-alert-icon gl-alert-icon-no-title')
.gl-alert-content
.gl-alert-body
- if @error
= @error
- else
Nothing to show here.


@ -1,31 +0,0 @@
- page_title 'Artifact Registry Setup'
- if @error.present?
.flash-container.flash-container-page.sticky
.gl-alert.flash-notice.gl-alert-info
.gl-alert-icon-container
= sprite_icon('information-o', css_class: 's16 gl-alert-icon gl-alert-icon-no-title')
.gl-alert-content
.gl-alert-body= @error
- else
%p
= form_tag namespace_project_gcp_artifact_registry_docker_images_path , method: :get do
.form-group.row
= label_tag :gcp_project_id, 'Google Project ID', class: 'col-form-label col-md-2'
.col-md-4
= text_field_tag :gcp_project_id, nil, class: 'form-control gl-form-input gl-mr-3'
.form-group.row
= label_tag :gcp_location, 'Google Project Location', class: 'col-form-label col-md-2'
.col-md-4
= text_field_tag :gcp_location, nil, class: 'form-control gl-form-input gl-mr-3'
.form-group.row
= label_tag :gcp_ar_repository, 'Artifact Registry Repository Name', class: 'col-form-label col-md-2'
.col-md-4
= text_field_tag :gcp_ar_repository, nil, class: 'form-control gl-form-input gl-mr-3'
.form-group.row
= label_tag :gcp_wlif_url, 'Worflow Identity Federation url', class: 'col-form-label col-md-2'
.col-md-4
= text_field_tag :gcp_wlif_url, nil, class: 'form-control gl-form-input gl-mr-3'
.form-actions
= submit_tag 'Setup', class: 'gl-button btn btn-confirm'


@ -1,3 +1,4 @@
- breadcrumb_title(s_("Wiki|New Page")) unless @page.persisted?
- wiki_page_title @page, @page.persisted? ? _('Edit') : _('New')
- add_page_specific_style 'page_bundles/wiki'
- @gfm_form = true
@ -16,7 +17,7 @@
&middot;
= s_("Wiki|Edit Page")
- else
= s_("Wiki|Create New Page")
= s_("Wiki|New Page")
.nav-controls.pb-md-3.pb-lg-0
- if @page.persisted?

bin/saas-feature.rb (new executable file, 381 lines added)

@ -0,0 +1,381 @@
#!/usr/bin/env ruby
#
# Generate a SaaS feature entry file in the correct location.
#
# Automatically stages the file and amends the previous commit if the `--amend`
# argument is used.
require 'fileutils'
require 'httparty'
require 'json'
require 'optparse'
require 'readline'
require 'shellwords'
require 'uri'
require 'yaml'
require_relative '../lib/gitlab/popen'
module SaasFeatureHelpers
Abort = Class.new(StandardError)
Done = Class.new(StandardError)
def capture_stdout(cmd)
output = IO.popen(cmd, &:read)
fail_with "command failed: #{cmd.join(' ')}" unless $?.success?
output
end
def fail_with(message)
raise Abort, "\e[31merror\e[0m #{message}"
end
end
class SaasFeatureOptionParser
extend SaasFeatureHelpers
WWW_GITLAB_COM_SITE = 'https://about.gitlab.com'
WWW_GITLAB_COM_GROUPS_JSON = "#{WWW_GITLAB_COM_SITE}/groups.json".freeze
COPY_COMMANDS = [
'pbcopy', # macOS
'xclip -selection clipboard', # Linux
'xsel --clipboard --input', # Linux
'wl-copy' # Wayland
].freeze
OPEN_COMMANDS = [
'open', # macOS
'xdg-open' # Linux
].freeze
Options = Struct.new(
:name,
:group,
:milestone,
:amend,
:dry_run,
:force,
:introduced_by_url,
keyword_init: true
)
class << self
def parse(argv)
options = Options.new
parser = OptionParser.new do |opts|
opts.banner = "Usage: #{__FILE__} [options] <saas-feature>\n\n"
# Note: We do not provide a shorthand for this in order to match the `git
# commit` interface
opts.on('--amend', 'Amend the previous commit') do |value|
options.amend = value
end
opts.on('-f', '--force', 'Overwrite an existing entry') do |value|
options.force = value
end
opts.on('-m', '--introduced-by-url [string]', String, 'URL of merge request introducing the SaaS feature') do |value|
options.introduced_by_url = value
end
opts.on('-M', '--milestone [string]', String, 'Milestone in which the SaaS feature was introduced') do |value|
options.milestone = value
end
opts.on('-n', '--dry-run', "Don't actually write anything, just print") do |value|
options.dry_run = value
end
opts.on('-g', '--group [string]', String, 'The group introducing a SaaS feature, like: `group::project management`') do |value|
options.group = value if group_labels.include?(value)
end
opts.on('-h', '--help', 'Print help message') do
$stdout.puts opts
raise Done.new
end
end
parser.parse!(argv)
unless argv.one?
$stdout.puts parser.help
$stdout.puts
raise Abort, 'SaaS feature name is required'
end
# Name is the first positional argument
options.name = argv.first.downcase.tr('-', '_')
options
end
def groups
@groups ||= fetch_json(WWW_GITLAB_COM_GROUPS_JSON)
end
def group_labels
@group_labels ||= groups.map { |_, group| group['label'] }.sort
end
def find_group_by_label(label)
groups.find { |_, group| group['label'] == label }[1]
end
def group_list
group_labels.map.with_index do |group_label, index|
"#{index + 1}. #{group_label}"
end
end
def fzf_available?
find_compatible_command(%w[fzf])
end
def prompt_readline(prompt:)
Readline.readline('?> ', false)&.strip
end
def prompt_fzf(list:, prompt:)
arr = list.join("\n")
selection = IO.popen(%W[fzf --tac --prompt #{prompt}], "r+") do |pipe|
pipe.puts(arr)
pipe.close_write
pipe.readlines
end.join.strip
selection[/(\d+)\./, 1]
end
def print_list(list)
return if list.empty?
$stdout.puts list.join("\n")
end
def print_prompt(prompt)
$stdout.puts
$stdout.puts ">> #{prompt}:"
$stdout.puts
end
def prompt_list(prompt:, list: nil)
if fzf_available?
prompt_fzf(list: list, prompt: prompt)
else
prompt_readline(prompt: prompt)
end
end
def fetch_json(json_url)
json = with_retries { HTTParty.get(json_url, format: :plain) }
JSON.parse(json)
end
def with_retries(attempts: 3)
yield
rescue Errno::ECONNRESET, OpenSSL::SSL::SSLError, Net::OpenTimeout
retry if (attempts -= 1).positive?
raise
end
def read_group
prompt = 'Specify the group label to which the SaaS feature belongs, from the following list'
unless fzf_available?
print_prompt(prompt)
print_list(group_list)
end
loop do
group = prompt_list(prompt: prompt, list: group_list)
group = group_labels[group.to_i - 1] unless group.to_i.zero?
if group_labels.include?(group)
$stdout.puts "You picked the group '#{group}'"
return group
else
$stderr.puts "The group label isn't in the above labels list"
end
end
end
def read_introduced_by_url
read_url('URL of the MR introducing the SaaS feature (enter to skip and let Danger provide a suggestion directly in the MR):')
end
def read_milestone
milestone = File.read('VERSION')
milestone.gsub(/^(\d+\.\d+).*$/, '\1').chomp
end
def read_url(prompt)
$stdout.puts
$stdout.puts ">> #{prompt}"
loop do
url = Readline.readline('?> ', false)&.strip
url = nil if url.empty?
return url if url.nil? || valid_url?(url)
end
end
def valid_url?(url)
unless url.start_with?('https://')
$stderr.puts 'URL needs to start with https://'
return false
end
response = HTTParty.head(url)
return true if response.success?
$stderr.puts "URL '#{url}' isn't valid!"
end
def open_url!(url)
_, open_url_status = Gitlab::Popen.popen([open_command, url])
open_url_status
end
def copy_to_clipboard!(text)
IO.popen(copy_to_clipboard_command.shellsplit, 'w') do |pipe|
pipe.print(text)
end
end
def copy_to_clipboard_command
find_compatible_command(COPY_COMMANDS)
end
def open_command
find_compatible_command(OPEN_COMMANDS)
end
def find_compatible_command(commands)
commands.find do |command|
Gitlab::Popen.popen(%W[which #{command.split(' ')[0]}])[1] == 0
end
end
end
end
class SaasFeatureCreator
include SaasFeatureHelpers
attr_reader :options
def initialize(options)
@options = options
end
def execute
assert_feature_branch!
assert_name!
assert_existing_saas_feature!
options.group ||= SaasFeatureOptionParser.read_group
options.introduced_by_url ||= SaasFeatureOptionParser.read_introduced_by_url
options.milestone ||= SaasFeatureOptionParser.read_milestone
$stdout.puts "\e[32mcreate\e[0m #{file_path}"
$stdout.puts contents
unless options.dry_run
write
amend_commit if options.amend
end
if editor
system(editor, file_path)
end
end
private
def contents
config_hash.to_yaml
end
def config_hash
{
'name' => options.name,
'introduced_by_url' => options.introduced_by_url,
'milestone' => options.milestone,
'group' => options.group
}
end
def write
FileUtils.mkdir_p(File.dirname(file_path))
File.write(file_path, contents)
end
def editor
ENV['EDITOR']
end
def amend_commit
fail_with 'git add failed' unless system(*%W[git add #{file_path}])
system('git commit --amend')
end
def assert_feature_branch!
return unless branch_name == 'master'
fail_with 'Create a branch first!'
end
def assert_existing_saas_feature!
existing_path = all_saas_feature_names[options.name]
return unless existing_path
return if options.force
fail_with "#{existing_path} already exists! Use `--force` to overwrite."
end
def assert_name!
return if options.name.match(/\A[a-z0-9_-]+\Z/)
fail_with 'Provide a name for the SaaS feature that is [a-z0-9_-]'
end
def file_path
saas_features_path.sub('*.yml', options.name + '.yml')
end
def all_saas_feature_names
# check flatten needs
@all_saas_feature_names ||=
Dir.glob(saas_features_path).map do |path|
[File.basename(path, '.yml'), path]
end.to_h
end
def saas_features_path
File.join('ee', 'config', 'saas_features', '*.yml')
end
def branch_name
@branch_name ||= capture_stdout(%w[git symbolic-ref --short HEAD]).strip
end
end
if $0 == __FILE__
begin
options = SaasFeatureOptionParser.parse(ARGV)
SaasFeatureCreator.new(options).execute
rescue SaasFeatureHelpers::Abort => ex
$stderr.puts ex.message
exit 1
rescue SaasFeatureHelpers::Done
exit
end
end
# vim: ft=ruby
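
As a usage sketch (the group label, milestone, and feature name here are illustrative), a non-interactive dry run from a feature branch could look like this, reusing the classes defined above:

# Equivalent CLI: bin/saas-feature.rb --dry-run -M 16.9 -g 'group::container registry' \
#   -m https://gitlab.com/gitlab-org/gitlab/-/merge_requests/139802 my_saas_feature
# Note: -g validation fetches group labels from about.gitlab.com, so network access is assumed.
options = SaasFeatureOptionParser.parse(
  ['--dry-run', '-M', '16.9', '-g', 'group::container registry',
   '-m', 'https://gitlab.com/gitlab-org/gitlab/-/merge_requests/139802',
   'my_saas_feature']
)
SaasFeatureCreator.new(options).execute # prints the ee/config/saas_features/my_saas_feature.yml entry without writing it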


@ -1,8 +0,0 @@
---
name: gcp_technical_demo
introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/139802
rollout_issue_url: https://gitlab.com/gitlab-org/gitlab/-/issues/434685
milestone: '16.7'
type: development
group: group::container registry
default_enabled: false


@ -20,7 +20,7 @@ module RestClient
raise ArgumentError, "URL is blocked: #{e.message}"
end
# Gitlab::UrlBlocker returns a Addressable::URI which we need to coerce
# Gitlab::HTTP_V2::UrlBlocker returns an Addressable::URI which we need to coerce
# to URI so that rest-client can use it to determine if it's a
# URI::HTTPS or not. It uses it to set `net.use_ssl` to true or not:
#


@ -1,13 +0,0 @@
# frozen_string_literal: true
# All routing related to gcp
# that is already under /-/ scope only
scope format: false do
namespace :gcp do
namespace :artifact_registry do
resources :docker_images, only: :index
resources :setup, only: :new
end
end
end


@ -408,7 +408,6 @@ constraints(::Constraints::ProjectUrlConstrainer.new) do
# its preferable to keep it below all other project routes
draw :repository
draw :wiki
draw :gcp
namespace :import do
resource :jira, only: [:show], controller: :jira


@ -13,7 +13,7 @@ class MigrateDataFromWorkspacesUrlColumn < Gitlab::Database::Migration[2.2]
each_batch_range('workspaces', scope: ->(table) { table.all }, of: BATCH_SIZE) do |min, max|
execute(<<~SQL)
UPDATE workspaces
SET url_prefix = CONCAT('https://#{DEFAULT_PORT}-', name),
SET url_prefix = CONCAT('#{DEFAULT_PORT}-', name),
dns_zone = remote_development_agent_configs.dns_zone,
url_query_string = CASE
WHEN POSITION('?' IN url) > 0


@ -0,0 +1,10 @@
# frozen_string_literal: true
class AddOnboardingStatusToUserDetails < Gitlab::Database::Migration[2.2]
milestone '16.9'
enable_lock_retries!
def change
add_column :user_details, :onboarding_status, :jsonb, default: {}, null: false
end
end


@ -0,0 +1,20 @@
# frozen_string_literal: true
class UpdateWorkspacesUrlPrefixColumn < Gitlab::Database::Migration[2.2]
milestone '16.8'
disable_ddl_transaction!
restrict_gitlab_migration gitlab_schema: :gitlab_main
def up
execute(<<~SQL)
UPDATE workspaces
SET url_prefix=REPLACE(url_prefix, 'https://', '')
WHERE url_prefix LIKE 'https://%'
SQL
end
def down
# no-op
end
end


@ -0,0 +1,35 @@
# frozen_string_literal: true
class EnsureBackfillForCiBuildsIntegerColumnsIsFinished < Gitlab::Database::Migration[2.2]
include Gitlab::Database::MigrationHelpers::ConvertToBigint
milestone '16.9'
restrict_gitlab_migration gitlab_schema: :gitlab_ci
disable_ddl_transaction!
TABLE_NAME = :ci_builds
COLUMN_NAMES = %w[
auto_canceled_by_id
commit_id
erased_by_id
project_id
runner_id
trigger_request_id
upstream_pipeline_id
user_id
]
BIGINT_COLUMN_NAMES = COLUMN_NAMES.map { |name| "#{name}_convert_to_bigint" }
def up
ensure_batched_background_migration_is_finished(
job_class_name: 'CopyColumnUsingBackgroundMigrationJob',
table_name: TABLE_NAME,
column_name: 'id',
job_arguments: [COLUMN_NAMES, BIGINT_COLUMN_NAMES]
)
end
def down
# no-op
end
end


@ -0,0 +1,27 @@
# frozen_string_literal: true
class PrepareAsyncIndexesForPCiBuildsAutoCanceledById < Gitlab::Database::Migration[2.2]
include Gitlab::Database::PartitioningMigrationHelpers
milestone '16.9'
disable_ddl_transaction!
INDEX_NAME = "p_ci_builds_auto_canceled_by_id_bigint_idx"
TABLE_NAME = :p_ci_builds
COLUMN_NAME = :auto_canceled_by_id_convert_to_bigint
WHERE_CLAUSE = "auto_canceled_by_id_convert_to_bigint IS NOT NULL"
def up
Gitlab::Database::PostgresPartitionedTable.each_partition(TABLE_NAME) do |partition|
index_name = generated_index_name(partition.identifier, INDEX_NAME)
prepare_async_index partition.identifier, COLUMN_NAME, name: index_name, where: WHERE_CLAUSE
end
end
def down
Gitlab::Database::PostgresPartitionedTable.each_partition(TABLE_NAME) do |partition|
index_name = generated_index_name(partition.identifier, INDEX_NAME)
unprepare_async_index partition.identifier, COLUMN_NAME, name: index_name, where: WHERE_CLAUSE
end
end
end


@ -0,0 +1,32 @@
# frozen_string_literal: true
class PrepareAsyncIndexesForPCiBuildsCommitIdPart1 < Gitlab::Database::Migration[2.2]
include Gitlab::Database::PartitioningMigrationHelpers
milestone '16.9'
disable_ddl_transaction!
INDEXES = [
[[:commit_id_convert_to_bigint, :status, :type], "p_ci_builds_commit_id_bigint_status_type_idx"],
[[:commit_id_convert_to_bigint, :type, :name, :ref], "p_ci_builds_commit_id_bigint_type_name_ref_idx"]
]
TABLE_NAME = :p_ci_builds
def up
Gitlab::Database::PostgresPartitionedTable.each_partition(TABLE_NAME) do |partition|
INDEXES.each do |columns, routing_table_index_name|
index_name = generated_index_name(partition.identifier, routing_table_index_name)
prepare_async_index partition.identifier, columns, name: index_name
end
end
end
def down
Gitlab::Database::PostgresPartitionedTable.each_partition(TABLE_NAME) do |partition|
INDEXES.each do |columns, routing_table_index_name|
index_name = generated_index_name(partition.identifier, routing_table_index_name)
unprepare_async_index partition.identifier, columns, name: index_name
end
end
end
end


@ -0,0 +1 @@
d58f59f84c1d9c08f8ba3466c844b01a1ab8ea429de9b0fb43dcd53e7611e2d6


@ -0,0 +1 @@
a404dcf1d57886af351b696f8cbc451e7e81d69618a6bd3c30e1676a99172ca6


@ -0,0 +1 @@
554281098ec89db0f0bc0402e34c1fe413003afa63b0001da8d5ad772dd9725c


@ -0,0 +1 @@
3feac709d7c1e6d911068d096e876fcbdace5c3e47036a3c8d9bb347a4d6f4ba


@ -0,0 +1 @@
93164c045f7581f7dbcd606f2217514344bf2fdb72a6ece05241214e7a7f3c9d


@ -24956,6 +24956,7 @@ CREATE TABLE user_details (
email_reset_offered_at timestamp with time zone,
mastodon text DEFAULT ''::text NOT NULL,
project_authorizations_recalculated_at timestamp with time zone DEFAULT '2010-01-01 00:00:00+00'::timestamp with time zone NOT NULL,
onboarding_status jsonb DEFAULT '{}'::jsonb NOT NULL,
CONSTRAINT check_245664af82 CHECK ((char_length(webauthn_xid) <= 100)),
CONSTRAINT check_444573ee52 CHECK ((char_length(skype) <= 500)),
CONSTRAINT check_466a25be35 CHECK ((char_length(twitter) <= 500)),


@ -20240,6 +20240,7 @@ four standard [pagination arguments](#connection-pagination-arguments):
| <a id="groupprojectshascodecoverage"></a>`hasCodeCoverage` | [`Boolean`](#boolean) | Returns only the projects which have code coverage. |
| <a id="groupprojectshasvulnerabilities"></a>`hasVulnerabilities` | [`Boolean`](#boolean) | Returns only the projects which have vulnerabilities. |
| <a id="groupprojectsids"></a>`ids` | [`[ID!]`](#id) | Filter projects by IDs. |
| <a id="groupprojectsincludearchived"></a>`includeArchived` | [`Boolean`](#boolean) | Include also archived projects. |
| <a id="groupprojectsincludesubgroups"></a>`includeSubgroups` | [`Boolean`](#boolean) | Include also subgroup projects. |
| <a id="groupprojectsnotaimedfordeletion"></a>`notAimedForDeletion` | [`Boolean`](#boolean) | Include projects that are not aimed for deletion. |
| <a id="groupprojectssbomcomponentid"></a>`sbomComponentId` | [`ID`](#id) | Return only the projects related to the specified SBOM component. |
@ -23297,6 +23298,7 @@ four standard [pagination arguments](#connection-pagination-arguments):
| <a id="namespaceprojectshascodecoverage"></a>`hasCodeCoverage` | [`Boolean`](#boolean) | Returns only the projects which have code coverage. |
| <a id="namespaceprojectshasvulnerabilities"></a>`hasVulnerabilities` | [`Boolean`](#boolean) | Returns only the projects which have vulnerabilities. |
| <a id="namespaceprojectsids"></a>`ids` | [`[ID!]`](#id) | Filter projects by IDs. |
| <a id="namespaceprojectsincludearchived"></a>`includeArchived` | [`Boolean`](#boolean) | Include also archived projects. |
| <a id="namespaceprojectsincludesubgroups"></a>`includeSubgroups` | [`Boolean`](#boolean) | Include also subgroup projects. |
| <a id="namespaceprojectsnotaimedfordeletion"></a>`notAimedForDeletion` | [`Boolean`](#boolean) | Include projects that are not aimed for deletion. |
| <a id="namespaceprojectssbomcomponentid"></a>`sbomComponentId` | [`ID`](#id) | Return only the projects related to the specified SBOM component. |

Binary files not shown: three images added (150 KiB, 152 KiB, 270 KiB) and two images removed (46 KiB, 38 KiB).


@ -3,7 +3,7 @@ status: proposed
creation-date: "2023-09-11"
authors: [ "@abrandl" ]
coach: "@andrewn"
approvers: [ "@swiskow", "@rnienaber", "@o-lluch" ]
approvers: [ "@swiskow", "@lmcandrew", "@o-lluch" ]
---
<!-- Blueprints often contain forward-looking statements -->
@ -19,7 +19,7 @@ We make use of [Tamland](https://gitlab.com/gitlab-com/gl-infra/tamland), a tool
We propose to include Tamland as a part of the GitLab Dedicated stack and execute forecasting from within the tenant environments.
Tamland predicts SLO violations and their respective dates, which need to be reviewed and acted upon.
In terms of team organisation, the Dedicated team is proposed to own the tenant-side setup for Tamland and to own the predicted SLO violations, with the help and guidance of the Scalability::Projections team, which drives further development, documentation and overall guidance for capacity planning, including for Dedicated.
In terms of team organisation, the Dedicated team is proposed to own the tenant-side setup for Tamland and to own the predicted SLO violations, with the help and guidance of the Scalability::Observability team, which drives further development, documentation and overall guidance for capacity planning, including for Dedicated.
With this setup, we aim to turn Tamland into a more generic tool, which can be used in various environments including but not limited to Dedicated tenants.
Long-term, we think of including Tamland in self-managed installations and think of Tamland as a candidate for open source release.
@ -32,8 +32,8 @@ Long-term, we think of including Tamland in self-managed installations and think
It implements [capacity planning](https://about.gitlab.com/handbook/engineering/infrastructure/capacity-planning/) for GitLab.com, which is a [controlled activity covered by SOC 2](https://gitlab.com/gitlab-com/gl-security/security-assurance/security-compliance-commercial-and-dedicated/observation-management/-/issues/604).
As of today, it is used exclusively for GitLab.com to predict upcoming SLO violations across hundreds of monitored infrastructure components.
Tamland produces a [report](https://gitlab-com.gitlab.io/gl-infra/tamland/intro.html) (hosted on GitLab Pages) containing forecast plots, information around predicted violations and other information around the components monitored.
Any predicted SLO violation result in a capacity warning issue being created in the [issue tracker for capacity planning](https://gitlab.com/gitlab-com/gl-infra/capacity-planning/-/boards/2816983) on GitLab.com.
Tamland produces a [report](https://gitlab-com.gitlab.io/gl-infra/tamland/intro.html) (internal link, hosted on GitLab Pages) containing forecast plots, information about predicted violations, and other details about the monitored components.
Any predicted SLO violation results in a capacity warning issue being created in the [issue tracker for capacity planning](https://gitlab.com/gitlab-com/gl-infra/capacity-planning/-/boards/2816983) on GitLab.com.
At present, Tamland is quite tailor made and specific for GitLab.com:
@ -44,13 +44,13 @@ At present, Tamland is quite tailor made and specific for GitLab.com:
For illustration, we can see a saturation forecast plot below for the `disk_space` resource for a PostgreSQL service called `patroni-ci`.
Within the 90 days forecast horizon, we predict a violation of the `soft` SLO (set at 85% saturation) and this resulted in the creation of a [capacity planning issue](https://gitlab.com/gitlab-com/gl-infra/capacity-planning/-/issues/1219) for further review and potential actions.
At present, the Scalability::Projections group reviews those issues and engages with the respective DRI for the service in question to remedy a saturation concern.
At present, the Scalability::Observability group reviews those issues and engages with the respective DRI for the service in question to remedy a saturation concern.
<img src="images/image-20230911144743188.png" alt="image-20230911144743188" style="zoom:67%;" />
For GitLab.com capacity planning, we operate Tamland from a scheduled CI pipeline with access to the central Thanos, which provides saturation and utilization metrics for GitLab.com.
The CI pipeline produces the desired report, exposes it on GitLab Pages and also creates capacity planning issues.
Scalability::Projections runs a capacity planning triage rotation which entails reviewing and prioritizing any open issues and their respective saturation concerns.
Scalability::Observability runs a capacity planning triage rotation which entails reviewing and prioritizing any open issues and their respective saturation concerns.
### Problem Statement
@ -62,7 +62,7 @@ These metrics are standardized in the [metrics catalog](https://gitlab.com/gitla
In order to provide capacity planning and forecasts for saturation metrics for each tenant, we'd like to get Tamland set up for GitLab Dedicated.
While Tamland is developed by the Scalability::Projections and this team also owns the capacity planning process for GitLab.com, they don't have access to any of the Dedicated infrastructure as we have strong isolation implemented for Dedicated environments.
While Tamland is developed by the Scalability::Observability group, which also owns the capacity planning process for GitLab.com, the team doesn't have access to any of the Dedicated infrastructure because we have strong isolation implemented for Dedicated environments.
As such, the technical design choices are going to affect how those teams interact and vice versa. We include this consideration in this documentation because we think the organisational aspect is a crucial part of it.
### Key questions
@ -79,21 +79,27 @@ As such, the technical design choices are going to affect how those teams intera
1. Tamland's output is forecasting data only (plots, SLO violation dates, etc. - no report, no issue management - see below)
1. Tamland stores the output data in an S3 bucket for further inspection
#### Non-goals
### Goals: Iteration 1
##### Reporting
In Iteration 0, we've integrated Tamland into GitLab Dedicated environments and started to generate forecasting data for each tenant regularly.
As of today, it's not quite clear yet how we'd like to consume forecasting data across tenants.
In contrast to GitLab.com, we generate forecasts across a potentially large number of tenants.
At this point, we suspect that we're more interested in an aggregate report across tenants than in individual, very detailed saturation forecasts.
As such, this is subject to refinement in a further iteration once we have the underlying data available and gathered practical insight in how we consume this information.
In order to consume this data and make it actionable, this iteration is about providing reporting functionality for GitLab Dedicated:
We generate a static site, deployed to GitLab Pages, that contains individual Tamland reports for all tenants.
##### Issue management
We use the default Tamland report to generate the per-tenant report.
In a future iteration, we may want to provide another type of report specifically tailored for GitLab Dedicated needs.
While each predicted SLO violation results in the creation of a GitLab issue, this may not be the right mode of raising awareness for Dedicated.
Similar to the reporting side, this is subject to further discussion once we have data to look at.
### Goals: Iteration 2
##### Customizing forecasting models
In order to raise awareness for a predicted SLO violation, Tamland has functionality to manage a GitLab issue tracker and create an issue for a capacity warning.
We use this, for example, to manage capacity warnings for GitLab.com using the [`gitlab-com` capacity planning tracker](https://gitlab.com/gitlab-com/gl-infra/capacity-planning-trackers/gitlab-com/-/issues).
For GitLab Dedicated tenants, we suggest using the `gitlab-dedicated` capacity planning tracker in a similar fashion:
For each predicted SLO violation with reasonable confidence, we create a capacity warning issue on this tracker and use a scoped label to distinguish warnings for different tenants (see below for more details).
### Non-goals
#### Customizing forecasting models
Forecasting models can and should be tuned and informed with domain knowledge to produce accurate forecasts.
This information is a part of the Tamland manifest.
@ -105,9 +111,11 @@ Dedicated environments are fully isolated and run their own Prometheus instance
Tamland will run from each individual Dedicated tenant environment, consume metrics from Prometheus and store the resulting data in S3.
From there, we consume forecast data and act on it.
![tamland-as-part-of-stack](images/tamland-as-part-of-stack.png)
![dedicated-capacity-planning-forecasting](images/dedicated-capacity-planning-forecasting.png)
### Storage for output and cache
### Generating forecasts
#### Storage for output and cache
Any data Tamland relies on is stored in an S3 bucket.
We use one bucket per tenant to clearly separate data between tenants.
@ -117,7 +125,7 @@ We use one bucket per tenant to clearly separate data between tenants.
There is no need for a persistent state across Tamland runs aside from the S3 bucket.
### Benefits of executing inside tenant environments
#### Benefits of executing inside tenant environments
Each Tamland run for a single environment (tenant) can take a few hours to execute.
With the number of tenants expected to increase significantly, we need to consider scaling the execution environment for Tamland.
@ -125,11 +133,11 @@ With the number of tenants expected to increase significantly, we need to consid
In this design, Tamland becomes a part of the Dedicated stack and a component of the individual tenant environment.
As such, scaling the execution environment for Tamland is solved by design, because tenant forecasts execute inherently parallel in their respective environments.
### Distribution model: Docker
#### Distribution model: Docker + Helm chart
Tamland is released as a Docker image, see [Tamland's README](https://gitlab.com/gitlab-com/gl-infra/tamland/-/blob/main/README.md) for further details.
Tamland is released as a Docker image along with a Helm chart; see [Tamland's README](https://gitlab.com/gitlab-com/gl-infra/tamland/-/blob/main/README.md) for further details.
### Tamland manifest
#### Tamland Manifest
The manifest contains information about which saturation metrics to forecast on (see this [manifest example](https://gitlab.com/gitlab-com/gl-infra/tamland/-/blob/62854e1afbc2ed3160a55a738ea587e0cf7f994f/saturation.json) for GitLab.com).
This will be generated from the metrics catalog and will be the same for all tenants for starters.
@ -139,10 +147,33 @@ On a regular basis, a scheduled pipeline grabs the metrics catalog, generates th
On the Dedicated tenants, we download the latest version of the committed JSON manifest from `tamland-dedicated` and use this as input to execute Tamland.
### Acting on forecast insights
### Capacity planning reports and Capacity Warnings
When Tamland forecast data is available for a tenant, the Dedicated teams consume this data and act on it accordingly.
The Scalability::Observability group is going to support and guide this process initially and help interpret the data, along with implementing Tamland features required to streamline this process for Dedicated in further iterations.
Based on Tamland's forecasting data, we generate reports to display forecasting information and enable teams to act on capacity warnings by creating capacity warnings in a GitLab issue tracker.
![dedicated-capacity-planning-reporting](images/dedicated-capacity-planning-reporting.png)
The Scalability::Observability team maintains an [internal GitLab project called `gitlab-dedicated`](https://gitlab.com/gitlab-com/gl-infra/capacity-planning-trackers/gitlab-dedicated).
This project contains a scheduled CI pipeline to regularly produce a [static site deployed to GitLab Pages (only available internally)](https://gitlab-com.gitlab.io/gl-infra/capacity-planning-trackers/gitlab-dedicated/).
It also contains functionality to create and manage capacity warnings in the issue tracker of this project.
CI configuration for this project contains a list of tenants along with their respective metadata (for example, AWS account and codename).
For each configured tenant, the CI pipeline uses a central IAM role in the amp account.
With this role, a tenant-specific IAM role can be assumed, which has read-only access to the respective S3 bucket containing the tenant's forecasting data.
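A minimal sketch of this role chaining with the AWS SDK for Ruby; the role ARNs, session names, and bucket name are illustrative placeholders, not the actual Dedicated configuration:

```ruby
require 'aws-sdk-core'
require 'aws-sdk-s3'

# Assume the central role first (placeholder ARN).
central_role = Aws::AssumeRoleCredentials.new(
  role_arn: 'arn:aws:iam::111111111111:role/capacity-planning-central',
  role_session_name: 'capacity-planning'
)

# Chain into the tenant-specific read-only role (placeholder ARN).
tenant_role = Aws::AssumeRoleCredentials.new(
  client: Aws::STS::Client.new(credentials: central_role),
  role_arn: 'arn:aws:iam::222222222222:role/tenant-c1-forecast-readonly',
  role_session_name: 'tenant-c1'
)

# List the tenant's forecasting data in its S3 bucket (placeholder name).
s3 = Aws::S3::Client.new(credentials: tenant_role)
s3.list_objects_v2(bucket: 'tamland-tenant-c1').contents.each do |object|
  puts object.key
end
```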
The CI pipeline produces a standard Tamland report for each tenant and integrates all individual reports into a single static site.
This site provides unified access to capacity forecasting insights across tenant environments.
Along with the report, the CI pipeline also reacts to predicted SLO violations and creates a capacity warning issue in the project's issue tracker.
As the tracker is being used for *all* GitLab Dedicated tenants, we employ a `~tenant:CN` label to distinguish tenant environments (e.g. we use `~tenant:C1` for the tenant with codename C1).
These issues contain further information about the tenant and component affected, along with forecasts and status information.
The intention here is to create visibility into predicted SLO violations and provide a way for the Dedicated team to engage with capacity warnings directly (for example, for discussion or work scheduling).
Overall, the Dedicated teams and operators use the Tamland report and issue tracker to act on capacity warnings.
To get started, we suggest that the Dedicated group take a regular pass across the capacity warnings and triage them.
For additional visibility, we may want to send Slack notifications for newly created capacity warnings.
## Alternative Solution
@ -150,7 +181,7 @@ The Scalability::Observability group is going to support and guide this process
An alternative design, which we don't consider an option at this point, is to set up Tamland as a Service and run it fully **outside** of tenant environments.
![tamland-as-a-service](images/tamland-as-a-service.png)
![dedicated-capacity-planning-as-a-service](images/dedicated-capacity-planning-as-a-service.png)
In this design, a central Prometheus/Thanos instance is needed to provide the metrics data for Tamland.
Dedicated tenants use remote-write to push their Prometheus data to the central Thanos instance.

View File

@ -38,8 +38,8 @@ in your `.gitlab-ci.yml` file. Each image runs a specific version of macOS and X
| VM image | Status | |
|----------------------------|--------|--------------|
| `macos-12-xcode-14` | `Deprecated` | (Removal in GitLab 16.10) |
| `macos-13-xcode-14` | `GA` | [Preinstalled Software](https://gitlab.com/gitlab-org/ci-cd/shared-runners/images/job-images/-/blob/main/toolchain/macos-13.yml) |
| `macos-14-xcode-15` | `GA` | [Preinstalled Software](https://gitlab.com/gitlab-org/ci-cd/shared-runners/images/job-images/-/blob/main/toolchain/macos-14.yml) |
| `macos-13-xcode-14` | `GA` | [Preinstalled Software](https://gitlab.com/gitlab-org/ci-cd/shared-runners/images/job-images/-/blob/36d443841732f2d4f7e3de1bce63f530edef1676/toolchain/macos-13.yml) |
| `macos-14-xcode-15` | `GA` | [Preinstalled Software](https://gitlab.com/gitlab-org/ci-cd/shared-runners/images/job-images/-/blob/36d443841732f2d4f7e3de1bce63f530edef1676/toolchain/macos-14.yml) |
If no image is specified, the macOS runner uses `macos-13-xcode-14`.

View File

@ -63,6 +63,29 @@ Each SaaS feature is defined in a separate YAML file consisting of a number of f
| `milestone` | no | Milestone in which the SaaS feature was created. |
| `group` | no | The [group](https://about.gitlab.com/handbook/product/categories/#devops-stages) that owns the SaaS feature. |
#### Create a new SaaS feature file definition
The GitLab codebase provides [`bin/saas-feature.rb`](https://gitlab.com/gitlab-org/gitlab/-/blob/master/bin/saas-feature.rb),
a dedicated tool to create new SaaS feature definitions.
The tool asks various questions about the new SaaS feature, then creates
a YAML definition in `ee/config/saas_features`.
Only SaaS features that have a YAML definition file can be used when running the development or testing environments.
```shell
bin/saas-feature my_saas_feature
You picked the group 'group::acquisition'
>> URL of the MR introducing the SaaS feature (enter to skip and let Danger provide a suggestion directly in the MR):
?> https://gitlab.com/gitlab-org/gitlab/-/merge_requests/38602
create ee/config/saas_features/my_saas_feature.yml
---
name: my_saas_feature
introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/38602
milestone: '16.8'
group: group::acquisition
```
### Opting out of a SaaS-only feature on another SaaS instance (JiHu)
Prepend the `ee/lib/ee/gitlab/saas.rb` module and override the `Gitlab::Saas.feature_available?` method.
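A minimal sketch of such an override, assuming a hypothetical JiHu-side module; only the `Gitlab::Saas.feature_available?` hook comes from the sentence above:

```ruby
# Hypothetical JiHu-side override; module and feature names are illustrative.
module JH
  module GitlabSaas
    def feature_available?(feature)
      return false if feature == :some_opted_out_feature # opt out on this instance

      super
    end
  end
end

# feature_available? is a module-level method, so prepend to the singleton class.
Gitlab::Saas.singleton_class.prepend(JH::GitlabSaas)
```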

View File

@ -168,74 +168,87 @@ For example, you see in `GroupPolicy` that there is an ability called
`read_project_security_dashboard`. You'd like to make both customizable. Rather
than adding a row to the `member_roles` table for each ability, consider
renaming them to `read_security_dashboard` and adding `read_security_dashboard`
to the `member_roles` table. This is more expected because it means that
enabling `read_security_dashboard` on the parent group will enable the custom role.
For example, `GroupPolicy` has an ability called `read_group_security_dashboard` and `ProjectPolicy` has an ability
called `read_project_security_dashboard`. If you would like to make both customizable, rather than adding a row to the
`member_roles` table for each ability, consider renaming them to `read_security_dashboard` and adding
`read_security_dashboard` to the `member_roles` table. This convention means that enabling `read_security_dashboard` on
to the `member_roles` table. Enabling `read_security_dashboard` on
the parent group will allow the custom role to access the group security dashboard and the project security dashboard
for each project in that group. Enabling the same permission on a specific project will allow access to that project's
security dashboard.
### Implement a new ability
To add a new ability to a custom role:
#### Step 1. Generate a configuration file
- Generate YAML file by running `./ee/bin/custom-ability` generator
- Add a new column to the `member_roles` table, either manually or by running the `custom_roles:code` generator, e.g. by running `rails generate gitlab:custom_roles:code --ability new_ability_name`. The ability parameter is case sensitive and has to exactly match the permission name from the YAML file.
- Add the ability to the respective Policy for example in [this change in merge request 114734](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/114734/diffs#diff-content-edcbe28bdecbd848d4d9efdc5b5e9bddd2a7299e).
- Update the specs. Don't forget to add a spec to `ee/spec/requests/custom_roles` - the spec template file was pre-generated if you used the code generator
- Compile the documentation by running `bundle exec rake gitlab:custom_roles:compile_docs`
- Update the GraphQL documentation by running `bundle exec rake gitlab:graphql:compile_docs`
- Run `./ee/bin/custom-ability <ABILITY_NAME>` to generate a configuration file for the new ability.
- This will generate a YAML file in `ee/config/custom_abilities` that follows this schema:
Examples of merge requests adding new abilities to custom roles:
| Field | Required | Description |
| ----- | -------- |--------------|
| `name` | yes | Unique, lowercase and underscored name describing the custom ability. Must match the filename. |
| `description` | yes | Human-readable description of the custom ability. |
| `feature_category` | yes | Name of the feature category. For example, `vulnerability_management`. |
| `introduced_by_issue` | yes | Issue URL that proposed the addition of this custom ability. |
| `introduced_by_mr` | yes | MR URL that added this custom ability. |
| `milestone` | yes | Milestone in which this custom ability was added. |
| `group_ability` | yes | Boolean value to indicate whether this ability is checked on group level. |
| `project_ability` | yes | Boolean value to whether this ability is checked on project level. |
| `requirements` | no | The list of custom permissions this ability is dependent on. For instance `admin_vulnerability` is dependent on `read_vulnerability`. If none, then enter `[]` |
- [Read code](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/106256)
- [Read vulnerability](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/114734)
- [Admin vulnerability](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/121534)
#### Step 2: Create a migration file
The above merge requests don't use YAML files and code generators. Some of the changes are not needed anymore. We will update the documentation once we have a permission implemented using the generators.
- Run `bundle exec rails generate gitlab:custom_roles:code --ability <ABILITY_NAME>` which will generate a migration file to add the ability as a boolean column to the `member_roles` table.
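A sketch of what the generated migration adds, assuming the `read_dependency` example used elsewhere on this page; the class name and milestone are illustrative:

```ruby
# frozen_string_literal: true

# Adds the new ability as a boolean column on member_roles.
class AddReadDependencyToMemberRoles < Gitlab::Database::Migration[2.2]
  milestone '16.9'

  def change
    add_column :member_roles, :read_dependency, :boolean, default: false, null: false
  end
end
```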
If you have any concerns, put the new ability behind a feature flag.
#### Step 3: Update policies
#### Documenting handling the feature flag
- When you introduce a new custom ability under a feature flag, add the `feature_flag` attribute to the appropriate ability YAML file.
- When you enable the ability by default, add the `feature_flag_enabled_milestone` and `feature_flag_enabled_mr` attributes to the appropriate ability YAML file and regenerate the documentation.
- You do not have to include these attributes in the YAML file if the feature flag is enabled by default in the same release as the ability is introduced.
#### Testing
Unit tests are preferred to test out changes to any policies affected by the
addition of new custom permissions. Custom Roles is an Ultimate tier feature so
these tests can be found in the `ee/spec/policies` directory. The [spec file](https://gitlab.com/gitlab-org/gitlab/-/blob/13baa4e8c92a56260591a5bf0a58d3339890ee10/ee/spec/policies/project_policy_spec.rb#L2726-2740) for
the `ProjectPolicy` contains shared examples that can be used to test out the
following conditions:
- when the `custom_roles` licensed feature is not enabled
- when the `custom_roles` licensed feature is enabled
- when a user is a member of a custom role via an inherited group member
- when a user is a member of a custom role via a direct group member
- when a user is a member of a custom role via a direct project membership
Below is an example for testing out `ProjectPolicy` related changes.
- If the ability is checked on a group level, add rule(s) to GroupPolicy to enable the ability.
- For example, if the ability we would like to add is `read_dependency`, then an update to `ee/app/policies/ee/group_policy.rb` would look as follows:
```ruby
context 'for a role with `custom_permission` enabled' do
  let(:member_role_abilities) { { custom_permission: true } }
  let(:allowed_abilities) { [:custom_permission] }

  it_behaves_like 'custom roles abilities'
end
```

```ruby
desc "Custom role on group that enables read dependency"
condition(:role_enables_read_dependency) do
  ::Auth::MemberRoleAbilityLoader.new(
    user: @user,
    resource: @subject,
    ability: :read_dependency
  ).has_ability?
end

rule { custom_roles_allowed & role_enables_read_dependency }.policy do
  enable :read_dependency
end
```
Request specs are preferred to test any endpoint that allows access via a custom role permission.
This includes controllers, REST API, and GraphQL. Examples of request specs can be found in `ee/spec/requests/custom_roles/`. In this directory you will find a sub-directory named after each permission that can be enabled via a custom role.
The `custom_roles` licensed feature must be enabled to test this functionality.
- Similarly, if the ability is checked on a project level, add rule(s) to ProjectPolicy to enable the ability.
- For example, if the ability we would like to add is `read_dependency`, then an update to `ee/app/policies/ee/project_policy.rb` would look as follows:
Below is an example of the typical setup that is required to test out a
Rails Controller endpoint.
```ruby
desc "Custom role on project that enables read dependency"
condition(:role_enables_read_dependency) do
::Auth::MemberRoleAbilityLoader.new(
user: @user,
resource: @subject,
ability: :read_dependency
).has_ability?
end
rule { custom_roles_allowed & role_enables_read_dependency }.policy do
enable :read_dependency
end
```
- Not all abilities need to be enabled on both levels. For instance, `admin_terraform_state` allows users to manage a project's Terraform state; it only needs to be enabled on the project level, not the group level, and thus only needs to be configured in `ee/app/policies/ee/project_policy.rb`.
#### Step 4: Verify
- Ensure SaaS mode is enabled with `GITLAB_SIMULATE_SAAS=1`.
- Navigate to any Group that you are an owner of, then go to `Settings -> Roles and Permissions`.
- Click on `Add new role` and create a custom role with the permission you have just created.
- Navigate to the Group's `Manage -> Members` page and assign a member to this newly created custom role.
- Next, log in as that member and ensure that you are able to access the page that the custom ability is intended for.
#### Step 5: Add specs
- Add the ability as a trait in the `MemberRoles` factory, `ee/spec/factories/member_roles.rb`.
- Add tests to `ee/spec/requests/custom_roles/<ABILITY_NAME>/request_spec.rb` to ensure that once the user has been assigned the custom ability, they can successfully access the controllers, REST API endpoints and GraphQL API endpoints.
- Below is an example of the typical setup that is required to test a Rails Controller endpoint.
```ruby
let_it_be(:user) { create(:user) }
@ -245,6 +258,7 @@ Rails Controller endpoint.
before do
stub_licensed_features(custom_roles: true)
sign_in(user)
end
@ -260,8 +274,7 @@ Rails Controller endpoint.
end
```
Below is an example of the typical setup that is required to test out a GraphQL
mutation.
- Below is an example of the typical setup that is required to test a GraphQL mutation.
```ruby
let_it_be(:user) { create(:user) }
@ -271,57 +284,36 @@ mutation.
before do
stub_licensed_features(custom_roles: true)
sign_in(user)
end
describe MyMutation do
  include GraphqlHelpers

  describe '#show' do
    it 'allows access' do
      post_graphql_mutation(graphql_mutation(:my_mutation, {
        example: "Example"
      }), current_user: user)

      expect(response).to have_gitlab_http_status(:success)
      mutation_response = graphql_mutation_response(:my_mutation)
      expect(mutation_response).to be_present
      expect(mutation_response["errors"]).to be_empty
    end
  end
end
```

```ruby
describe MyMutation do
  include GraphqlHelpers

  let(:mutation) { graphql_mutation(:my_mutation) }

  it_behaves_like 'a working graphql query'
end
```
[`GITLAB_DEBUG_POLICIES=true`](#finding-existing-abilities-checks) can be used
to troubleshoot runtime policy decisions.
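For a quick manual check, the ability can also be exercised from a Rails console; a sketch, with placeholder user, project, and ability names:

```ruby
# Check whether the custom role grants the ability at runtime.
user = User.find_by(username: 'custom-role-member')   # placeholder
project = Project.find_by_full_path('group/project')  # placeholder

Ability.allowed?(user, :read_dependency, project)
# => true when the member role enables read_dependency
```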
- Add tests to `ProjectPolicy` and/or `GroupPolicy`. Below is an example for testing `ProjectPolicy` related changes.
```ruby
context 'for a member role with read_dependency true' do
  let(:member_role_abilities) { { read_dependency: true } }
  let(:allowed_abilities) { [:read_dependency] }

  it_behaves_like 'custom roles abilities'
end
```

## Custom abilities definition

All new custom abilities must have a type definition stored in `ee/config/custom_abilities` that contains a single source of truth for every ability that is part of the custom roles feature.
### Add a new custom ability definition
#### Step 6: Update documentation
To add a new custom ability:
1. Create the YAML definition. You can either:
- Use the `ee/bin/custom-ability` CLI to create the YAML definition automatically.
- Perform manual steps to create a new file in `ee/config/custom_abilities/` with the filename matching the name of the ability name.
1. Add contents to the file that conform to the [schema](#schema) defined in `ee/config/custom_abilities/types/type_schema.json`.
1. Add [tests](#testing) for the new ability in `ee/spec/requests/custom_roles/` with a new directory named after the ability name.
### Schema
| Field | Required | Description |
| ----- | -------- |--------------|
| `name` | yes | Unique, lowercase and underscored name describing the custom ability. Must match the filename. |
| `description` | yes | Human-readable description of the custom ability. |
| `feature_category` | yes | Name of the feature category. For example, `vulnerability_management`. |
| `introduced_by_issue` | yes | Issue URL that proposed the addition of this custom ability. |
| `introduced_by_mr` | no | MR URL that added this custom ability. |
| `milestone` | yes | Milestone in which this custom ability was added. |
| `group_ability` | yes | Indicate whether this ability is checked on group level. |
| `project_ability` | yes | Indicate whether this ability is checked on project level. |
| `requirements` | no | The custom abilities that need to be enabled for this ability. |
| `skip_seat_consumption` | yes | Indicate whether this ability should be skipped when counting licensed users. |
- Update the list of custom abilities by running `bundle exec rake gitlab:custom_roles:compile_docs`
- Update the GraphQL documentation by running `bundle exec rake gitlab:graphql:compile_docs`
### Privilege escalation consideration

View File

@ -258,7 +258,7 @@ the mitigations for a new feature.
#### URL blocker & validation libraries
[`Gitlab::UrlBlocker`](https://gitlab.com/gitlab-org/gitlab/-/blob/master/lib/gitlab/url_blocker.rb) can be used to validate that a
[`Gitlab::HTTP_V2::UrlBlocker`](https://gitlab.com/gitlab-org/gitlab/-/blob/master/gems/gitlab-http/lib/gitlab/http_v2/url_blocker.rb) can be used to validate that a
provided URL meets a set of constraints. Importantly, when `dns_rebind_protection` is `true`, the method returns a known-safe URI where the hostname
has been replaced with an IP address. This prevents DNS rebinding attacks, because the DNS record has been resolved. However, if we ignore this returned
value, we **will not** be protected against DNS rebinding.
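A sketch of the safe pattern, assuming the gem keeps the `[uri, hostname]` return shape of the legacy `validate!` (shown in the file removed later in this commit); the arguments and the follow-up request are illustrative:

```ruby
# Use the *returned* URI, not the user-supplied one: its hostname has already
# been replaced with the resolved IP address, so it cannot be rebound.
validated_uri, original_hostname = Gitlab::HTTP_V2::UrlBlocker.validate!(
  params[:url],
  schemes: %w[http https],
  dns_rebind_protection: true
)

# The original hostname is still needed for SSL verification or the Host
# header, because validated_uri now carries the IP address.
Gitlab::HTTP.get(validated_uri.to_s, headers: { 'Host' => original_hostname })
```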
@ -1234,7 +1234,7 @@ These types of bugs are often seen in environments which allow multi-threading a
**Example 1:** you have a model which accepts a URL as input. When the model is created you verify that the URL host resolves to a public IP address, to prevent attackers making internal network calls. But DNS records can change ([DNS rebinding](#server-side-request-forgery-ssrf)). An attacker updates the DNS record to `127.0.0.1`, and when your code resolves the URL host, it results in a potentially malicious request being sent to a server on the internal network. The property was valid at the "time of check", but invalid and malicious at "time of use".
GitLab-specific example can be found in [this issue](https://gitlab.com/gitlab-org/gitlab/-/issues/214401) where, although `Gitlab::UrlBlocker.validate!` was called, the returned value was not used. This made it vulnerable to TOCTOU bug and SSRF protection bypass through [DNS rebinding](#server-side-request-forgery-ssrf). The fix was to [use the validated IP address](https://gitlab.com/gitlab-org/gitlab/-/commit/7af8abd4df9a98f7a1ae7c4ec9840d0a7a8c684d).
A GitLab-specific example can be found in [this issue](https://gitlab.com/gitlab-org/gitlab/-/issues/214401) where, although `Gitlab::HTTP_V2::UrlBlocker.validate!` was called, the returned value was not used. This made it vulnerable to a TOCTOU bug and an SSRF protection bypass through [DNS rebinding](#server-side-request-forgery-ssrf). The fix was to [use the validated IP address](https://gitlab.com/gitlab-org/gitlab/-/commit/85c6a73598e72ab104ab29b72bf83661cd961646).
**Example 2:** you have a feature which schedules jobs. When the user schedules the job, they have permission to do so. But imagine if, between the time they schedule the job and the time it is run, their permissions are restricted. Unless you re-check permissions at time of use, you could inadvertently allow unauthorized activity.
@ -1264,9 +1264,9 @@ end
- Use your framework's validations and database features to impose constraints and atomic reads and writes.
- Read about [Server Side Request Forgery (SSRF) and DNS rebinding](#server-side-request-forgery-ssrf)
An example of well implemented `Gitlab::UrlBlocker.validate!` call that prevents TOCTOU bug:
An example of a well-implemented `Gitlab::HTTP_V2::UrlBlocker.validate!` call that prevents the TOCTOU bug:
1. [Preventing DNS rebinding in Gitea importer](https://gitlab.com/gitlab-org/gitlab/-/commit/7af8abd4df9a98f7a1ae7c4ec9840d0a7a8c684d)
1. [Preventing DNS rebinding in Gitea importer](https://gitlab.com/gitlab-org/gitlab/-/commit/85c6a73598e72ab104ab29b72bf83661cd961646)
### Resources

View File

@ -23,7 +23,7 @@ module Gitlab
socket = Socket.sockaddr_in(port, ip_address)
addr = Addrinfo.new(socket)
# See Gitlab::UrlBlocker
# See Gitlab::HTTP_V2::UrlBlocker
allow(Addrinfo).to receive(:getaddrinfo)
.with(url.hostname, url.port, nil, :STREAM)
.and_return([addr])
@ -35,7 +35,7 @@ module Gitlab
socket = Socket.sockaddr_in(port, ip_address)
addr = Addrinfo.new(socket)
# See Gitlab::UrlBlocker
# See Gitlab::HTTP_V2::UrlBlocker
allow(Addrinfo).to receive(:getaddrinfo).and_call_original
allow(Addrinfo).to receive(:getaddrinfo)
.with(url.hostname, anything, nil, :STREAM)

View File

@ -11,6 +11,7 @@ module Gitlab
AmbigiousSpecificationError = Class.new(Error)
TooManyIncludesError = Class.new(Error)
TooMuchDataInPipelineTreeError = Class.new(Error)
InvalidTypeError = Class.new(Error)
def initialize(values, context)
@locations = Array.wrap(values.fetch(:include, [])).compact

View File

@ -25,8 +25,10 @@ module Gitlab
location = variables_expander.expand(location)
normalize_location_string(location)
else
elsif location.is_a?(Hash)
location.deep_symbolize_keys
else
raise Mapper::InvalidTypeError, 'Each include must be a hash or a string'
end
end
end

View File

@ -14,7 +14,18 @@ module Gitlab
end
def pages_url(with_unique_domain: false)
find_url(with_unique_domain).downcase
return namespace_in_path_url(with_unique_domain && unique_domain_enabled?) if config.namespace_in_path
return unique_url if with_unique_domain && unique_domain_enabled?
project_path_url = "#{config.protocol}://#{project_path}".downcase
# If the project path is the same as host, we serve it as group page
# On development we ignore the URL port to make it work on GDK
return namespace_url if Rails.env.development? && portless(namespace_url) == project_path_url
# If the project path is the same as host, we serve it as group page
return namespace_url if namespace_url == project_path_url
"#{namespace_url}/#{project_path}"
end
def unique_host
@ -52,21 +63,6 @@ module Gitlab
attr_reader :project, :project_path
def find_url(with_unique_domain)
return namespace_in_path_url(with_unique_domain && unique_domain_enabled?) if config.namespace_in_path
return unique_url if with_unique_domain && unique_domain_enabled?
project_path_url = "#{config.protocol}://#{project_path}"
# If the project path is the same as host, we serve it as group page
# On development we ignore the URL port to make it work on GDK
return namespace_url if Rails.env.development? && portless(namespace_url) == project_path_url
# If the project path is the same as host, we serve it as group page
return namespace_url if namespace_url == project_path_url
"#{namespace_url}/#{project_path}"
end
def namespace_url
@namespace_url ||= url_for(project_namespace)
end
@ -79,13 +75,14 @@ module Gitlab
@pages_url ||= URI(config.url)
.tap { |url| url.port = config.port }
.to_s
.downcase
end
def namespace_in_path_url(with_unique_domain)
if with_unique_domain
"#{pages_base_url}/#{project.project_setting.pages_unique_domain}"
"#{pages_base_url}/#{project.project_setting.pages_unique_domain}".downcase
else
"#{pages_base_url}/#{project_namespace}/#{project_path}"
"#{pages_base_url}/#{project_namespace}/#{project_path}".downcase
end
end
@ -94,6 +91,7 @@ module Gitlab
.tap { |url| url.port = config.port }
.tap { |url| url.host.prepend("#{subdomain}.") }
.to_s
.downcase
end
def portless(url)

View File

@ -1,429 +0,0 @@
# frozen_string_literal: true
#
# IMPORTANT: With the new development of the 'gitlab-http' gem (https://gitlab.com/gitlab-org/gitlab/-/issues/415686),
# no additional change should be implemented in this class. This class will be removed after migrating all
# the usages to the new gem.
#
require 'resolv'
require 'ipaddress'
module Gitlab
class UrlBlocker
GETADDRINFO_TIMEOUT_SECONDS = 15
DENY_ALL_REQUESTS_EXCEPT_ALLOWED_DEFAULT = proc { deny_all_requests_except_allowed_app_setting }.freeze
# Result stores the validation result:
# uri - The original URI requested
# hostname - The hostname that should be used to connect. For DNS
# rebinding protection, this will be the resolved IP address of
# the hostname.
# use_proxy -
# If true, this means that the proxy server specified in the
# http_proxy/https_proxy environment variables should be used.
#
# If false, this either means that no proxy server was specified
# or that the hostname in the URL is exempt via the no_proxy
# environment variable. This allows the caller to disable usage
# of a proxy since the IP address may be used to
# connect. Otherwise, Net::HTTP may erroneously compare the IP
# address against the no_proxy list.
Result = Struct.new(:uri, :hostname, :use_proxy)
class << self
# Validates the given url according to the constraints specified by arguments.
#
# ports - Raises error if the given URL port is not between given ports.
# allow_localhost - Raises error if URL resolves to a localhost IP address and argument is false.
# allow_local_network - Raises error if URL resolves to a link-local address and argument is false.
# allow_object_storage - Avoid raising an error if URL resolves to an object storage endpoint and argument is true.
# ascii_only - Raises error if URL has unicode characters and argument is true.
# enforce_user - Raises error if URL user doesn't start with alphanumeric characters and argument is true.
# enforce_sanitization - Raises error if URL includes any HTML/CSS/JS tags and argument is true.
# deny_all_requests_except_allowed - Raises error if URL is not in the allow list and argument is true. Can be Boolean or Proc. Defaults to instance app setting.
#
# Returns a Result object.
# rubocop:disable Metrics/ParameterLists
def validate_url_with_proxy!(
url,
schemes:,
ports: [],
allow_localhost: false,
allow_local_network: true,
allow_object_storage: false,
ascii_only: false,
enforce_user: false,
enforce_sanitization: false,
deny_all_requests_except_allowed: DENY_ALL_REQUESTS_EXCEPT_ALLOWED_DEFAULT,
dns_rebind_protection: true)
# rubocop:enable Metrics/ParameterLists
return Result.new(nil, nil, true) if url.nil?
raise ArgumentError, 'The schemes is a required argument' if schemes.blank?
# Param url can be a string, URI or Addressable::URI
uri = parse_url(url)
validate_uri(
uri: uri,
schemes: schemes,
ports: ports,
enforce_sanitization: enforce_sanitization,
enforce_user: enforce_user,
ascii_only: ascii_only
)
begin
address_info = get_address_info(uri)
rescue SocketError
proxy_in_use = uri_under_proxy_setting?(uri, nil)
return Result.new(uri, nil, proxy_in_use) unless enforce_address_info_retrievable?(uri, dns_rebind_protection, deny_all_requests_except_allowed)
raise Gitlab::HTTP_V2::UrlBlocker::BlockedUrlError, 'Host cannot be resolved or invalid'
end
ip_address = ip_address(address_info)
proxy_in_use = uri_under_proxy_setting?(uri, ip_address)
# Ignore DNS rebind protection when a proxy is being used, as DNS
# rebinding is expected behavior.
dns_rebind_protection &&= !proxy_in_use
return Result.new(uri, nil, proxy_in_use) if domain_in_allow_list?(uri)
protected_uri_with_hostname = enforce_uri_hostname(ip_address, uri, dns_rebind_protection, proxy_in_use)
return protected_uri_with_hostname if ip_in_allow_list?(ip_address, port: get_port(uri))
# Allow url from the GitLab instance itself but only for the configured hostname and ports
return protected_uri_with_hostname if internal?(uri)
return protected_uri_with_hostname if allow_object_storage && object_storage_endpoint?(uri)
validate_deny_all_requests_except_allowed!(deny_all_requests_except_allowed)
validate_local_request(
address_info: address_info,
allow_localhost: allow_localhost,
allow_local_network: allow_local_network
)
protected_uri_with_hostname
end
def blocked_url?(url, **kwargs)
validate!(url, **kwargs)
false
rescue Gitlab::HTTP_V2::UrlBlocker::BlockedUrlError
true
end
# For backwards compatibility, returns an array with [<uri>, <original-hostname>].
# Issue for refactoring: https://gitlab.com/gitlab-org/gitlab/-/issues/410890
def validate!(...)
result = validate_url_with_proxy!(...)
[result.uri, result.hostname]
end
private
# Returns the given URI with IP address as hostname and the original hostname respectively
# in an Array.
#
# It checks whether the resolved IP address matches with the hostname. If not, it changes
# the hostname to the resolved IP address.
#
# The original hostname is used to validate the SSL, given in that scenario
# we'll be making the request to the IP address, instead of using the hostname.
def enforce_uri_hostname(ip_address, uri, dns_rebind_protection, proxy_in_use)
return Result.new(uri, nil, proxy_in_use) unless dns_rebind_protection && ip_address && ip_address != uri.hostname
new_uri = uri.dup
new_uri.hostname = ip_address
Result.new(new_uri, uri.hostname, proxy_in_use)
end
def ip_address(address_info)
address_info.first&.ip_address
end
def validate_uri(uri:, schemes:, ports:, enforce_sanitization:, enforce_user:, ascii_only:)
validate_html_tags(uri) if enforce_sanitization
return if internal?(uri)
validate_scheme(uri.scheme, schemes)
validate_port(get_port(uri), ports) if ports.any?
validate_user(uri.user) if enforce_user
validate_hostname(uri.hostname)
validate_unicode_restriction(uri) if ascii_only
end
def uri_under_proxy_setting?(uri, ip_address)
return false unless Gitlab.http_proxy_env?
# `no_proxy|NO_PROXY` specifies addresses for which the proxy is not
# used. If it's empty, there are no exceptions and this URI
# will be under proxy settings.
return true if no_proxy_env.blank?
# `no_proxy|NO_PROXY` is being used. We must check whether it
# applies to this specific URI.
::URI::Generic.use_proxy?(uri.hostname, ip_address, get_port(uri), no_proxy_env)
end
# Returns addrinfo object for the URI.
#
# @param uri [Addressable::URI]
#
# @raise [Gitlab::HTTP_V2::UrlBlocker::BlockedUrlError, ArgumentError] - raised if host is too long.
#
# @return [Array<Addrinfo>]
def get_address_info(uri)
Timeout.timeout(GETADDRINFO_TIMEOUT_SECONDS) do
Addrinfo.getaddrinfo(uri.hostname, get_port(uri), nil, :STREAM).map do |addr|
addr.ipv6_v4mapped? ? addr.ipv6_to_ipv4 : addr
end
end
rescue Timeout::Error => e
raise Gitlab::HTTP_V2::UrlBlocker::BlockedUrlError, e.message
rescue ArgumentError => e
# Addrinfo.getaddrinfo errors if the domain exceeds 1024 characters.
raise unless e.message.include?('hostname too long')
raise Gitlab::HTTP_V2::UrlBlocker::BlockedUrlError, "Host is too long (maximum is 1024 characters)"
end
def enforce_address_info_retrievable?(uri, dns_rebind_protection, deny_all_requests_except_allowed)
# Do not enforce if URI is in the allow list
return false if domain_in_allow_list?(uri)
# Enforce if the instance should block requests
return true if deny_all_requests_except_allowed?(deny_all_requests_except_allowed)
# Do not enforce if DNS rebinding protection is disabled
return false unless dns_rebind_protection
# Do not enforce if proxy is used
return false if Gitlab.http_proxy_env?
# In the test suite we use a lot of mocked urls that are either invalid or
# don't exist. In order to avoid modifying a ton of tests and factories
# we allow invalid urls when the environment variable RSPEC_ALLOW_INVALID_URLS
# is set to 'true'
return false if Rails.env.test? && ENV['RSPEC_ALLOW_INVALID_URLS'] == 'true'
true
end
def validate_local_request(
address_info:,
allow_localhost:,
allow_local_network:)
return if allow_local_network && allow_localhost
unless allow_localhost
validate_localhost(address_info)
validate_loopback(address_info)
end
unless allow_local_network
validate_local_network(address_info)
validate_link_local(address_info)
validate_shared_address(address_info)
validate_limited_broadcast_address(address_info)
end
end
def validate_shared_address(addrs_info)
netmask = IPAddr.new('100.64.0.0/10')
return unless addrs_info.any? { |addr| netmask.include?(addr.ip_address) }
raise Gitlab::HTTP_V2::UrlBlocker::BlockedUrlError, "Requests to the shared address space are not allowed"
end
def get_port(uri)
uri.port || uri.default_port
end
def validate_html_tags(uri)
uri_str = uri.to_s
sanitized_uri = ActionController::Base.helpers.sanitize(uri_str, tags: [])
if sanitized_uri != uri_str
raise Gitlab::HTTP_V2::UrlBlocker::BlockedUrlError, 'HTML/CSS/JS tags are not allowed'
end
end
def parse_url(url)
Addressable::URI.parse(url).tap do |parsed_url|
raise Addressable::URI::InvalidURIError if multiline_blocked?(parsed_url)
end
rescue Addressable::URI::InvalidURIError, URI::InvalidURIError
raise Gitlab::HTTP_V2::UrlBlocker::BlockedUrlError, 'URI is invalid'
end
def multiline_blocked?(parsed_url)
url = parsed_url.to_s
return true if /\n|\r/.match?(url)
# Google Cloud Storage uses a multi-line, encoded Signature query string
return false if %w[http https].include?(parsed_url.scheme&.downcase)
CGI.unescape(url) =~ /\n|\r/
end
def validate_port(port, ports)
return if port.blank?
# Only ports under 1024 are restricted
return if port >= 1024
return if ports.include?(port)
raise Gitlab::HTTP_V2::UrlBlocker::BlockedUrlError,
"Only allowed ports are #{ports.join(', ')}, and any over 1024"
end
def validate_scheme(scheme, schemes)
if scheme.blank? || (schemes.any? && schemes.exclude?(scheme))
raise Gitlab::HTTP_V2::UrlBlocker::BlockedUrlError, "Only allowed schemes are #{schemes.join(', ')}"
end
end
def validate_user(value)
return if value.blank?
return if /\A\p{Alnum}/.match?(value)
raise Gitlab::HTTP_V2::UrlBlocker::BlockedUrlError, "Username needs to start with an alphanumeric character"
end
def validate_hostname(value)
return if value.blank?
return if IPAddress.valid?(value)
return if /\A\p{Alnum}/.match?(value)
raise Gitlab::HTTP_V2::UrlBlocker::BlockedUrlError, "Hostname or IP address invalid"
end
def validate_unicode_restriction(uri)
return if uri.to_s.ascii_only?
raise Gitlab::HTTP_V2::UrlBlocker::BlockedUrlError, "URI must be ascii only #{uri.to_s.dump}"
end
def validate_localhost(addrs_info)
local_ips = ["::", "0.0.0.0"]
local_ips.concat(Socket.ip_address_list.map(&:ip_address))
return if (local_ips & addrs_info.map(&:ip_address)).empty?
raise Gitlab::HTTP_V2::UrlBlocker::BlockedUrlError, "Requests to localhost are not allowed"
end
def validate_loopback(addrs_info)
return unless addrs_info.any? { |addr| addr.ipv4_loopback? || addr.ipv6_loopback? }
raise Gitlab::HTTP_V2::UrlBlocker::BlockedUrlError, "Requests to loopback addresses are not allowed"
end
def validate_local_network(addrs_info)
return unless addrs_info.any? { |addr| addr.ipv4_private? || addr.ipv6_sitelocal? || addr.ipv6_unique_local? }
raise Gitlab::HTTP_V2::UrlBlocker::BlockedUrlError, "Requests to the local network are not allowed"
end
def validate_link_local(addrs_info)
netmask = IPAddr.new('169.254.0.0/16')
return unless addrs_info.any? { |addr| addr.ipv6_linklocal? || netmask.include?(addr.ip_address) }
raise Gitlab::HTTP_V2::UrlBlocker::BlockedUrlError, "Requests to the link local network are not allowed"
end
# Raises a Gitlab::HTTP_V2::UrlBlocker::BlockedUrlError if the instance is configured to deny all requests.
#
# This should only be called after allow list checks have been made.
def validate_deny_all_requests_except_allowed!(should_deny)
return unless deny_all_requests_except_allowed?(should_deny)
raise Gitlab::HTTP_V2::UrlBlocker::BlockedUrlError,
"Requests to hosts and IP addresses not on the Allow List are denied"
end
# Raises a Gitlab::HTTP_V2::UrlBlocker::BlockedUrlError if any IP in `addrs_info` is the limited
# broadcast address.
# https://datatracker.ietf.org/doc/html/rfc919#section-7
def validate_limited_broadcast_address(addrs_info)
blocked_ips = ["255.255.255.255"]
return if (blocked_ips & addrs_info.map(&:ip_address)).empty?
raise Gitlab::HTTP_V2::UrlBlocker::BlockedUrlError, "Requests to the limited broadcast address are not allowed"
end
def internal?(uri)
internal_web?(uri) || internal_shell?(uri)
end
def internal_web?(uri)
uri.scheme == config.gitlab.protocol &&
uri.hostname == config.gitlab.host &&
get_port(uri) == config.gitlab.port
end
def internal_shell?(uri)
uri.scheme == 'ssh' &&
uri.hostname == config.gitlab_shell.ssh_host &&
get_port(uri) == config.gitlab_shell.ssh_port
end
def enabled_object_storage_endpoints
ObjectStoreSettings::SUPPORTED_TYPES.collect do |type|
section_setting = config.try(type)
next unless section_setting && section_setting['enabled']
object_store_setting = section_setting['object_store']
next unless object_store_setting && object_store_setting['enabled']
object_store_setting.dig('connection', 'endpoint')
end.compact.uniq
end
def deny_all_requests_except_allowed?(should_deny)
should_deny.is_a?(Proc) ? should_deny.call : should_deny
end
def deny_all_requests_except_allowed_app_setting
Gitlab::CurrentSettings.current_application_settings? &&
Gitlab::CurrentSettings.deny_all_requests_except_allowed?
end
def object_storage_endpoint?(uri)
enabled_object_storage_endpoints.any? do |endpoint|
endpoint_uri = URI(endpoint)
uri.scheme == endpoint_uri.scheme &&
uri.hostname == endpoint_uri.hostname &&
get_port(uri) == get_port(endpoint_uri)
end
end
def domain_in_allow_list?(uri)
Gitlab::UrlBlockers::UrlAllowlist.domain_allowed?(uri.normalized_host, port: get_port(uri))
end
def ip_in_allow_list?(ip_address, port: nil)
Gitlab::UrlBlockers::UrlAllowlist.ip_allowed?(ip_address, port: port)
end
def config
Gitlab.config
end
def no_proxy_env
ENV['no_proxy'] || ENV['NO_PROXY']
end
end
end
end

View File

@ -23595,7 +23595,7 @@ msgstr ""
msgid "GroupSAML|Some branches are inaccessible because your SAML session has expired. To access the branches, select the groups path to reauthenticate."
msgstr ""
msgid "GroupSAML|Some to-do items may be hidden because your SAML session has expired. Select the groups path to reauthenticate and view the hidden to-do items."
msgid "GroupSAML|Some items may be hidden because your SAML session has expired. Select the groups path to reauthenticate and view any hidden items."
msgstr ""
msgid "GroupSAML|The SCIM token is now hidden. To see the value of the token again, you need to %{linkStart}reset it%{linkEnd}."
@ -55631,10 +55631,10 @@ msgstr ""
msgid "WikiPage|Write your content or drag files here…"
msgstr ""
msgid "Wiki|Create New Page"
msgid "Wiki|Edit Page"
msgstr ""
msgid "Wiki|Edit Page"
msgid "Wiki|New Page"
msgstr ""
msgid "Wiki|New page"

View File

@ -0,0 +1,218 @@
# frozen_string_literal: true
require 'fast_spec_helper'
require 'rspec-parameterized'
require_relative '../../bin/saas-feature'
RSpec.describe 'bin/saas-feature', feature_category: :feature_flags do
using RSpec::Parameterized::TableSyntax
let(:groups) { { geo: { label: 'group::geo' } } }
before do
allow(HTTParty)
.to receive(:get)
.with(SaasFeatureOptionParser::WWW_GITLAB_COM_GROUPS_JSON, format: :plain)
.and_return(groups.to_json)
end
describe SaasFeatureCreator do
let(:argv) { %w[saas-feature-name -g group::geo -m http://url -M 16.6] }
let(:options) { SaasFeatureOptionParser.parse(argv) }
let(:creator) { described_class.new(options) }
let(:existing_saas_features) do
{ 'existing_saas_feature' => File.join('ee', 'config', 'saas_features', 'existing_saas_feature.yml') }
end
before do
allow(creator).to receive(:all_saas_feature_names) { existing_saas_features }
allow(creator).to receive(:branch_name).and_return('feature-branch')
allow(creator).to receive(:editor).and_return(nil)
# ignore writes
allow(File).to receive(:write).and_return(true)
# ignore stdin
allow(Readline).to receive(:readline).and_raise('EOF')
end
subject(:execute) { creator.execute }
it 'properly creates a SaaS feature' do
expect(File).to receive(:write).with(
File.join('ee', 'config', 'saas_features', 'saas_feature_name.yml'),
anything)
expect { execute }.to output(/name: saas_feature_name/).to_stdout
end
context 'when running on master' do
it 'requires feature branch' do
expect(creator).to receive(:branch_name).and_return('master')
expect { execute }.to raise_error(SaasFeatureHelpers::Abort, /Create a branch first/)
end
end
context 'with SaaS feature name validation' do
where(:argv, :ex) do
%w[.invalid.saas.feature] | /Provide a name for the SaaS feature that is/
%w[existing-saas-feature] | /already exists!/
end
with_them do
it do
expect { execute }.to raise_error(ex)
end
end
end
end
describe SaasFeatureOptionParser do
describe '.parse' do
where(:param, :argv, :result) do
:name | %w[foo] | 'foo'
:amend | %w[foo --amend] | true
:force | %w[foo -f] | true
:force | %w[foo --force] | true
:introduced_by_url | %w[foo -m https://url] | 'https://url'
:introduced_by_url | %w[foo --introduced-by-url https://url] | 'https://url'
:dry_run | %w[foo -n] | true
:dry_run | %w[foo --dry-run] | true
:group | %w[foo -g group::geo] | 'group::geo'
:group | %w[foo --group group::geo] | 'group::geo'
:group | %w[foo -g invalid] | nil
:group | %w[foo --group invalid] | nil
end
with_them do
it do
options = described_class.parse(Array(argv))
expect(options.public_send(param)).to eq(result)
end
end
it 'missing SaaS feature name' do
expect do
expect { described_class.parse(%w[--amend]) }.to output(/SaaS feature name is required/).to_stdout
end.to raise_error(SaasFeatureHelpers::Abort)
end
it 'parses -h' do
expect do
expect { described_class.parse(%w[foo -h]) }.to output(/Usage:/).to_stdout
end.to raise_error(SaasFeatureHelpers::Done)
end
end
describe '.read_group' do
before do
allow(described_class).to receive(:fzf_available?).and_return(false)
end
context 'when valid group is given' do
let(:group) { 'group::geo' }
it 'reads group from stdin' do
expect(Readline).to receive(:readline).and_return(group)
expect do
expect(described_class.read_group).to eq('group::geo')
end.to output(/Specify the group label to which the SaaS feature belongs, from the following list/).to_stdout
end
end
context 'when valid index is given' do
it 'picks the group successfully' do
expect(Readline).to receive(:readline).and_return('1')
expect do
expect(described_class.read_group).to eq('group::geo')
end.to output(/Specify the group label to which the SaaS feature belongs, from the following list/).to_stdout
end
end
context 'with invalid group given' do
let(:type) { 'invalid' }
it 'shows error message and retries' do
expect(Readline).to receive(:readline).and_return(type)
expect(Readline).to receive(:readline).and_raise('EOF')
expect do
expect { described_class.read_group }.to raise_error(/EOF/)
end.to output(/Specify the group label to which the SaaS feature belongs, from the following list/).to_stdout
.and output(/The group label isn't in the above labels list/).to_stderr
end
end
context 'when invalid index is given' do
it 'shows error message and retries' do
expect(Readline).to receive(:readline).and_return('12')
expect(Readline).to receive(:readline).and_raise('EOF')
expect do
expect { described_class.read_group }.to raise_error(/EOF/)
end.to output(/Specify the group label to which the SaaS feature belongs, from the following list/).to_stdout
.and output(/The group label isn't in the above labels list/).to_stderr
end
end
end
describe '.read_introduced_by_url' do
context 'with valid URL given' do
let(:url) { 'https://merge-request' }
it 'reads URL from stdin' do
expect(Readline).to receive(:readline).and_return(url)
expect(HTTParty).to receive(:head).with(url).and_return(instance_double(HTTParty::Response, success?: true))
expect do
expect(described_class.read_introduced_by_url).to eq('https://merge-request')
end.to output(/URL of the MR introducing the SaaS feature/).to_stdout
end
end
context 'with invalid URL given' do
let(:url) { 'https://invalid' }
it 'shows error message and retries' do
expect(Readline).to receive(:readline).and_return(url)
expect(HTTParty).to receive(:head).with(url).and_return(instance_double(HTTParty::Response, success?: false))
expect(Readline).to receive(:readline).and_raise('EOF')
expect do
expect { described_class.read_introduced_by_url }.to raise_error(/EOF/)
end.to output(/URL of the MR introducing the SaaS feature/).to_stdout
.and output(/URL '#{url}' isn't valid/).to_stderr
end
end
context 'with empty URL given' do
let(:url) { '' }
it 'skips entry' do
expect(Readline).to receive(:readline).and_return(url)
expect do
expect(described_class.read_introduced_by_url).to be_nil
end.to output(/URL of the MR introducing the SaaS feature/).to_stdout
end
end
context 'with a non-URL given' do
let(:url) { 'malformed' }
it 'shows error message and retries' do
expect(Readline).to receive(:readline).and_return(url)
expect(Readline).to receive(:readline).and_raise('EOF')
expect do
expect { described_class.read_introduced_by_url }.to raise_error(/EOF/)
end.to output(/URL of the MR introducing the SaaS feature/).to_stdout
.and output(/URL needs to start with/).to_stderr
end
end
end
end
end

View File

@ -31,6 +31,21 @@ RSpec.describe Settings, feature_category: :system_access do
end
end
describe 'cron_jobs cron syntax is correct' do
it 'all cron entries are correct' do
Settings.cron_jobs.each_value do |job_config|
next unless job_config
job_class = job_config['job_class']
cron = job_config['cron']
next unless cron
expect(Fugit.parse_cron(cron)).not_to eq(nil), "The defined cron schedule (within #{job_class}) is invalid: '#{cron}'."
end
end
end
describe '.build_ci_component_fqdn' do
subject(:fqdn) { described_class.build_ci_component_fqdn }

View File

@ -8,5 +8,11 @@ FactoryBot.define do
traversal_ids { namespace.traversal_ids }
outdated_at { nil }
calculated_at { Time.current }
trait :up_to_date do
after(:create) do |record|
record.reload.update!(outdated_at: nil)
end
end
end
end

View File

@ -16,7 +16,7 @@ import {
DRAFT_CHECK_READY,
DRAFT_CHECK_ERROR,
} from '~/vue_merge_request_widget/components/checks/i18n';
import { FAILURE_REASONS } from '~/vue_merge_request_widget/components/checks/message.vue';
import { FAILURE_REASONS } from '~/vue_merge_request_widget/components/checks/constants';
import draftQuery from '~/vue_merge_request_widget/queries/states/draft.query.graphql';
import getStateQuery from '~/vue_merge_request_widget/queries/get_state.query.graphql';

View File

@ -136,19 +136,19 @@ describe('Merge request merge checks component', () => {
});
it.each`
identifier
${'conflict'}
${'discussions_not_resolved'}
${'need_rebase'}
${'default'}
`('renders $identifier merge check', async ({ identifier }) => {
identifier | componentName
${'conflict'} | ${'conflict'}
${'discussions_not_resolved'} | ${'discussions_not_resolved'}
${'need_rebase'} | ${'need_rebase'}
${'policies_denied'} | ${'default'}
`('renders $identifier merge check', async ({ identifier, componentName }) => {
shallowMountComponent({ mergeabilityChecks: [{ status: 'failed', identifier }] });
wrapper.findComponent(StateContainer).vm.$emit('toggle');
await waitForPromises();
const { default: component } = await COMPONENTS[identifier]();
const { default: component } = await COMPONENTS[componentName]();
expect(wrapper.findComponent(component).exists()).toBe(true);
});
@ -166,9 +166,9 @@ describe('Merge request merge checks component', () => {
it('sorts merge checks', async () => {
mountComponent({
mergeabilityChecks: [
{ identifier: 'discussions', status: 'SUCCESS' },
{ identifier: 'discussions', status: 'INACTIVE' },
{ identifier: 'rebase', status: 'FAILED' },
{ identifier: 'discussions_not_resolved', status: 'SUCCESS' },
{ identifier: 'status_checks_must_pass', status: 'INACTIVE' },
{ identifier: 'need_rebase', status: 'FAILED' },
],
});
@ -184,4 +184,21 @@ describe('Merge request merge checks component', () => {
expect.objectContaining({ status: 'SUCCESS' }),
);
});
it('does not render check component if no message exists', async () => {
mountComponent({
mergeabilityChecks: [
{ identifier: 'discussions_not_resolved', status: 'SUCCESS' },
{ identifier: 'fakemessage', status: 'FAILED' },
],
});
await waitForPromises();
await wrapper.findByTestId('widget-toggle').trigger('click');
const mergeChecks = wrapper.findAllByTestId('merge-check');
expect(mergeChecks.length).toBe(1);
});
});

View File

@ -122,6 +122,7 @@ describe('BaseToken', () => {
const findGlFilteredSearchToken = () => wrapper.findComponent(GlFilteredSearchToken);
const findMockSuggestionList = () => wrapper.findByTestId(mockSuggestionListTestId);
const getMockSuggestionListSuggestions = () =>
JSON.parse(findMockSuggestionList().attributes('data-suggestions'));
@ -410,30 +411,6 @@ describe('BaseToken', () => {
expect(setTokenValueToRecentlyUsed).not.toHaveBeenCalled();
});
it('emits token-selected event when groupMultiSelectTokens: true', () => {
const config = { ...mockConfig, multiSelect: true };
wrapper = createComponent({
props: { suggestions: mockLabels, config, value: { operator: '||' } },
groupMultiSelectTokens: true,
});
findGlFilteredSearchToken().vm.$emit('select', mockTokenValue.title);
expect(wrapper.emitted('token-selected')).toEqual([[mockTokenValue.title]]);
});
it('does not emit token-selected event when groupMultiSelectTokens: false', () => {
const config = { ...mockConfig, multiSelect: true };
wrapper = createComponent({
props: { suggestions: mockLabels, config, value: { operator: '||' } },
groupMultiSelectTokens: false,
});
findGlFilteredSearchToken().vm.$emit('select', mockTokenValue.title);
expect(wrapper.emitted('token-selected')).toBeUndefined();
});
});
});

View File

@ -1,4 +1,5 @@
import {
GlFilteredSearchToken,
GlFilteredSearchTokenSegment,
GlFilteredSearchSuggestion,
GlDropdownDivider,
@ -310,109 +311,6 @@ describe('UserToken', () => {
expect(mockInput).toHaveBeenLastCalledWith([{ data: 'mockData', operator: '=' }]);
});
describe('multiSelect', () => {
it('renders check icons in suggestions when multiSelect is true', async () => {
wrapper = createComponent({
value: { data: [mockUsers[0].username, mockUsers[1].username], operator: '||' },
data: {
users: mockUsers,
},
config: { ...mockAuthorToken, multiSelect: true },
active: true,
stubs: { Portal: true },
groupMultiSelectTokens: true,
});
await activateSuggestionsList();
const suggestions = wrapper.findAllComponents(GlFilteredSearchSuggestion);
expect(findIconAtSuggestion(0).props('name')).toBe('check');
expect(findIconAtSuggestion(1).props('name')).toBe('check');
expect(findIconAtSuggestion(2).exists()).toBe(false);
// test for left padding on unchecked items (so alignment is correct)
expect(suggestions.at(2).find('.gl-pl-6').exists()).toBe(true);
});
it('renders multiple users when multiSelect is true', async () => {
wrapper = createComponent({
value: { data: [mockUsers[0].username, mockUsers[1].username], operator: '||' },
data: {
users: mockUsers,
},
config: { ...mockAuthorToken, multiSelect: true, initialUsers: mockUsers },
groupMultiSelectTokens: true,
});
await nextTick();
const tokenSegments = wrapper.findAllComponents(GlFilteredSearchTokenSegment);
expect(tokenSegments).toHaveLength(3); // Author, =, "Administrator"
const tokenValue = tokenSegments.at(2);
const [user1, user2] = mockUsers;
expect(tokenValue.findAllComponents(GlAvatar).at(1).props('src')).toBe(
mockUsers[1].avatar_url,
);
expect(tokenValue.text()).toBe(`${user1.name},${user2.name}`);
});
it('adds new user to multi-select-values', () => {
wrapper = createComponent({
value: { data: [mockUsers[0].username], operator: '||' },
data: {
users: mockUsers,
},
config: { ...mockAuthorToken, multiSelect: true, initialUsers: mockUsers },
active: true,
groupMultiSelectTokens: true,
});
findBaseToken().vm.$emit('token-selected', mockUsers[1].username);
expect(findBaseToken().props().multiSelectValues).toEqual([
mockUsers[0].username,
mockUsers[1].username,
]);
});
it('removes existing user from array', () => {
const initialUsers = [mockUsers[0].username, mockUsers[1].username];
wrapper = createComponent({
value: { data: initialUsers, operator: '||' },
data: {
users: mockUsers,
},
config: { ...mockAuthorToken, multiSelect: true, initialUsers: mockUsers },
active: true,
groupMultiSelectTokens: true,
});
findBaseToken().vm.$emit('token-selected', mockUsers[0].username);
expect(findBaseToken().props().multiSelectValues).toEqual([mockUsers[1].username]);
});
it('clears input field after token selected', () => {
wrapper = createComponent({
value: { data: [mockUsers[0].username, mockUsers[1].username], operator: '||' },
data: {
users: mockUsers,
},
config: { ...mockAuthorToken, multiSelect: true, initialUsers: mockUsers },
active: true,
groupMultiSelectTokens: true,
});
findBaseToken().vm.$emit('token-selected', 'test');
expect(wrapper.emitted('input')).toEqual([[{ operator: '||', data: '' }]]);
});
});
describe('when loading', () => {
beforeEach(() => {
wrapper = createComponent({
@ -432,13 +330,55 @@ describe('UserToken', () => {
expect(firstSuggestion).toContain('@root');
});
it('does not show current user while searching', async () => {
wrapper.findComponent(BaseToken).vm.handleInput({ data: 'foo' });
it('does not show non-matching user while searching', async () => {
wrapper.findComponent(GlFilteredSearchToken).vm.$emit('input', { data: 'foo' });
await nextTick();
expect(wrapper.findComponent(GlFilteredSearchSuggestion).exists()).toBe(false);
});
});
describe('multiSelect', () => {
it('renders check icons in suggestions when multiSelect is true', async () => {
wrapper = createComponent({
value: { data: [mockUsers[0].username, mockUsers[1].username], operator: '||' },
data: {
users: mockUsers,
},
config: { ...mockAuthorToken, multiSelect: true },
active: true,
stubs: { Portal: true },
groupMultiSelectTokens: true,
});
await activateSuggestionsList();
const suggestions = wrapper.findAllComponents(GlFilteredSearchSuggestion);
expect(findIconAtSuggestion(0).props('name')).toBe('check');
expect(findIconAtSuggestion(1).props('name')).toBe('check');
expect(findIconAtSuggestion(2).exists()).toBe(false);
// test for left padding on unchecked items (so alignment is correct)
expect(suggestions.at(2).find('.gl-pl-6').exists()).toBe(true);
});
it('renders multiple users when multiSelect is true', async () => {
wrapper = createComponent({
value: { data: [mockUsers[0].username, mockUsers[1].username], operator: '||' },
data: {
users: mockUsers,
},
config: { ...mockAuthorToken, multiSelect: true, initialUsers: mockUsers },
groupMultiSelectTokens: true,
});
await nextTick();
const tokenSegments = wrapper.findAllComponents(GlFilteredSearchTokenSegment);
expect(tokenSegments).toHaveLength(3); // Author, =, "Administrator"
const tokenValue = tokenSegments.at(2);
const [user1, user2] = mockUsers;
expect(tokenValue.findAllComponents(GlAvatar).at(1).props('src')).toBe(
mockUsers[1].avatar_url,
);
expect(tokenValue.text()).toBe(`${user1.name},${user2.name}`);
});
});
});
});

View File

@ -25,7 +25,7 @@ RSpec.describe Resolvers::NamespaceProjectsResolver, feature_category: :groups_a
let(:group) { create(:group) }
let(:namespace) { group }
let(:project1) { create(:project, namespace: namespace) }
let(:project2) { create(:project, namespace: namespace) }
let(:project2) { create(:project, :archived, namespace: namespace) }
let(:project3) { create(:project, namespace: namespace, marked_for_deletion_at: 1.day.ago, pending_delete: true) }
let(:nested_group) { create(:group, parent: group) }
let(:nested_project) { create(:project, group: nested_group) }
@ -54,6 +54,12 @@ RSpec.describe Resolvers::NamespaceProjectsResolver, feature_category: :groups_a
expect(resolve_projects(arg)).to contain_exactly(project1, project2)
end
it 'can filter out archived projects' do
arg = { include_archived: false }
expect(resolve_projects(arg)).to contain_exactly(project1, project3)
end
it 'finds all projects not aimed for deletion including the subgroups' do
args[:not_aimed_for_deletion] = true

View File

@ -75,4 +75,10 @@ RSpec.describe DashboardHelper do
it { is_expected.to eq(false) }
end
describe '.user_groups_requiring_reauth', feature_category: :system_access do
it 'returns an empty array' do
expect(helper.user_groups_requiring_reauth).to match_array([])
end
end
end

View File

@ -443,4 +443,10 @@ RSpec.describe TodosHelper do
end
end
end
describe '.todo_groups_requiring_saml_reauth', feature_category: :system_access do
it 'returns an empty array' do
expect(helper.todo_groups_requiring_saml_reauth([])).to match_array([])
end
end
end

View File

@ -3,7 +3,7 @@
require 'spec_helper'
RSpec.describe 'rest-client dns rebinding protection' do
it_behaves_like 'a request using Gitlab::UrlBlocker' do
it_behaves_like 'a request using Gitlab::HTTP_V2::UrlBlocker' do
let(:http_method) { :get }
let(:url_blocked_error_class) { ArgumentError }

View File

@ -40,5 +40,14 @@ RSpec.describe Gitlab::Ci::Config::External::Mapper::Normalizer, feature_categor
{ remote: 'https://example.com/.gitlab-ci.yml' }]
)
end
context 'when the location value is an invalid type' do
let(:locations) { [123] }
it 'raises an error' do
expect { process }.to raise_error(
Gitlab::Ci::Config::External::Mapper::InvalidTypeError, /Each include must be a hash or a string/)
end
end
end
end
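This spec and the mapper specs that follow pin the same contract: each entry under include:, once normalized to an array, must be a Hash or a String, and anything else (an integer, a Boolean, a nested Array) raises InvalidTypeError. The guard they imply looks roughly like the following sketch; where the real validation lives in the mapper is not shown here.

locations.each do |location|
  next if location.is_a?(Hash) || location.is_a?(String)

  raise Gitlab::Ci::Config::External::Mapper::InvalidTypeError,
    'Each include must be a hash or a string'
end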

View File

@ -174,6 +174,15 @@ RSpec.describe Gitlab::Ci::Config::External::Mapper, feature_category: :pipeline
it_behaves_like 'logging config file fetch', 'config_file_fetch_project_content_duration_s', 1
end
context 'when the include value is a Boolean' do
let(:values) { { include: true } }
it 'raises an error' do
expect { process }.to raise_error(
Gitlab::Ci::Config::External::Mapper::InvalidTypeError, /Each include must be a hash or a string/)
end
end
end
context "when 'include' is defined as an array" do
@ -186,6 +195,15 @@ RSpec.describe Gitlab::Ci::Config::External::Mapper, feature_category: :pipeline
expect(subject).to all(respond_to(:valid?))
expect(subject).to all(respond_to(:content))
end
context 'when an include value is an Array' do
let(:values) { { include: [remote_url, [local_file]] } }
it 'raises an error' do
expect { process }.to raise_error(
Gitlab::Ci::Config::External::Mapper::InvalidTypeError, /Each include must be a hash or a string/)
end
end
end
context "when 'include' is defined as an array of hashes" do

View File

@ -143,6 +143,10 @@ RSpec.describe Gitlab::Database::LooseForeignKeys do
described_class.instance_variable_set(:@loose_foreign_keys_yaml, loose_foreign_keys_yaml)
end
after do
described_class.instance_variable_set(:@loose_foreign_keys_yaml, nil)
end
it 'raises Gitlab::Database::GitlabSchema::UnknownSchemaError error' do
expect { subject }.to raise_error(Gitlab::Database::GitlabSchema::UnknownSchemaError)
end
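The added after hook matters because the class memoizes the parsed YAML in a class-level instance variable; without resetting it to nil, the stubbed definitions from this example would leak into every later example in the suite. The memoization being guarded against looks roughly like this (illustrative — the path helper name is a guess, not the exact source):

def self.loose_foreign_keys_yaml
  # cached until someone resets @loose_foreign_keys_yaml to nil
  @loose_foreign_keys_yaml ||= YAML.safe_load(File.read(loose_foreign_keys_yaml_path))
end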

View File

@ -63,12 +63,6 @@ RSpec.describe Gitlab::Pages::UrlBuilder, feature_category: :pages do
it { is_expected.to eq('http://group.example.com/project') }
end
context 'when project is upper cased' do
let(:full_path) { 'group/Project' }
it { is_expected.to eq('http://group.example.com/project') }
end
context 'when project is in a nested group page' do
let(:full_path) { 'group/subgroup/project' }
@ -133,12 +127,6 @@ RSpec.describe Gitlab::Pages::UrlBuilder, feature_category: :pages do
it { is_expected.to eq('http://example.com/group/project') }
end
context 'when project is upper cased' do
let(:full_path) { 'group/Project' }
it { is_expected.to eq('http://example.com/group/project') }
end
context 'when project is in a nested group page' do
let(:full_path) { 'group/subgroup/project' }

File diff suppressed because it is too large

View File

@ -3,90 +3,72 @@
require 'spec_helper'
RSpec.describe Namespaces::Traversal::Cached, feature_category: :database do
let_it_be_with_refind(:old_parent) { create(:group) }
let_it_be_with_refind(:new_parent) { create(:group) }
let_it_be_with_refind(:group) { create(:group, parent: old_parent) }
let_it_be_with_refind(:subgroup) { create(:group, parent: group) }
describe 'callbacks' do
let_it_be_with_refind(:old_parent) { create(:group) }
let_it_be_with_refind(:new_parent) { create(:group) }
let_it_be_with_refind(:group) { create(:group, parent: old_parent) }
let_it_be_with_refind(:subgroup) { create(:group, parent: group) }
context 'when the namespace_descendants_cache_expiration feature flag is off' do
let!(:cache) { create(:namespace_descendants, namespace: group) }
context 'when the namespace_descendants_cache_expiration feature flag is off' do
let!(:cache) { create(:namespace_descendants, namespace: group) }
before do
stub_feature_flags(namespace_descendants_cache_expiration: false)
end
it 'does not invalidate the cache' do
expect { group.update!(parent: new_parent) }.not_to change { cache.reload.outdated_at }
end
context 'when the group is deleted' do
it 'invalidates the cache' do
expect { group.destroy! }.not_to change { cache.reload.outdated_at }
before do
stub_feature_flags(namespace_descendants_cache_expiration: false)
end
end
end
context 'when no cached records are present' do
it 'does nothing' do
group.parent = new_parent
it 'does not invalidate the cache' do
expect { group.update!(parent: new_parent) }.not_to change { cache.reload.outdated_at }
end
expect { group.save! }.not_to change { Namespaces::Descendants.all.to_a }
end
end
context 'when the namespace record is UserNamespace' do
it 'does nothing' do
# we won't use the optimization for UserNamespace
namespace = create(:user_namespace)
cache = create(:namespace_descendants, namespace: namespace)
expect { namespace.destroy! }.not_to change { cache.reload.outdated_at }
end
end
context 'when cached record is present' do
let!(:cache) { create(:namespace_descendants, namespace: group) }
it 'invalidates the cache' do
expect { group.update!(parent: new_parent) }.to change { cache.reload.outdated_at }.from(nil)
end
it 'does not invalidate the cache of subgroups' do
subgroup_cache = create(:namespace_descendants, namespace: subgroup)
expect { group.update!(parent: new_parent) }.not_to change { subgroup_cache.reload.outdated_at }
end
context 'when a new subgroup is added' do
it 'invalidates the cache' do
expect { create(:group, parent: group) }.to change { cache.reload.outdated_at }
context 'when the group is deleted' do
it 'invalidates the cache' do
expect { group.destroy! }.not_to change { cache.reload.outdated_at }
end
end
end
context 'when a new project is added' do
it 'invalidates the cache' do
expect { create(:project, group: group) }.to change { cache.reload.outdated_at }
context 'when no cached records are present' do
it 'does nothing' do
group.parent = new_parent
expect { group.save! }.not_to change { Namespaces::Descendants.all.to_a }
end
end
end
context 'when parent group has cached record' do
it 'invalidates the parent cache' do
old_parent_cache = create(:namespace_descendants, namespace: old_parent)
new_parent_cache = create(:namespace_descendants, namespace: new_parent)
context 'when the namespace record is UserNamespace' do
it 'does nothing' do
# we won't use the optimization for UserNamespace
namespace = create(:user_namespace)
cache = create(:namespace_descendants, namespace: namespace)
group.update!(parent: new_parent)
expect(old_parent_cache.reload.outdated_at).not_to be_nil
expect(new_parent_cache.reload.outdated_at).not_to be_nil
expect { namespace.destroy! }.not_to change { cache.reload.outdated_at }
end
end
end
context 'when group is destroyed' do
it 'invalidates the cache' do
cache = create(:namespace_descendants, namespace: group)
context 'when cached record is present' do
let!(:cache) { create(:namespace_descendants, namespace: group) }
expect { group.destroy! }.to change { cache.reload.outdated_at }.from(nil)
it 'invalidates the cache' do
expect { group.update!(parent: new_parent) }.to change { cache.reload.outdated_at }.from(nil)
end
it 'does not invalidate the cache of subgroups' do
subgroup_cache = create(:namespace_descendants, namespace: subgroup)
expect { group.update!(parent: new_parent) }.not_to change { subgroup_cache.reload.outdated_at }
end
context 'when a new subgroup is added' do
it 'invalidates the cache' do
expect { create(:group, parent: group) }.to change { cache.reload.outdated_at }
end
end
context 'when a new project is added' do
it 'invalidates the cache' do
expect { create(:project, group: group) }.to change { cache.reload.outdated_at }
end
end
end
context 'when parent group has cached record' do
@ -94,10 +76,102 @@ RSpec.describe Namespaces::Traversal::Cached, feature_category: :database do
old_parent_cache = create(:namespace_descendants, namespace: old_parent)
new_parent_cache = create(:namespace_descendants, namespace: new_parent)
group.destroy!
group.update!(parent: new_parent)
expect(old_parent_cache.reload.outdated_at).not_to be_nil
expect(new_parent_cache.reload.outdated_at).to be_nil # no change
expect(new_parent_cache.reload.outdated_at).not_to be_nil
end
end
context 'when group is destroyed' do
it 'invalidates the cache' do
cache = create(:namespace_descendants, namespace: group)
expect { group.destroy! }.to change { cache.reload.outdated_at }.from(nil)
end
context 'when parent group has cached record' do
it 'invalidates the parent cache' do
old_parent_cache = create(:namespace_descendants, namespace: old_parent)
new_parent_cache = create(:namespace_descendants, namespace: new_parent)
group.destroy!
expect(old_parent_cache.reload.outdated_at).not_to be_nil
expect(new_parent_cache.reload.outdated_at).to be_nil # no change
end
end
end
end
describe 'query methods' do
let_it_be(:group) { create(:group) }
let_it_be(:subgroup) { create(:group, parent: group) }
let_it_be(:subsubgroup) { create(:group, parent: subgroup) }
let_it_be(:project1) { create(:project, group: group) }
let_it_be(:project2) { create(:project, group: subsubgroup) }
# deliberately making self_and_descendant_group_ids different from the actual
# self_and_descendant_ids so we can verify that the cached query is running.
let_it_be_with_refind(:namespace_descendants) do
create(:namespace_descendants,
:up_to_date,
namespace: group,
self_and_descendant_group_ids: [group.id, subgroup.id],
all_project_ids: [project1.id]
)
end
describe '#self_and_descendant_ids' do
subject(:ids) { group.self_and_descendant_ids.pluck(:id) }
it 'returns the cached values' do
expect(ids).to eq(namespace_descendants.self_and_descendant_group_ids)
end
context 'when the cache is outdated' do
it 'returns the values from the uncached self_and_descendant_ids query' do
namespace_descendants.update!(outdated_at: Time.current)
expect(ids.sort).to eq([group.id, subgroup.id, subsubgroup.id])
end
end
context 'when the group_hierarchy_optimization feature flag is disabled' do
before do
stub_feature_flags(group_hierarchy_optimization: false)
end
it 'returns the values from the uncached self_and_descendant_ids query' do
expect(ids.sort).to eq([group.id, subgroup.id, subsubgroup.id])
end
end
end
describe '#all_project_ids' do
subject(:ids) { group.all_project_ids.pluck(:id) }
it 'returns the cached values' do
expect(ids).to eq(namespace_descendants.all_project_ids)
end
context 'when the cache is outdated' do
it 'returns the values from the uncached all_project_ids query' do
namespace_descendants.update!(outdated_at: Time.current)
expect(ids.sort).to eq([project1.id, project2.id])
end
end
context 'when the group_hierarchy_optimization feature flag is disabled' do
before do
stub_feature_flags(group_hierarchy_optimization: false)
end
it 'returns the values from the uncached all_project_ids query' do
expect(ids.sort).to eq([project1.id, project2.id])
end
end
end
end
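Read together, these examples encode a read-through cache: serve the ID arrays from the namespace_descendants row when it exists, is not outdated, and the group_hierarchy_optimization flag is enabled; otherwise fall back to the recursive traversal query. One way such a fallback could be wired — a speculative sketch, not the actual implementation:

def self_and_descendant_ids
  cache = namespace_descendants
  if cache && cache.outdated_at.nil? && Feature.enabled?(:group_hierarchy_optimization)
    Namespace.id_in(cache.self_and_descendant_group_ids) # cached fast path
  else
    super # uncached recursive traversal
  end
end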

View File

@ -2,11 +2,76 @@
require 'spec_helper'
RSpec.describe UserDetail do
RSpec.describe UserDetail, feature_category: :system_access do
it { is_expected.to belong_to(:user) }
it { is_expected.to define_enum_for(:registration_objective).with_values([:basics, :move_repository, :code_storage, :exploring, :ci, :other, :joining_team]).with_suffix }
specify do
values = [:basics, :move_repository, :code_storage, :exploring, :ci, :other, :joining_team]
is_expected.to define_enum_for(:registration_objective).with_values(values).with_suffix
end
describe 'validations' do
context 'for onboarding_status json schema' do
let(:step_url) { '_some_string_' }
let(:email_opt_in) { true }
let(:onboarding_status) do
{
step_url: step_url,
email_opt_in: email_opt_in
}
end
it { is_expected.to allow_value(onboarding_status).for(:onboarding_status) }
context 'for step_url' do
let(:onboarding_status) do
{
step_url: step_url
}
end
it { is_expected.to allow_value(onboarding_status).for(:onboarding_status) }
context "when 'step_url' is invalid" do
let(:step_url) { [] }
it { is_expected.not_to allow_value(onboarding_status).for(:onboarding_status) }
end
end
context 'for email_opt_in' do
let(:onboarding_status) do
{
email_opt_in: email_opt_in
}
end
it { is_expected.to allow_value(onboarding_status).for(:onboarding_status) }
context "when 'email_opt_in' is invalid" do
let(:email_opt_in) { 'true' }
it { is_expected.not_to allow_value(onboarding_status).for(:onboarding_status) }
end
end
context 'when there is no data' do
let(:onboarding_status) { {} }
it { is_expected.to allow_value(onboarding_status).for(:onboarding_status) }
end
context 'when trying to store an unsupported key' do
let(:onboarding_status) do
{
unsupported_key: '_some_value_'
}
end
it { is_expected.not_to allow_value(onboarding_status).for(:onboarding_status) }
end
end
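Read as a whole, the allow/not-allow pairs reconstruct the JSON schema for onboarding_status: two optional keys, step_url as a string and email_opt_in as a boolean, with no additional properties permitted. Expressed as a schema document (reconstructed from the examples above, not copied from the source):

{
  "type" => "object",
  "additionalProperties" => false,
  "properties" => {
    "step_url" => { "type" => "string" },
    "email_opt_in" => { "type" => "boolean" }
  }
}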
describe '#job_title' do
it { is_expected.not_to validate_presence_of(:job_title) }
it { is_expected.to validate_length_of(:job_title).is_at_most(200) }
@ -75,7 +140,8 @@ RSpec.describe UserDetail do
user_detail.mastodon = '@robin'
expect(user_detail).not_to be_valid
expect(user_detail.errors.full_messages).to match_array([_('Mastodon must contain only a mastodon username.')])
expect(user_detail.errors.full_messages)
.to match_array([_('Mastodon must contain only a mastodon username.')])
end
end
end

View File

@ -1,137 +0,0 @@
# frozen_string_literal: true
require 'spec_helper'
RSpec.describe Projects::Gcp::ArtifactRegistry::DockerImagesController, feature_category: :container_registry do
let_it_be(:project) { create(:project, :private) }
let(:user) { project.owner }
let(:gcp_project_id) { 'gcp_project_id' }
let(:gcp_location) { 'gcp_location' }
let(:gcp_ar_repository) { 'gcp_ar_repository' }
let(:gcp_wlif_url) { 'gcp_wlif_url' }
describe '#index' do
let(:service_response) { ServiceResponse.success(payload: dummy_client_payload) }
let(:service_double) do
instance_double('GoogleCloudPlatform::ArtifactRegistry::ListDockerImagesService')
end
subject(:get_index_page) do
get(
project_gcp_artifact_registry_docker_images_path(
project,
gcp_project_id: gcp_project_id,
gcp_location: gcp_location,
gcp_ar_repository: gcp_ar_repository,
gcp_wlif_url: gcp_wlif_url
)
)
end
before do
allow_next_instance_of(GoogleCloudPlatform::ArtifactRegistry::ListDockerImagesService) do |service|
allow(service).to receive(:execute).and_return(service_response)
end
end
shared_examples 'returning the error message' do |message|
it 'displays an error message' do
sign_in(user)
get_index_page
expect(response).to have_gitlab_http_status(:success)
expect(response.body).to include(message)
end
end
context 'when on saas', :saas do
it 'returns the images' do
sign_in(user)
get_index_page
expect(response).to have_gitlab_http_status(:success)
expect(response.body).to include('image@sha256:6a')
expect(response.body).to include('tag1')
expect(response.body).to include('tag2')
expect(response.body).to include('Prev')
expect(response.body).to include('Next')
expect(response.body).to include('https://location.pkg.dev/project/repo/image@sha256:6a')
end
context 'when the service returns an error response' do
let(:service_response) { ServiceResponse.error(message: 'boom') }
it_behaves_like 'returning the error message', 'boom'
end
%i[gcp_project_id gcp_location gcp_ar_repository gcp_wlif_url].each do |field|
context "when a gcp parameter #{field} is missing" do
let(field) { nil }
it 'redirects to setup page' do
sign_in(user)
get_index_page
expect(response).to redirect_to new_project_gcp_artifact_registry_setup_path(project)
end
end
end
context 'with the feature flag disabled' do
before do
stub_feature_flags(gcp_technical_demo: false)
end
it_behaves_like 'returning the error message', 'Feature flag disabled'
end
context 'with non private project' do
before do
allow_next_found_instance_of(Project) do |project|
allow(project).to receive(:private?).and_return(false)
end
end
it_behaves_like 'returning the error message', 'Can only run on private projects'
end
context 'with unauthorized user' do
let_it_be(:user) { create(:user) }
it 'returns success' do
sign_in(user)
get_index_page
expect(response).to have_gitlab_http_status(:not_found)
end
end
end
context 'when not on saas' do
it_behaves_like 'returning the error message', "Can&#39;t run here"
end
def dummy_client_payload
{
images: [
{
built_at: '2023-11-30T23:23:11.980068941Z',
media_type: 'application/vnd.docker.distribution.manifest.v2+json',
name: 'projects/project/locations/location/repositories/repo/dockerImages/image@sha256:6a',
size_bytes: 2827903,
tags: %w[tag1 tag2],
updated_at: '2023-12-07T11:48:50.840751Z',
uploaded_at: '2023-12-07T11:48:47.598511Z',
uri: 'location.pkg.dev/project/repo/image@sha256:6a'
}
],
next_page_token: 'next_page_token'
}
end
end
end

View File

@ -1,73 +0,0 @@
# frozen_string_literal: true
require 'spec_helper'
RSpec.describe Projects::Gcp::ArtifactRegistry::SetupController, feature_category: :container_registry do
let_it_be(:project) { create(:project, :private) }
let(:user) { project.owner }
describe '#new' do
subject(:get_setup_page) { get(new_project_gcp_artifact_registry_setup_path(project)) }
shared_examples 'returning the error message' do |message|
it 'displays an error message' do
sign_in(user)
get_setup_page
expect(response).to have_gitlab_http_status(:success)
expect(response.body).to include(message)
end
end
context 'when on saas', :saas do
it 'returns the setup page' do
sign_in(user)
get_setup_page
expect(response).to have_gitlab_http_status(:success)
expect(response.body).to include('Google Project ID')
expect(response.body).to include('Google Project Location')
expect(response.body).to include('Artifact Registry Repository Name')
expect(response.body).to include('Worflow Identity Federation url')
expect(response.body).to include('Setup')
end
context 'with the feature flag disabled' do
before do
stub_feature_flags(gcp_technical_demo: false)
end
it_behaves_like 'returning the error message', 'Feature flag disabled'
end
context 'with non private project' do
before do
allow_next_found_instance_of(Project) do |project|
allow(project).to receive(:private?).and_return(false)
end
end
it_behaves_like 'returning the error message', 'Can only run on private projects'
end
context 'with unauthorized user' do
let_it_be(:user) { create(:user) }
it 'returns success' do
sign_in(user)
get_setup_page
expect(response).to have_gitlab_http_status(:not_found)
end
end
end
context 'when not on saas' do
it_behaves_like 'returning the error message', "Can&#39;t run here"
end
end
end

View File

@ -457,6 +457,8 @@ RSpec.describe Groups::UpdateService, feature_category: :groups_and_projects do
context 'when enabling the setting' do
it 'creates the initial Namespaces::Descendants record' do
expect { result }.to change { public_group.reload.namespace_descendants.present? }.from(false).to(true)
expect(public_group.namespace_descendants.outdated_at).to be_present
end
end

View File

@ -8,7 +8,7 @@ module StubRequests
#
# It expects the final request to go to the `ip_address` instead the given url.
# That's primarily a DNS rebind attack prevention of Gitlab::HTTP
# (see: Gitlab::UrlBlocker).
# (see: Gitlab::HTTP_V2::UrlBlocker).
#
def stub_full_request(url, ip_address: IP_ADDRESS_STUB, port: 80, method: :get)
stub_dns(url, ip_address: ip_address, port: port)
@ -22,7 +22,7 @@ module StubRequests
socket = Socket.sockaddr_in(port, ip_address)
addr = Addrinfo.new(socket)
# See Gitlab::UrlBlocker
# See Gitlab::HTTP_V2::UrlBlocker
allow(Addrinfo).to receive(:getaddrinfo)
.with(url.hostname, url.port, nil, :STREAM)
.and_return([addr])
@ -34,7 +34,7 @@ module StubRequests
socket = Socket.sockaddr_in(port, ip_address)
addr = Addrinfo.new(socket)
# See Gitlab::UrlBlocker
# See Gitlab::HTTP_V2::UrlBlocker
allow(Addrinfo).to receive(:getaddrinfo).and_call_original
allow(Addrinfo).to receive(:getaddrinfo)
.with(url.hostname, anything, nil, :STREAM)

View File

@ -6714,7 +6714,6 @@
- './spec/lib/gitlab/uploads_transfer_spec.rb'
- './spec/lib/gitlab/url_blockers/domain_allowlist_entry_spec.rb'
- './spec/lib/gitlab/url_blockers/ip_allowlist_entry_spec.rb'
- './spec/lib/gitlab/url_blocker_spec.rb'
- './spec/lib/gitlab/url_blockers/url_allowlist_spec.rb'
- './spec/lib/gitlab/url_builder_spec.rb'
- './spec/lib/gitlab/url_sanitizer_spec.rb'

Some files were not shown because too many files have changed in this diff