Add latest changes from gitlab-org/gitlab@master

This commit is contained in:
GitLab Bot 2024-04-26 09:14:30 +00:00
parent 88c8343af6
commit 5e7fac8adf
65 changed files with 568 additions and 1027 deletions

View File

@ -234,7 +234,7 @@ ping-appsec-for-sast-findings:
- .ping-appsec-for-sast-findings:rules
variables:
# Project Access Token bot ID for /gitlab-com/gl-security/appsec/sast-custom-rules
BOT_USER_ID: 14406065
BOT_USER_ID: 19650678
needs:
- semgrep-appsec-custom-rules
script:

View File

@ -46,14 +46,6 @@ export default {
stepTwoDescription: s__(
'Runners|To view the setup instructions, complete the previous form. The instructions help you set up an autoscaling fleet of runners to execute your CI/CD jobs in Google Cloud.',
),
projectIdLabel: s__('Runners|Google Cloud project ID'),
projectIdDescription: s__(
'Runners|To improve security, use a dedicated project for CI/CD, separate from resources and identity management projects. %{linkStart}Wheres my project ID in Google Cloud?%{linkEnd}',
),
zonesLinkText: s__('Runners|View available zones'),
machineTypeDescription: s__(
'Runners|For most CI/CD jobs, use a %{linkStart}N2D standard machine type%{linkEnd}.',
),
runnerSetupBtnText: s__('Runners|Setup instructions'),
copyCommands: __('Copy commands'),
emptyFieldsAlertMessage: s__(
@ -329,18 +321,20 @@ export default {
ref="cloudProjectId"
v-model="cloudProjectId"
name="cloudProjectId"
:label="$options.i18n.projectIdLabel"
:label="s__('Runners|Google Cloud project ID')"
:invalid-feedback-if-empty="s__('Runners|Project ID is required.')"
:invalid-feedback-if-malformed="
s__(
'Runners|Project ID must be 6 to 30 lowercase letters, digits, or hyphens. It needs to start with a lowercase letter and end with a letter or number.',
)
"
:invalid-feedback-if-malformed="s__('Runners|Project ID must have the right format.')"
:regexp="$options.GC_PROJECT_PATTERN"
data-testid="project-id-input"
>
<template #description>
<gl-sprintf :message="$options.i18n.projectIdDescription">
<gl-sprintf
:message="
s__(
'Runners|To improve security, use a project just for CI/CD. %{linkStart}Where\'s my project ID?%{linkEnd} Can be 6 to 30 lowercase letters, digits, or hyphens. Must start with a letter and end with a letter or number. Example: %{example}.',
)
"
>
<template #link="{ content }">
<gl-link
:href="$options.links.projectIdLink"
@ -351,6 +345,11 @@ export default {
<gl-icon name="external-link" :aria-label="$options.i18n.externalLink" />
</gl-link>
</template>
<!-- eslint-disable @gitlab/vue-require-i18n-strings -->
<template #example>
<code>my-sample-project-191923</code>
</template>
<!-- eslint-enable @gitlab/vue-require-i18n-strings -->
</gl-sprintf>
</template>
</google-cloud-field-group>
@ -360,9 +359,7 @@ export default {
v-model="region"
name="region"
:invalid-feedback-if-empty="s__('Runners|Region is required.')"
:invalid-feedback-if-malformed="
s__('Runners|Region must have the correct format. Example: us-central1')
"
:invalid-feedback-if-malformed="s__('Runners|Region must have the right format.')"
:regexp="$options.GC_REGION_PATTERN"
data-testid="region-input"
>
@ -377,6 +374,18 @@ export default {
</help-popover>
</div>
</template>
<template #description>
<gl-sprintf :message="s__('Runners|Must have the format %{format}. Example: %{example}.')">
<!-- eslint-disable @gitlab/vue-require-i18n-strings -->
<template #format>
<code>&lt;location&gt;-&lt;sublocation&gt;&lt;number&gt;</code>
</template>
<template #example>
<code>us-central1</code>
</template>
<!-- eslint-enable @gitlab/vue-require-i18n-strings -->
</gl-sprintf>
</template>
</google-cloud-field-group>
<google-cloud-field-group
@ -384,9 +393,7 @@ export default {
v-model="zone"
name="zone"
:invalid-feedback-if-empty="s__('Runners|Zone is required.')"
:invalid-feedback-if-malformed="
s__('Runners|Zone must have the correct format. Example: us-central1-a')
"
:invalid-feedback-if-malformed="s__('Runners|Zone must have the right format.')"
:regexp="$options.GC_ZONE_PATTERN"
data-testid="zone-input"
>
@ -406,10 +413,28 @@ export default {
</div>
</template>
<template #description>
<gl-link :href="$options.links.zonesLink" target="_blank" data-testid="zone-link">
{{ $options.i18n.zonesLinkText }}
<gl-icon name="external-link" :aria-label="$options.i18n.externalLink" />
</gl-link>
<gl-sprintf
:message="
s__(
'Runners|%{linkStart}View available zones%{linkEnd}. Must have the format %{format}. Example: %{example}.',
)
"
>
<template #link="{ content }">
<gl-link :href="$options.links.zonesLink" target="_blank" data-testid="zone-link">
{{ content }}
<gl-icon name="external-link" :aria-label="$options.i18n.externalLink" />
</gl-link>
</template>
<!-- eslint-disable @gitlab/vue-require-i18n-strings -->
<template #format>
<code>&lt;region&gt;-&lt;zone_letter&gt;</code>
</template>
<template #example>
<code>us-central1-a</code>
</template>
<!-- eslint-enable @gitlab/vue-require-i18n-strings -->
</gl-sprintf>
</template>
</google-cloud-field-group>
@ -418,11 +443,7 @@ export default {
v-model="machineType"
name="machineType"
:invalid-feedback-if-empty="s__('Runners|Machine type is required.')"
:invalid-feedback-if-malformed="
s__(
'Runners|Machine type must have the format `family-series-size`. Example: n2d-standard-2',
)
"
:invalid-feedback-if-malformed="s__('Runners|Machine type must have the right format.')"
:regexp="$options.GC_MACHINE_TYPE_PATTERN"
data-testid="machine-type-input"
>
@ -442,7 +463,13 @@ export default {
</div>
</template>
<template #description>
<gl-sprintf :message="$options.i18n.machineTypeDescription">
<gl-sprintf
:message="
s__(
'Runners|For most CI/CD jobs, use a %{linkStart}N2D standard machine type%{linkEnd}. Must have the format %{format}. Example: %{example}.',
)
"
>
<template #link="{ content }">
<gl-link
:href="$options.links.n2dMachineTypesLink"
@ -453,6 +480,14 @@ export default {
<gl-icon name="external-link" :aria-label="$options.i18n.externalLink" />
</gl-link>
</template>
<!-- eslint-disable @gitlab/vue-require-i18n-strings -->
<template #format>
<code>&lt;series&gt;-&lt;type&gt;</code>
</template>
<template #example>
<code>n2d-standard-2</code>
</template>
<!-- eslint-enable @gitlab/vue-require-i18n-strings -->
</gl-sprintf>
</template>
</google-cloud-field-group>

View File

@ -1,121 +0,0 @@
import $ from 'jquery';
/**
* Linked Tabs
*
* Handles persisting and restores the current tab selection and content.
* Reusable component for static content.
*
* ### Example Markup
*
* <ul class="nav-links tab-links">
* <li class="active">
* <a data-action="tab1" data-target="#tab1" data-toggle="tab" href="/path/tab1">
* Tab 1
* </a>
* </li>
* <li class="groups-tab">
* <a data-action="tab2" data-target="#tab2" data-toggle="tab" href="/path/tab2">
* Tab 2
* </a>
* </li>
*
*
* <div class="tab-content">
* <div class="tab-pane" id="tab1">
* Tab 1 Content
* </div>
* <div class="tab-pane" id="tab2">
* Tab 2 Content
* </div>
* </div>
*
*
* ### How to use
*
* new LinkedTabs({
* action: "#{controller.action_name}",
* defaultAction: 'tab1',
* parentEl: '.tab-links'
* });
*/
export default class LinkedTabs {
  /**
   * Binds the events and activates the default tab.
   *
   * @param {Object} options - expects `action`, `defaultAction`, `parentEl`,
   *   and an optional `hashedTabs` flag (persist the tab in the URL hash
   *   instead of the path).
   */
  constructor(options = {}) {
    this.options = options;
    this.defaultAction = this.options.defaultAction;
    this.action = this.options.action || this.defaultAction;
    this.hashedTabs = this.options.hashedTabs || false;

    // Rails commonly reports the generic `show` action; fall back to the
    // configured default tab in that case.
    if (this.action === 'show') {
      this.action = this.defaultAction;
    }

    this.currentLocation = window.location;

    if (this.hashedTabs) {
      this.action = this.currentLocation.hash || this.action;
    }

    const tabSelector = `${this.options.parentEl} a[data-toggle="tab"]`;

    // since this is a custom event we need jQuery :(
    $(document)
      .off('shown.bs.tab', tabSelector)
      .on('shown.bs.tab', tabSelector, (e) => this.tabShown(e));

    this.activateTab(this.action);
  }

  /**
   * Handles the `shown.bs.tab` event to set the correct url action.
   *
   * @param {Event} evt
   * @return {String} the new history state written by `setCurrentAction`
   */
  tabShown(evt) {
    const source = evt.target.getAttribute('href');
    return this.setCurrentAction(source);
  }

  /**
   * Updates the URL with the path that matched the given action.
   *
   * @param {String} source
   * @return {String}
   */
  setCurrentAction(source) {
    // FIX: the original aliased `source` and discarded the `replace` result
    // (strings are immutable, so trailing slashes were never stripped).
    // Keep the cleaned value as clearly intended.
    const cleanSource = source.replace(/\/+$/, '');

    const newState = this.hashedTabs
      ? cleanSource
      : `${cleanSource}${this.currentLocation.search}${this.currentLocation.hash}`;

    window.history.replaceState(
      {
        url: newState,
      },
      document.title,
      newState,
    );
    return newState;
  }

  /**
   * Activates the tab matching the given action.
   * http://getbootstrap.com/javascript/#tab-show
   * Note: Will trigger `shown.bs.tab`
   *
   * @param {String} [action] - defaults to the instance's current action.
   *   FIX: the original signature declared no parameter, so the argument
   *   passed from the constructor was silently ignored; honoring it keeps
   *   the constructor call behavior identical while making the method
   *   usable with an explicit action.
   */
  activateTab(action = this.action) {
    return $(`${this.options.parentEl} a[data-action='${action}']`).tab('show');
  }
}

View File

@ -1,16 +0,0 @@
import axios from './axios_utils';
import { setFaviconOverlay, resetFavicon } from './favicon';
/**
 * Fetches `pageUrl` and applies the CI status favicon overlay it returns.
 * When the response has no `favicon`, or the request fails, the favicon is
 * reset; request errors are re-thrown so callers can react to them.
 *
 * @param {String} pageUrl - endpoint returning `{ favicon }` JSON
 * @return {Promise}
 */
export const setCiStatusFavicon = async (pageUrl) => {
  try {
    const { data } = await axios.get(pageUrl);

    if (data && data.favicon) {
      return setFaviconOverlay(data.favicon);
    }
    return resetFavicon();
  } catch (error) {
    resetFavicon();
    throw error;
  }
};

View File

@ -46,3 +46,8 @@ const OPERATORS_NOT_LIKE = [{ value: OPERERATOR_NOT_LIKE, description: OPERERATO
export const OPERATORS_LIKE_NOT = [...OPERATORS_LIKE, ...OPERATORS_NOT_LIKE];
export const CUSTOM_DATE_RANGE_OPTION = 'custom';
export const DATE_RANGE_QUERY_KEY = 'date_range';
export const DATE_RANGE_START_QUERY_KEY = 'date_start';
export const DATE_RANGE_END_QUERY_KEY = 'date_end';
export const FILTERED_SEARCH_TERM_QUERY_KEY = 'search';

View File

@ -1,3 +1,12 @@
import { isValidDate } from '~/lib/utils/datetime_utility';
import {
CUSTOM_DATE_RANGE_OPTION,
DATE_RANGE_QUERY_KEY,
DATE_RANGE_START_QUERY_KEY,
DATE_RANGE_END_QUERY_KEY,
} from './constants';
/**
* Return the data range for the given time period
* Accepted values are numbers followed by the unit 'm', 'h', 'd', e.g. '5m', '3h', '7d'
@ -31,3 +40,74 @@ export const periodToDate = (timePeriod) => {
}
return { min: new Date(maxMs - minMs), max: new Date(maxMs) };
};
/**
* Validates the date range query parameters and returns an object with the validated date range.
*
* @param {string} dateRangeValue - The value of the date range query parameter.
* @param {string} dateRangeStart - The value of the start date query parameter.
* @param {string} dateRangeEnd - The value of the end date query parameter.
* @returns {{value: string, startDate?: Date, endDate?: Date}} An object containing the validated date range.
*/
/**
 * Validates the date range query parameters and returns an object with the
 * validated date range.
 *
 * A non-custom range is passed through (defaulting to '1h' when absent).
 * A custom range is only honored when both bounds parse to valid dates;
 * otherwise it falls back to the default range.
 *
 * @param {string} dateRangeValue - The value of the date range query parameter.
 * @param {string} dateRangeStart - The value of the start date query parameter.
 * @param {string} dateRangeEnd - The value of the end date query parameter.
 * @returns {{value: string, startDate?: Date, endDate?: Date}} The validated date range.
 */
function validatedDateRangeQuery(dateRangeValue, dateRangeStart, dateRangeEnd) {
  const DEFAULT_TIME_RANGE = '1h';

  if (dateRangeValue !== CUSTOM_DATE_RANGE_OPTION) {
    return { value: dateRangeValue ?? DEFAULT_TIME_RANGE };
  }

  const startDate = new Date(dateRangeStart);
  const endDate = new Date(dateRangeEnd);

  if (!isValidDate(startDate) || !isValidDate(endDate)) {
    return { value: DEFAULT_TIME_RANGE };
  }

  return { value: dateRangeValue, startDate, endDate };
}
/**
* Converts a query object containing date range parameters to a validated date filter object.
*
* @param {Object} queryObj - The query object containing date range parameters.
* @param {string} queryObj[DATE_RANGE_QUERY_KEY] - The value of the date range query parameter.
* @param {string} queryObj[DATE_RANGE_START_QUERY_KEY] - The value of the start date query parameter.
* @param {string} queryObj[DATE_RANGE_END_QUERY_KEY] - The value of the end date query parameter.
* @returns {{value: string, startDate?: Date, endDate?: Date}} An object containing the validated date range.
*/
/**
 * Converts a query object containing date range parameters to a validated
 * date filter object.
 *
 * @param {Object} queryObj - The query object containing date range parameters.
 * @returns {{value: string, startDate?: Date, endDate?: Date}} The validated date range.
 */
export function queryToDateFilterObj(queryObj) {
  return validatedDateRangeQuery(
    queryObj[DATE_RANGE_QUERY_KEY],
    queryObj[DATE_RANGE_START_QUERY_KEY],
    queryObj[DATE_RANGE_END_QUERY_KEY],
  );
}
/**
* Converts a date filter object to a query object with date range parameters.
*
* @param {Object} dateFilter - The date filter object.
* @param {string} dateFilter.value - The value of the date range.
* @param {Date} [dateFilter.startDate] - The start date of the date range.
* @param {Date} [dateFilter.endDate] - The end date of the date range.
* @returns {Object} An object containing the date range query parameters.
*/
/**
 * Converts a date filter object to a query object with date range parameters.
 *
 * The start/end keys are only populated for the custom range option; for any
 * other range they are explicitly set to `undefined` so stale values are
 * cleared from the query string.
 *
 * @param {Object} dateFilter - The date filter object.
 * @param {string} dateFilter.value - The value of the date range.
 * @param {Date} [dateFilter.startDate] - The start date of the date range.
 * @param {Date} [dateFilter.endDate] - The end date of the date range.
 * @returns {Object} An object containing the date range query parameters.
 */
export function dateFilterObjToQuery(dateFilter = {}) {
  const isCustomRange = dateFilter.value === CUSTOM_DATE_RANGE_OPTION;

  return {
    [DATE_RANGE_QUERY_KEY]: dateFilter.value,
    [DATE_RANGE_START_QUERY_KEY]: isCustomRange ? dateFilter.startDate?.toISOString() : undefined,
    [DATE_RANGE_END_QUERY_KEY]: isCustomRange ? dateFilter.endDate?.toISOString() : undefined,
  };
}

View File

@ -8,21 +8,36 @@ import {
TOKEN_TYPE_TYPE,
TOKEN_TITLE_VERSION,
TOKEN_TYPE_VERSION,
TOKEN_TITLE_STATUS,
TOKEN_TYPE_STATUS,
} from '~/vue_shared/components/filtered_search_bar/constants';
import PersistedSearch from '~/packages_and_registries/shared/components/persisted_search.vue';
import { LIST_KEY_CREATED_AT } from '~/packages_and_registries/package_registry/constants';
import {
LIST_KEY_CREATED_AT,
PACKAGE_STATUS_OPTIONS,
PACKAGE_TYPES_OPTIONS,
} from '~/packages_and_registries/package_registry/constants';
import LocalStorageSync from '~/vue_shared/components/local_storage_sync.vue';
import PackageTypeToken from './tokens/package_type_token.vue';
export default {
tokens: [
{
type: TOKEN_TYPE_STATUS,
icon: 'status',
title: TOKEN_TITLE_STATUS,
unique: true,
token: GlFilteredSearchToken,
operators: OPERATORS_IS,
options: PACKAGE_STATUS_OPTIONS,
},
{
type: TOKEN_TYPE_TYPE,
icon: 'package',
title: TOKEN_TITLE_TYPE,
unique: true,
token: PackageTypeToken,
token: GlFilteredSearchToken,
operators: OPERATORS_IS,
options: PACKAGE_TYPES_OPTIONS,
},
{
type: TOKEN_TYPE_VERSION,
@ -70,36 +85,39 @@ export default {
},
methods: {
formatFilters(filters) {
const parsed = {
packageName: '',
packageType: undefined,
packageVersion: '',
};
return filters
.filter((filter) => filter.value?.data)
.reduce((acc, filter) => {
if (filter.type === TOKEN_TYPE_TYPE) {
return {
...acc,
packageType: filter.value.data.toUpperCase(),
};
}
return filters.reduce((acc, filter) => {
if (filter.type === TOKEN_TYPE_TYPE && filter.value?.data) {
return {
...acc,
packageType: filter.value.data.toUpperCase(),
};
}
if (filter.type === TOKEN_TYPE_VERSION) {
return {
...acc,
packageVersion: filter.value.data.trim(),
};
}
if (filter.type === TOKEN_TYPE_VERSION && filter.value?.data) {
return {
...acc,
packageVersion: filter.value.data.trim(),
};
}
if (filter.type === TOKEN_TYPE_STATUS) {
return {
...acc,
packageStatus: filter.value.data,
};
}
if (filter.type === FILTERED_SEARCH_TERM) {
return {
...acc,
packageName: `${acc.packageName} ${filter.value.data}`.trim(),
};
}
if (filter.type === FILTERED_SEARCH_TERM) {
return {
...acc,
packageName: filter.value.data.trim(),
};
}
return acc;
}, parsed);
return acc;
}, {});
},
updateSorting(newValue) {
this.sorting = { ...this.sorting, ...newValue };

View File

@ -170,6 +170,7 @@ export default {
<template v-else>
<gl-alert
v-if="showErrorPackageAlert"
class="gl-mt-5"
variant="danger"
:title="errorTitleAlert"
:primary-button-text="$options.i18n.deleteThisPackage"

View File

@ -1,26 +0,0 @@
<script>
import { GlFilteredSearchToken, GlFilteredSearchSuggestion } from '@gitlab/ui';
import { PACKAGE_TYPES } from '~/packages_and_registries/package_registry/constants';
// Filtered-search token that offers the known package types as suggestions.
export default {
  components: {
    GlFilteredSearchToken,
    GlFilteredSearchSuggestion,
  },
  // Stored on the component options object so the template can read it
  // via `$options.PACKAGE_TYPES`.
  PACKAGE_TYPES,
};
</script>
<template>
  <!-- Forward all attributes and listeners to the underlying token so this
       component can be used as a drop-in filtered-search token. -->
  <gl-filtered-search-token v-bind="{ ...$attrs }" v-on="$listeners">
    <template #suggestions>
      <gl-filtered-search-suggestion
        v-for="(type, index) in $options.PACKAGE_TYPES"
        :key="index"
        :value="type"
      >
        {{ type }}
      </gl-filtered-search-suggestion>
    </template>
  </gl-filtered-search-token>
</template>

View File

@ -205,17 +205,30 @@ export const SORT_FIELDS = [
},
];
export const PACKAGE_TYPES = [
s__('PackageRegistry|Composer'),
s__('PackageRegistry|Conan'),
s__('PackageRegistry|Generic'),
s__('PackageRegistry|Maven'),
s__('PackageRegistry|npm'),
s__('PackageRegistry|NuGet'),
s__('PackageRegistry|PyPI'),
s__('PackageRegistry|RubyGems'),
s__('PackageRegistry|Debian'),
s__('PackageRegistry|Helm'),
/* eslint-disable @gitlab/require-i18n-strings */
export const PACKAGE_TYPES_OPTIONS = [
{ value: 'Composer', title: s__('PackageRegistry|Composer') },
{ value: 'Conan', title: s__('PackageRegistry|Conan') },
{ value: 'Generic', title: s__('PackageRegistry|Generic') },
{ value: 'Maven', title: s__('PackageRegistry|Maven') },
{ value: 'npm', title: s__('PackageRegistry|npm') },
{ value: 'NuGet', title: s__('PackageRegistry|NuGet') },
{ value: 'PyPI', title: s__('PackageRegistry|PyPI') },
{ value: 'RubyGems', title: s__('PackageRegistry|RubyGems') },
{ value: 'Debian', title: s__('PackageRegistry|Debian') },
{ value: 'Helm', title: s__('PackageRegistry|Helm') },
];
/* eslint-enable @gitlab/require-i18n-strings */
export const PACKAGE_STATUS_OPTIONS = [
{
value: PACKAGE_DEFAULT_STATUS,
title: s__('PackageRegistry|Default'),
},
{ value: PACKAGE_ERROR_STATUS, title: s__('PackageRegistry|Error') },
{ value: 'HIDDEN', title: s__('PackageRegistry|Hidden') },
{ value: 'PENDING_DESTRUCTION', title: s__('PackageRegistry|Pending deletion') },
{ value: 'PROCESSING', title: s__('PackageRegistry|Processing') },
];
// links

View File

@ -10,6 +10,7 @@ query getPackages(
$packageName: String
$packageType: PackageTypeEnum
$packageVersion: String
$packageStatus: PackageStatus
$first: Int
$last: Int
$after: String
@ -22,6 +23,7 @@ query getPackages(
packageName: $packageName
packageType: $packageType
packageVersion: $packageVersion
status: $packageStatus
after: $after
before: $before
first: $first
@ -46,6 +48,7 @@ query getPackages(
packageName: $packageName
packageType: $packageType
packageVersion: $packageVersion
status: $packageStatus
after: $after
before: $before
first: $first

View File

@ -80,10 +80,8 @@ export default {
fullPath: this.fullPath,
sort: this.isGroupPage ? undefined : this.sort,
groupSort: this.isGroupPage ? this.sort : undefined,
packageName: this.filters?.packageName,
packageType: this.filters?.packageType,
packageVersion: this.filters?.packageVersion,
first: GRAPHQL_PAGE_SIZE,
...this.packageParams,
...this.pageParams,
};
},
@ -93,14 +91,32 @@ export default {
pageInfo() {
return this.packages?.pageInfo ?? {};
},
packageParams() {
return {
packageName: this.filters?.packageName,
packageType: this.filters?.packageType,
packageVersion: this.filters?.packageVersion,
packageStatus: this.filters?.packageStatus,
};
},
packagesCount() {
return this.packages?.count;
},
hasFilters() {
return this.filters.packageName || this.filters.packageType || this.filters.packageVersion;
return (
this.filters.packageName ||
this.filters.packageType ||
this.filters.packageVersion ||
this.filters.packageStatus
);
},
emptySearch() {
return !this.filters.packageName && !this.filters.packageType && !this.filters.packageVersion;
return (
!this.filters.packageName &&
!this.filters.packageType &&
!this.filters.packageVersion &&
!this.filters.packageStatus
);
},
emptyStateTitle() {
return this.emptySearch

View File

@ -10,7 +10,7 @@ export const searchArrayToFilterTokens = (search) =>
search.map((s) => keyValueToFilterToken(FILTERED_SEARCH_TERM, s));
export const extractFilterAndSorting = (queryObject) => {
const { type, search, version, sort, orderBy } = queryObject;
const { type, search, version, status, sort, orderBy } = queryObject;
const filters = [];
const sorting = {};
@ -20,6 +20,9 @@ export const extractFilterAndSorting = (queryObject) => {
if (version) {
filters.push(keyValueToFilterToken('version', version));
}
if (status) {
filters.push(keyValueToFilterToken('status', status));
}
if (search) {
filters.push(...searchArrayToFilterTokens(search));
}

View File

@ -216,7 +216,7 @@ export default {
),
domainAllowListLabel: s__('ApplicationSettings|Allowed domains for sign-ups'),
domainAllowListDescription: s__(
'ApplicationSettings|Only users with e-mail addresses that match these domain(s) can sign up. Wildcards allowed. Use separate lines for multiple entries. Example: domain.com, *.domain.com',
'ApplicationSettings|Only users with e-mail addresses that match these domain(s) can sign up. Wildcards allowed. Enter multiple entries on separate lines. Example: domain.com, *.domain.com',
),
userCapLabel: s__('ApplicationSettings|User cap'),
userCapDescription: s__(
@ -232,7 +232,7 @@ export default {
),
domainDenyListListLabel: s__('ApplicationSettings|Denied domains for sign-ups'),
domainDenyListListDescription: s__(
'ApplicationSettings|Users with e-mail addresses that match these domain(s) cannot sign up. Wildcards allowed. Use separate lines for multiple entries. Example: domain.com, *.domain.com',
'ApplicationSettings|Users with e-mail addresses that match these domain(s) cannot sign up. Wildcards allowed. Enter multiple entries on separate lines. Example: domain.com, *.domain.com',
),
domainPlaceholder: s__('ApplicationSettings|domain.com'),
emailRestrictionsEnabledGroupLabel: s__('ApplicationSettings|Email restrictions'),

View File

@ -1,33 +0,0 @@
import Pipelines from '~/pipelines';
/**
 * Wires up the pipeline page: the "show more" toggle for the merge request
 * list, and the tabbed pipeline view with its status-favicon polling URL.
 */
export default () => {
  const toggleButton = document.querySelector('.js-toggle-mr-list');
  const truncatedList = document.querySelector('.js-truncated-mr-list');
  const fullList = document.querySelector('.js-full-mr-list');

  if (toggleButton) {
    toggleButton.addEventListener('click', (event) => {
      event.preventDefault();
      truncatedList.classList.toggle('hide');
      fullList.classList.toggle('hide');
    });
  }

  const tabLink = document.querySelector('.js-pipeline-tab-link a');
  const { controllerAction } = document.querySelector('.js-pipeline-container').dataset;

  if (tabLink) {
    const pipelineStatusUrl = `${tabLink.getAttribute('href')}/status.json`;

    // eslint-disable-next-line no-new
    new Pipelines({
      initTabs: true,
      pipelineStatusUrl,
      tabsOptions: {
        action: controllerAction,
        defaultAction: 'pipelines',
        parentEl: '.pipelines-tabs',
      },
    });
  }
};

View File

@ -1,5 +1,3 @@
import initPipelineDetails from '~/ci/pipeline_details/pipeline_details_bundle';
import initPipelines from '../init_pipelines';
initPipelines();
initPipelineDetails();

View File

@ -1,15 +0,0 @@
import LinkedTabs from './lib/utils/bootstrap_linked_tabs';
import { setCiStatusFavicon } from './lib/utils/favicon_ci';
/**
 * Glue class for the pipeline page: depending on the options provided, sets
 * up the linked tabs and/or the CI-status favicon polling.
 */
export default class Pipelines {
  /**
   * @param {Object} options - `initTabs` + `tabsOptions` enable the linked
   *   tabs; `pipelineStatusUrl` enables the favicon status updates.
   */
  constructor(options = {}) {
    const { initTabs, tabsOptions, pipelineStatusUrl } = options;

    if (initTabs && tabsOptions) {
      // eslint-disable-next-line no-new
      new LinkedTabs(tabsOptions);
    }

    if (pipelineStatusUrl) {
      setCiStatusFavicon(pipelineStatusUrl);
    }
  }
}

View File

@ -3,6 +3,7 @@
module Issuable
module Callbacks
class Base
Error = Class.new(StandardError)
include Gitlab::Allowable
def initialize(issuable:, current_user:, params: {})
@ -27,6 +28,10 @@ module Issuable
def has_permission?(permission)
can?(current_user, permission, issuable)
end
def raise_error(message)
raise ::Issuable::Callbacks::Base::Error, message
end
end
end
end

View File

@ -1,8 +1,10 @@
# frozen_string_literal: true
module WorkItems
module Issuable
module Callbacks
class TimeTracking < Base
ALLOWED_PARAMS = %i[time_estimate spend_time timelog].freeze
def after_initialize
if excluded_in_new_type?
params.delete(:time_estimate)
@ -10,7 +12,7 @@ module WorkItems
params.delete(:timelog)
end
return unless has_permission?(:admin_work_item)
return unless has_permission?(:"admin_#{issuable.to_ability_name}")
# below 2 parse_*_data methods, parse the data coming in from `time_tracking_widget` argument, in
# work item update mutation.
@ -19,8 +21,8 @@ module WorkItems
# we still need to set the data here, in case when we had no data coming in from the `time_tracking_widget`
# argument, but data was still set through updating the description and using quick actions.
work_item.time_estimate = params[:time_estimate] if params[:time_estimate].present?
work_item.spend_time = params[:spend_time] if params[:spend_time].present?
issuable.time_estimate = params[:time_estimate] if params.has_key?(:time_estimate)
issuable.spend_time = params[:spend_time] if params[:spend_time].present?
end
private

View File

@ -5,7 +5,8 @@ class IssuableBaseService < ::BaseContainerService
def available_callbacks
[
Issuable::Callbacks::Milestone
Issuable::Callbacks::Milestone,
Issuable::Callbacks::TimeTracking
].freeze
end

View File

@ -28,7 +28,7 @@ module WorkItems
else
error(work_item.errors.full_messages, :unprocessable_entity, pass_back: payload(work_item))
end
rescue ::WorkItems::Widgets::BaseService::WidgetError => e
rescue ::WorkItems::Widgets::BaseService::WidgetError, ::Issuable::Callbacks::Base::Error => e
error(e.message, :unprocessable_entity)
end

View File

@ -22,7 +22,7 @@ module WorkItems
else
error(updated_work_item.errors.full_messages, :unprocessable_entity, pass_back: payload(updated_work_item))
end
rescue ::WorkItems::Widgets::BaseService::WidgetError => e
rescue ::WorkItems::Widgets::BaseService::WidgetError, ::Issuable::Callbacks::Base::Error => e
error(e.message, :unprocessable_entity)
end

View File

@ -168,10 +168,10 @@ including:
- CI/CD job output logs
- CI/CD job artifacts
- LFS objects
- Terraform states ([introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/331806) in GitLab 14.7)
- Terraform states
- Container registry images
- GitLab Pages content
- Packages ([introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/332006) in GitLab 14.7)
- Packages
- Snippets
- [Group wikis](../../user/project/wiki/group.md)
- Project-level Secure Files ([introduced](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/121142) in GitLab 16.1)
@ -563,21 +563,16 @@ sudo -u git -H bundle exec rake gitlab:backup:create REPOSITORIES_SERVER_SIDE=tr
#### Back up Git repositories concurrently
> - [Introduced](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/37158) in GitLab 13.3.
> - [Concurrent restore introduced](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/69330) in GitLab 14.3
When using [multiple repository storages](../repository_storage_paths.md),
repositories can be backed up or restored concurrently to help fully use CPU time. The
following variables are available to modify the default behavior of the Rake
task:
- `GITLAB_BACKUP_MAX_CONCURRENCY`: The maximum number of projects to back up at
the same time. Defaults to the number of logical CPUs (in GitLab 14.1 and
earlier, defaults to `1`).
the same time. Defaults to the number of logical CPUs.
- `GITLAB_BACKUP_MAX_STORAGE_CONCURRENCY`: The maximum number of projects to
back up at the same time on each storage. This allows the repository backups
to be spread across storages. Defaults to `2` (in GitLab 14.1 and earlier,
defaults to `1`).
to be spread across storages. Defaults to `2`.
For example, with 4 repository storages:
@ -599,8 +594,6 @@ sudo -u git -H bundle exec rake gitlab:backup:create GITLAB_BACKUP_MAX_CONCURREN
#### Incremental repository backups
> - Introduced in GitLab 14.9 [with a flag](../feature_flags.md) named `incremental_repository_backup`. Disabled by default.
> - [Enabled on self-managed](https://gitlab.com/gitlab-org/gitlab/-/issues/355945) in GitLab 14.10.
> - `PREVIOUS_BACKUP` option [introduced](https://gitlab.com/gitlab-org/gitaly/-/issues/4184) in GitLab 15.0.
> - Server-side support for creating incremental backups [introduced](https://gitlab.com/gitlab-org/gitaly/-/merge_requests/6475) in GitLab 16.6.
@ -617,26 +610,17 @@ support incremental backups for all subtasks.
Incremental repository backups can be faster than full repository backups because they only pack changes since the last backup into the backup bundle for each repository.
The incremental backup archives are not linked to each other: each archive is a self-contained backup of the instance. There must be an existing backup
to create an incremental backup from:
to create an incremental backup from.
- In GitLab 14.9 and 14.10, use the `BACKUP=<backup-id>` option to choose the backup to use. The chosen previous backup is overwritten.
- In GitLab 15.0 and later, use the `PREVIOUS_BACKUP=<backup-id>` option to choose the backup to use. By default, a backup file is created
as documented in the [Backup ID](index.md#backup-id) section. You can override the `<backup-id>` portion of the filename by setting the
[`BACKUP` environment variable](#backup-filename).
Use the `PREVIOUS_BACKUP=<backup-id>` option to choose the backup to use. By default, a backup file is created
as documented in the [Backup ID](index.md#backup-id) section. You can override the `<backup-id>` portion of the filename by setting the
[`BACKUP` environment variable](#backup-filename).
To create an incremental backup, run:
- In GitLab 15.0 or later:
```shell
sudo gitlab-backup create INCREMENTAL=yes PREVIOUS_BACKUP=<backup-id>
```
- In GitLab 14.9 and 14.10:
```shell
sudo gitlab-backup create INCREMENTAL=yes BACKUP=<backup-id>
```
```shell
sudo gitlab-backup create INCREMENTAL=yes PREVIOUS_BACKUP=<backup-id>
```
To create an [untarred](#skipping-tar-creation) incremental backup from a tarred backup, use `SKIP=tar`:
@ -740,8 +724,6 @@ For Linux package (Omnibus):
##### S3 Encrypted Buckets
> - [Introduced](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/64765) in GitLab 14.3.
AWS supports these [modes for server side encryption](https://docs.aws.amazon.com/AmazonS3/latest/userguide/serv-side-encryption.html):
- Amazon S3-Managed Keys (SSE-S3)
@ -982,8 +964,6 @@ For self-compiled installations:
##### Using Azure Blob storage
> - [Introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/25877) in GitLab 13.4.
::Tabs
:::TabTitle Linux package (Omnibus)
@ -1334,10 +1314,6 @@ for more details on what these parameters do.
#### `gitaly-backup` for repository backup and restore
> - [Introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/333034) in GitLab 14.2.
> - [Deployed behind a feature flag](../../user/feature_flags.md), enabled by default.
> - [Generally available](https://gitlab.com/gitlab-org/gitlab/-/issues/333034) in GitLab 14.10. [Feature flag `gitaly_backup`](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/83254) removed.
The `gitaly-backup` binary is used by the backup Rake task to create and restore repository backups from Gitaly.
`gitaly-backup` replaces the previous backup method that directly calls RPCs on Gitaly from GitLab.

View File

@ -137,7 +137,7 @@ sudo gitlab-ctl restart
sudo gitlab-rake gitlab:check SANITIZE=true
```
In GitLab 13.1 and later, check [database values can be decrypted](../raketasks/check.md#verify-database-values-can-be-decrypted-using-the-current-secrets)
Verify that the [database values can be decrypted](../raketasks/check.md#verify-database-values-can-be-decrypted-using-the-current-secrets)
especially if `/etc/gitlab/gitlab-secrets.json` was restored, or if a different server is
the target for the restore.
@ -334,8 +334,6 @@ The `force=yes` environment variable also disables these prompts.
### Excluding tasks on restore
> - [Introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/19347) in GitLab 14.10.
You can exclude specific tasks on restore by adding the environment variable `SKIP`, whose values are a comma-separated list of the following options:
- `db` (database)

View File

@ -63,25 +63,13 @@ after which users must reactivate 2FA.
1. Enter the database console:
For the Linux package (Omnibus) GitLab 14.1 and earlier:
```shell
sudo gitlab-rails dbconsole
```
For the Linux package (Omnibus) GitLab 14.2 and later:
For the Linux package (Omnibus):
```shell
sudo gitlab-rails dbconsole --database main
```
For self-compiled installations, GitLab 14.1 and earlier:
```shell
sudo -u git -H bundle exec rails dbconsole -e production
```
For self-compiled installations, GitLab 14.2 and later:
For self-compiled installations:
```shell
sudo -u git -H bundle exec rails dbconsole -e production --database main
@ -116,25 +104,13 @@ You may need to reconfigure or restart GitLab for the changes to take effect.
1. Enter the database console:
For the Linux package (Omnibus) GitLab 14.1 and earlier:
```shell
sudo gitlab-rails dbconsole
```
For the Linux package (Omnibus) GitLab 14.2 and later:
For the Linux package (Omnibus):
```shell
sudo gitlab-rails dbconsole --database main
```
For self-compiled installations, GitLab 14.1 and earlier:
```shell
sudo -u git -H bundle exec rails dbconsole -e production
```
For self-compiled installations, GitLab 14.2 and later:
For self-compiled installations:
```shell
sudo -u git -H bundle exec rails dbconsole -e production --database main
@ -165,25 +141,13 @@ You may need to reconfigure or restart GitLab for the changes to take effect.
1. Enter the database console:
For the Linux package (Omnibus) GitLab 14.1 and earlier:
```shell
sudo gitlab-rails dbconsole
```
For the Linux package (Omnibus) GitLab 14.2 and later:
For the Linux package (Omnibus):
```shell
sudo gitlab-rails dbconsole --database main
```
For self-compiled installations, GitLab 14.1 and earlier:
```shell
sudo -u git -H bundle exec rails dbconsole -e production
```
For self-compiled installations, GitLab 14.2 and later:
For self-compiled installations:
```shell
sudo -u git -H bundle exec rails dbconsole -e production --database main
@ -220,25 +184,13 @@ You should verify that the secrets are the root cause before deleting any data.
1. Enter the database console:
For the Linux package (Omnibus) GitLab 14.1 and earlier:
```shell
sudo gitlab-rails dbconsole
```
For the Linux package (Omnibus) GitLab 14.2 and later:
For the Linux package (Omnibus):
```shell
sudo gitlab-rails dbconsole --database main
```
For self-compiled installations, GitLab 14.1 and earlier:
```shell
sudo -u git -H bundle exec rails dbconsole -e production
```
For self-compiled installations, GitLab 14.2 and later:
For self-compiled installations:
```shell
sudo -u git -H bundle exec rails dbconsole -e production --database main
@ -355,30 +307,18 @@ Truncate the filenames in the `uploads` table:
1. Enter the database console:
For the Linux package (Omnibus) GitLab 14.2 and later:
For the Linux package (Omnibus):
```shell
sudo gitlab-rails dbconsole --database main
```
For the Linux package (Omnibus) GitLab 14.1 and earlier:
```shell
sudo gitlab-rails dbconsole
```
For self-compiled installations, GitLab 14.2 and later:
For self-compiled installations:
```shell
sudo -u git -H bundle exec rails dbconsole -e production --database main
```
For self-compiled installations, GitLab 14.1 and earlier:
```shell
sudo -u git -H bundle exec rails dbconsole -e production
```
1. Search the `uploads` table for filenames longer than 246 characters:
The following query selects the `uploads` records with filenames longer than 246 characters in batches of 0 to 10000. This improves the performance on large GitLab instances with tables having thousands of records.

View File

@ -295,25 +295,6 @@ changing Git remotes and API URLs.
This command uses the changed `external_url` configuration defined
in `/etc/gitlab/gitlab.rb`.
1. For GitLab 12.0 through 12.7, you may need to update the **primary**
site's name in the database. This bug has been fixed in GitLab 12.8.
To determine if you need to do this, search for the
`gitlab_rails["geo_node_name"]` setting in your `/etc/gitlab/gitlab.rb`
file. If it is commented out with `#` or not found at all, then you
need to update the **primary** site's name in the database. You can search for it
like so:
```shell
grep "geo_node_name" /etc/gitlab/gitlab.rb
```
To update the **primary** site's name in the database:
```shell
gitlab-rails runner 'Gitlab::Geo.primary_node.update!(name: GeoNode.current_node_name)'
```
1. Verify you can connect to the newly promoted **primary** using its URL.
If you updated the DNS records for the primary domain, these changes may
not have propagated yet, depending on the TTL of the previous DNS records.

View File

@ -182,7 +182,7 @@ follow these steps to avoid unnecessary data loss:
- Revoke object storage permissions from the **primary** site.
- Physically disconnect a machine.
### Promoting the **secondary** site running GitLab 14.5 and later
### Promoting the **secondary** site
1. SSH to every Sidekiq, PostgreSQL, and Gitaly node in the **secondary** site and run one of the following commands:
@ -214,73 +214,9 @@ follow these steps to avoid unnecessary data loss:
1. Verify you can connect to the newly promoted **primary** site using the URL used
previously for the **secondary** site.
1. If successful, the **secondary** site is now promoted to the **primary** site.
### Promoting the **secondary** site running GitLab 14.4 and earlier
WARNING:
The `gitlab-ctl promote-to-primary-node` and `gitlab-ctl promoted-db` commands are
deprecated in GitLab 14.5 and later, and [removed in GitLab 15.0](https://gitlab.com/gitlab-org/gitlab/-/issues/345207).
Use `gitlab-ctl geo promote` instead.
NOTE:
A new **secondary** should not be added at this time. If you want to add a new
**secondary**, do this after you have completed the entire process of promoting
the **secondary** to the **primary**.
WARNING:
If you encounter an `ActiveRecord::RecordInvalid: Validation failed: Name has already been taken` error during this process, read
[the troubleshooting advice](../../replication/troubleshooting/failover.md#fixing-errors-during-a-failover-or-when-promoting-a-secondary-to-a-primary-site).
The `gitlab-ctl promote-to-primary-node` command cannot be used in
conjunction with multiple servers, as it can only
perform changes on a **secondary** with only a single machine. Instead, you must
do this manually.
WARNING:
In GitLab 13.2 and 13.3, promoting a secondary site to a primary while the
secondary is paused fails. Do not pause replication before promoting a
secondary. If the site is paused, be sure to resume before promoting. This
issue has been fixed in GitLab 13.4 and later.
WARNING:
If the secondary site [has been paused](../../../geo/index.md#pausing-and-resuming-replication), this performs
a point-in-time recovery to the last known state.
Data that was created on the primary while the secondary was paused is lost.
1. SSH in to the PostgreSQL node in the **secondary** and promote PostgreSQL separately:
```shell
sudo gitlab-ctl promote-db
```
1. Edit `/etc/gitlab/gitlab.rb` on every machine in the **secondary** to
reflect its new status as **primary** by removing any lines that enabled the
`geo_secondary_role`:
```ruby
## In pre-11.5 documentation, the role was enabled as follows. Remove this line.
geo_secondary_role['enable'] = true
## In 11.5+ documentation, the role was enabled as follows. Remove this line.
roles ['geo_secondary_role']
```
After making these changes, [reconfigure GitLab](../../../restart_gitlab.md#reconfigure-a-linux-package-installation) on each
machine so the changes take effect.
1. Promote the **secondary** to **primary**. SSH into a single Rails node
server and execute:
```shell
sudo gitlab-rake geo:set_secondary_as_primary
```
1. Verify you can connect to the newly promoted **primary** using the URL used
previously for the **secondary**.
1. Success! The **secondary** has now been promoted to **primary**.
### Next steps
To regain geographic redundancy as quickly as possible, you should

View File

@ -222,7 +222,7 @@ Note the following when promoting a secondary:
error during this process, read
[the troubleshooting advice](../../replication/troubleshooting/failover.md#fixing-errors-during-a-failover-or-when-promoting-a-secondary-to-a-primary-site).
To promote the secondary site running GitLab 14.5 and later:
To promote the secondary site:
1. SSH in to your **secondary** site and run one of the following commands:
@ -243,75 +243,6 @@ To promote the secondary site running GitLab 14.5 and later:
If successful, the **secondary** site is now promoted to the **primary** site.
To promote the secondary site running GitLab 14.4 and earlier:
WARNING:
The `gitlab-ctl promote-to-primary-node` and `gitlab-ctl promoted-db` commands are
deprecated in GitLab 14.5 and later, and [removed in GitLab 15.0](https://gitlab.com/gitlab-org/gitlab/-/issues/345207).
Use `gitlab-ctl geo promote` instead.
1. SSH in to your **secondary** site and log in as root:
```shell
sudo -i
```
1. Edit `/etc/gitlab/gitlab.rb` to reflect its new status as **primary** by
removing any lines that enabled the `geo_secondary_role`:
```ruby
## In pre-11.5 documentation, the role was enabled as follows. Remove this line.
geo_secondary_role['enable'] = true
## In 11.5+ documentation, the role was enabled as follows. Remove this line.
roles ['geo_secondary_role']
```
1. Run the following command to list out all preflight checks and automatically
check if replication and verification are complete before scheduling a planned
failover to ensure the process goes smoothly:
NOTE:
In GitLab 13.7 and earlier, if you have a data type with zero items to sync,
this command reports `ERROR - Replication is not up-to-date` even if
replication is actually up-to-date. This bug was fixed in GitLab 13.8 and
later.
```shell
gitlab-ctl promotion-preflight-checks
```
1. Promote the **secondary**:
NOTE:
In GitLab 13.7 and earlier, if you have a data type with zero items to sync,
this command reports `ERROR - Replication is not up-to-date` even if
replication is actually up-to-date. If replication and verification output
shows that it is complete, you can add `--skip-preflight-checks` to make the
command complete promotion. This bug was fixed in GitLab 13.8 and later.
```shell
gitlab-ctl promote-to-primary-node
```
If you have already run the [preflight checks](../planned_failover.md#preflight-checks)
or don't want to run them, you can skip them:
```shell
gitlab-ctl promote-to-primary-node --skip-preflight-check
```
You can also promote the secondary site to primary **without any further confirmation**, even when preflight checks fail:
```shell
sudo gitlab-ctl promote-to-primary-node --force
```
1. Verify you can connect to the newly promoted **primary** site using the URL used
previously for the **secondary** site.
If successful, the **secondary** site is now promoted to the **primary** site.
### Next steps
To regain geographic redundancy as quickly as possible, you should

View File

@ -25,7 +25,7 @@ to clone and fetch large repositories, speeding up development and increasing th
Geo secondary sites transparently proxy write requests to the primary site. All Geo sites can be configured to respond to a single GitLab URL, to deliver a consistent, seamless, and comprehensive experience whichever site the user lands on.
To make sure you're using the right version of the documentation, go to [the Geo page on GitLab.com](https://gitlab.com/gitlab-org/gitlab/-/blob/master/doc/administration/geo/index.md) and choose the appropriate release from the **Switch branch/tag** dropdown list. For example, [`v13.7.6-ee`](https://gitlab.com/gitlab-org/gitlab/-/blob/v13.7.6-ee/doc/administration/geo/index.md).
To make sure you're using the right version of the documentation, go to [the Geo page on GitLab.com](https://gitlab.com/gitlab-org/gitlab/-/blob/master/doc/administration/geo/index.md) and choose the appropriate release from the **Switch branch/tag** dropdown list. For example, [`v15.7.6-ee`](https://gitlab.com/gitlab-org/gitlab/-/blob/v15.7.6-ee/doc/administration/geo/index.md).
Geo uses a set of defined terms that are described in the [Geo Glossary](glossary.md).
Be sure to familiarize yourself with those terms.
@ -237,8 +237,6 @@ For information on how to update your Geo sites to the latest GitLab version, se
### Pausing and resuming replication
> - [Introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/35913) in GitLab 13.2.
WARNING:
Pausing and resuming of replication is only supported for Geo installations using a
Linux package-managed database. External databases are not supported.

View File

@ -405,8 +405,6 @@ Selective synchronization:
### Git operations on unreplicated repositories
> - [Introduced](https://gitlab.com/groups/gitlab-org/-/epics/2562) in GitLab 12.10 for HTTP(S) and in GitLab 13.0 for SSH.
Git clone, pull, and push operations over HTTP(S) and SSH are supported for repositories that
exist on the **primary** site but not on **secondary** sites. This situation can occur
when:

View File

@ -11,7 +11,6 @@ DETAILS:
**Offering:** Self-managed
NOTE:
Since GitLab 14.6,
[GitLab Geo supports a location-aware URL including web UI and API traffic.](../secondary_proxy/location_aware_external_url.md)
This configuration is recommended over the location-aware Git remote URL
described in this document.

View File

@ -281,8 +281,6 @@ sudo gitlab-rake gitlab:geo:check
Ensure you have added the secondary site in the Admin Area under **Geo > Sites** on the web interface for the **primary** site.
Also ensure you entered the `gitlab_rails['geo_node_name']`
when adding the secondary site in the Admin Area of the **primary** site.
In GitLab 12.3 and earlier, edit the secondary site in the Admin Area of the **primary**
site and ensure that there is a trailing `/` in the `Name` field.
- Check returns `Exception: PG::UndefinedTable: ERROR: relation "geo_nodes" does not exist`.

View File

@ -36,9 +36,7 @@ You successfully promoted this node!
```
If you encounter this message when running `gitlab-rake geo:set_secondary_as_primary`
or `gitlab-ctl promote-to-primary-node`, either:
- Enter a Rails console and run:
or `gitlab-ctl promote-to-primary-node`, enter a Rails console and run:
```ruby
Rails.application.load_tasks; nil
@ -46,10 +44,6 @@ or `gitlab-ctl promote-to-primary-node`, either:
Rake::Task['geo:set_secondary_as_primary'].invoke
```
- Upgrade to GitLab 12.6.3 or later if it is safe to do so. For example,
if the failover was just a test. A
[caching-related bug](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/22021) was fixed.
### Message: ``NoMethodError: undefined method `secondary?' for nil:NilClass``
When [promoting a **secondary** site](../../disaster_recovery/index.md#step-3-promoting-a-secondary-site),

View File

@ -187,10 +187,6 @@ to respect the CIDR format (for example, `10.0.0.1/32`).
This happens if data is detected in the `projects` table. When one or more projects are detected, the operation
is aborted to prevent accidental data loss. To bypass this message, pass the `--force` option to the command.
In GitLab 13.4, a seed project is added when GitLab is first installed. This makes it necessary to pass `--force` even
on a new Geo secondary site. There is an [issue to account for seed projects](https://gitlab.com/gitlab-org/omnibus-gitlab/-/issues/5618)
when checking the database.
### Message: `FATAL: could not map anonymous shared memory: Cannot allocate memory`
If you see this message, it means that the secondary site's PostgreSQL tries to request memory that is higher than the available memory. There is an [issue](https://gitlab.com/gitlab-org/gitlab/-/issues/381585) that tracks this problem.
@ -380,8 +376,6 @@ This iterates over all package files on the secondary, looking at the
and then calculate this value on the secondary to check if they match. This
does not change anything in the UI.
For GitLab 14.4 and later:
```ruby
# Run on secondary
status = {}
@ -402,28 +396,6 @@ status.keys.each {|key| puts "#{key} count: #{status[key].count}"}
status
```
For GitLab 14.3 and earlier:
```ruby
# Run on secondary
status = {}
Packages::PackageFile.find_each do |package_file|
primary_checksum = package_file.verification_checksum
secondary_checksum = Packages::PackageFile.hexdigest(package_file.file.path)
verification_status = (primary_checksum == secondary_checksum)
status[verification_status.to_s] ||= []
status[verification_status.to_s] << package_file.id
end
# Count how many of each value we get
status.keys.each {|key| puts "#{key} count: #{status[key].count}"}
# See the output in its entirety
status
```
### Failed verification of Uploads on the primary Geo site
If verification of some uploads is failing on the primary Geo site with `verification_checksum = nil` and with the ``verification_failure = Error during verification: undefined method `underscore' for NilClass:Class``, this can be due to orphaned Uploads. The parent record owning the Upload (the upload's model) has somehow been deleted, but the Upload record still exists. These verification failures are false.

View File

@ -181,89 +181,6 @@ To solve this:
During a [backfill](../../index.md#backfill), failures are scheduled to be retried at the end
of the backfill queue, therefore these failures only clear up **after** the backfill completes.
## Sync failure message: "Verification failed with: Error during verification: File is not checksummable"
### Missing files on the Geo primary site
In GitLab 14.5 and earlier, certain data types which were missing on the Geo primary site were marked as "synced" on Geo secondary sites. This was because from the perspective of Geo secondary sites, the state matched the primary site and nothing more could be done on secondary sites.
Secondaries would regularly try to sync these files again by using the "verification" feature:
- Verification fails since the file doesn't exist.
- The file is marked "sync failed".
- Sync is retried.
- The file is marked "sync succeeded".
- The file is marked "needs verification".
- Repeat until the file is available again on the primary site.
This can be confusing to troubleshoot, since the registry entries are moved through a logical loop by various background jobs. Also, `last_sync_failure` and `verification_failure` are empty after "sync succeeded" but before verification is retried.
If you see sync failures repeatedly and alternately increase, while successes decrease and vice versa, this is likely to be caused by missing files on the primary site. You can confirm this by searching `geo.log` on secondary sites for `File is not checksummable` affecting the same files over and over.
After confirming this is the problem, the files on the primary site need to be fixed. Some possible causes:
- An NFS share became unmounted.
- A disk died or became corrupted.
- Someone unintentionally deleted a file or directory.
- Bugs in GitLab application:
- A file was moved when it shouldn't have been moved.
- A file wasn't moved when it should have been moved.
- A wrong path was generated in the code.
- A non-atomic backup was restored.
- Services or servers or network infrastructure was interrupted/restarted during use.
The appropriate action sometimes depends on the cause. For example, you can remount an NFS share. Often, a root cause may not be apparent or not useful to discover. If you have regular backups, it may be expedient to look through them and pull files from there.
In some cases, a file may be determined to be of low value, and so it may be worth deleting the record.
Geo itself is an excellent mitigation for files missing on the primary. If a file disappears on the primary but it was already synced to the secondary, you can grab the secondary's file. In cases like this, the `File is not checksummable` error message does not occur on Geo secondary sites, and only the primary logs this error message.
This problem is more likely to show up in Geo secondary sites which were set up long after the original GitLab site. In this case, Geo is only surfacing an existing problem.
This behavior affects only the following data types through GitLab 14.6:
| Data type | From version |
| ------------------------ | ------------ |
| Package registry | 13.10 |
| CI Pipeline Artifacts | 13.11 |
| Terraform State Versions | 13.12 |
| Infrastructure Registry (renamed to Terraform Module Registry in GitLab 15.11) | 14.0 |
| External MR diffs | 14.6 |
| LFS Objects | 14.6 |
| Pages Deployments | 14.6 |
| Uploads | 14.6 |
| CI Job Artifacts | 14.6 |
[Since GitLab 14.7, files that are missing on the primary site are now treated as sync failures](https://gitlab.com/gitlab-org/gitlab/-/issues/348745)
to make Geo visibly surface data loss risks. The sync/verification loop is
therefore short-circuited. `last_sync_failure` is now set to `The file is missing on the Geo primary site`.
### Failed syncs with GitLab-managed object storage replication
There is [an issue in GitLab 14.2 through 14.7](https://gitlab.com/gitlab-org/gitlab/-/issues/299819#note_822629467)
that affects Geo when the GitLab-managed object storage replication is used, causing blob object types to fail synchronization.
Since GitLab 14.2, verification failures result in synchronization failures and cause
a re-synchronization of these objects.
As verification is not implemented for files stored in object storage (see
[issue 13845](https://gitlab.com/gitlab-org/gitlab/-/issues/13845) for more details), this
results in a loop that consistently fails for all objects stored in object storage.
You can work around this by marking the objects as synced and as having passed verification. However,
be aware that this can also mark objects that may be
[missing from the primary](#missing-files-on-the-geo-primary-site).
To do that, enter the [Rails console](../../../operations/rails_console.md)
and run:
```ruby
Gitlab::Geo.verification_enabled_replicator_classes.each do |klass|
updated = klass.registry_class.failed.where(last_sync_failure: "Verification failed with: Error during verification: File is not checksummable").update_all(verification_checksum: '0000000000000000000000000000000000000000', verification_state: 2, verification_failure: nil, verification_retry_at: nil, state: 2, last_sync_failure: nil, retry_at: nil, verification_retry_count: 0, retry_count: 0)
pp "Updated #{updated} #{klass.replicable_name_plural}"
end
```
## Message: curl 18 transfer closed with outstanding read data remaining & fetch-pack: unexpected disconnect while reading sideband packet
Unstable networking conditions can cause Gitaly to fail when trying to fetch large repository

View File

@ -10,9 +10,6 @@ DETAILS:
**Tier:** Premium, Ultimate
**Offering:** Self-managed
> - [Introduced](https://gitlab.com/groups/gitlab-org/-/epics/5914) in GitLab 14.4 [with a flag](../../feature_flags.md) named `geo_secondary_proxy`. Disabled by default.
> - [Enabled by default for unified URLs](https://gitlab.com/gitlab-org/gitlab/-/issues/325732) in GitLab 14.6.
> - [Disabled by default for different URLs](https://gitlab.com/gitlab-org/gitlab/-/issues/325732) in GitLab 14.6 [with a flag](../../feature_flags.md) named `geo_secondary_proxy_separate_urls`.
> - [Enabled by default for different URLs](https://gitlab.com/gitlab-org/gitlab/-/issues/346112) in GitLab 15.1.
Use Geo proxying to:

View File

@ -616,15 +616,13 @@ On all GitLab Geo **secondary** sites:
## Multi-node database replication
In GitLab 14.0, Patroni replaced `repmgr` as the supported
[highly available PostgreSQL solution](../../postgresql/replication_and_failover.md).
NOTE:
If you still haven't [migrated from repmgr to Patroni](#migrating-from-repmgr-to-patroni) you're highly advised to do so.
Patroni is the supported
[highly available PostgreSQL solution](../../postgresql/replication_and_failover.md). If you still haven't [migrated from repmgr to Patroni](#migrating-from-repmgr-to-patroni), you're highly advised to do so.
### Migrating from repmgr to Patroni
1. Before migrating, you should ensure there is no replication lag between the **primary** and **secondary** sites and that replication is paused. In GitLab 13.2 and later, you can pause and resume replication with `gitlab-ctl geo-replication-pause` and `gitlab-ctl geo-replication-resume` on a Geo secondary database node.
1. Before migrating, you should ensure there is no replication lag between the **primary** and **secondary** sites and that replication is paused. You can pause and resume replication with `gitlab-ctl geo-replication-pause` and `gitlab-ctl geo-replication-resume` on a Geo secondary database node.
1. Follow the [instructions to migrate repmgr to Patroni](../../postgresql/replication_and_failover.md#switching-from-repmgr-to-patroni). When configuring Patroni on each **primary** site database node, add `patroni['replication_slots'] = { '<slot_name>' => 'physical' }`
to `gitlab.rb` where `<slot_name>` is the name of the replication slot for your **secondary** site. This ensures that Patroni recognizes the replication slot as permanent and doesn't drop it upon restarting.
1. If database replication to the **secondary** site was paused before migration, resume replication after Patroni is confirmed as working on the **primary** site.

View File

@ -61,8 +61,6 @@ you can decrease them.
## Set up the internal URLs
> - Setting up internal URLs in secondary sites was [introduced](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/77179) in GitLab 14.7.
You can set up a different URL for synchronization between the primary and secondary site.
The **primary** site's Internal URL is used by **secondary** sites to contact it
@ -90,12 +88,6 @@ breaking communication between **primary** and **secondary** sites when using
HTTPS, customize your Internal URL to point to a load balancer with TLS
terminated at the load balancer.
WARNING:
Starting with GitLab 13.3 and [until 13.11](https://gitlab.com/gitlab-org/gitlab/-/issues/325522),
if you use an internal URL that is not accessible to the users, the
OAuth authorization flow does not work properly, because users are redirected
to the internal URL instead of the external one.
## Multiple secondary sites behind a load balancer
**Secondary** sites can use identical external URLs if

View File

@ -651,7 +651,7 @@ setting is used:
| `ci_max_artifact_size_secret_detection` | 0 |
| `ci_max_artifact_size_terraform` | 5 MB |
| `ci_max_artifact_size_trace` | 0 |
| `ci_max_artifact_size_cyclonedx` | 1 MB |
| `ci_max_artifact_size_cyclonedx` | 5 MB |
For example, to set the `ci_max_artifact_size_junit` limit to 10 MB on a self-managed
installation, run the following in the [GitLab Rails console](operations/rails_console.md#starting-a-rails-console-session):

View File

@ -10,8 +10,6 @@ DETAILS:
**Tier:** Premium, Ultimate
**Offering:** Self-managed
> - [Introduced](https://gitlab.com/groups/gitlab-org/-/epics/2149) in GitLab 13.9.
Maintenance Mode allows administrators to reduce write operations to a minimum while maintenance tasks are performed. The main goal is to block all external actions that change the internal state. The internal state includes the PostgreSQL database, but especially files, Git repositories, and Container repositories.
When Maintenance Mode is enabled, in-progress actions finish relatively quickly because no new actions are coming in, and internal state changes are minimal.

View File

@ -410,7 +410,7 @@ listed in the descriptions of the relevant settings.
| `disabled_oauth_sign_in_sources` | array of strings | no | Disabled OAuth sign-in sources. |
| `dns_rebinding_protection_enabled` | boolean | no | Enforce DNS-rebinding attack protection. |
| `domain_denylist_enabled` | boolean | no | (**If enabled, requires:** `domain_denylist`) Allows blocking sign-ups from emails from specific domains. |
| `domain_denylist` | array of strings | no | Users with email addresses that match these domains **cannot** sign up. Wildcards allowed. Use separate lines for multiple entries. For example: `domain.com`, `*.domain.com`. |
| `domain_denylist` | array of strings | no | Users with email addresses that match these domains **cannot** sign up. Wildcards allowed. Enter multiple entries on separate lines. For example: `domain.com`, `*.domain.com`. |
| `domain_allowlist` | array of strings | no | Force people to use only corporate emails for sign-up. Default is `null`, meaning there is no restriction. |
| `downstream_pipeline_trigger_limit_per_project_user_sha` | integer | no | [Maximum downstream pipeline trigger rate](../administration/settings/continuous_integration.md#maximum-downstream-pipeline-trigger-rate). Default: `0` (no restriction). [Introduced](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/144077) in GitLab 16.10. |
| `dsa_key_restriction` | integer | no | The minimum allowed bit length of an uploaded DSA key. Default is `0` (no restriction). `-1` disables DSA keys. |

View File

@ -521,7 +521,7 @@ Once such a feature is available, Personal Access Tokens for each workspace woul
We need to only allow certain users to access workspaces. Currently, we are restricting this to the creator/owner of the workspace. After the workspace is created, it needs to be exposed to the network so that the user can connect to it.
Thus, any traffic incoming to the workspace needs to be authenticated and authorized.
[`gitlab-workspaces-proxy`](https://gitlab.com/gitlab-org/remote-development/gitlab-workspaces-proxy) handles discovery, authentication and authorization of the workspaces running in a Kubernetes cluster.
[`gitlab-workspaces-proxy`](../../../user/workspace/set_up_workspaces_proxy.md) handles discovery, authentication and authorization of the workspaces running in a Kubernetes cluster.
It will proxy all HTTP and WebSocket calls to the correct workspace. It will perform the following tasks:

View File

@ -634,44 +634,6 @@ If a new feature introduces a new kind of data which is not a Git repository, or
As an example, container registry data does not easily fit into the above categories. It is backed by a registry service which owns the data, and GitLab interacts with the registry service's API. So a one off approach is required for Geo support of container registry. Still, we are able to reuse much of the glue code of [the Geo self-service framework](geo/framework.md#repository-replicator-strategy).
## History of communication channel
The communication channel has changed since the first iteration. Here you can
review the historic decisions and why we moved to new implementations.
### Custom code (GitLab 8.6 and earlier)
In GitLab versions before 8.6, custom code was used to handle
notifications from the **primary** site to **secondary** sites via HTTP
requests.
### System hooks (GitLab 8.7 to 9.5)
Later, it was decided to move away from custom code and begin using
system hooks. More people were using them, so
many would benefit from improvements made to this communication layer.
There is a specific **internal** endpoint in our API code (Grape),
that receives all requests from this System Hooks:
`/api/v4/geo/receive_events`.
We switch on and filter each event by its `event_name` field.
### Geo Log Cursor (GitLab 10.0 and up)
In GitLab 10.0 and later, [System Webhooks](#system-hooks-gitlab-87-to-95) are no longer
used and [Geo Log Cursor](#geo-log-cursor-daemon) is used instead. The Log Cursor traverses the
`Geo::EventLog` rows to see if there are changes since the last time
the log was checked and will handle repository updates, deletes,
changes, and renames.
The table is within the replicated database. This has two advantages over the
old method:
- Replication is synchronous and we preserve the order of events.
- Replication of the events happen at the same time as the changes in the
database.
## Self-service framework
If you want to add easy Geo replication of a resource you're working

View File

@ -5,7 +5,7 @@ info: "To determine the technical writer assigned to the Stage/Group associated
source: /doc/user/search/advanced_search.md
---
# Search tips
# Syntax options
<!-- markdownlint-disable -->

View File

@ -5,7 +5,7 @@ info: "To determine the technical writer assigned to the Stage/Group associated
source: /doc/user/search/exact_code_search.md
---
# Search tips
# Syntax options
| Query | Regular expression mode | Exact match mode |
| -------------------- | ----------------------------------------------------- | ------------------------------ |

View File

@ -229,7 +229,7 @@ DETAILS:
results in a loop that consistently fails for all objects stored in object storage.
For information on how to fix this, see
[Troubleshooting - Failed syncs with GitLab-managed object storage replication](../../administration/geo/replication/troubleshooting/synchronization.md#failed-syncs-with-gitlab-managed-object-storage-replication).
[Troubleshooting - Failed syncs with GitLab-managed object storage replication](https://archives.docs.gitlab.com/14.10/ee/administration/geo/replication/troubleshooting#failed-syncs-with-gitlab-managed-object-storage-replication).
## 14.6.0
@ -255,7 +255,7 @@ DETAILS:
results in a loop that consistently fails for all objects stored in object storage.
For information on how to fix this, see
[Troubleshooting - Failed syncs with GitLab-managed object storage replication](../../administration/geo/replication/troubleshooting/synchronization.md#failed-syncs-with-gitlab-managed-object-storage-replication).
[Troubleshooting - Failed syncs with GitLab-managed object storage replication](https://archives.docs.gitlab.com/14.10/ee/administration/geo/replication/troubleshooting#failed-syncs-with-gitlab-managed-object-storage-replication).
## 14.5.0
@ -340,7 +340,7 @@ DETAILS:
results in a loop that consistently fails for all objects stored in object storage.
For information on how to fix this, see
[Troubleshooting - Failed syncs with GitLab-managed object storage replication](../../administration/geo/replication/troubleshooting/synchronization.md#failed-syncs-with-gitlab-managed-object-storage-replication).
[Troubleshooting - Failed syncs with GitLab-managed object storage replication](https://archives.docs.gitlab.com/14.10/ee/administration/geo/replication/troubleshooting#failed-syncs-with-gitlab-managed-object-storage-replication).
## 14.4.4
@ -424,7 +424,7 @@ DETAILS:
results in a loop that consistently fails for all objects stored in object storage.
For information on how to fix this, see
[Troubleshooting - Failed syncs with GitLab-managed object storage replication](../../administration/geo/replication/troubleshooting/synchronization.md#failed-syncs-with-gitlab-managed-object-storage-replication).
[Troubleshooting - Failed syncs with GitLab-managed object storage replication](https://archives.docs.gitlab.com/14.10/ee/administration/geo/replication/troubleshooting#failed-syncs-with-gitlab-managed-object-storage-replication).
- There is [an issue in GitLab 14.4.0 through 14.4.2](#1440) that can affect
Geo and other features that rely on cronjobs. We recommend upgrading to GitLab 14.4.3 or later.
@ -594,7 +594,7 @@ DETAILS:
results in a loop that consistently fails for all objects stored in object storage.
For information on how to fix this, see
[Troubleshooting - Failed syncs with GitLab-managed object storage replication](../../administration/geo/replication/troubleshooting/synchronization.md#failed-syncs-with-gitlab-managed-object-storage-replication).
[Troubleshooting - Failed syncs with GitLab-managed object storage replication](https://archives.docs.gitlab.com/14.10/ee/administration/geo/replication/troubleshooting#failed-syncs-with-gitlab-managed-object-storage-replication).
- We found an [issue](https://gitlab.com/gitlab-org/gitlab/-/issues/336013) where the container registry replication
wasn't fully working if you used multi-arch images. In case of a multi-arch image, only the primary architecture
@ -699,7 +699,7 @@ DETAILS:
results in a loop that consistently fails for all objects stored in object storage.
For information on how to fix this, see
[Troubleshooting - Failed syncs with GitLab-managed object storage replication](../../administration/geo/replication/troubleshooting/synchronization.md#failed-syncs-with-gitlab-managed-object-storage-replication).
[Troubleshooting - Failed syncs with GitLab-managed object storage replication](https://archives.docs.gitlab.com/14.10/ee/administration/geo/replication/troubleshooting#failed-syncs-with-gitlab-managed-object-storage-replication).
- We found an [issue](https://gitlab.com/gitlab-org/gitlab/-/issues/336013) where the container registry replication
wasn't fully working if you used multi-arch images. In case of a multi-arch image, only the primary architecture

View File

@ -395,6 +395,14 @@ The current month displays events counted to date.
The usage quota excludes projects that are not onboarded with product analytics.
## Best practices
- Define key metrics and goals from the start. Decide what questions you want to answer so you know how to use collected data.
- Use event data from all stages of the user journey. This data provides a comprehensive view of the user experience.
- Build dashboards aligned with team needs. Different teams need different data insights.
- Review dashboards regularly. This way, you can verify customer outcomes, identify trends in data, and update visualizations.
- Export raw data periodically. Dashboards provide only an overview of a subset of data, so you should export the data for a deeper analysis.
## Troubleshooting
### No events are collected

View File

@ -35,7 +35,7 @@ which you can customize to meet the specific needs of each project.
- In development environments, add an entry to the `/etc/hosts` file or update your DNS records.
- In production environments, point `*.<workspaces.example.dev>` and `<workspaces.example.dev>`
to the load balancer exposed by the Ingress controller.
- [Install `gitlab-workspaces-proxy`](https://gitlab.com/gitlab-org/remote-development/gitlab-workspaces-proxy#installation-instructions).
- [Set up the GitLab workspaces proxy](set_up_workspaces_proxy.md).
- [Install](../clusters/agent/install/index.md) and [configure](gitlab_agent_configuration.md) the GitLab agent.
- You must have at least the Developer role in the root group.
- In each project you want to use this feature for, create a [devfile](index.md#devfile):
@ -76,7 +76,7 @@ You also have access to the terminal and can install any necessary dependencies.
Prerequisites:
- SSH must be enabled for the workspace.
- You must have a TCP load balancer that points to [`gitlab-workspaces-proxy`](https://gitlab.com/gitlab-org/remote-development/gitlab-workspaces-proxy).
- You must have a TCP load balancer that points to the [GitLab workspaces proxy](set_up_workspaces_proxy.md).
To connect to a workspace with an SSH client:
@ -94,13 +94,13 @@ When you connect to `gitlab-workspaces-proxy` through the TCP load balancer,
- The personal access token
- User access to the workspace
### Set up `gitlab-workspaces-proxy` for SSH connections
### Set up the GitLab workspaces proxy for SSH connections
Prerequisites:
- You must have an SSH host key for client verification.
SSH is now enabled by default in [`gitlab-workspaces-proxy`](https://gitlab.com/gitlab-org/remote-development/gitlab-workspaces-proxy).
SSH is now enabled by default in the [GitLab workspaces proxy](set_up_workspaces_proxy.md).
To set up `gitlab-workspaces-proxy` with the GitLab Helm chart:
1. Run this command:

View File

@ -28,7 +28,7 @@ provided that the agent is properly configured for remote development.
|-------------------------------------------------------------------------------------------|----------|-----------------------------------------|-------------|
| [`enabled`](#enabled) | Yes | `false` | Indicates whether remote development is enabled for the GitLab agent. |
| [`dns_zone`](#dns_zone) | Yes | None | DNS zone where workspaces are available. |
| [`gitlab_workspaces_proxy`](#gitlab_workspaces_proxy) | No | `gitlab-workspaces` | Namespace where [`gitlab-workspaces-proxy`](https://gitlab.com/gitlab-org/remote-development/gitlab-workspaces-proxy) is installed. |
| [`gitlab_workspaces_proxy`](#gitlab_workspaces_proxy) | No | `gitlab-workspaces` | Namespace where [`gitlab-workspaces-proxy`](set_up_workspaces_proxy.md) is installed. |
| [`network_policy`](#network_policy) | No | See [`network_policy`](#network_policy) | Firewall rules for workspaces. |
| [`default_resources_per_workspace_container`](#default_resources_per_workspace_container) | No | `{}` | Default requests and limits for CPU and memory per workspace container. |
| [`max_resources_per_workspace`](#max_resources_per_workspace) | No | `{}` | Maximum requests and limits for CPU and memory per workspace. |
@ -71,7 +71,7 @@ remote_development:
### `gitlab_workspaces_proxy`
Use this setting to define the namespace where
[`gitlab-workspaces-proxy`](https://gitlab.com/gitlab-org/remote-development/gitlab-workspaces-proxy) is installed.
[`gitlab-workspaces-proxy`](set_up_workspaces_proxy.md) is installed.
The default value for `gitlab_workspaces_proxy.namespace` is `gitlab-workspaces`.
**Example configuration:**

View File

@ -68,8 +68,8 @@ module API
optional :disable_feed_token, type: Boolean, desc: 'Disable display of RSS/Atom and Calendar `feed_tokens`'
optional :disabled_oauth_sign_in_sources, type: Array[String], coerce_with: Validations::Types::CommaSeparatedToArray.coerce, desc: 'Disable certain OAuth sign-in sources'
optional :domain_denylist_enabled, type: Boolean, desc: 'Enable domain denylist for sign ups'
optional :domain_denylist, type: Array[String], coerce_with: Validations::Types::CommaSeparatedToArray.coerce, desc: 'Users with e-mail addresses that match these domain(s) will NOT be able to sign-up. Wildcards allowed. Use separate lines for multiple entries. Ex: domain.com, *.domain.com'
optional :domain_allowlist, type: Array[String], coerce_with: Validations::Types::CommaSeparatedToArray.coerce, desc: 'ONLY users with e-mail addresses that match these domain(s) will be able to sign-up. Wildcards allowed. Use separate lines for multiple entries. Ex: domain.com, *.domain.com'
optional :domain_denylist, type: Array[String], coerce_with: Validations::Types::CommaSeparatedToArray.coerce, desc: 'Users with e-mail addresses that match these domain(s) will NOT be able to sign-up. Wildcards allowed. Enter multiple entries on separate lines. Ex: domain.com, *.domain.com'
optional :domain_allowlist, type: Array[String], coerce_with: Validations::Types::CommaSeparatedToArray.coerce, desc: 'ONLY users with e-mail addresses that match these domain(s) will be able to sign-up. Wildcards allowed. Enter multiple entries on separate lines. Ex: domain.com, *.domain.com'
optional :eks_integration_enabled, type: Boolean, desc: 'Enable integration with Amazon EKS'
given eks_integration_enabled: -> (val) { val } do
requires :eks_account_id, type: String, desc: 'Amazon account ID for EKS integration'

View File

@ -6307,7 +6307,7 @@ msgstr ""
msgid "ApplicationSettings|Off"
msgstr ""
msgid "ApplicationSettings|Only users with e-mail addresses that match these domain(s) can sign up. Wildcards allowed. Use separate lines for multiple entries. Example: domain.com, *.domain.com"
msgid "ApplicationSettings|Only users with e-mail addresses that match these domain(s) can sign up. Wildcards allowed. Enter multiple entries on separate lines. Example: domain.com, *.domain.com"
msgstr ""
msgid "ApplicationSettings|Require admin approval for new sign-ups"
@ -6361,7 +6361,7 @@ msgstr ""
msgid "ApplicationSettings|User cap"
msgstr ""
msgid "ApplicationSettings|Users with e-mail addresses that match these domain(s) cannot sign up. Wildcards allowed. Use separate lines for multiple entries. Example: domain.com, *.domain.com"
msgid "ApplicationSettings|Users with e-mail addresses that match these domain(s) cannot sign up. Wildcards allowed. Enter multiple entries on separate lines. Example: domain.com, *.domain.com"
msgstr ""
msgid "ApplicationSettings|Users with e-mail addresses that match these domain(s) cannot sign up. Wildcards allowed. Use separate lines or commas for multiple entries."
@ -17926,6 +17926,9 @@ msgstr ""
msgid "Deployment|Approved"
msgstr ""
msgid "Deployment|Approved as %{role}"
msgstr ""
msgid "Deployment|Branch"
msgstr ""
@ -17992,6 +17995,9 @@ msgstr ""
msgid "Deployment|Rejected"
msgstr ""
msgid "Deployment|Rejected as %{role}"
msgstr ""
msgid "Deployment|Related Tags"
msgstr ""
@ -36415,6 +36421,9 @@ msgstr ""
msgid "PackageRegistry|Debian"
msgstr ""
msgid "PackageRegistry|Default"
msgstr ""
msgid "PackageRegistry|Delete %{count} assets"
msgstr ""
@ -36463,6 +36472,9 @@ msgstr ""
msgid "PackageRegistry|Enforce %{package_type} setting for all subgroups"
msgstr ""
msgid "PackageRegistry|Error"
msgstr ""
msgid "PackageRegistry|Error publishing"
msgstr ""
@ -36520,6 +36532,9 @@ msgstr ""
msgid "PackageRegistry|Helm"
msgstr ""
msgid "PackageRegistry|Hidden"
msgstr ""
msgid "PackageRegistry|If you haven't already done so, you will need to add the below to your %{codeStart}.pypirc%{codeEnd} file."
msgstr ""
@ -36621,6 +36636,9 @@ msgstr ""
msgid "PackageRegistry|Packages deleted successfully"
msgstr ""
msgid "PackageRegistry|Pending deletion"
msgstr ""
msgid "PackageRegistry|Permanently delete"
msgstr ""
@ -36630,6 +36648,9 @@ msgstr ""
msgid "PackageRegistry|Pip Command"
msgstr ""
msgid "PackageRegistry|Processing"
msgstr ""
msgid "PackageRegistry|Project-level"
msgstr ""
@ -43883,6 +43904,9 @@ msgstr ""
msgid "Runners|%{linkStart}Create a new runner%{linkEnd} to get started."
msgstr ""
msgid "Runners|%{linkStart}View available zones%{linkEnd}. Must have the format %{format}. Example: %{example}."
msgstr ""
msgid "Runners|%{link_start}These runners%{link_end} are available to all groups and projects."
msgstr ""
@ -44177,7 +44201,7 @@ msgstr ""
msgid "Runners|Fleet dashboard"
msgstr ""
msgid "Runners|For most CI/CD jobs, use a %{linkStart}N2D standard machine type%{linkEnd}."
msgid "Runners|For most CI/CD jobs, use a %{linkStart}N2D standard machine type%{linkEnd}. Must have the format %{format}. Example: %{example}."
msgstr ""
msgid "Runners|Get started with runners"
@ -44279,7 +44303,7 @@ msgstr ""
msgid "Runners|Machine type is required."
msgstr ""
msgid "Runners|Machine type must have the format `family-series-size`. Example: n2d-standard-2"
msgid "Runners|Machine type must have the right format."
msgstr ""
msgid "Runners|Machine type with preset amounts of virtual machines processors (vCPUs) and memory"
@ -44315,6 +44339,9 @@ msgstr ""
msgid "Runners|Most used instance runners"
msgstr ""
msgid "Runners|Must have the format %{format}. Example: %{example}."
msgstr ""
msgid "Runners|Never contacted"
msgstr ""
@ -44415,7 +44442,7 @@ msgstr ""
msgid "Runners|Project ID is required."
msgstr ""
msgid "Runners|Project ID must be 6 to 30 lowercase letters, digits, or hyphens. It needs to start with a lowercase letter and end with a letter or number."
msgid "Runners|Project ID must have the right format."
msgstr ""
msgid "Runners|Project runners"
@ -44442,7 +44469,7 @@ msgstr ""
msgid "Runners|Region is required."
msgstr ""
msgid "Runners|Region must have the correct format. Example: us-central1"
msgid "Runners|Region must have the right format."
msgstr ""
msgid "Runners|Register"
@ -44758,7 +44785,7 @@ msgstr ""
msgid "Runners|To follow the setup instructions, %{gcloudLinkStart}install the Google Cloud CLI%{gcloudLinkEnd} and %{terraformLinkStart}install Terraform%{terraformLinkEnd}."
msgstr ""
msgid "Runners|To improve security, use a dedicated project for CI/CD, separate from resources and identity management projects. %{linkStart}Wheres my project ID in Google Cloud?%{linkEnd}"
msgid "Runners|To improve security, use a project just for CI/CD. %{linkStart}Where's my project ID?%{linkEnd} Can be 6 to 30 lowercase letters, digits, or hyphens. Must start with a letter and end with a letter or number. Example: %{example}."
msgstr ""
msgid "Runners|To install Runner in Kubernetes follow the instructions described in the GitLab documentation."
@ -44845,9 +44872,6 @@ msgstr ""
msgid "Runners|Version starts with"
msgstr ""
msgid "Runners|View available zones"
msgstr ""
msgid "Runners|View installation instructions"
msgstr ""
@ -44902,7 +44926,7 @@ msgstr ""
msgid "Runners|Zone is required."
msgstr ""
msgid "Runners|Zone must have the correct format. Example: us-central1-a"
msgid "Runners|Zone must have the right format."
msgstr ""
msgid "Runners|active"

View File

@ -4,6 +4,8 @@ require 'json'
require 'net/http'
require 'uri'
# rubocop:disable Layout/LineLength -- we need to construct the URLs
# Validate CI_PROJECT_DIR and CI_API_V4_URL against the
# allowlist to protect against pipeline attacks
ALLOWED_PROJECT_DIRS = %w[/builds/gitlab-org/gitlab].freeze
@ -19,6 +21,25 @@ unless ALLOWED_API_URLS.include?(ENV['CI_API_V4_URL'])
exit 1
end
def ping_appsec(message)
uri = URI.parse("#{ENV['CI_API_V4_URL']}/projects/#{ENV['CI_MERGE_REQUEST_PROJECT_ID']}/merge_requests/#{ENV['CI_MERGE_REQUEST_IID']}/discussions?body=#{message}")
http = Net::HTTP.new(uri.host, uri.port)
http.use_ssl = (uri.scheme == 'https')
request = Net::HTTP::Post.new(uri.request_uri)
request['PRIVATE-TOKEN'] = ENV['CUSTOM_SAST_RULES_BOT_PAT']
response = http.request(request)
return if response.instance_of?(Net::HTTPCreated)
puts "Failed to ping AppSec #{response.code}: #{response.body}"
# if we cannot even ping appsec, fail the pipeline
# change this to `exit 1` when specs are ready
exit 0
end
# Load SAST report
artifact_relative_path = "#{ENV['CI_PROJECT_DIR']}/gl-sast-report.json"
raw_data = File.read(artifact_relative_path)
@ -26,6 +47,11 @@ data = JSON.parse(raw_data)
path_line_message_dict = {}
if data["results"].empty?
puts "No findings."
exit 0
end
# Extract findings from SAST report
results = data["results"]
results.each do |result|
@ -36,9 +62,7 @@ results.each do |result|
end
# Retrieve existing comments on the merge request
# rubocop:disable Layout/LineLength -- we need to construct the URL
notes_url = URI.parse("#{ENV['CI_API_V4_URL']}/projects/#{ENV['CI_MERGE_REQUEST_PROJECT_ID']}/merge_requests/#{ENV['CI_MERGE_REQUEST_IID']}/notes")
# rubocop:enable Layout/LineLength
request = Net::HTTP::Get.new(notes_url)
request["PRIVATE-TOKEN"] = ENV['CUSTOM_SAST_RULES_BOT_PAT']
@ -58,8 +82,10 @@ end
# Identify and remove duplicate findings
existing_comments.each do |comment|
next unless comment['author']['id'] == ENV['BOT_USER_ID']
next unless comment['author']['id'].to_s == ENV['BOT_USER_ID'].to_s
next unless comment['type'] == 'DiffNote'
puts "existing comment from BOT: #{comment}"
existing_path = comment['position']['new_path']
existing_line = comment['position']['new_line']
existing_message = comment['body']
@ -67,22 +93,47 @@ existing_comments.each do |comment|
message: existing_message }
end
# Fetch base_commit_sha, head_commit_sha and
# start_commit_sha required for creating inline comment
versions_url = URI.parse("#{ENV['CI_API_V4_URL']}/projects/#{ENV['CI_MERGE_REQUEST_PROJECT_ID']}/merge_requests/#{ENV['CI_MERGE_REQUEST_IID']}/versions")
request = Net::HTTP::Get.new(versions_url)
request["PRIVATE-TOKEN"] = ENV['CUSTOM_SAST_RULES_BOT_PAT']
response = Net::HTTP.start(versions_url.hostname, versions_url.port, use_ssl: versions_url.scheme == 'https') do |http|
http.request(request)
end
if response.instance_of?(Net::HTTPOK)
commits = JSON.parse(response.body)[0]
else
puts "Failed to fetch versions with status code #{response.code}: #{response.body}"
ping_appsec "Failed to fetch versions: #{response.body}. /cc @gitlab-com/gl-security/appsec for visibility."
exit 0
end
base_sha = commits['base_commit_sha']
head_sha = commits['head_commit_sha']
start_sha = commits['start_commit_sha']
# Create new comments for remaining findings
path_line_message_dict.each do |path, info|
new_path = old_path = path
new_line = old_line = info[:line]
new_line = info[:line]
message = info[:message]
# rubocop:disable Layout/LineLength -- we need to construct the URL
uri = URI.parse("#{ENV['CI_API_V4_URL']}/projects/#{ENV['CI_MERGE_REQUEST_PROJECT_ID']}/merge_requests/#{ENV['CI_MERGE_REQUEST_IID']}/discussions")
# rubocop:enable Layout/LineLength
request = Net::HTTP::Post.new(uri)
request["PRIVATE-TOKEN"] = ENV['CUSTOM_SAST_RULES_BOT_PAT']
request.set_form_data(
"position[position_type]" => "text",
"position[base_sha]" => base_sha,
"position[head_sha]" => head_sha,
"position[start_sha]" => start_sha,
"position[new_path]" => new_path,
"position[old_path]" => old_path,
"position[new_line]" => new_line,
"position[old_line]" => old_line,
"body" => message
)
@ -98,24 +149,4 @@ path_line_message_dict.each do |path, info|
exit 0
end
def ping_appsec(message)
# rubocop:disable Layout/LineLength -- we need to construct the URL
uri = URI.parse("#{ENV['CI_API_V4_URL']}/projects/#{ENV['CI_MERGE_REQUEST_PROJECT_ID']}/merge_requests/#{ENV['CI_MERGE_REQUEST_IID']}/discussions?body=#{message}")
# rubocop:enable Layout/LineLength
http = Net::HTTP.new(uri.host, uri.port)
http.use_ssl = (uri.scheme == 'https')
request = Net::HTTP::Post.new(uri.request_uri)
request['PRIVATE-TOKEN'] = ENV['CUSTOM_SAST_RULES_BOT_PAT']
response = http.request(request)
return if response.instance_of?(Net::HTTPCreated)
puts "Failed to ping AppSec #{response.code}: #{response.body}"
# if we cannot even ping appsec, fail the pipeline
# change this to `exit 1` when specs are ready
exit 0
end
# rubocop:enable Layout/LineLength

View File

@ -1,71 +0,0 @@
import htmlLinkedTabs from 'test_fixtures_static/linked_tabs.html';
import { setHTMLFixture, resetHTMLFixture } from 'helpers/fixtures';
import LinkedTabs from '~/lib/utils/bootstrap_linked_tabs';
describe('Linked Tabs', () => {
beforeEach(() => {
setHTMLFixture(htmlLinkedTabs);
});
afterEach(() => {
resetHTMLFixture();
});
describe('when is initialized', () => {
beforeEach(() => {
jest.spyOn(window.history, 'replaceState').mockImplementation(() => {});
});
it('should activate the tab correspondent to the given action', () => {
// eslint-disable-next-line no-new
new LinkedTabs({
action: 'tab1',
defaultAction: 'tab1',
parentEl: '.linked-tabs',
});
expect(document.querySelector('#tab1').classList).toContain('active');
});
it('should active the default tab action when the action is show', () => {
// eslint-disable-next-line no-new
new LinkedTabs({
action: 'show',
defaultAction: 'tab1',
parentEl: '.linked-tabs',
});
expect(document.querySelector('#tab1').classList).toContain('active');
});
});
describe('on click', () => {
it('should change the url according to the clicked tab', () => {
const historySpy = jest.spyOn(window.history, 'replaceState').mockImplementation(() => {});
const linkedTabs = new LinkedTabs({
action: 'show',
defaultAction: 'tab1',
parentEl: '.linked-tabs',
});
const secondTab = document.querySelector('.linked-tabs li:nth-child(2) a');
const newState =
secondTab.getAttribute('href') +
linkedTabs.currentLocation.search +
linkedTabs.currentLocation.hash;
secondTab.click();
if (historySpy) {
expect(historySpy).toHaveBeenCalledWith(
{
url: newState,
},
document.title,
newState,
);
}
});
});
});

View File

@ -251,13 +251,13 @@ describe('GoogleCloudRegistrationInstructions', () => {
${'correct'} | ${'correct-project-name'} | ${undefined} | ${''}
${'correct'} | ${'correct-project-name-1'} | ${undefined} | ${''}
${'correct'} | ${'project'} | ${undefined} | ${''}
${'invalid (too short)'} | ${'short'} | ${'true'} | ${'Project ID must be'}
${'invalid (starts with a number)'} | ${'1number'} | ${'true'} | ${'Project ID must be'}
${'invalid (starts with uppercase)'} | ${'Project'} | ${'true'} | ${'Project ID must be'}
${'invalid (contains uppercase)'} | ${'pRoject'} | ${'true'} | ${'Project ID must be'}
${'invalid (contains symbol)'} | ${'pro!ect'} | ${'true'} | ${'Project ID must be'}
${'invalid (too long)'} | ${'a-project-name-that-is-too-long'} | ${'true'} | ${'Project ID must be'}
${'invalid (ends with hyphen)'} | ${'a-project-'} | ${'true'} | ${'Project ID must be'}
${'invalid (too short)'} | ${'short'} | ${'true'} | ${'Project ID must have'}
${'invalid (starts with a number)'} | ${'1number'} | ${'true'} | ${'Project ID must have'}
${'invalid (starts with uppercase)'} | ${'Project'} | ${'true'} | ${'Project ID must have'}
${'invalid (contains uppercase)'} | ${'pRoject'} | ${'true'} | ${'Project ID must have'}
${'invalid (contains symbol)'} | ${'pro!ect'} | ${'true'} | ${'Project ID must have'}
${'invalid (too long)'} | ${'a-project-name-that-is-too-long'} | ${'true'} | ${'Project ID must have'}
${'invalid (ends with hyphen)'} | ${'a-project-'} | ${'true'} | ${'Project ID must have'}
${'invalid (missing)'} | ${''} | ${'true'} | ${'Project ID is required'}
`('"$input" as $case', async ({ input, ariaInvalid, feedback }) => {
await fillInTextField(findProjectIdInput(), input);

View File

@ -1,20 +0,0 @@
<ul class="nav nav-tabs linked-tabs">
<li class="nav-item">
<a class="nav-link" data-action="tab1" data-target="div#tab1" data-toggle="tab" href="foo/bar/1">
Tab 1
</a>
</li>
<li class="nav-item">
<a class="nav-link" data-action="tab2" data-target="div#tab2" data-toggle="tab" href="foo/bar/1/context">
Tab 2
</a>
</li>
</ul>
<div class="tab-content">
<div class="tab-pane" id="tab1">
Tab 1 Content
</div>
<div class="tab-pane" id="tab2">
Tab 2 Content
</div>
</div>

View File

@ -1,51 +0,0 @@
import MockAdapter from 'axios-mock-adapter';
import axios from '~/lib/utils/axios_utils';
import { setFaviconOverlay, resetFavicon } from '~/lib/utils/favicon';
import { setCiStatusFavicon } from '~/lib/utils/favicon_ci';
import { HTTP_STATUS_INTERNAL_SERVER_ERROR, HTTP_STATUS_OK } from '~/lib/utils/http_status';
jest.mock('~/lib/utils/favicon');
const TEST_URL = '/test/pipelinable/1';
const TEST_FAVICON = '/favicon.test.ico';
describe('~/lib/utils/favicon_ci', () => {
let mock;
beforeEach(() => {
mock = new MockAdapter(axios);
});
afterEach(() => {
mock.restore();
mock = null;
});
describe('setCiStatusFavicon', () => {
it.each`
response | setFaviconOverlayCalls | resetFaviconCalls
${{}} | ${[]} | ${[[]]}
${{ favicon: TEST_FAVICON }} | ${[[TEST_FAVICON]]} | ${[]}
`(
'with response=$response',
async ({ response, setFaviconOverlayCalls, resetFaviconCalls }) => {
mock.onGet(TEST_URL).replyOnce(HTTP_STATUS_OK, response);
expect(setFaviconOverlay).not.toHaveBeenCalled();
expect(resetFavicon).not.toHaveBeenCalled();
await setCiStatusFavicon(TEST_URL);
expect(setFaviconOverlay.mock.calls).toEqual(setFaviconOverlayCalls);
expect(resetFavicon.mock.calls).toEqual(resetFaviconCalls);
},
);
it('with error', async () => {
mock.onGet(TEST_URL).replyOnce(HTTP_STATUS_INTERNAL_SERVER_ERROR);
await expect(setCiStatusFavicon(TEST_URL)).rejects.toEqual(expect.any(Error));
expect(resetFavicon).toHaveBeenCalled();
});
});
});

View File

@ -1,4 +1,10 @@
import { periodToDate } from '~/observability/utils';
import { periodToDate, dateFilterObjToQuery, queryToDateFilterObj } from '~/observability/utils';
import {
CUSTOM_DATE_RANGE_OPTION,
DATE_RANGE_QUERY_KEY,
DATE_RANGE_START_QUERY_KEY,
DATE_RANGE_END_QUERY_KEY,
} from '~/observability/constants';
describe('periodToDate', () => {
const realDateNow = Date.now;
@ -33,3 +39,72 @@ describe('periodToDate', () => {
expect(periodToDate('2w')).toEqual({});
});
});
describe('queryToDateFilterObj', () => {
it('returns default date range if no query params provided', () => {
expect(queryToDateFilterObj({})).toEqual({ value: '1h' });
});
it('returns query params with provided value', () => {
expect(
queryToDateFilterObj({
[DATE_RANGE_QUERY_KEY]: '7d',
}),
).toEqual({ value: '7d' });
});
it('returns custom range if custom params provided', () => {
const query = {
[DATE_RANGE_QUERY_KEY]: CUSTOM_DATE_RANGE_OPTION,
[DATE_RANGE_START_QUERY_KEY]: '2020-01-01T00:00:00.000Z',
[DATE_RANGE_END_QUERY_KEY]: '2020-01-02T00:00:00.000Z',
};
expect(queryToDateFilterObj(query)).toEqual({
value: CUSTOM_DATE_RANGE_OPTION,
startDate: new Date('2020-01-01T00:00:00.000Z'),
endDate: new Date('2020-01-02T00:00:00.000Z'),
});
});
it('returns default range if custom params invalid', () => {
const query = {
[DATE_RANGE_QUERY_KEY]: CUSTOM_DATE_RANGE_OPTION,
[DATE_RANGE_START_QUERY_KEY]: 'invalid',
[DATE_RANGE_END_QUERY_KEY]: 'invalid',
};
expect(queryToDateFilterObj(query)).toEqual({ value: '1h' });
});
});
describe('dateFilterObjToQuery', () => {
it('converts a default date filter', () => {
expect(
dateFilterObjToQuery({
value: '7d',
}),
).toEqual({
[DATE_RANGE_QUERY_KEY]: '7d',
});
});
it('converts custom filter', () => {
const filter = {
value: CUSTOM_DATE_RANGE_OPTION,
startDate: new Date('2020-01-01T00:00:00.000Z'),
endDate: new Date('2020-01-02T00:00:00.000Z'),
};
expect(dateFilterObjToQuery(filter)).toEqual({
[DATE_RANGE_QUERY_KEY]: CUSTOM_DATE_RANGE_OPTION,
[DATE_RANGE_START_QUERY_KEY]: '2020-01-01T00:00:00.000Z',
[DATE_RANGE_END_QUERY_KEY]: '2020-01-02T00:00:00.000Z',
});
});
it('returns empty object if filter is empty', () => {
expect(dateFilterObjToQuery({})).toEqual({});
});
it('returns empty object if filter undefined', () => {
expect(dateFilterObjToQuery()).toEqual({});
});
});

View File

@ -2,8 +2,7 @@ import { nextTick } from 'vue';
import { GlFilteredSearchToken } from '@gitlab/ui';
import { shallowMountExtended } from 'helpers/vue_test_utils_helper';
import { sortableFields } from '~/packages_and_registries/package_registry/utils';
import component from '~/packages_and_registries/package_registry/components/list/package_search.vue';
import PackageTypeToken from '~/packages_and_registries/package_registry/components/list/tokens/package_type_token.vue';
import PackageSearch from '~/packages_and_registries/package_registry/components/list/package_search.vue';
import LocalStorageSync from '~/vue_shared/components/local_storage_sync.vue';
import PersistedSearch from '~/packages_and_registries/shared/components/persisted_search.vue';
import { LIST_KEY_CREATED_AT } from '~/packages_and_registries/package_registry/constants';
@ -12,6 +11,8 @@ import {
OPERATORS_IS,
TOKEN_TYPE_TYPE,
TOKEN_TYPE_VERSION,
TOKEN_TITLE_STATUS,
TOKEN_TYPE_STATUS,
} from '~/vue_shared/components/filtered_search_bar/constants';
describe('Package Search', () => {
@ -26,7 +27,7 @@ describe('Package Search', () => {
const findLocalStorageSync = () => wrapper.findComponent(LocalStorageSync);
const mountComponent = (isGroupPage = false) => {
wrapper = shallowMountExtended(component, {
wrapper = shallowMountExtended(PackageSearch, {
provide() {
return {
isGroupPage,
@ -88,15 +89,25 @@ describe('Package Search', () => {
expect(findPersistedSearch().props()).toMatchObject({
tokens: expect.arrayContaining([
expect.objectContaining({
token: PackageTypeToken,
token: GlFilteredSearchToken,
type: TOKEN_TYPE_TYPE,
icon: 'package',
unique: true,
operators: OPERATORS_IS,
}),
expect.objectContaining({
token: GlFilteredSearchToken,
type: TOKEN_TYPE_VERSION,
icon: 'doc-versions',
unique: true,
operators: OPERATORS_IS,
}),
expect.objectContaining({
type: TOKEN_TYPE_STATUS,
icon: 'status',
title: TOKEN_TITLE_STATUS,
unique: true,
token: GlFilteredSearchToken,
operators: OPERATORS_IS,
}),
]),
@ -123,11 +134,7 @@ describe('Package Search', () => {
expect(wrapper.emitted('update')[0]).toEqual([
{
filters: {
packageName: '',
packageType: undefined,
packageVersion: '',
},
filters: {},
sort: payload.sort,
sorting: payload.sorting,
},
@ -140,6 +147,7 @@ describe('Package Search', () => {
filters: [
{ type: 'type', value: { data: 'Generic', operator: '=' }, id: 'token-3' },
{ type: 'version', value: { data: '1.0.1', operator: '=' }, id: 'token-6' },
{ type: 'status', value: { data: 'HIDDEN', operator: '=' }, id: 'token-7' },
{ id: 'token-4', type: 'filtered-search-term', value: { data: 'gl' } },
{ id: 'token-5', type: 'filtered-search-term', value: { data: '' } },
],
@ -160,6 +168,7 @@ describe('Package Search', () => {
packageName: 'gl',
packageType: 'GENERIC',
packageVersion: '1.0.1',
packageStatus: 'HIDDEN',
},
sort: payload.sort,
sorting: payload.sorting,

View File

@ -1,46 +0,0 @@
import { GlFilteredSearchToken, GlFilteredSearchSuggestion } from '@gitlab/ui';
import { shallowMount } from '@vue/test-utils';
import component from '~/packages_and_registries/package_registry/components/list/tokens/package_type_token.vue';
import { PACKAGE_TYPES } from '~/packages_and_registries/package_registry/constants';
// Spec for the package-type filtered-search token: verifies that attrs and
// listeners pass straight through to GlFilteredSearchToken, and that one
// suggestion is rendered for each entry in PACKAGE_TYPES.
describe('packages_filter', () => {
  let wrapper;

  // Finder helpers for the stubbed GitLab UI components.
  const findFilteredSearchToken = () => wrapper.findComponent(GlFilteredSearchToken);
  const findFilteredSearchSuggestions = () => wrapper.findAllComponents(GlFilteredSearchSuggestion);

  // Shallow-mounts the token. `attrs`/`listeners` simulate whatever the
  // surrounding filtered-search bar would forward to the token component.
  const mountComponent = ({ attrs, listeners } = {}) => {
    wrapper = shallowMount(component, {
      attrs: {
        cursorPosition: 'start',
        ...attrs,
      },
      listeners,
    });
  };

  it('binds all of his attrs to filtered search token', () => {
    mountComponent({ attrs: { foo: 'bar' } });
    // Arbitrary attribute should be reflected on the inner token.
    expect(findFilteredSearchToken().attributes('foo')).toBe('bar');
  });

  it('binds all of his events to filtered search token', () => {
    const clickListener = jest.fn();
    mountComponent({ listeners: { click: clickListener } });
    // Events emitted by the inner token must reach listeners passed from outside.
    findFilteredSearchToken().vm.$emit('click');
    expect(clickListener).toHaveBeenCalled();
  });

  // One suggestion per package type, in declaration order.
  it.each(PACKAGE_TYPES.map((p, index) => [p, index]))(
    'displays a suggestion for %p',
    (packageType, index) => {
      mountComponent();
      const item = findFilteredSearchSuggestions().at(index);
      expect(item.text()).toBe(packageType);
      expect(item.props('value')).toBe(packageType);
    },
  );
});

View File

@ -44,7 +44,12 @@ describe('PackagesListApp', () => {
const searchPayload = {
sort: 'VERSION_DESC',
filters: { packageName: 'foo', packageType: 'CONAN', packageVersion: '1.0.1' },
filters: {
packageName: 'foo',
packageType: 'CONAN',
packageVersion: '1.0.1',
packageStatus: 'DEFAULT',
},
};
const findPackageTitle = () => wrapper.findComponent(PackageTitle);

View File

@ -41,21 +41,22 @@ describe('Packages And Registries shared utils', () => {
});
describe('extractFilterAndSorting', () => {
it.each`
search | type | version | sort | orderBy | result
${['one']} | ${'myType'} | ${'1.0.1'} | ${'asc'} | ${'foo'} | ${{ sorting: { sort: 'asc', orderBy: 'foo' }, filters: [{ type: 'type', value: { data: 'myType' } }, { type: 'version', value: { data: '1.0.1' } }, { type: FILTERED_SEARCH_TERM, value: { data: 'one' } }] }}
${['one']} | ${null} | ${null} | ${'asc'} | ${'foo'} | ${{ sorting: { sort: 'asc', orderBy: 'foo' }, filters: [{ type: FILTERED_SEARCH_TERM, value: { data: 'one' } }] }}
${[]} | ${null} | ${null} | ${'asc'} | ${'foo'} | ${{ sorting: { sort: 'asc', orderBy: 'foo' }, filters: [] }}
${null} | ${null} | ${null} | ${'asc'} | ${'foo'} | ${{ sorting: { sort: 'asc', orderBy: 'foo' }, filters: [] }}
${null} | ${null} | ${null} | ${null} | ${'foo'} | ${{ sorting: { orderBy: 'foo' }, filters: [] }}
${null} | ${null} | ${null} | ${null} | ${null} | ${{ sorting: {}, filters: [] }}
search | type | version | status | sort | orderBy | result
${['one']} | ${'myType'} | ${'1.0.1'} | ${'DEFAULT'} | ${'asc'} | ${'foo'} | ${{ sorting: { sort: 'asc', orderBy: 'foo' }, filters: [{ type: 'type', value: { data: 'myType' } }, { type: 'version', value: { data: '1.0.1' } }, { type: 'status', value: { data: 'DEFAULT' } }, { type: FILTERED_SEARCH_TERM, value: { data: 'one' } }] }}
${['one']} | ${undefined} | ${undefined} | ${undefined} | ${'asc'} | ${'foo'} | ${{ sorting: { sort: 'asc', orderBy: 'foo' }, filters: [{ type: FILTERED_SEARCH_TERM, value: { data: 'one' } }] }}
${[]} | ${undefined} | ${undefined} | ${undefined} | ${'asc'} | ${'foo'} | ${{ sorting: { sort: 'asc', orderBy: 'foo' }, filters: [] }}
${undefined} | ${undefined} | ${undefined} | ${undefined} | ${'asc'} | ${'foo'} | ${{ sorting: { sort: 'asc', orderBy: 'foo' }, filters: [] }}
${undefined} | ${undefined} | ${undefined} | ${undefined} | ${undefined} | ${'foo'} | ${{ sorting: { orderBy: 'foo' }, filters: [] }}
${undefined} | ${undefined} | ${undefined} | ${undefined} | ${undefined} | ${undefined} | ${{ sorting: {}, filters: [] }}
`(
'returns sorting and filters objects in the correct form',
({ search, type, version, sort, orderBy, result }) => {
({ search, type, version, sort, status, orderBy, result }) => {
const queryObject = {
search,
type,
version,
sort,
status,
orderBy,
};
expect(extractFilterAndSorting(queryObject)).toStrictEqual(result);

View File

@ -61,7 +61,7 @@ RSpec.describe Mutations::Issues::Update, feature_category: :team_planning do
context 'when setting milestone to nil' do
let(:expected_attributes) { { milestone_id: nil } }
it 'changes the milestone corrrectly' do
it 'changes the milestone correctly' do
issue.update_column(:milestone_id, milestone.id)
expect { subject }.to change { issue.reload.milestone }.from(milestone).to(nil)
@ -202,6 +202,20 @@ RSpec.describe Mutations::Issues::Update, feature_category: :team_planning do
it 'updates the time estimate' do
expect { subject }.to change { issue.reload.time_estimate }.from(3600).to(5400)
end
context 'when user is a guest' do
let_it_be(:guest) { create(:user) }
let(:user) { guest }
before do
issue.update!(author: guest)
project.add_guest(guest)
end
it 'does not change time_estimate' do
expect { subject }.not_to change { issue.reload.time_estimate }
end
end
end
end
end

View File

@ -2,7 +2,7 @@
require 'spec_helper'
RSpec.describe WorkItems::Callbacks::TimeTracking, feature_category: :team_planning do
RSpec.describe Issuable::Callbacks::TimeTracking, feature_category: :team_planning do
let_it_be(:group) { create(:group) }
let_it_be(:project) { create(:project, :private, group: group) }
let_it_be(:reporter) do
@ -38,8 +38,8 @@ RSpec.describe WorkItems::Callbacks::TimeTracking, feature_category: :team_plann
let(:callback) { described_class.new(issuable: issuable, current_user: current_user, params: params) }
describe '#after_initialize' do
shared_examples 'raises a WidgetError' do
it { expect { subject }.to raise_error(::WorkItems::Widgets::BaseService::WidgetError, message) }
shared_examples 'raises an Error' do
it { expect { subject }.to raise_error(::Issuable::Callbacks::Base::Error, message) }
end
shared_examples 'sets work item time tracking data' do
@ -212,7 +212,7 @@ RSpec.describe WorkItems::Callbacks::TimeTracking, feature_category: :team_plann
context 'when time_estimate is invalid' do
let(:params) { { time_estimate: "12abc" } }
it_behaves_like 'raises a WidgetError' do
it_behaves_like 'raises an Error' do
let(:message) { 'Time estimate must be formatted correctly. For example: 1h 30m.' }
end
end
@ -220,7 +220,7 @@ RSpec.describe WorkItems::Callbacks::TimeTracking, feature_category: :team_plann
context 'when time_spent is invalid' do
let(:params) { { timelog: { time_spent: "2abc" } } }
it_behaves_like 'raises a WidgetError' do
it_behaves_like 'raises an Error' do
let(:message) { 'Time spent must be formatted correctly. For example: 1h 30m.' }
end
end

View File

@ -153,10 +153,11 @@ RSpec.describe Notes::QuickActionsService, feature_category: :team_planning do
shared_examples 'does not update time_estimate and displays the correct error message' do
it 'shows validation error message' do
content = execute(note)
content, update_params = service.execute(note)
service_response = service.apply_updates(update_params, note)
expect(content).to be_empty
expect(note.noteable.errors[:time_estimate]).to include('must have a valid format and be greater than or equal to zero.')
expect(service_response.message).to include('Time estimate must have a valid format and be greater than or equal to zero.')
expect(note.noteable.reload.time_estimate).to eq(600)
end
end

View File

@ -1,3 +1,4 @@
// Package transport provides a roundtripper for HTTP clients with Workhorse integration.
package transport
import (
@ -11,12 +12,12 @@ import (
"gitlab.com/gitlab-org/gitlab/workhorse/internal/version"
)
// Creates a new default transport that has Workhorse's User-Agent header set.
// NewDefaultTransport returns a RoundTripper that wraps http.DefaultTransport
// and stamps Workhorse's User-Agent header on every request.
func NewDefaultTransport() http.RoundTripper {
	transport := &DefaultTransport{Next: http.DefaultTransport}
	return transport
}
// Defines a http.Transport with values that are more restrictive than for
// NewRestrictedTransport defines a http.Transport with values that are more restrictive than for
// http.DefaultTransport, they define shorter TLS Handshake, and more
// aggressive connection closing to prevent the connection hanging and reduce
// FD usage
@ -24,24 +25,29 @@ func NewRestrictedTransport(options ...Option) http.RoundTripper {
return &DefaultTransport{Next: newRestrictedTransport(options...)}
}
// DefaultTransport is a roundtripper that sets the User-Agent header in requests
type DefaultTransport struct {
	// Next is the RoundTripper the request is forwarded to after the
	// User-Agent header has been applied.
	Next http.RoundTripper
}
// RoundTrip implements http.RoundTripper. It stamps Workhorse's User-Agent
// header on the outgoing request and delegates to the next RoundTripper.
//
// The request is cloned first: the http.RoundTripper contract states that
// RoundTrip should not modify the caller's request, and the original code
// mutated the shared header map in place.
func (t DefaultTransport) RoundTrip(req *http.Request) (*http.Response, error) {
	// Clone (with the request's own context) keeps the caller's request,
	// including its header map, untouched.
	outReq := req.Clone(req.Context())
	outReq.Header.Set("User-Agent", version.GetUserAgent())

	return t.Next.RoundTrip(outReq)
}
// Option is a functional option that mutates the http.Transport built by
// NewRestrictedTransport during construction.
type Option func(*http.Transport)
// WithDisabledCompression returns an Option that turns off transparent
// response decompression on the transport.
func WithDisabledCompression() Option {
	disable := func(transport *http.Transport) {
		transport.DisableCompression = true
	}
	return disable
}
// WithDialTimeout sets the dial timeout for the transport.
func WithDialTimeout(timeout time.Duration) Option {
return func(t *http.Transport) {
t.DialContext = (&net.Dialer{
@ -50,6 +56,7 @@ func WithDialTimeout(timeout time.Duration) Option {
}
}
// WithResponseHeaderTimeout sets the response header timeout for the transport.
func WithResponseHeaderTimeout(timeout time.Duration) Option {
return func(t *http.Transport) {
t.ResponseHeaderTimeout = timeout