Add latest changes from gitlab-org/gitlab@master
This commit is contained in:
parent
669a1c319d
commit
1f8c5a116b
|
|
@ -1 +1 @@
|
|||
98d26efad3a10c52e1ad20429cffb4e6605510d5
|
||||
f4946fa24a5572704afdfc0050db44f099911c69
|
||||
|
|
|
|||
|
|
@ -37,8 +37,18 @@ export default Extension.create({
|
|||
|
||||
const { state, view } = editor;
|
||||
const { tr, selection } = state;
|
||||
const { firstChild } = document.content;
|
||||
const content =
|
||||
document.content.childCount === 1 && firstChild.type.name === 'paragraph'
|
||||
? firstChild.content
|
||||
: document.content;
|
||||
|
||||
if (selection.to - selection.from > 0) {
|
||||
tr.replaceWith(selection.from, selection.to, content);
|
||||
} else {
|
||||
tr.insert(selection.from, content);
|
||||
}
|
||||
|
||||
tr.replaceWith(selection.from - 1, selection.to, document.content);
|
||||
view.dispatch(tr);
|
||||
})
|
||||
.catch(() => {
|
||||
|
|
|
|||
|
|
@ -77,7 +77,7 @@ export default {
|
|||
class="issuable-milestone gl-mr-3"
|
||||
data-testid="issuable-milestone"
|
||||
>
|
||||
<gl-link v-gl-tooltip :href="milestoneLink" :title="milestoneDate">
|
||||
<gl-link v-gl-tooltip :href="milestoneLink" :title="milestoneDate" class="gl-font-sm">
|
||||
<gl-icon name="clock" />
|
||||
{{ issue.milestone.title }}
|
||||
</gl-link>
|
||||
|
|
|
|||
|
|
@ -13,7 +13,7 @@ export default {
|
|||
FailedJobsTable,
|
||||
},
|
||||
inject: {
|
||||
fullPath: {
|
||||
projectPath: {
|
||||
default: '',
|
||||
},
|
||||
pipelineIid: {
|
||||
|
|
@ -31,7 +31,7 @@ export default {
|
|||
query: GetFailedJobsQuery,
|
||||
variables() {
|
||||
return {
|
||||
fullPath: this.fullPath,
|
||||
fullPath: this.projectPath,
|
||||
pipelineIid: this.pipelineIid,
|
||||
};
|
||||
},
|
||||
|
|
|
|||
|
|
@ -17,7 +17,7 @@ export default {
|
|||
JobsTable,
|
||||
},
|
||||
inject: {
|
||||
fullPath: {
|
||||
projectPath: {
|
||||
default: '',
|
||||
},
|
||||
pipelineIid: {
|
||||
|
|
@ -56,7 +56,7 @@ export default {
|
|||
computed: {
|
||||
queryVariables() {
|
||||
return {
|
||||
fullPath: this.fullPath,
|
||||
fullPath: this.projectPath,
|
||||
iid: this.pipelineIid,
|
||||
};
|
||||
},
|
||||
|
|
|
|||
|
|
@ -29,7 +29,7 @@ export const createAppOptions = (selector, apolloProvider, router) => {
|
|||
exposeLicenseScanningData,
|
||||
failedJobsCount,
|
||||
failedJobsSummary,
|
||||
fullPath,
|
||||
projectPath,
|
||||
graphqlResourceEtag,
|
||||
pipelineIid,
|
||||
pipelineProjectPath,
|
||||
|
|
@ -50,8 +50,6 @@ export const createAppOptions = (selector, apolloProvider, router) => {
|
|||
testsCount,
|
||||
} = dataset;
|
||||
|
||||
// TODO remove projectPath variable once https://gitlab.com/gitlab-org/gitlab/-/issues/371641 is resolved
|
||||
const projectPath = fullPath;
|
||||
const defaultTabValue = getPipelineDefaultTab(window.location.href);
|
||||
|
||||
return {
|
||||
|
|
@ -83,7 +81,6 @@ export const createAppOptions = (selector, apolloProvider, router) => {
|
|||
exposeLicenseScanningData: parseBoolean(exposeLicenseScanningData),
|
||||
failedJobsCount,
|
||||
failedJobsSummary: JSON.parse(failedJobsSummary),
|
||||
fullPath,
|
||||
graphqlResourceEtag,
|
||||
pipelineIid,
|
||||
pipelineProjectPath,
|
||||
|
|
|
|||
|
|
@ -226,7 +226,7 @@ export default {
|
|||
</gl-link>
|
||||
<span
|
||||
v-if="taskStatus"
|
||||
class="task-status gl-display-none gl-sm-display-inline-block! gl-ml-3"
|
||||
class="task-status gl-display-none gl-sm-display-inline-block! gl-ml-2 gl-font-sm"
|
||||
data-testid="task-status"
|
||||
>
|
||||
{{ taskStatus }}
|
||||
|
|
@ -265,7 +265,7 @@ export default {
|
|||
:data-avatar-url="author.avatarUrl"
|
||||
:href="author.webUrl"
|
||||
data-testid="issuable-author"
|
||||
class="author-link js-user-link"
|
||||
class="author-link js-user-link gl-font-sm"
|
||||
>
|
||||
<span class="author">{{ author.name }}</span>
|
||||
</gl-link>
|
||||
|
|
@ -286,7 +286,7 @@ export default {
|
|||
<slot name="timeframe"></slot>
|
||||
</span>
|
||||
|
||||
<span v-if="labels.length" role="group" :aria-label="__('Labels')">
|
||||
<p v-if="labels.length" role="group" :aria-label="__('Labels')" class="gl-mt-1 gl-mb-0">
|
||||
<gl-label
|
||||
v-for="(label, index) in labels"
|
||||
:key="index"
|
||||
|
|
@ -298,7 +298,7 @@ export default {
|
|||
:class="{ 'gl-ml-2': index }"
|
||||
size="sm"
|
||||
/>
|
||||
</span>
|
||||
</p>
|
||||
</div>
|
||||
</div>
|
||||
<div class="issuable-meta">
|
||||
|
|
|
|||
|
|
@ -180,7 +180,25 @@ export default {
|
|||
},
|
||||
},
|
||||
update(store, createNoteData) {
|
||||
if (createNoteData.data?.createNote?.errors?.length) {
|
||||
const numErrors = createNoteData.data?.createNote?.errors?.length;
|
||||
|
||||
if (numErrors) {
|
||||
const { errors } = createNoteData.data.createNote;
|
||||
|
||||
// TODO: https://gitlab.com/gitlab-org/gitlab/-/issues/346557
|
||||
// When a note only contains quick actions,
|
||||
// additional "helpful" messages are embedded in the errors field.
|
||||
// For instance, a note solely composed of "/assign @foobar" would
|
||||
// return a message "Commands only Assigned @root." as an error on creation
|
||||
// even though the quick action successfully executed.
|
||||
if (
|
||||
numErrors === 2 &&
|
||||
errors[0].includes('Commands only') &&
|
||||
errors[1].includes('Command names')
|
||||
) {
|
||||
return;
|
||||
}
|
||||
|
||||
throw new Error(createNoteData.data?.createNote?.errors[0]);
|
||||
}
|
||||
},
|
||||
|
|
|
|||
|
|
@ -18,6 +18,11 @@
|
|||
}
|
||||
}
|
||||
|
||||
.issuable-info,
|
||||
.issuable-meta {
|
||||
font-size: $gl-font-size-sm;
|
||||
}
|
||||
|
||||
.issuable-meta {
|
||||
display: flex;
|
||||
flex-direction: column;
|
||||
|
|
|
|||
|
|
@ -3,14 +3,35 @@
|
|||
module Resolvers
|
||||
module Ci
|
||||
class AllJobsResolver < BaseResolver
|
||||
include LooksAhead
|
||||
|
||||
type ::Types::Ci::JobType.connection_type, null: true
|
||||
|
||||
argument :statuses, [::Types::Ci::JobStatusEnum],
|
||||
required: false,
|
||||
description: 'Filter jobs by status.'
|
||||
|
||||
def resolve(statuses: nil)
|
||||
::Ci::JobsFinder.new(current_user: current_user, params: { scope: statuses }).execute
|
||||
def resolve_with_lookahead(statuses: nil)
|
||||
jobs = ::Ci::JobsFinder.new(current_user: current_user, params: { scope: statuses }).execute
|
||||
|
||||
apply_lookahead(jobs)
|
||||
end
|
||||
|
||||
private
|
||||
|
||||
def preloads
|
||||
{
|
||||
previous_stage_jobs_or_needs: [:needs, :pipeline],
|
||||
artifacts: [:job_artifacts],
|
||||
pipeline: [:user],
|
||||
project: [{ project: [:route, { namespace: [:route] }] }],
|
||||
commit_path: [:pipeline, { project: { namespace: [:route] } }],
|
||||
ref_path: [{ project: [:route, { namespace: [:route] }] }],
|
||||
browse_artifacts_path: [{ project: { namespace: [:route] } }],
|
||||
play_path: [{ project: { namespace: [:route] } }],
|
||||
web_path: [{ project: { namespace: [:route] } }],
|
||||
tags: [:tags]
|
||||
}
|
||||
end
|
||||
end
|
||||
end
|
||||
|
|
|
|||
|
|
@ -36,7 +36,11 @@ module Resolvers
|
|||
{ pipeline: [:merge_request] },
|
||||
{ project: [:route, { namespace: :route }] }
|
||||
],
|
||||
commit_path: [:pipeline, { project: [:route, { namespace: [:route] }] }],
|
||||
commit_path: [:pipeline, { project: { namespace: [:route] } }],
|
||||
ref_path: [{ project: [:route, { namespace: [:route] }] }],
|
||||
browse_artifacts_path: [{ project: { namespace: [:route] } }],
|
||||
play_path: [{ project: { namespace: [:route] } }],
|
||||
web_path: [{ project: { namespace: [:route] } }],
|
||||
short_sha: [:pipeline],
|
||||
tags: [:tags]
|
||||
}
|
||||
|
|
|
|||
|
|
@ -8,7 +8,7 @@ module Projects
|
|||
{
|
||||
failed_jobs_count: pipeline.failed_builds.count,
|
||||
failed_jobs_summary: prepare_failed_jobs_summary_data(pipeline.failed_builds),
|
||||
full_path: project.full_path,
|
||||
project_path: project.full_path,
|
||||
graphql_resource_etag: graphql_etag_pipeline_path(pipeline),
|
||||
metrics_path: namespace_project_ci_prometheus_metrics_histograms_path(namespace_id: project.namespace, project_id: project, format: :json),
|
||||
pipeline_iid: pipeline.iid,
|
||||
|
|
|
|||
|
|
@ -13,6 +13,11 @@ module Routing
|
|||
glm_source
|
||||
glm_content
|
||||
_gl
|
||||
utm_medium
|
||||
utm_source
|
||||
utm_campaign
|
||||
utm_content
|
||||
utm_budget
|
||||
].freeze
|
||||
|
||||
def initialize(request_object, group, project)
|
||||
|
|
|
|||
|
|
@ -595,7 +595,7 @@ module Ci
|
|||
end
|
||||
|
||||
def exactly_one_group
|
||||
unless runner_namespaces.one?
|
||||
unless runner_namespaces.size == 1
|
||||
errors.add(:runner, 'needs to be assigned to exactly one group')
|
||||
end
|
||||
end
|
||||
|
|
|
|||
|
|
@ -1,6 +1,5 @@
|
|||
- breadcrumb_title _("General settings")
|
||||
- page_title _("General settings")
|
||||
- @content_class = "limit-container-width" unless fluid_layout
|
||||
- expanded = expanded_by_default?
|
||||
|
||||
= render 'shared/namespaces/cascading_settings/lock_popovers'
|
||||
|
|
|
|||
|
|
@ -2,7 +2,6 @@
|
|||
- page_title _('Group Access Tokens')
|
||||
- type = _('group access token')
|
||||
- type_plural = _('group access tokens')
|
||||
- @content_class = 'limit-container-width' unless fluid_layout
|
||||
|
||||
.row.gl-mt-3.js-search-settings-section
|
||||
.col-lg-4
|
||||
|
|
|
|||
|
|
@ -1,5 +1,4 @@
|
|||
- page_title _("Edit"), @application.name, _("Group applications")
|
||||
- @content_class = "limit-container-width" unless fluid_layout
|
||||
|
||||
%h1.page-title.gl-font-size-h-display= _('Edit group application')
|
||||
= render 'shared/doorkeeper/applications/form', url: group_settings_application_path(@group, @application)
|
||||
|
|
|
|||
|
|
@ -1,7 +1,6 @@
|
|||
- add_to_breadcrumbs _("Group applications"), group_settings_applications_path(@group)
|
||||
- breadcrumb_title @application.name
|
||||
- page_title @application.name, _("Group applications")
|
||||
- @content_class = "limit-container-width" unless fluid_layout
|
||||
|
||||
%h1.page-title.gl-font-size-h-display
|
||||
= _("Group application: %{name}") % { name: @application.name }
|
||||
|
|
|
|||
|
|
@ -1,6 +1,5 @@
|
|||
- breadcrumb_title s_('Integrations|Group-level integration management')
|
||||
- page_title s_('Integrations|Group-level integration management')
|
||||
- @content_class = 'limit-container-width' unless fluid_layout
|
||||
|
||||
%section.js-search-settings-section
|
||||
%h3= s_('Integrations|Group-level integration management')
|
||||
|
|
|
|||
|
|
@ -1,6 +1,5 @@
|
|||
- breadcrumb_title _('Packages and registries settings')
|
||||
- page_title _('Packages and registries settings')
|
||||
- @content_class = 'limit-container-width' unless fluid_layout
|
||||
|
||||
%section#js-packages-and-registries-settings{ data: { group_path: @group.full_path,
|
||||
group_dependency_proxy_path: group_dependency_proxy_path(@group) } }
|
||||
|
|
|
|||
|
|
@ -1,6 +1,5 @@
|
|||
- breadcrumb_title _('Repository Settings')
|
||||
- page_title _('Repository')
|
||||
- @content_class = "limit-container-width" unless fluid_layout
|
||||
|
||||
- if can?(current_user, :admin_group, @group)
|
||||
- deploy_token_description = s_('DeployTokens|Group deploy tokens allow access to the packages, repositories, and registry images within the group.')
|
||||
|
|
|
|||
|
|
@ -46,12 +46,11 @@
|
|||
.issuable-meta
|
||||
%ul.controls.d-flex.align-items-end
|
||||
- if merge_request.merged?
|
||||
%li.issuable-status.d-none.d-sm-inline-block
|
||||
= _('MERGED')
|
||||
%li.d-none.d-sm-flex
|
||||
= render Pajamas::BadgeComponent.new(_('Merged'), size: 'sm', variant: 'info')
|
||||
- elsif merge_request.closed?
|
||||
%li.issuable-status.d-none.d-sm-inline-block
|
||||
= sprite_icon('cancel', css_class: 'gl-vertical-align-text-bottom')
|
||||
= _('CLOSED')
|
||||
%li.d-none.d-sm-flex
|
||||
= render Pajamas::BadgeComponent.new(_('Closed'), size: 'sm', variant: 'danger')
|
||||
= render 'shared/merge_request_pipeline_status', merge_request: merge_request
|
||||
- if merge_request.open? && merge_request.broken?
|
||||
%li.issuable-pipeline-broken.d-none.d-sm-flex
|
||||
|
|
|
|||
|
|
@ -1,6 +1,9 @@
|
|||
- add_to_breadcrumbs _('Repository Settings'), project_settings_repository_path(@project)
|
||||
- add_to_breadcrumbs _('Branch rules'), project_settings_repository_path(@project, anchor: 'branch-rules')
|
||||
- breadcrumb_title _('Details')
|
||||
- @breadcrumb_link = '#'
|
||||
- page_title s_('BranchRules|Branch rules details')
|
||||
|
||||
%h3.gl-mb-5= s_('BranchRules|Branch rules details')
|
||||
%h3.gl-mb-5= page_title
|
||||
|
||||
#js-branch-rules{ data: branch_rules_data(@project) }
|
||||
|
|
|
|||
|
|
@ -1,5 +1,3 @@
|
|||
- @content_class = "limit-container-width" unless fluid_layout
|
||||
|
||||
.row.gl-mt-3.js-search-settings-section
|
||||
.col-lg-4.profile-settings-sidebar
|
||||
%h4.gl-mt-0
|
||||
|
|
|
|||
|
|
@ -0,0 +1,15 @@
|
|||
# frozen_string_literal: true
|
||||
|
||||
class AddPackageIdCreatedAtDescIndexToPackageFiles < Gitlab::Database::Migration[2.1]
|
||||
disable_ddl_transaction!
|
||||
|
||||
INDEX_NAME = 'index_packages_package_files_on_package_id_and_created_at_desc'
|
||||
|
||||
def up
|
||||
add_concurrent_index :packages_package_files, 'package_id, created_at DESC', name: INDEX_NAME
|
||||
end
|
||||
|
||||
def down
|
||||
remove_concurrent_index_by_name :packages_package_files, name: INDEX_NAME
|
||||
end
|
||||
end
|
||||
|
|
@ -0,0 +1 @@
|
|||
5aa29a59aad33fe1ee81c44402c7fb2bb149fcb46c0d0afd5494b4788ea8840c
|
||||
|
|
@ -31301,6 +31301,8 @@ CREATE INDEX index_packages_package_files_on_file_store ON packages_package_file
|
|||
|
||||
CREATE INDEX index_packages_package_files_on_id_for_cleanup ON packages_package_files USING btree (id) WHERE (status = 1);
|
||||
|
||||
CREATE INDEX index_packages_package_files_on_package_id_and_created_at_desc ON packages_package_files USING btree (package_id, created_at DESC);
|
||||
|
||||
CREATE INDEX index_packages_package_files_on_package_id_and_file_name ON packages_package_files USING btree (package_id, file_name);
|
||||
|
||||
CREATE INDEX index_packages_package_files_on_package_id_id ON packages_package_files USING btree (package_id, id);
|
||||
|
|
|
|||
|
|
@ -199,6 +199,14 @@ Edit `/etc/gitlab/gitlab.rb`:
|
|||
|
||||
#### Configure Gitaly server
|
||||
|
||||
<!--
|
||||
Updates to example must be made at:
|
||||
|
||||
- https://gitlab.com/gitlab-org/charts/gitlab/blob/master/doc/advanced/external-gitaly/external-omnibus-gitaly.md#configure-omnibus-gitlab
|
||||
- https://gitlab.com/gitlab-org/gitlab/blob/master/doc/administration/gitaly/index.md#gitaly-server-configuration
|
||||
- All reference architecture pages
|
||||
-->
|
||||
|
||||
Configure Gitaly server in one of two ways:
|
||||
|
||||
::Tabs
|
||||
|
|
@ -207,13 +215,6 @@ Configure Gitaly server in one of two ways:
|
|||
|
||||
1. Edit `/etc/gitlab/gitlab.rb`:
|
||||
|
||||
<!--
|
||||
Updates to example must be made at:
|
||||
- https://gitlab.com/gitlab-org/charts/gitlab/blob/master/doc/advanced/external-gitaly/external-omnibus-gitaly.md#configure-omnibus-gitlab
|
||||
- https://gitlab.com/gitlab-org/gitlab/blob/master/doc/administration/gitaly/index.md#gitaly-server-configuration
|
||||
- all reference architecture pages
|
||||
-->
|
||||
|
||||
```ruby
|
||||
# Avoid running unnecessary services on the Gitaly server
|
||||
postgresql['enable'] = false
|
||||
|
|
|
|||
|
|
@ -12,7 +12,7 @@ recommended for performance reasons.
|
|||
|
||||
For data objects such as LFS, Uploads, and Artifacts, an [Object Storage service](object_storage.md)
|
||||
is recommended over NFS where possible, due to better performance.
|
||||
When eliminating the usage of NFS, there are [additional steps you need to take](object_storage.md#other-alternatives-to-file-system-storage)
|
||||
When eliminating the usage of NFS, there are [additional steps you need to take](object_storage.md#alternatives-to-file-system-storage)
|
||||
in addition to moving to Object Storage.
|
||||
|
||||
File system performance can impact overall GitLab performance, especially for
|
||||
|
|
|
|||
|
|
@ -11,6 +11,19 @@ It's recommended over NFS and
|
|||
in general it's better in larger setups as object storage is
|
||||
typically much more performant, reliable, and scalable.
|
||||
|
||||
There are two ways of specifying the object storage:
|
||||
|
||||
- [Consolidated configuration](#consolidated-object-storage-configuration) (recommended): A single credential is
|
||||
shared by all supported object types.
|
||||
- [Storage-specific configuration](#storage-specific-configuration): Every object defines its
|
||||
own object storage [connection and configuration](#connection-settings).
|
||||
|
||||
For more information on the differences and to transition from one form to another, see
|
||||
[Transition to consolidated form](#transition-to-consolidated-form).
|
||||
|
||||
If you are currently storing data locally, see
|
||||
[Migrate to object storage](#migrate-to-object-storage) for migration details.
|
||||
|
||||
## Supported object storage providers
|
||||
|
||||
GitLab is tightly integrated with `Fog`, so you can refer to its
|
||||
|
|
@ -30,22 +43,7 @@ Specifically, GitLab has been tested by vendors and customers on a number of obj
|
|||
- On-premises hardware and appliances from various storage vendors, whose list is not officially established.
|
||||
- MinIO. We have [a guide to deploying this](https://docs.gitlab.com/charts/advanced/external-object-storage/minio.html) within our Helm Chart documentation.
|
||||
|
||||
## Configure the object storage
|
||||
|
||||
There are two ways of specifying object storage configuration in GitLab:
|
||||
|
||||
- [Consolidated form](#consolidated-object-storage-configuration): A single credential is
|
||||
shared by all supported object types.
|
||||
- [Storage-specific form](#storage-specific-configuration): Every object defines its
|
||||
own object storage [connection and configuration](#connection-settings).
|
||||
|
||||
For more information on the differences and to transition from one form to another, see
|
||||
[Transition to consolidated form](#transition-to-consolidated-form).
|
||||
|
||||
If you are currently storing data locally, see
|
||||
[Migrate to object storage](#migrate-to-object-storage) for migration details.
|
||||
|
||||
### Consolidated object storage configuration
|
||||
## Consolidated object storage configuration
|
||||
|
||||
> [Introduced](https://gitlab.com/gitlab-org/omnibus-gitlab/-/merge_requests/4368) in GitLab 13.2.
|
||||
|
||||
|
|
@ -73,7 +71,7 @@ Mattermost. See the [full table for a complete list](#storage-specific-configura
|
|||
However, backups can be configured with [server side encryption](../raketasks/backup_gitlab.md#s3-encrypted-buckets) separately.
|
||||
|
||||
Enabling consolidated object storage enables object storage for all object
|
||||
types. If not all buckets are specified, `sudo gitlab-ctl reconfigure` may fail with the error like:
|
||||
types. If not all buckets are specified, you may see an error like:
|
||||
|
||||
```plaintext
|
||||
Object storage for <object type> must have a bucket specified
|
||||
|
|
@ -95,7 +93,333 @@ When the consolidated form is:
|
|||
|
||||
See the section on [ETag mismatch errors](#etag-mismatch) for more details.
|
||||
|
||||
#### Full example using Amazon S3
|
||||
### Common parameters
|
||||
|
||||
In the consolidated configuration, the `object_store` section defines a
|
||||
common set of parameters. Here we use the YAML from the source
|
||||
installation because it's easier to see the inheritance:
|
||||
|
||||
```yaml
|
||||
object_store:
|
||||
enabled: true
|
||||
proxy_download: true
|
||||
connection:
|
||||
provider: AWS
|
||||
aws_access_key_id: <AWS_ACCESS_KEY_ID>
|
||||
aws_secret_access_key: <AWS_SECRET_ACCESS_KEY>
|
||||
objects:
|
||||
...
|
||||
```
|
||||
|
||||
The Omnibus configuration maps directly to this:
|
||||
|
||||
```ruby
|
||||
gitlab_rails['object_store']['enabled'] = true
|
||||
gitlab_rails['object_store']['proxy_download'] = true
|
||||
gitlab_rails['object_store']['connection'] = {
|
||||
'provider' => 'AWS',
|
||||
'aws_access_key_id' => '<AWS_ACCESS_KEY_ID',
|
||||
'aws_secret_access_key' => '<AWS_SECRET_ACCESS_KEY>'
|
||||
}
|
||||
```
|
||||
|
||||
| Setting | Description |
|
||||
|-------------------|-----------------------------------|
|
||||
| `enabled` | Enable or disable object storage. |
|
||||
| `proxy_download` | Set to `true` to [enable proxying all files served](#proxy-download). Option allows to reduce egress traffic as this allows clients to download directly from remote storage instead of proxying all data. |
|
||||
| `connection` | Various [connection options](#connection-settings) described below. |
|
||||
| `storage_options` | Options to use when saving new objects, such as [server side encryption](#server-side-encryption-headers). Introduced in GitLab 13.3. |
|
||||
| `objects` | [Object-specific configuration](#object-specific-configuration). |
|
||||
|
||||
### Object-specific configuration
|
||||
|
||||
The following YAML shows how the `object_store` section defines
|
||||
object-specific configuration block and how the `enabled` and
|
||||
`proxy_download` flags can be overridden. The `bucket` is the only
|
||||
required parameter within each type:
|
||||
|
||||
```yaml
|
||||
object_store:
|
||||
connection:
|
||||
...
|
||||
objects:
|
||||
artifacts:
|
||||
bucket: artifacts
|
||||
proxy_download: false
|
||||
external_diffs:
|
||||
bucket: external-diffs
|
||||
lfs:
|
||||
bucket: lfs-objects
|
||||
uploads:
|
||||
bucket: uploads
|
||||
packages:
|
||||
bucket: packages
|
||||
dependency_proxy:
|
||||
enabled: false
|
||||
bucket: dependency_proxy
|
||||
terraform_state:
|
||||
bucket: terraform
|
||||
pages:
|
||||
bucket: pages
|
||||
```
|
||||
|
||||
This maps to this Omnibus GitLab configuration:
|
||||
|
||||
```ruby
|
||||
gitlab_rails['object_store']['objects']['artifacts']['bucket'] = 'artifacts'
|
||||
gitlab_rails['object_store']['objects']['artifacts']['proxy_download'] = false
|
||||
gitlab_rails['object_store']['objects']['external_diffs']['bucket'] = 'external-diffs'
|
||||
gitlab_rails['object_store']['objects']['lfs']['bucket'] = 'lfs-objects'
|
||||
gitlab_rails['object_store']['objects']['uploads']['bucket'] = 'uploads'
|
||||
gitlab_rails['object_store']['objects']['packages']['bucket'] = 'packages'
|
||||
gitlab_rails['object_store']['objects']['dependency_proxy']['enabled'] = false
|
||||
gitlab_rails['object_store']['objects']['dependency_proxy']['bucket'] = 'dependency-proxy'
|
||||
gitlab_rails['object_store']['objects']['terraform_state']['bucket'] = 'terraform-state'
|
||||
gitlab_rails['object_store']['objects']['pages']['bucket'] = 'pages'
|
||||
```
|
||||
|
||||
This is the list of valid `objects` that can be used:
|
||||
|
||||
| Type | Description |
|
||||
|--------------------|----------------------------------------------------------------------------|
|
||||
| `artifacts` | [CI artifacts](job_artifacts.md) |
|
||||
| `external_diffs` | [Merge request diffs](merge_request_diffs.md) |
|
||||
| `uploads` | [User uploads](uploads.md) |
|
||||
| `lfs` | [Git Large File Storage objects](lfs/index.md) |
|
||||
| `packages` | [Project packages (for example, PyPI, Maven, or NuGet)](packages/index.md) |
|
||||
| `dependency_proxy` | [Dependency Proxy](packages/dependency_proxy.md) |
|
||||
| `terraform_state` | [Terraform state files](terraform_state.md) |
|
||||
| `pages` | [Pages](pages/index.md) |
|
||||
|
||||
Within each object type, three parameters can be defined:
|
||||
|
||||
| Setting | Required? | Description |
|
||||
|------------------|------------------------|-------------------------------------|
|
||||
| `bucket` | **{check-circle}** Yes | Bucket name for the object storage. |
|
||||
| `enabled` | **{dotted-circle}** No | Overrides the common parameter. |
|
||||
| `proxy_download` | **{dotted-circle}** No | Overrides the common parameter. |
|
||||
|
||||
#### Disable object storage for specific features
|
||||
|
||||
As seen above, object storage can be disabled for specific types by
|
||||
setting the `enabled` flag to `false`. For example, to disable object
|
||||
storage for CI artifacts:
|
||||
|
||||
```ruby
|
||||
gitlab_rails['object_store']['objects']['artifacts']['enabled'] = false
|
||||
```
|
||||
|
||||
A bucket is not needed if the feature is disabled entirely. For example,
|
||||
no bucket is needed if CI artifacts are disabled with this setting:
|
||||
|
||||
```ruby
|
||||
gitlab_rails['artifacts_enabled'] = false
|
||||
```
|
||||
|
||||
## Storage-specific configuration
|
||||
|
||||
For configuring object storage in GitLab 13.1 and earlier, or for storage types not
|
||||
supported by consolidated configuration form, refer to the following guides:
|
||||
|
||||
| Object storage type | Supported by consolidated configuration? |
|
||||
|---------------------|------------------------------------------|
|
||||
| [Backups](../raketasks/backup_gitlab.md#upload-backups-to-a-remote-cloud-storage) | **{dotted-circle}** No |
|
||||
| [Job artifacts](job_artifacts.md#using-object-storage) including archived job logs | **{check-circle}** Yes |
|
||||
| [LFS objects](lfs/index.md#storing-lfs-objects-in-remote-object-storage) | **{check-circle}** Yes |
|
||||
| [Uploads](uploads.md#using-object-storage) | **{check-circle}** Yes |
|
||||
| [Container Registry](packages/container_registry.md#use-object-storage) (optional feature) | **{dotted-circle}** No |
|
||||
| [Merge request diffs](merge_request_diffs.md#using-object-storage) | **{check-circle}** Yes |
|
||||
| [Mattermost](https://docs.mattermost.com/configure/file-storage-configuration-settings.html)| **{dotted-circle}** No |
|
||||
| [Packages](packages/index.md#use-object-storage) (optional feature) | **{check-circle}** Yes |
|
||||
| [Dependency Proxy](packages/dependency_proxy.md#using-object-storage) (optional feature) | **{check-circle}** Yes |
|
||||
| [Autoscale runner caching](https://docs.gitlab.com/runner/configuration/autoscale.html#distributed-runners-caching) (optional for improved performance) | **{dotted-circle}** No |
|
||||
| [Terraform state files](terraform_state.md#using-object-storage) | **{check-circle}** Yes |
|
||||
| [Pages content](pages/index.md#using-object-storage) | **{check-circle}** Yes |
|
||||
|
||||
WARNING:
|
||||
The use of [encrypted S3 buckets](#encrypted-s3-buckets) with non-consolidated configuration is not supported.
|
||||
You may start getting [ETag mismatch errors](#etag-mismatch) if you use it.
|
||||
|
||||
## Connection settings
|
||||
|
||||
Both consolidated configuration form and storage-specific configuration form must configure a connection. The following sections describe parameters that can be used
|
||||
in the `connection` setting.
|
||||
|
||||
### Amazon S3
|
||||
|
||||
The connection settings match those provided by [fog-aws](https://github.com/fog/fog-aws):
|
||||
|
||||
| Setting | Description | Default |
|
||||
|---------------------------------------------|------------------------------------|---------|
|
||||
| `provider` | Always `AWS` for compatible hosts. | `AWS` |
|
||||
| `aws_access_key_id` | AWS credentials, or compatible. | |
|
||||
| `aws_secret_access_key` | AWS credentials, or compatible. | |
|
||||
| `aws_signature_version` | AWS signature version to use. `2` or `4` are valid options. Digital Ocean Spaces and other providers may need `2`. | `4` |
|
||||
| `enable_signature_v4_streaming` | Set to `true` to enable HTTP chunked transfers with [AWS v4 signatures](https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html). Oracle Cloud S3 needs this to be `false`. | `true` |
|
||||
| `region` | AWS region. | |
|
||||
| `host` | DEPRECATED: Use `endpoint` instead. S3 compatible host for when not using AWS. For example, `localhost` or `storage.example.com`. HTTPS and port 443 is assumed. | `s3.amazonaws.com` |
|
||||
| `endpoint` | Can be used when configuring an S3 compatible service such as [MinIO](https://min.io), by entering a URL such as `http://127.0.0.1:9000`. This takes precedence over `host`. Always use `endpoint` for consolidated form. | (optional) |
|
||||
| `path_style` | Set to `true` to use `host/bucket_name/object` style paths instead of `bucket_name.host/object`. Set to `true` for using [MinIO](https://min.io). Leave as `false` for AWS S3. | `false`. |
|
||||
| `use_iam_profile` | Set to `true` to use IAM profile instead of access keys. | `false` |
|
||||
| `aws_credentials_refresh_threshold_seconds` | Sets the [automatic refresh threshold](https://github.com/fog/fog-aws#controlling-credential-refresh-time-with-iam-authentication) when using temporary credentials in IAM. | `15` |
|
||||
|
||||
### Oracle Cloud S3
|
||||
|
||||
Oracle Cloud S3 must be sure to use the following settings:
|
||||
|
||||
| Setting | Value |
|
||||
|---------------------------------|---------|
|
||||
| `enable_signature_v4_streaming` | `false` |
|
||||
| `path_style` | `true` |
|
||||
|
||||
If `enable_signature_v4_streaming` is set to `true`, you may see the
|
||||
following error in `production.log`:
|
||||
|
||||
```plaintext
|
||||
STREAMING-AWS4-HMAC-SHA256-PAYLOAD is not supported
|
||||
```
|
||||
|
||||
### Google Cloud Storage (GCS)
|
||||
|
||||
Here are the valid connection parameters for GCS:
|
||||
|
||||
| Setting | Description | Example |
|
||||
|------------------------------|-------------------|---------|
|
||||
| `provider` | Provider name. | `Google` |
|
||||
| `google_project` | GCP project name. | `gcp-project-12345` |
|
||||
| `google_json_key_location` | JSON key path. | `/path/to/gcp-project-12345-abcde.json` |
|
||||
| `google_json_key_string` | JSON key string. | `{ "type": "service_account", "project_id": "example-project-382839", ... }` |
|
||||
| `google_application_default` | Set to `true` to use [Google Cloud Application Default Credentials](https://cloud.google.com/docs/authentication#adc) to locate service account credentials. | |
|
||||
|
||||
GitLab reads the value of `google_json_key_location`, then `google_json_key_string`, and finally, `google_application_default`.
|
||||
It uses the first of these settings that has a value.
|
||||
|
||||
The service account must have permission to access the bucket. For more information,
|
||||
see the [Cloud Storage authentication documentation](https://cloud.google.com/storage/docs/authentication).
|
||||
|
||||
NOTE:
|
||||
Bucket encryption with the [Cloud Key Management Service (KMS)](https://cloud.google.com/kms/docs) is not supported and results in [ETag mismatch errors](#etag-mismatch).
|
||||
|
||||
#### GCS example
|
||||
|
||||
For Omnibus installations, this is an example of the `connection` setting
|
||||
in the consolidated form:
|
||||
|
||||
```ruby
|
||||
gitlab_rails['object_store']['connection'] = {
|
||||
'provider' => 'Google',
|
||||
'google_project' => '<GOOGLE PROJECT>',
|
||||
'google_json_key_location' => '<FILENAME>'
|
||||
}
|
||||
```
|
||||
|
||||
#### GCS example with ADC
|
||||
|
||||
> [Introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/275979) in GitLab 13.6.
|
||||
|
||||
Google Cloud Application Default Credentials (ADC) are typically
|
||||
used with GitLab to use the default service account. This eliminates the
|
||||
need to supply credentials for the instance. For example, in the consolidated form:
|
||||
|
||||
```ruby
|
||||
gitlab_rails['object_store']['connection'] = {
|
||||
'provider' => 'Google',
|
||||
'google_project' => '<GOOGLE PROJECT>',
|
||||
'google_application_default' => true
|
||||
}
|
||||
```
|
||||
|
||||
If you use ADC, be sure that:
|
||||
|
||||
- The service account that you use has the
|
||||
[`iam.serviceAccounts.signBlob` permission](https://cloud.google.com/iam/docs/reference/credentials/rest/v1/projects.serviceAccounts/signBlob).
|
||||
Typically this is done by granting the `Service Account Token Creator` role to the service account.
|
||||
- Your virtual machines have the [correct access scopes to access Google Cloud APIs](https://cloud.google.com/compute/docs/access/create-enable-service-accounts-for-instances#changeserviceaccountandscopes). If the machines do not have the right scope, the error logs may show:
|
||||
|
||||
```markdown
|
||||
Google::Apis::ClientError (insufficientPermissions: Request had insufficient authentication scopes.)
|
||||
```
|
||||
|
||||
### Azure Blob storage
|
||||
|
||||
> [Introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/25877) in GitLab 13.4.
|
||||
|
||||
Although Azure uses the word `container` to denote a collection of
|
||||
blobs, GitLab standardizes on the term `bucket`. Be sure to configure
|
||||
Azure container names in the `bucket` settings.
|
||||
|
||||
Azure Blob storage can only be used with the [consolidated form](#consolidated-object-storage-configuration)
|
||||
because a single set of credentials is used to access multiple
|
||||
containers. The [storage-specific form](#storage-specific-configuration)
|
||||
is not supported. For more details, see [how to transition to consolidated form](#transition-to-consolidated-form).
|
||||
|
||||
The following are the valid connection parameters for Azure. For more information, see the
|
||||
[Azure Blob Storage documentation](https://learn.microsoft.com/en-us/azure/storage/blobs/storage-blobs-introduction).
|
||||
|
||||
| Setting | Description | Example |
|
||||
|------------------------------|----------------|-----------|
|
||||
| `provider` | Provider name. | `AzureRM` |
|
||||
| `azure_storage_account_name` | Name of the Azure Blob Storage account used to access the storage. | `azuretest` |
|
||||
| `azure_storage_access_key` | Storage account access key used to access the container. This is typically a secret, 512-bit encryption key encoded in base64. | `czV2OHkvQj9FKEgrTWJRZVRoV21ZcTN0Nnc5eiRDJkYpSkBOY1JmVWpYbjJy\nNHU3eCFBJUQqRy1LYVBkU2dWaw==\n` |
|
||||
| `azure_storage_domain` | Domain name used to contact the Azure Blob Storage API (optional). Defaults to `blob.core.windows.net`. Set this if you are using Azure China, Azure Germany, Azure US Government, or some other custom Azure domain. | `blob.core.windows.net` |
|
||||
|
||||
- For Omnibus installations, this is an example of the `connection` setting
|
||||
in the consolidated form:
|
||||
|
||||
```ruby
|
||||
gitlab_rails['object_store']['connection'] = {
|
||||
'provider' => 'AzureRM',
|
||||
'azure_storage_account_name' => '<AZURE STORAGE ACCOUNT NAME>',
|
||||
'azure_storage_access_key' => '<AZURE STORAGE ACCESS KEY>',
|
||||
'azure_storage_domain' => '<AZURE STORAGE DOMAIN>'
|
||||
}
|
||||
```
|
||||
|
||||
- For source installations, Workhorse also needs to be configured with Azure
|
||||
credentials. This isn't needed in Omnibus installs, because the Workhorse
|
||||
settings are populated from the previous settings.
|
||||
|
||||
1. Edit `/home/git/gitlab-workhorse/config.toml` and add or amend the following lines:
|
||||
|
||||
```toml
|
||||
[object_storage]
|
||||
provider = "AzureRM"
|
||||
|
||||
[object_storage.azurerm]
|
||||
azure_storage_account_name = "<AZURE STORAGE ACCOUNT NAME>"
|
||||
azure_storage_access_key = "<AZURE STORAGE ACCESS KEY>"
|
||||
```
|
||||
|
||||
If you are using a custom Azure storage domain,
|
||||
`azure_storage_domain` does **not** have to be set in the Workhorse
|
||||
configuration. This information is exchanged in an API call between
|
||||
GitLab Rails and Workhorse.
|
||||
|
||||
### Storj Gateway (SJ)
|
||||
|
||||
NOTE:
|
||||
The Storj Gateway [does not support](https://github.com/storj/gateway-st/blob/4b74c3b92c63b5de7409378b0d1ebd029db9337d/docs/s3-compatibility.md) multi-threaded copying (see `UploadPartCopy` in the table).
|
||||
While an implementation [is planned](https://github.com/storj/roadmap/issues/40), you must [disable multi-threaded copying](#multi-threaded-copying) until completion.
|
||||
|
||||
The [Storj Network](https://www.storj.io/) provides an S3-compatible API gateway. Use the following configuration example:
|
||||
|
||||
```ruby
|
||||
gitlab_rails['object_store']['connection'] = {
|
||||
'provider' => 'AWS',
|
||||
'endpoint' => 'https://gateway.storjshare.io',
|
||||
'path_style' => true,
|
||||
'region' => 'eu1',
|
||||
'aws_access_key_id' => 'ACCESS_KEY',
|
||||
'aws_secret_access_key' => 'SECRET_KEY',
|
||||
'aws_signature_version' => 2,
|
||||
'enable_signature_v4_streaming' => false
|
||||
}
|
||||
```
|
||||
|
||||
The signature version must be `2`. Using v4 results in an HTTP 411 Length Required error.
|
||||
For more information, see [issue #4419](https://gitlab.com/gitlab-org/gitlab/-/issues/4419).
|
||||
|
||||
## Full example using the consolidated object storage and Amazon S3
|
||||
|
||||
The following example uses AWS S3 to enable object storage for all supported services:
|
||||
|
||||
|
|
@ -379,309 +703,7 @@ The following example uses AWS S3 to enable object storage for all supported ser
|
|||
|
||||
::EndTabs
|
||||
|
||||
#### Common parameters
|
||||
|
||||
In the consolidated configuration, the `object_store` section defines a
|
||||
common set of parameters. Here we use the YAML from the source
|
||||
installation because it's easier to see the inheritance:
|
||||
|
||||
```yaml
|
||||
object_store:
|
||||
enabled: true
|
||||
proxy_download: true
|
||||
connection:
|
||||
provider: AWS
|
||||
aws_access_key_id: <AWS_ACCESS_KEY_ID>
|
||||
aws_secret_access_key: <AWS_SECRET_ACCESS_KEY>
|
||||
objects:
|
||||
...
|
||||
```
|
||||
|
||||
The Omnibus configuration maps directly to this:
|
||||
|
||||
```ruby
|
||||
gitlab_rails['object_store']['enabled'] = true
|
||||
gitlab_rails['object_store']['proxy_download'] = true
|
||||
gitlab_rails['object_store']['connection'] = {
|
||||
'provider' => 'AWS',
|
||||
  'aws_access_key_id' => '<AWS_ACCESS_KEY_ID>',
|
||||
'aws_secret_access_key' => '<AWS_SECRET_ACCESS_KEY>'
|
||||
}
|
||||
```
|
||||
|
||||
| Setting | Description |
|
||||
|-------------------|-----------------------------------|
|
||||
| `enabled` | Enable or disable object storage. |
|
||||
| `proxy_download`  | Set to `true` to [enable proxying all files served](#proxy-download). Leaving this set to `false` reduces egress traffic, because clients download directly from remote storage instead of having all data proxied. |
|
||||
| `connection` | Various [connection options](#connection-settings) described below. |
|
||||
| `storage_options` | Options to use when saving new objects, such as [server side encryption](#server-side-encryption-headers). Introduced in GitLab 13.3. |
|
||||
| `objects` | [Object-specific configuration](#object-specific-configuration). |
|
||||
|
||||
### Connection settings
|
||||
|
||||
Both the consolidated configuration form and the storage-specific configuration form must configure a connection. The following sections describe parameters that can be used
|
||||
in the `connection` setting.
|
||||
|
||||
#### Amazon S3
|
||||
|
||||
The connection settings match those provided by [fog-aws](https://github.com/fog/fog-aws):
|
||||
|
||||
| Setting | Description | Default |
|
||||
|---------------------------------------------|------------------------------------|---------|
|
||||
| `provider` | Always `AWS` for compatible hosts. | `AWS` |
|
||||
| `aws_access_key_id` | AWS credentials, or compatible. | |
|
||||
| `aws_secret_access_key` | AWS credentials, or compatible. | |
|
||||
| `aws_signature_version` | AWS signature version to use. `2` or `4` are valid options. Digital Ocean Spaces and other providers may need `2`. | `4` |
|
||||
| `enable_signature_v4_streaming` | Set to `true` to enable HTTP chunked transfers with [AWS v4 signatures](https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html). Oracle Cloud S3 needs this to be `false`. | `true` |
|
||||
| `region` | AWS region. | |
|
||||
| `host` | DEPRECATED: Use `endpoint` instead. S3 compatible host for when not using AWS. For example, `localhost` or `storage.example.com`. HTTPS and port 443 is assumed. | `s3.amazonaws.com` |
|
||||
| `endpoint` | Can be used when configuring an S3 compatible service such as [MinIO](https://min.io), by entering a URL such as `http://127.0.0.1:9000`. This takes precedence over `host`. Always use `endpoint` for consolidated form. | (optional) |
|
||||
| `path_style` | Set to `true` to use `host/bucket_name/object` style paths instead of `bucket_name.host/object`. Set to `true` for using [MinIO](https://min.io). Leave as `false` for AWS S3. | `false`. |
|
||||
| `use_iam_profile` | Set to `true` to use IAM profile instead of access keys. | `false` |
|
||||
| `aws_credentials_refresh_threshold_seconds` | Sets the [automatic refresh threshold](https://github.com/fog/fog-aws#controlling-credential-refresh-time-with-iam-authentication) when using temporary credentials in IAM. | `15` |
|
||||
|
||||
#### Oracle Cloud S3
|
||||
|
||||
When using Oracle Cloud S3, be sure to use the following settings:
|
||||
|
||||
| Setting | Value |
|
||||
|---------------------------------|---------|
|
||||
| `enable_signature_v4_streaming` | `false` |
|
||||
| `path_style` | `true` |
|
||||
|
||||
If `enable_signature_v4_streaming` is set to `true`, you may see the
|
||||
following error in `production.log`:
|
||||
|
||||
```plaintext
|
||||
STREAMING-AWS4-HMAC-SHA256-PAYLOAD is not supported
|
||||
```
|
||||
|
||||
#### Google Cloud Storage (GCS)
|
||||
|
||||
Here are the valid connection parameters for GCS:
|
||||
|
||||
| Setting | Description | Example |
|
||||
|------------------------------|-------------------|---------|
|
||||
| `provider` | Provider name. | `Google` |
|
||||
| `google_project` | GCP project name. | `gcp-project-12345` |
|
||||
| `google_json_key_location` | JSON key path. | `/path/to/gcp-project-12345-abcde.json` |
|
||||
| `google_json_key_string` | JSON key string. | `{ "type": "service_account", "project_id": "example-project-382839", ... }` |
|
||||
| `google_application_default` | Set to `true` to use [Google Cloud Application Default Credentials](https://cloud.google.com/docs/authentication#adc) to locate service account credentials. | |
|
||||
|
||||
GitLab reads the value of `google_json_key_location`, then `google_json_key_string`, and finally, `google_application_default`.
|
||||
It uses the first of these settings that has a value.
|
||||
|
||||
The service account must have permission to access the bucket. For more information,
|
||||
see the [Cloud Storage authentication documentation](https://cloud.google.com/storage/docs/authentication).
|
||||
|
||||
NOTE:
|
||||
Bucket encryption with the [Cloud Key Management Service (KMS)](https://cloud.google.com/kms/docs) is not supported and results in [ETag mismatch errors](#etag-mismatch).
|
||||
|
||||
##### GCS example
|
||||
|
||||
For Omnibus installations, this is an example of the `connection` setting
|
||||
in the consolidated form:
|
||||
|
||||
```ruby
|
||||
gitlab_rails['object_store']['connection'] = {
|
||||
'provider' => 'Google',
|
||||
'google_project' => '<GOOGLE PROJECT>',
|
||||
'google_json_key_location' => '<FILENAME>'
|
||||
}
|
||||
```
|
||||
|
||||
##### GCS example with ADC
|
||||
|
||||
> [Introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/275979) in GitLab 13.6.
|
||||
|
||||
Google Cloud Application Default Credentials (ADC) are typically
|
||||
used with GitLab so that it uses the default service account. This eliminates the
|
||||
need to supply credentials for the instance. For example, in the consolidated form:
|
||||
|
||||
```ruby
|
||||
gitlab_rails['object_store']['connection'] = {
|
||||
'provider' => 'Google',
|
||||
'google_project' => '<GOOGLE PROJECT>',
|
||||
'google_application_default' => true
|
||||
}
|
||||
```
|
||||
|
||||
If you use ADC, be sure that:
|
||||
|
||||
- The service account that you use has the
|
||||
[`iam.serviceAccounts.signBlob` permission](https://cloud.google.com/iam/docs/reference/credentials/rest/v1/projects.serviceAccounts/signBlob).
|
||||
Typically this is done by granting the `Service Account Token Creator` role to the service account.
|
||||
- Your virtual machines have the [correct access scopes to access Google Cloud APIs](https://cloud.google.com/compute/docs/access/create-enable-service-accounts-for-instances#changeserviceaccountandscopes). If the machines do not have the right scope, the error logs may show:
|
||||
|
||||
```markdown
|
||||
Google::Apis::ClientError (insufficientPermissions: Request had insufficient authentication scopes.)
|
||||
```
|
||||
|
||||
#### Azure Blob storage
|
||||
|
||||
> [Introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/25877) in GitLab 13.4.
|
||||
|
||||
Although Azure uses the word `container` to denote a collection of
|
||||
blobs, GitLab standardizes on the term `bucket`. Be sure to configure
|
||||
Azure container names in the `bucket` settings.
|
||||
|
||||
Azure Blob storage can only be used with the [consolidated form](#consolidated-object-storage-configuration)
|
||||
because a single set of credentials is used to access multiple
|
||||
containers. The [storage-specific form](#storage-specific-configuration)
|
||||
is not supported. For more details, see [how to transition to consolidated form](#transition-to-consolidated-form).
|
||||
|
||||
The following are the valid connection parameters for Azure. For more information, see the
|
||||
[Azure Blob Storage documentation](https://learn.microsoft.com/en-us/azure/storage/blobs/storage-blobs-introduction).
|
||||
|
||||
| Setting | Description | Example |
|
||||
|------------------------------|----------------|-----------|
|
||||
| `provider` | Provider name. | `AzureRM` |
|
||||
| `azure_storage_account_name` | Name of the Azure Blob Storage account used to access the storage. | `azuretest` |
|
||||
| `azure_storage_access_key` | Storage account access key used to access the container. This is typically a secret, 512-bit encryption key encoded in base64. | `czV2OHkvQj9FKEgrTWJRZVRoV21ZcTN0Nnc5eiRDJkYpSkBOY1JmVWpYbjJy\nNHU3eCFBJUQqRy1LYVBkU2dWaw==\n` |
|
||||
| `azure_storage_domain` | Domain name used to contact the Azure Blob Storage API (optional). Defaults to `blob.core.windows.net`. Set this if you are using Azure China, Azure Germany, Azure US Government, or some other custom Azure domain. | `blob.core.windows.net` |
|
||||
|
||||
- For Omnibus installations, this is an example of the `connection` setting
|
||||
in the consolidated form:
|
||||
|
||||
```ruby
|
||||
gitlab_rails['object_store']['connection'] = {
|
||||
'provider' => 'AzureRM',
|
||||
'azure_storage_account_name' => '<AZURE STORAGE ACCOUNT NAME>',
|
||||
'azure_storage_access_key' => '<AZURE STORAGE ACCESS KEY>',
|
||||
'azure_storage_domain' => '<AZURE STORAGE DOMAIN>'
|
||||
}
|
||||
```
|
||||
|
||||
- For source installations, Workhorse also needs to be configured with Azure
|
||||
credentials. This isn't needed in Omnibus installs, because the Workhorse
|
||||
settings are populated from the previous settings.
|
||||
|
||||
1. Edit `/home/git/gitlab-workhorse/config.toml` and add or amend the following lines:
|
||||
|
||||
```toml
|
||||
[object_storage]
|
||||
provider = "AzureRM"
|
||||
|
||||
[object_storage.azurerm]
|
||||
azure_storage_account_name = "<AZURE STORAGE ACCOUNT NAME>"
|
||||
azure_storage_access_key = "<AZURE STORAGE ACCESS KEY>"
|
||||
```
|
||||
|
||||
If you are using a custom Azure storage domain,
|
||||
`azure_storage_domain` does **not** have to be set in the Workhorse
|
||||
configuration. This information is exchanged in an API call between
|
||||
GitLab Rails and Workhorse.
|
||||
|
||||
#### Storj Gateway (SJ)
|
||||
|
||||
NOTE:
|
||||
The Storj Gateway [does not support](https://github.com/storj/gateway-st/blob/4b74c3b92c63b5de7409378b0d1ebd029db9337d/docs/s3-compatibility.md) multi-threaded copying (see `UploadPartCopy` in the table).
|
||||
While an implementation [is planned](https://github.com/storj/roadmap/issues/40), you must [disable multi-threaded copying](#multi-threaded-copying) until completion.
|
||||
|
||||
The [Storj Network](https://www.storj.io/) provides an S3-compatible API gateway. Use the following configuration example:
|
||||
|
||||
```ruby
|
||||
gitlab_rails['object_store']['connection'] = {
|
||||
'provider' => 'AWS',
|
||||
'endpoint' => 'https://gateway.storjshare.io',
|
||||
'path_style' => true,
|
||||
'region' => 'eu1',
|
||||
'aws_access_key_id' => 'ACCESS_KEY',
|
||||
'aws_secret_access_key' => 'SECRET_KEY',
|
||||
'aws_signature_version' => 2,
|
||||
'enable_signature_v4_streaming' => false
|
||||
}
|
||||
```
|
||||
|
||||
The signature version must be `2`. Using v4 results in an HTTP 411 Length Required error.
|
||||
For more information, see [issue #4419](https://gitlab.com/gitlab-org/gitlab/-/issues/4419).
|
||||
|
||||
### Object-specific configuration
|
||||
|
||||
The following YAML shows how the `object_store` section defines
|
||||
object-specific configuration block and how the `enabled` and
|
||||
`proxy_download` flags can be overridden. The `bucket` is the only
|
||||
required parameter within each type:
|
||||
|
||||
```yaml
|
||||
object_store:
|
||||
connection:
|
||||
...
|
||||
objects:
|
||||
artifacts:
|
||||
bucket: artifacts
|
||||
proxy_download: false
|
||||
external_diffs:
|
||||
bucket: external-diffs
|
||||
lfs:
|
||||
bucket: lfs-objects
|
||||
uploads:
|
||||
bucket: uploads
|
||||
packages:
|
||||
bucket: packages
|
||||
dependency_proxy:
|
||||
enabled: false
|
||||
bucket: dependency_proxy
|
||||
terraform_state:
|
||||
bucket: terraform
|
||||
pages:
|
||||
bucket: pages
|
||||
```
|
||||
|
||||
This maps to this Omnibus GitLab configuration:
|
||||
|
||||
```ruby
|
||||
gitlab_rails['object_store']['objects']['artifacts']['bucket'] = 'artifacts'
|
||||
gitlab_rails['object_store']['objects']['artifacts']['proxy_download'] = false
|
||||
gitlab_rails['object_store']['objects']['external_diffs']['bucket'] = 'external-diffs'
|
||||
gitlab_rails['object_store']['objects']['lfs']['bucket'] = 'lfs-objects'
|
||||
gitlab_rails['object_store']['objects']['uploads']['bucket'] = 'uploads'
|
||||
gitlab_rails['object_store']['objects']['packages']['bucket'] = 'packages'
|
||||
gitlab_rails['object_store']['objects']['dependency_proxy']['enabled'] = false
|
||||
gitlab_rails['object_store']['objects']['dependency_proxy']['bucket'] = 'dependency-proxy'
|
||||
gitlab_rails['object_store']['objects']['terraform_state']['bucket'] = 'terraform-state'
|
||||
gitlab_rails['object_store']['objects']['pages']['bucket'] = 'pages'
|
||||
```
|
||||
|
||||
This is the list of valid `objects` that can be used:
|
||||
|
||||
| Type | Description |
|
||||
|--------------------|----------------------------------------------------------------------------|
|
||||
| `artifacts` | [CI artifacts](job_artifacts.md) |
|
||||
| `external_diffs` | [Merge request diffs](merge_request_diffs.md) |
|
||||
| `uploads` | [User uploads](uploads.md) |
|
||||
| `lfs` | [Git Large File Storage objects](lfs/index.md) |
|
||||
| `packages` | [Project packages (for example, PyPI, Maven, or NuGet)](packages/index.md) |
|
||||
| `dependency_proxy` | [Dependency Proxy](packages/dependency_proxy.md) |
|
||||
| `terraform_state` | [Terraform state files](terraform_state.md) |
|
||||
| `pages` | [Pages](pages/index.md) |
|
||||
|
||||
Within each object type, three parameters can be defined:
|
||||
|
||||
| Setting | Required? | Description |
|
||||
|------------------|------------------------|-------------------------------------|
|
||||
| `bucket` | **{check-circle}** Yes | Bucket name for the object storage. |
|
||||
| `enabled` | **{dotted-circle}** No | Overrides the common parameter. |
|
||||
| `proxy_download` | **{dotted-circle}** No | Overrides the common parameter. |
|
||||
|
||||
#### Disable object storage for specific features
|
||||
|
||||
As seen above, object storage can be disabled for specific types by
|
||||
setting the `enabled` flag to `false`. For example, to disable object
|
||||
storage for CI artifacts:
|
||||
|
||||
```ruby
|
||||
gitlab_rails['object_store']['objects']['artifacts']['enabled'] = false
|
||||
```
|
||||
|
||||
A bucket is not needed if the feature is disabled entirely. For example,
|
||||
no bucket is needed if CI artifacts are disabled with this setting:
|
||||
|
||||
```ruby
|
||||
gitlab_rails['artifacts_enabled'] = false
|
||||
```
|
||||
|
||||
### Migrate to object storage
|
||||
## Migrate to object storage
|
||||
|
||||
To migrate existing local data to object storage, see the following guides:
|
||||
|
||||
|
|
@ -694,7 +716,7 @@ To migrate existing local data to object storage see the following guides:
|
|||
- [Terraform state files](terraform_state.md#migrate-to-object-storage)
|
||||
- [Pages content](pages/index.md#migrate-pages-deployments-to-object-storage)
|
||||
|
||||
### Transition to consolidated form
|
||||
## Transition to consolidated form
|
||||
|
||||
Prior to GitLab 13.2:
|
||||
|
||||
|
|
@ -730,43 +752,6 @@ the original form is omitted. To move to the consolidated form, remove the
|
|||
original configuration (for example, `artifacts_object_store_enabled`, or
|
||||
`uploads_object_store_connection`)
|
||||
|
||||
### Storage-specific configuration
|
||||
|
||||
For configuring object storage in GitLab 13.1 and earlier, or for storage types not
|
||||
supported by consolidated configuration form, refer to the following guides:
|
||||
|
||||
| Object storage type | Supported by consolidated configuration? |
|
||||
|---------------------|------------------------------------------|
|
||||
| [Backups](../raketasks/backup_gitlab.md#upload-backups-to-a-remote-cloud-storage) | **{dotted-circle}** No |
|
||||
| [Job artifacts](job_artifacts.md#using-object-storage) including archived job logs | **{check-circle}** Yes |
|
||||
| [LFS objects](lfs/index.md#storing-lfs-objects-in-remote-object-storage) | **{check-circle}** Yes |
|
||||
| [Uploads](uploads.md#using-object-storage) | **{check-circle}** Yes |
|
||||
| [Container Registry](packages/container_registry.md#use-object-storage) (optional feature) | **{dotted-circle}** No |
|
||||
| [Merge request diffs](merge_request_diffs.md#using-object-storage) | **{check-circle}** Yes |
|
||||
| [Mattermost](https://docs.mattermost.com/configure/file-storage-configuration-settings.html)| **{dotted-circle}** No |
|
||||
| [Packages](packages/index.md#use-object-storage) (optional feature) | **{check-circle}** Yes |
|
||||
| [Dependency Proxy](packages/dependency_proxy.md#using-object-storage) (optional feature) | **{check-circle}** Yes |
|
||||
| [Autoscale runner caching](https://docs.gitlab.com/runner/configuration/autoscale.html#distributed-runners-caching) (optional for improved performance) | **{dotted-circle}** No |
|
||||
| [Terraform state files](terraform_state.md#using-object-storage) | **{check-circle}** Yes |
|
||||
| [Pages content](pages/index.md#using-object-storage) | **{check-circle}** Yes |
|
||||
|
||||
WARNING:
|
||||
The use of [encrypted S3 buckets](#encrypted-s3-buckets) with non-consolidated configuration is not supported.
|
||||
You may start getting [ETag mismatch errors](#etag-mismatch) if you use it.
|
||||
|
||||
### Other alternatives to file system storage
|
||||
|
||||
If you're working to [scale out](reference_architectures/index.md) your GitLab implementation,
|
||||
or add fault tolerance and redundancy, you may be
|
||||
looking at removing dependencies on block or network file systems.
|
||||
See the following additional guides:
|
||||
|
||||
1. Make sure the [`git` user home directory](https://docs.gitlab.com/omnibus/settings/configuration.html#moving-the-home-directory-for-a-user) is on local disk.
|
||||
1. Configure [database lookup of SSH keys](operations/fast_ssh_key_lookup.md)
|
||||
to eliminate the need for a shared `authorized_keys` file.
|
||||
1. [Prevent local disk usage for job logs](job_logs.md#prevent-local-disk-usage).
|
||||
1. [Disable Pages local storage](pages/index.md#disable-pages-local-storage).
|
||||
|
||||
## Use Amazon instance profiles
|
||||
|
||||
Instead of supplying AWS access and secret keys in object storage
|
||||
|
|
@ -889,6 +874,19 @@ After you have done at least one successful Rclone copy from the old location to
|
|||
1. Perform a final `rclone sync` run, knowing that your users cannot add new objects so you do not leave any behind in the old bucket.
|
||||
1. Update the object storage configuration of your GitLab server to use the new provider for `uploads`.
|
||||
|
||||
## Alternatives to file system storage
|
||||
|
||||
If you're working to [scale out](reference_architectures/index.md) your GitLab implementation,
|
||||
or add fault tolerance and redundancy, you may be
|
||||
looking at removing dependencies on block or network file systems.
|
||||
See the following additional guides:
|
||||
|
||||
1. Make sure the [`git` user home directory](https://docs.gitlab.com/omnibus/settings/configuration.html#moving-the-home-directory-for-a-user) is on local disk.
|
||||
1. Configure [database lookup of SSH keys](operations/fast_ssh_key_lookup.md)
|
||||
to eliminate the need for a shared `authorized_keys` file.
|
||||
1. [Prevent local disk usage for job logs](job_logs.md#prevent-local-disk-usage).
|
||||
1. [Disable Pages local storage](pages/index.md#disable-pages-local-storage).
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### Objects are not included in GitLab backups
|
||||
|
|
|
|||
|
|
@ -46,7 +46,8 @@ See [Authenticate to the Debian Package Repositories](../../user/packages/debian
|
|||
|
||||
## Upload a package file
|
||||
|
||||
> [Introduced](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/62028) in GitLab 14.0.
|
||||
> - [Introduced](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/62028) in GitLab 14.0.
|
||||
> - Upload with explicit distribution and component [introduced](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/101838) in GitLab 15.9.
|
||||
|
||||
Upload a Debian package file:
|
||||
|
||||
|
|
@ -54,18 +55,29 @@ Upload a Debian package file:
|
|||
PUT projects/:id/packages/debian/:file_name
|
||||
```
|
||||
|
||||
| Attribute | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `id` | string | yes | The ID or full path of the project. |
|
||||
| `file_name` | string | yes | The name of the Debian package file. |
|
||||
| Attribute | Type | Required | Description |
|
||||
| -------------- | ------ | -------- | ----------- |
|
||||
| `id` | string | yes | The ID or full path of the project. |
|
||||
| `file_name` | string | yes | The name of the Debian package file. |
|
||||
| `distribution` | string | no | The distribution codename or suite. Used with `component` for upload with explicit distribution and component. |
|
||||
| `component` | string | no | The package file component. Used with `distribution` for upload with explicit distribution and component. |
|
||||
|
||||
```shell
|
||||
curl --request PUT \
|
||||
--user <username>:<personal_access_token> \
|
||||
--user "<username>:<personal_access_token>" \
|
||||
--upload-file path/to/mypkg.deb \
|
||||
"https://gitlab.example.com/api/v4/projects/1/packages/debian/mypkg.deb"
|
||||
```
|
||||
|
||||
Upload with explicit distribution and component:
|
||||
|
||||
```shell
|
||||
curl --request PUT \
|
||||
--user "<username>:<personal_access_token>" \
|
||||
--upload-file /path/to/myother.deb \
|
||||
"https://gitlab.example.com/api/v4/projects/1/packages/debian/myother.deb?distribution=sid&component=main"
|
||||
```
|
||||
|
||||
## Download a package
|
||||
|
||||
> [Introduced](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/64923) in GitLab 14.2.
|
||||
|
|
|
|||
|
|
@ -644,6 +644,14 @@ For example, suppose a project with `id: 42` has an issue with `id: 46` and
|
|||
Not all resources with the `iid` field are fetched by `iid`. For guidance
|
||||
regarding which field to use, see the documentation for the specific resource.
|
||||
|
||||
## `null` vs `false`
|
||||
|
||||
In API responses, some boolean fields can have `null` values.
|
||||
A `null` boolean has no default value and is neither `true` nor `false`.
|
||||
GitLab treats `null` values in boolean fields the same as `false`.
|
||||
|
||||
In boolean arguments, you should only set `true` or `false` values (not `null`).
|
||||
|
||||
## Data validation and error reporting
|
||||
|
||||
When working with the API you may encounter validation errors, in which case
|
||||
|
|
|
|||
|
|
@ -845,6 +845,6 @@ If you see this page when trying to set a password via the web interface, make s
|
|||
|
||||
### Some job logs are not uploaded to object storage
|
||||
|
||||
When the GitLab deployment is scaled up to more than one node, some job logs may not be uploaded to [object storage](../../administration/object_storage.md) properly. [Incremental logging is required](../../administration/object_storage.md#other-alternatives-to-file-system-storage) for CI to use object storage.
|
||||
When the GitLab deployment is scaled up to more than one node, some job logs may not be uploaded to [object storage](../../administration/object_storage.md) properly. [Incremental logging is required](../../administration/object_storage.md#alternatives-to-file-system-storage) for CI to use object storage.
|
||||
|
||||
Enable [incremental logging](../../administration/job_logs.md#enable-or-disable-incremental-logging) if it has not already been enabled.
|
||||
|
|
|
|||
|
|
@ -162,9 +162,9 @@ EOF
|
|||
dput --config=dput.cf --unchecked --no-upload-log gitlab <your_package>.changes
|
||||
```
|
||||
|
||||
## Directly upload a package
|
||||
## Upload a package with explicit distribution and component
|
||||
|
||||
> Direct upload [introduced](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/101838) in GitLab 15.9.
|
||||
> Upload with explicit distribution and component [introduced](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/101838) in GitLab 15.9.
|
||||
|
||||
When you don't have access to `.changes` file, you can directly upload a `.deb` by passing
|
||||
distribution `codename` and target `component` as parameters with
|
||||
|
|
@ -173,7 +173,8 @@ For example, to upload to component `main` of distribution `sid` using a persona
|
|||
|
||||
```shell
|
||||
curl --request PUT --user "<username>:<personal_access_token>" \
|
||||
"https://gitlab.example.com/api/v4/projects/<project_id>/packages/debian/?distribution=sid&component=main" \
|
||||
--get --data "distribution=sid" --data "component=main" \
|
||||
"https://gitlab.example.com/api/v4/projects/<project_id>/packages/debian/" \
|
||||
--upload-file /path/to/your.deb
|
||||
```
|
||||
|
||||
|
|
|
|||
|
|
@ -26130,9 +26130,6 @@ msgstr ""
|
|||
msgid "MD5"
|
||||
msgstr ""
|
||||
|
||||
msgid "MERGED"
|
||||
msgstr ""
|
||||
|
||||
msgid "MR widget|Back to the merge request"
|
||||
msgstr ""
|
||||
|
||||
|
|
|
|||
|
|
@ -28,6 +28,17 @@ RSpec.describe 'Projects > Settings > Repository > Branch rules settings', featu
|
|||
let(:role) { :maintainer }
|
||||
|
||||
context 'Branch rules', :js do
|
||||
it 'renders breadcrumbs' do
|
||||
request
|
||||
|
||||
page.within '.breadcrumbs' do
|
||||
expect(page).to have_link('Repository Settings', href: project_settings_repository_path(project))
|
||||
expect(page).to have_link('Branch rules',
|
||||
href: project_settings_repository_path(project, anchor: 'branch-rules'))
|
||||
expect(page).to have_link('Details', href: '#')
|
||||
end
|
||||
end
|
||||
|
||||
it 'renders branch rules page' do
|
||||
request
|
||||
|
||||
|
|
|
|||
|
|
@ -2,6 +2,7 @@ import PasteMarkdown from '~/content_editor/extensions/paste_markdown';
|
|||
import CodeBlockHighlight from '~/content_editor/extensions/code_block_highlight';
|
||||
import Diagram from '~/content_editor/extensions/diagram';
|
||||
import Frontmatter from '~/content_editor/extensions/frontmatter';
|
||||
import Heading from '~/content_editor/extensions/heading';
|
||||
import Bold from '~/content_editor/extensions/bold';
|
||||
import { VARIANT_DANGER } from '~/alert';
|
||||
import eventHubFactory from '~/helpers/event_hub_factory';
|
||||
|
|
@ -20,6 +21,7 @@ describe('content_editor/extensions/paste_markdown', () => {
|
|||
let doc;
|
||||
let p;
|
||||
let bold;
|
||||
let heading;
|
||||
let renderMarkdown;
|
||||
let eventHub;
|
||||
const defaultData = { 'text/plain': '**bold text**' };
|
||||
|
|
@ -36,16 +38,18 @@ describe('content_editor/extensions/paste_markdown', () => {
|
|||
CodeBlockHighlight,
|
||||
Diagram,
|
||||
Frontmatter,
|
||||
Heading,
|
||||
PasteMarkdown.configure({ renderMarkdown, eventHub }),
|
||||
],
|
||||
});
|
||||
|
||||
({
|
||||
builders: { doc, p, bold },
|
||||
builders: { doc, p, bold, heading },
|
||||
} = createDocBuilder({
|
||||
tiptapEditor,
|
||||
names: {
|
||||
bold: { markType: Bold.name },
|
||||
heading: { nodeType: Heading.name },
|
||||
},
|
||||
}));
|
||||
});
|
||||
|
|
@ -110,6 +114,52 @@ describe('content_editor/extensions/paste_markdown', () => {
|
|||
|
||||
expect(tiptapEditor.state.doc.toJSON()).toEqual(expectedDoc.toJSON());
|
||||
});
|
||||
|
||||
describe('when pasting inline content in an existing paragraph', () => {
|
||||
it('inserts the inline content next to the existing paragraph content', async () => {
|
||||
const expectedDoc = doc(p('Initial text and', bold('bold text')));
|
||||
|
||||
tiptapEditor.commands.setContent('Initial text and ');
|
||||
|
||||
await triggerPasteEventHandlerAndWaitForTransaction(buildClipboardEvent());
|
||||
|
||||
expect(tiptapEditor.state.doc.toJSON()).toEqual(expectedDoc.toJSON());
|
||||
});
|
||||
});
|
||||
|
||||
describe('when pasting inline content and there is text selected', () => {
|
||||
it('inserts the block content after the existing paragraph', async () => {
|
||||
const expectedDoc = doc(p('Initial text', bold('bold text')));
|
||||
|
||||
tiptapEditor.commands.setContent('Initial text and ');
|
||||
tiptapEditor.commands.setTextSelection({ from: 13, to: 17 });
|
||||
|
||||
await triggerPasteEventHandlerAndWaitForTransaction(buildClipboardEvent());
|
||||
|
||||
expect(tiptapEditor.state.doc.toJSON()).toEqual(expectedDoc.toJSON());
|
||||
});
|
||||
});
|
||||
|
||||
describe('when pasting block content in an existing paragraph', () => {
|
||||
beforeEach(() => {
|
||||
renderMarkdown.mockReset();
|
||||
renderMarkdown.mockResolvedValueOnce('<h1>Heading</h1><p><strong>bold text</strong></p>');
|
||||
});
|
||||
|
||||
it('inserts the block content after the existing paragraph', async () => {
|
||||
const expectedDoc = doc(
|
||||
p('Initial text and'),
|
||||
heading({ level: 1 }, 'Heading'),
|
||||
p(bold('bold text')),
|
||||
);
|
||||
|
||||
tiptapEditor.commands.setContent('Initial text and ');
|
||||
|
||||
await triggerPasteEventHandlerAndWaitForTransaction(buildClipboardEvent());
|
||||
|
||||
expect(tiptapEditor.state.doc.toJSON()).toEqual(expectedDoc.toJSON());
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
describe('when rendering markdown fails', () => {
|
||||
|
|
|
|||
|
|
@ -34,7 +34,7 @@ describe('Jobs app', () => {
|
|||
const createComponent = (resolver) => {
|
||||
wrapper = shallowMount(JobsApp, {
|
||||
provide: {
|
||||
fullPath: 'root/ci-project',
|
||||
projectPath: 'root/ci-project',
|
||||
pipelineIid: 1,
|
||||
},
|
||||
apolloProvider: createMockApolloProvider(resolver),
|
||||
|
|
|
|||
|
|
@ -209,6 +209,38 @@ describe('WorkItemCommentForm', () => {
|
|||
|
||||
expect(wrapper.emitted('error')).toEqual([[error]]);
|
||||
});
|
||||
|
||||
it('ignores errors when mutation returns additional information as errors for quick actions', async () => {
|
||||
await createComponent({
|
||||
isEditing: true,
|
||||
mutationHandler: jest.fn().mockResolvedValue({
|
||||
data: {
|
||||
createNote: {
|
||||
note: {
|
||||
id: 'gid://gitlab/Discussion/c872ba2d7d3eb780d2255138d67ca8b04f65b122',
|
||||
discussion: {
|
||||
id: 'gid://gitlab/Discussion/c872ba2d7d3eb780d2255138d67ca8b04f65b122',
|
||||
notes: {
|
||||
nodes: [],
|
||||
__typename: 'NoteConnection',
|
||||
},
|
||||
__typename: 'Discussion',
|
||||
},
|
||||
__typename: 'Note',
|
||||
},
|
||||
__typename: 'CreateNotePayload',
|
||||
errors: ['Commands only Removed assignee @foobar.', 'Command names ["unassign"]'],
|
||||
},
|
||||
},
|
||||
}),
|
||||
});
|
||||
|
||||
findCommentForm().vm.$emit('submitForm', 'updated desc');
|
||||
|
||||
await waitForPromises();
|
||||
|
||||
expect(clearDraft).toHaveBeenCalledWith('gid://gitlab/WorkItem/1-comment');
|
||||
});
|
||||
});
|
||||
|
||||
it('calls the global ID work item query when `fetchByIid` prop is false', async () => {
|
||||
|
|
|
|||
|
|
@ -2,7 +2,7 @@
|
|||
|
||||
require 'spec_helper'
|
||||
|
||||
RSpec.describe Resolvers::Ci::AllJobsResolver do
|
||||
RSpec.describe Resolvers::Ci::AllJobsResolver, feature_category: :continuous_integration do
|
||||
include GraphqlHelpers
|
||||
|
||||
let_it_be(:successful_job) { create(:ci_build, :success, name: 'Job One') }
|
||||
|
|
|
|||
|
|
@ -2,7 +2,7 @@
|
|||
|
||||
require 'spec_helper'
|
||||
|
||||
RSpec.describe Resolvers::Ci::JobsResolver do
|
||||
RSpec.describe Resolvers::Ci::JobsResolver, feature_category: :continuous_integration do
|
||||
include GraphqlHelpers
|
||||
|
||||
let_it_be(:project) { create(:project, :repository, :public) }
|
||||
|
|
|
|||
|
|
@ -21,7 +21,7 @@ RSpec.describe Projects::PipelineHelper do
|
|||
expect(pipeline_tabs_data).to include({
|
||||
failed_jobs_count: pipeline.failed_builds.count,
|
||||
failed_jobs_summary: prepare_failed_jobs_summary_data(pipeline.failed_builds),
|
||||
full_path: project.full_path,
|
||||
project_path: project.full_path,
|
||||
graphql_resource_etag: graphql_etag_pipeline_path(pipeline),
|
||||
metrics_path: namespace_project_ci_prometheus_metrics_histograms_path(namespace_id: project.namespace, project_id: project, format: :json),
|
||||
pipeline_iid: pipeline.iid,
|
||||
|
|
|
|||
|
|
@ -0,0 +1,20 @@
|
|||
# frozen_string_literal: true
|
||||
|
||||
require 'spec_helper'
|
||||
require_migration!
|
||||
|
||||
RSpec.describe AddPackageIdCreatedAtDescIndexToPackageFiles, feature_category: :package_registry do
|
||||
it 'correctly migrates up and down' do
|
||||
reversible_migration do |migration|
|
||||
migration.before -> {
|
||||
expect(ActiveRecord::Base.connection.indexes('packages_package_files').map(&:name))
|
||||
.not_to include('index_packages_package_files_on_package_id_and_created_at_desc')
|
||||
}
|
||||
|
||||
migration.after -> {
|
||||
expect(ActiveRecord::Base.connection.indexes('packages_package_files').map(&:name))
|
||||
.to include('index_packages_package_files_on_package_id_and_created_at_desc')
|
||||
}
|
||||
end
|
||||
end
|
||||
end
|
||||
|
|
@ -1,6 +1,84 @@
|
|||
# frozen_string_literal: true
|
||||
require 'spec_helper'
|
||||
|
||||
RSpec.describe 'Query.jobs', feature_category: :continuous_integration do
|
||||
include GraphqlHelpers
|
||||
|
||||
let_it_be(:admin) { create(:admin) }
|
||||
let_it_be(:project) { create(:project, :repository, :public) }
|
||||
let_it_be(:pipeline) { create(:ci_pipeline, project: project) }
|
||||
let_it_be(:build) do
|
||||
create(:ci_build, pipeline: pipeline, name: 'my test job', ref: 'HEAD', tag_list: %w[tag1 tag2])
|
||||
end
|
||||
|
||||
let(:query) do
|
||||
%(
|
||||
query {
|
||||
jobs {
|
||||
nodes {
|
||||
id
|
||||
#{fields.join(' ')}
|
||||
}
|
||||
}
|
||||
}
|
||||
)
|
||||
end
|
||||
|
||||
let(:jobs_graphql_data) { graphql_data_at(:jobs, :nodes) }
|
||||
|
||||
let(:fields) do
|
||||
%w[commitPath refPath webPath browseArtifactsPath playPath tags]
|
||||
end
|
||||
|
||||
it 'returns the paths in each job of a pipeline' do
|
||||
post_graphql(query, current_user: admin)
|
||||
|
||||
expect(jobs_graphql_data).to contain_exactly(
|
||||
a_graphql_entity_for(
|
||||
build,
|
||||
commit_path: "/#{project.full_path}/-/commit/#{build.sha}",
|
||||
ref_path: "/#{project.full_path}/-/commits/HEAD",
|
||||
web_path: "/#{project.full_path}/-/jobs/#{build.id}",
|
||||
browse_artifacts_path: "/#{project.full_path}/-/jobs/#{build.id}/artifacts/browse",
|
||||
play_path: "/#{project.full_path}/-/jobs/#{build.id}/play",
|
||||
tags: build.tag_list
|
||||
)
|
||||
)
|
||||
end
|
||||
|
||||
context 'when requesting individual fields' do
|
||||
using RSpec::Parameterized::TableSyntax
|
||||
|
||||
let_it_be(:admin2) { create(:admin) }
|
||||
let_it_be(:project2) { create(:project) }
|
||||
let_it_be(:pipeline2) { create(:ci_pipeline, project: project2) }
|
||||
|
||||
where(:field) { fields }
|
||||
|
||||
with_them do
|
||||
let(:fields) do
|
||||
[field]
|
||||
end
|
||||
|
||||
it 'does not generate N+1 queries', :request_store, :use_sql_query_cache do
|
||||
# warm-up cache and so on:
|
||||
args = { current_user: admin }
|
||||
args2 = { current_user: admin2 }
|
||||
post_graphql(query, **args2)
|
||||
|
||||
control = ActiveRecord::QueryRecorder.new(skip_cached: false) do
|
||||
post_graphql(query, **args)
|
||||
end
|
||||
|
||||
create(:ci_build, pipeline: pipeline2, name: 'my test job2', ref: 'HEAD', tag_list: %w[tag3])
|
||||
post_graphql(query, **args)
|
||||
|
||||
expect { post_graphql(query, **args) }.not_to exceed_all_query_limit(control)
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
RSpec.describe 'Query.project.pipeline', feature_category: :continuous_integration do
|
||||
include GraphqlHelpers
|
||||
|
||||
|
|
|
|||
|
|
@ -130,7 +130,7 @@ RSpec.describe 'Query.runner(id)', feature_category: :runner_fleet do
|
|||
expect(runner_data['tagList']).to match_array runner.tag_list
|
||||
end
|
||||
|
||||
it 'does not execute more queries per runner', :aggregate_failures do
|
||||
it 'does not execute more queries per runner', :use_sql_query_cache, :aggregate_failures do
|
||||
# warm-up license cache and so on:
|
||||
personal_access_token = create(:personal_access_token, user: user)
|
||||
args = { current_user: user, token: { personal_access_token: personal_access_token } }
|
||||
|
|
@ -139,12 +139,12 @@ RSpec.describe 'Query.runner(id)', feature_category: :runner_fleet do
|
|||
|
||||
personal_access_token = create(:personal_access_token, user: another_admin)
|
||||
args = { current_user: another_admin, token: { personal_access_token: personal_access_token } }
|
||||
control = ActiveRecord::QueryRecorder.new { post_graphql(query, **args) }
|
||||
control = ActiveRecord::QueryRecorder.new(skip_cached: false) { post_graphql(query, **args) }
|
||||
|
||||
create(:ci_runner, :instance, version: '14.0.0', tag_list: %w[tag5 tag6], creator: another_admin)
|
||||
create(:ci_runner, :project, version: '14.0.1', projects: [project1], tag_list: %w[tag3 tag8], creator: another_admin)
|
||||
|
||||
expect { post_graphql(query, **args) }.not_to exceed_query_limit(control)
|
||||
expect { post_graphql(query, **args) }.not_to exceed_all_query_limit(control)
|
||||
end
|
||||
end
|
||||
|
||||
|
|
@ -777,20 +777,20 @@ RSpec.describe 'Query.runner(id)', feature_category: :runner_fleet do
|
|||
end
|
||||
|
||||
describe 'Query limits with jobs' do
|
||||
let!(:group1) { create(:group) }
|
||||
let!(:group2) { create(:group) }
|
||||
let!(:project1) { create(:project, :repository, group: group1) }
|
||||
let!(:project2) { create(:project, :repository, group: group1) }
|
||||
let!(:project3) { create(:project, :repository, group: group2) }
|
||||
let_it_be(:group1) { create(:group) }
|
||||
let_it_be(:group2) { create(:group) }
|
||||
let_it_be(:project1) { create(:project, :repository, group: group1) }
|
||||
let_it_be(:project2) { create(:project, :repository, group: group1) }
|
||||
let_it_be(:project3) { create(:project, :repository, group: group2) }
|
||||
|
||||
let!(:merge_request1) { create(:merge_request, source_project: project1) }
|
||||
let!(:merge_request2) { create(:merge_request, source_project: project3) }
|
||||
let_it_be(:merge_request1) { create(:merge_request, source_project: project1) }
|
||||
let_it_be(:merge_request2) { create(:merge_request, source_project: project3) }
|
||||
|
||||
let(:project_runner2) { create(:ci_runner, :project, projects: [project1, project2]) }
|
||||
let!(:build1) { create(:ci_build, :success, name: 'Build One', runner: project_runner2, pipeline: pipeline1) }
|
||||
let!(:pipeline1) do
|
||||
let_it_be(:pipeline1) do
|
||||
create(:ci_pipeline, project: project1, source: :merge_request_event, merge_request: merge_request1, ref: 'main',
|
||||
target_sha: 'xxx')
|
||||
target_sha: 'xxx')
|
||||
end
|
||||
|
||||
let(:query) do
|
||||
|
|
@ -801,24 +801,7 @@ RSpec.describe 'Query.runner(id)', feature_category: :runner_fleet do
|
|||
jobs {
|
||||
nodes {
|
||||
id
|
||||
detailedStatus {
|
||||
id
|
||||
detailsPath
|
||||
group
|
||||
icon
|
||||
text
|
||||
}
|
||||
project {
|
||||
id
|
||||
name
|
||||
webUrl
|
||||
}
|
||||
shortSha
|
||||
commitPath
|
||||
finishedAt
|
||||
duration
|
||||
queuedDuration
|
||||
tags
|
||||
#{field}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -826,31 +809,58 @@ RSpec.describe 'Query.runner(id)', feature_category: :runner_fleet do
|
|||
QUERY
|
||||
end
|
||||
|
||||
it 'does not execute more queries per job', :aggregate_failures do
|
||||
# warm-up license cache and so on:
|
||||
personal_access_token = create(:personal_access_token, user: user)
|
||||
args = { current_user: user, token: { personal_access_token: personal_access_token } }
|
||||
post_graphql(query, **args)
|
||||
context 'when requesting individual fields' do
|
||||
using RSpec::Parameterized::TableSyntax
|
||||
|
||||
control = ActiveRecord::QueryRecorder.new(query_recorder_debug: true) { post_graphql(query, **args) }
|
||||
where(:field) do
|
||||
[
|
||||
'detailedStatus { id detailsPath group icon text }',
|
||||
'project { id name webUrl }'
|
||||
] + %w[
|
||||
shortSha
|
||||
browseArtifactsPath
|
||||
commitPath
|
||||
playPath
|
||||
refPath
|
||||
webPath
|
||||
finishedAt
|
||||
duration
|
||||
queuedDuration
|
||||
tags
|
||||
]
|
||||
end
|
||||
|
||||
# Add a new build to project_runner2
|
||||
project_runner2.runner_projects << build(:ci_runner_project, runner: project_runner2, project: project3)
|
||||
pipeline2 = create(:ci_pipeline, project: project3, source: :merge_request_event, merge_request: merge_request2,
|
||||
ref: 'main', target_sha: 'xxx')
|
||||
build2 = create(:ci_build, :success, name: 'Build Two', runner: project_runner2, pipeline: pipeline2)
|
||||
with_them do
|
||||
it 'does not execute more queries per job', :use_sql_query_cache, :aggregate_failures do
|
||||
admin2 = create(:user, :admin) # do not reuse same user
|
||||
|
||||
args[:current_user] = create(:user, :admin) # do not reuse same user
|
||||
expect { post_graphql(query, **args) }.not_to exceed_all_query_limit(control)
|
||||
# warm-up license cache and so on:
|
||||
personal_access_token = create(:personal_access_token, user: user)
|
||||
personal_access_token2 = create(:personal_access_token, user: admin2)
|
||||
args = { current_user: user, token: { personal_access_token: personal_access_token } }
|
||||
args2 = { current_user: admin2, token: { personal_access_token: personal_access_token2 } }
|
||||
post_graphql(query, **args2)
|
||||
|
||||
expect(graphql_data.count).to eq 1
|
||||
expect(graphql_data).to match(
|
||||
a_hash_including(
|
||||
'runner' => a_graphql_entity_for(
|
||||
project_runner2,
|
||||
jobs: { 'nodes' => containing_exactly(a_graphql_entity_for(build1), a_graphql_entity_for(build2)) }
|
||||
)
|
||||
))
|
||||
control = ActiveRecord::QueryRecorder.new(skip_cached: false) { post_graphql(query, **args) }
|
||||
|
||||
# Add a new build to project_runner2
|
||||
project_runner2.runner_projects << build(:ci_runner_project, runner: project_runner2, project: project3)
|
||||
pipeline2 = create(:ci_pipeline, project: project3, source: :merge_request_event, merge_request: merge_request2,
|
||||
ref: 'main', target_sha: 'xxx')
|
||||
build2 = create(:ci_build, :success, name: 'Build Two', runner: project_runner2, pipeline: pipeline2)
|
||||
|
||||
expect { post_graphql(query, **args2) }.not_to exceed_all_query_limit(control)
|
||||
|
||||
expect(graphql_data.count).to eq 1
|
||||
expect(graphql_data).to match(
|
||||
a_hash_including(
|
||||
'runner' => a_graphql_entity_for(
|
||||
project_runner2,
|
||||
jobs: { 'nodes' => containing_exactly(a_graphql_entity_for(build1), a_graphql_entity_for(build2)) }
|
||||
)
|
||||
))
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
|
|
|
|||
Loading…
Reference in New Issue