Add latest changes from gitlab-org/gitlab@master

This commit is contained in:
GitLab Bot 2023-09-07 09:11:12 +00:00
parent 4bcd955830
commit 3822e951cb
105 changed files with 1289 additions and 820 deletions

View File

@ -16,7 +16,7 @@ _Describe in detail what merge request is being backported and why_
This checklist encourages us to confirm any changes have been analyzed to reduce risks in quality, performance, reliability, security, and maintainability.
* [ ] This MR is backporting a bug fix, documentation update, or spec fix, previously merged in the default branch.
* [ ] The original MR has been deployed to GitLab.com (not applicable for documentation or spec changes).
* [ ] The MR that fixed the bug on the default branch has been deployed to GitLab.com (not applicable for documentation or spec changes).
* [ ] This MR has a [severity label] assigned (if applicable).
* [ ] This MR has been approved by a maintainer (only one approval is required).
* [ ] Ensure the `e2e:package-and-test-ee` job has either succeeded or been approved by a Software Engineer in Test.

View File

@ -256,7 +256,6 @@ Layout/EmptyLineAfterMagicComment:
- 'ee/lib/ee/gitlab/ci/parsers/security/validators/schema_validator.rb'
- 'ee/lib/ee/gitlab/hook_data/group_member_builder.rb'
- 'ee/lib/ee/gitlab/hook_data/issue_builder.rb'
- 'ee/lib/ee/gitlab/hook_data/user_builder.rb'
- 'ee/lib/ee/gitlab/scim/base_deprovisioning_service.rb'
- 'ee/lib/ee/gitlab/scim/base_provisioning_service.rb'
- 'ee/lib/ee/gitlab/scim/provisioning_service.rb'
@ -331,7 +330,6 @@ Layout/EmptyLineAfterMagicComment:
- 'ee/spec/lib/ee/gitlab/git_access_snippet_spec.rb'
- 'ee/spec/lib/ee/gitlab/hook_data/group_member_builder_spec.rb'
- 'ee/spec/lib/ee/gitlab/hook_data/issue_builder_spec.rb'
- 'ee/spec/lib/ee/gitlab/hook_data/user_builder_spec.rb'
- 'ee/spec/lib/ee/gitlab/import_export/project/tree_restorer_spec.rb'
- 'ee/spec/lib/ee/gitlab/snippet_search_results_spec.rb'
- 'ee/spec/lib/gitlab/analytics/cycle_analytics/summary/group/stage_summary_spec.rb'

View File

@ -311,7 +311,6 @@ RSpec/ContextWording:
- 'ee/spec/lib/ee/gitlab/gon_helper_spec.rb'
- 'ee/spec/lib/ee/gitlab/group_search_results_spec.rb'
- 'ee/spec/lib/ee/gitlab/hook_data/group_member_builder_spec.rb'
- 'ee/spec/lib/ee/gitlab/hook_data/user_builder_spec.rb'
- 'ee/spec/lib/ee/gitlab/import_export/group/tree_restorer_spec.rb'
- 'ee/spec/lib/ee/gitlab/import_export/group/tree_saver_spec.rb'
- 'ee/spec/lib/ee/gitlab/import_export/project/tree_saver_spec.rb'

View File

@ -28,7 +28,6 @@ RSpec/ExpectInHook:
- 'ee/spec/lib/ee/api/helpers/members_helpers_spec.rb'
- 'ee/spec/lib/ee/gitlab/auth/ldap/sync/group_spec.rb'
- 'ee/spec/lib/ee/gitlab/gon_helper_spec.rb'
- 'ee/spec/lib/ee/gitlab/hook_data/user_builder_spec.rb'
- 'ee/spec/lib/gitlab/auth/smartcard/certificate_spec.rb'
- 'ee/spec/lib/gitlab/checks/diff_check_spec.rb'
- 'ee/spec/lib/gitlab/ci/minutes/cost_factor_spec.rb'

View File

@ -659,7 +659,6 @@ RSpec/MissingFeatureCategory:
- 'ee/spec/lib/ee/gitlab/group_search_results_spec.rb'
- 'ee/spec/lib/ee/gitlab/hook_data/group_member_builder_spec.rb'
- 'ee/spec/lib/ee/gitlab/hook_data/issue_builder_spec.rb'
- 'ee/spec/lib/ee/gitlab/hook_data/user_builder_spec.rb'
- 'ee/spec/lib/ee/gitlab/import_export/group/tree_restorer_spec.rb'
- 'ee/spec/lib/ee/gitlab/import_export/group/tree_saver_spec.rb'
- 'ee/spec/lib/ee/gitlab/import_export/project/tree_saver_spec.rb'

View File

@ -304,7 +304,7 @@ gem 'ruby-openai', '~> 3.7'
gem 'circuitbox', '2.0.0'
# Sanitize user input
gem 'sanitize', '~> 6.0'
gem 'sanitize', '~> 6.0.2'
gem 'babosa', '~> 2.0'
# Sanitizes SVG input

View File

@ -553,7 +553,7 @@
{"name":"rugged","version":"1.6.3","platform":"ruby","checksum":"362631de8dc6f1074242f21e01148ac70b7fe8cdb17f85eee91d4ea83457cb04"},
{"name":"safe_yaml","version":"1.0.4","platform":"ruby","checksum":"248193992ef1730a0c9ec579999ef2256a2b3a32a9bd9d708a1e12544a489ec2"},
{"name":"safety_net_attestation","version":"0.4.0","platform":"ruby","checksum":"96be2d74e7ed26453a51894913449bea0e072f44490021545ac2d1c38b0718ce"},
{"name":"sanitize","version":"6.0.0","platform":"ruby","checksum":"81795f985873f3bacee2eaaededeaafc3a29aafeaa9aff51e04b85a66bbf08ff"},
{"name":"sanitize","version":"6.0.2","platform":"ruby","checksum":"48c4eb8e92bb1699056b6000986ac50fc9df82f458a941abf2c4d6759bccd5cf"},
{"name":"sass","version":"3.5.5","platform":"ruby","checksum":"1bb5431bc620ce29076728a4c8f7b4acb55066ed9df8cf5d57db6cda450d8080"},
{"name":"sass-listen","version":"4.0.0","platform":"ruby","checksum":"ae9dcb76dd3e234329e5ba6e213f48e532c5a3e7b0b4d8a87f13aaca0cc18377"},
{"name":"sassc","version":"2.4.0","platform":"ruby","checksum":"4c60a2b0a3b36685c83b80d5789401c2f678c1652e3288315a1551d811d9f83e"},

View File

@ -1436,7 +1436,7 @@ GEM
safe_yaml (1.0.4)
safety_net_attestation (0.4.0)
jwt (~> 2.0)
sanitize (6.0.0)
sanitize (6.0.2)
crass (~> 1.0.2)
nokogiri (>= 1.12.0)
sass (3.5.5)
@ -1991,7 +1991,7 @@ DEPENDENCIES
ruby_parser (~> 3.20.3)
rubyzip (~> 2.3.2)
rugged (~> 1.6)
sanitize (~> 6.0)
sanitize (~> 6.0.2)
sassc-rails (~> 2.1.0)
sd_notify (~> 0.1.0)
seed-fu (~> 2.3.7)

View File

@ -41,8 +41,8 @@ import { createAlert, VARIANT_INFO } from '~/alert';
import { convertToGraphQLId, getIdFromGraphQLId } from '~/graphql_shared/utils';
import { TYPENAME_USER } from '~/graphql_shared/constants';
import searchProjectMembers from '~/graphql_shared/queries/project_user_members_search.query.graphql';
import getServiceDeskIssuesQuery from 'ee_else_ce/service_desk/queries/get_service_desk_issues.query.graphql';
import getServiceDeskIssuesCounts from 'ee_else_ce/service_desk/queries/get_service_desk_issues_counts.query.graphql';
import getServiceDeskIssuesQuery from 'ee_else_ce/issues/service_desk/queries/get_service_desk_issues.query.graphql';
import getServiceDeskIssuesCounts from 'ee_else_ce/issues/service_desk/queries/get_service_desk_issues_counts.query.graphql';
import searchProjectLabelsQuery from '../queries/search_project_labels.query.graphql';
import searchProjectMilestonesQuery from '../queries/search_project_milestones.query.graphql';
import setSortingPreferenceMutation from '../queries/set_sorting_preference.mutation.graphql';

View File

@ -2,7 +2,7 @@ import Vue from 'vue';
import VueApollo from 'vue-apollo';
import VueRouter from 'vue-router';
import { parseBoolean } from '~/lib/utils/common_utils';
import ServiceDeskListApp from 'ee_else_ce/service_desk/components/service_desk_list_app.vue';
import ServiceDeskListApp from 'ee_else_ce/issues/service_desk/components/service_desk_list_app.vue';
import { gqlClient } from './graphql';
export async function mountServiceDeskListApp() {

View File

@ -75,7 +75,7 @@ export default {
data-testid="artifacts-remove-timeline"
>
<span v-if="isExpired">{{ $options.i18n.expiredText }}</span>
<span v-if="willExpire" data-qa-selector="artifacts_unlocked_message_content">
<span v-if="willExpire" data-testid="artifacts-unlocked-message-content">
{{ $options.i18n.willExpireText }}
</span>
<timeago-tooltip v-if="artifact.expire_at" :time="artifact.expire_at" />
@ -89,7 +89,7 @@ export default {
</gl-link>
</p>
<p v-else-if="isLocked" class="build-detail-row">
<span data-testid="job-locked-message" data-qa-selector="artifacts_locked_message_content">
<span data-testid="artifacts-locked-message-content">
{{ $options.i18n.lockedText }}
</span>
</p>
@ -112,8 +112,7 @@ export default {
<gl-button
v-if="artifact.browse_path"
:href="artifact.browse_path"
data-testid="browse-artifacts"
data-qa-selector="browse_artifacts_button"
data-testid="browse-artifacts-button"
>{{ $options.i18n.browseText }}</gl-button
>
</gl-button-group>

View File

@ -138,7 +138,6 @@ export default {
:href="restJob.retry_path"
:modal-id="$options.forwardDeploymentFailureModalId"
variant="confirm"
data-qa-selector="retry_button"
data-testid="retry-button"
@updateVariablesClicked="$emit('updateVariables')"
/>

View File

@ -102,7 +102,6 @@ export default {
:href="pipeline.path"
class="js-pipeline-path link-commit"
data-testid="pipeline-path"
data-qa-selector="pipeline_path"
>#{{ pipeline.id }}</gl-link
>
</template>

View File

@ -78,7 +78,7 @@ export default {
};
</script>
<template>
<code class="job-log d-block" data-qa-selector="job_log_content">
<code class="job-log d-block" data-testid="job-log-content">
<template v-for="(section, index) in jobLog">
<collapsible-log-section
v-if="section.isHeader"

View File

@ -1,5 +1,5 @@
import { initFilteredSearchServiceDesk } from '~/issues';
import { mountServiceDeskListApp } from '~/service_desk';
import { mountServiceDeskListApp } from '~/issues/service_desk';
initFilteredSearchServiceDesk();

View File

@ -30,7 +30,6 @@ export default class Profile {
bindEvents() {
$('.js-preferences-form').on('change.preference', 'input[type=radio]', this.submitForm);
$('#user_email_opted_in').on('change', this.submitForm);
$('#user_notified_of_own_activity').on('change', this.submitForm);
this.form.on('submit', this.onSubmitForm);
}

View File

@ -15,10 +15,14 @@ export function dispatchSnowplowEvent(
let { value } = data;
const standardContext = getStandardContext({ extra });
const contexts = [standardContext];
let contexts = [standardContext];
if (data.context) {
contexts.push(data.context);
if (Array.isArray(data.context)) {
contexts = [...contexts, ...data.context];
} else {
contexts.push(data.context);
}
}
if (value !== undefined) {

View File

@ -15,17 +15,24 @@ const InternalEvents = {
/**
*
* @param {string} event
* @param {object} data
*/
track_event(event) {
track_event(event, data = {}) {
const { context, ...rest } = data;
const defaultContext = {
schema: SERVICE_PING_SCHEMA,
data: {
event_name: event,
data_source: 'redis_hll',
},
};
const mergedContext = context ? [defaultContext, context] : defaultContext;
API.trackInternalEvent(event);
Tracking.event(GITLAB_INTERNAL_EVENT_CATEGORY, event, {
context: {
schema: SERVICE_PING_SCHEMA,
data: {
event_name: event,
data_source: 'redis_hll',
},
},
context: mergedContext,
...rest,
});
},
/**
@ -35,8 +42,8 @@ const InternalEvents = {
mixin() {
return {
methods: {
track_event(event) {
InternalEvents.track_event(event);
track_event(event, data = {}) {
InternalEvents.track_event(event, data);
},
},
};

View File

@ -25,7 +25,7 @@ class Profiles::NotificationsController < Profiles::ApplicationController
end
def user_params
params.require(:user).permit(:notification_email, :email_opted_in, :notified_of_own_activity)
params.require(:user).permit(:notification_email, :notified_of_own_activity)
end
private

View File

@ -36,6 +36,7 @@ class ApplicationSetting < MainClusterwide::ApplicationRecord
jitsu_project_xid
jitsu_administrator_email
], remove_with: '16.5', remove_after: '2023-09-22'
ignore_columns %i[ai_access_token ai_access_token_iv], remove_with: '16.6', remove_after: '2023-10-22'
INSTANCE_REVIEW_MIN_USERS = 50
GRAFANA_URL_ERROR_MESSAGE = 'Please check your Grafana URL setting in ' \
@ -798,7 +799,6 @@ class ApplicationSetting < MainClusterwide::ApplicationRecord
attr_encrypted :product_analytics_configurator_connection_string, encryption_options_base_32_aes_256_gcm.merge(encode: false, encode_iv: false)
attr_encrypted :openai_api_key, encryption_options_base_32_aes_256_gcm.merge(encode: false, encode_iv: false)
attr_encrypted :anthropic_api_key, encryption_options_base_32_aes_256_gcm.merge(encode: false, encode_iv: false)
attr_encrypted :ai_access_token, encryption_options_base_32_aes_256_gcm.merge(encode: false, encode_iv: false)
attr_encrypted :vertex_ai_credentials, encryption_options_base_32_aes_256_gcm.merge(encode: false, encode_iv: false)
# Restricting the validation to `on: :update` only to avoid cyclical dependencies with

View File

@ -37,7 +37,6 @@ module ApplicationSettingImplementation
{
admin_mode: false,
after_sign_up_text: nil,
ai_access_token: nil,
akismet_enabled: false,
akismet_api_key: nil,
allow_local_requests_from_system_hooks: true,

View File

@ -30,6 +30,14 @@ class User < MainClusterwide::ApplicationRecord
include RestrictedSignup
include StripAttribute
include EachBatch
include IgnorableColumns
ignore_column %i[
email_opted_in
email_opted_in_ip
email_opted_in_source_id
email_opted_in_at
], remove_with: '16.6', remove_after: '2023-10-22'
DEFAULT_NOTIFICATION_LEVEL = :participating

View File

@ -26,13 +26,9 @@ module Projects
sent_email_records.save!
end
# rubocop: disable CodeReuse/ActiveRecord
def project_users
@project_users ||= project.users
.where(email_opted_in: true)
.merge(Users::InProductMarketingEmail.without_campaign(campaign))
@project_users ||= project.users.merge(Users::InProductMarketingEmail.without_campaign(campaign))
end
# rubocop: enable CodeReuse/ActiveRecord
def project_users_max_access_levels
ids = project_users.map(&:id)

View File

@ -1,6 +0,0 @@
- return unless Gitlab.com?
.gl-mb-3.js-email-opt-in.hidden
.gl-font-weight-bold.gl-mb-3
= _('Email updates (optional)')
= f.gitlab_ui_checkbox_component :email_opted_in, _("I'd like to receive updates about GitLab via email")

View File

@ -1,6 +1,3 @@
- form = local_assigns.fetch(:form)
.js-notification-email-listbox-input.gl-mb-3{ data: { label: _('Global notification email'), name: 'user[notification_email]', emails: @user.public_verified_emails.to_json, empty_value_text: _('Use primary email (%{email})') % { email: @user.email }, value: @user.notification_email, disabled: local_assigns.fetch(:email_change_disabled, nil) } }
.help-block
= local_assigns.fetch(:help_text, nil)
.form-group
= form.gitlab_ui_checkbox_component :email_opted_in, _('Receive product marketing emails')

View File

@ -19,8 +19,8 @@
= _('You can specify notification level per group or per project.')
.gl-mt-0
= gitlab_ui_form_for @user, url: profile_notifications_path, method: :put, html: { class: 'update-notifications gl-mt-3' } do |f|
= render_if_exists 'profiles/notifications/email_settings', form: f
= gitlab_ui_form_for @user, url: profile_notifications_path, method: :put, html: { class: 'update-notifications gl-mt-3 gl-mb-6' } do |f|
= render_if_exists 'profiles/notifications/email_settings'
= label_tag :global_notification_level, _('Global notification level'), class: "label-bold gl-mb-0"
.gl-text-secondary.gl-mb-3

View File

@ -3,6 +3,6 @@
%tr.tree-item{ 'data-link' => path_to_directory }
%td.tree-item-file-name
= tree_icon('folder', '755', directory.name)
= link_to path_to_directory, class: 'str-truncated', data: { qa_selector: 'directory_name_link', qa_directory_name: directory.name } do
= link_to path_to_directory, class: 'str-truncated', data: { testid: 'directory-name-link', qa_directory_name: directory.name } do
%span= directory.name
%td

View File

@ -45,8 +45,10 @@
.issuable-meta
%ul.controls.d-flex.align-items-end
- if merge_request.merged?
- merged_at = merge_request.merged_at ? l(merge_request.merged_at.to_time) : _("Merge date & time could not be determined")
%li.d-none.d-sm-flex
= render Pajamas::BadgeComponent.new(_('Merged'), size: 'sm', variant: 'info')
%a.has-tooltip{ href: "#{merge_request_path(merge_request)}#widget-state", title: merged_at }
= render Pajamas::BadgeComponent.new(_('Merged'), size: 'sm', variant: 'info')
- elsif merge_request.closed?
%li.d-none.d-sm-flex
= render Pajamas::BadgeComponent.new(_('Closed'), size: 'sm', variant: 'danger')

View File

@ -33,7 +33,6 @@
= render_if_exists "registrations/welcome/jobs_to_be_done", f: f
= render_if_exists "registrations/welcome/setup_for_company", f: f
= render_if_exists "registrations/welcome/joining_project"
= render 'devise/shared/email_opted_in', f: f
.row
.form-group.col-sm-12.gl-mb-0
- if partial_exists? "registrations/welcome/button"

View File

@ -109,6 +109,7 @@ bugfixed
bugfixes
bugfixing
Bugzilla
Buildah
Buildkite
buildpack
buildpacks
@ -644,6 +645,7 @@ OmniAuth
onboarding
OpenID
OpenShift
OpenTelemetry
Opsgenie
Opstrace
ORMs

View File

@ -292,6 +292,7 @@ Every audit event is associated with an event type. The association with the eve
| [`user_enable_admin_mode`](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/104754) | Event triggered on enabling admin mode | **{check-circle}** Yes | **{check-circle}** Yes | `system_access` | GitLab [15.7](https://gitlab.com/gitlab-org/gitlab/-/issues/362101) |
| [`user_impersonation`](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/79340) | Triggered when an instance administrator starts or stops impersonating a user | **{check-circle}** Yes | **{check-circle}** Yes | `user_management` | GitLab [14.8](https://gitlab.com/gitlab-org/gitlab/-/issues/300961) |
| [`user_password_updated`](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/106086) | Event triggered when a user password is updated | **{check-circle}** Yes | **{check-circle}** Yes | `user_management` | GitLab [15.7](https://gitlab.com/gitlab-org/gitlab/-/issues/369330) |
| [`user_profile_visiblity_updated`](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/129149) | Triggered when user toggles private profile user setting | **{dotted-circle}** No | **{check-circle}** Yes | `user_profile` | GitLab [16.3](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/129149) |
| [`user_rejected`](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/113784) | Event triggered when a user registration is rejected | **{check-circle}** Yes | **{dotted-circle}** No | `user_management` | GitLab [15.11](https://gitlab.com/gitlab-org/gitlab/-/issues/374107) |
| [`user_username_updated`](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/106086) | Event triggered on updating a user's username | **{check-circle}** Yes | **{check-circle}** Yes | `user_profile` | GitLab [15.7](https://gitlab.com/gitlab-org/gitlab/-/issues/369329) |
| [`feature_flag_created`](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/113453) | Triggered when a feature flag is created. | **{check-circle}** Yes | **{check-circle}** Yes | `feature_flags` | GitLab [15.10](https://gitlab.com/gitlab-org/gitlab/-/issues/374109) |

View File

@ -757,7 +757,7 @@ To solve this:
1. Sign in on the web interface for the secondary Geo site.
1. Back up [the `.git` folder](../../repository_storage_types.md#translate-hashed-storage-paths).
1. Back up [the `.git` folder](../../repository_storage_paths.md#translate-hashed-storage-paths).
1. Optional. [Spot-check](../../logs/log_parsing.md#find-all-projects-affected-by-a-fatal-git-problem)
a few of those IDs whether they indeed correspond

View File

@ -326,7 +326,7 @@ conflicts that could occur due to partially applied operations.
Repositories are stored in the storages at the relative path determined by the [Gitaly client](#gitaly-architecture). These paths can be
identified by them not beginning with the `@cluster` prefix. The relative paths
follow the [hashed storage](../repository_storage_types.md#hashed-storage) schema.
follow the [hashed storage](../repository_storage_paths.md#hashed-storage) schema.
#### Praefect-generated replica paths (GitLab 15.0 and later)
@ -377,7 +377,7 @@ Use the [`praefect metadata`](troubleshooting.md#view-repository-metadata) subco
The repository on disk also contains the project path in the Git configuration file. The configuration
file can be used to determine the project path even if the repository's metadata has been deleted.
Follow the [instructions in hashed storage's documentation](../repository_storage_types.md#from-hashed-path-to-project-name).
Follow the [instructions in hashed storage's documentation](../repository_storage_paths.md#from-hashed-path-to-project-name).
#### Atomicity of operations

View File

@ -372,7 +372,7 @@ sudo /opt/gitlab/embedded/bin/praefect -config /var/opt/gitlab/praefect/config.t
In this example, the virtual storage to specify is `default` or `storage-1`.
- `-repository` is the repository's relative path in the storage [beginning with `@hashed`](../repository_storage_types.md#hashed-storage).
- `-repository` is the repository's relative path in the storage [beginning with `@hashed`](../repository_storage_paths.md#hashed-storage).
For example:
```plaintext
@ -463,14 +463,14 @@ sudo /opt/gitlab/embedded/bin/praefect -config /var/opt/gitlab/praefect/config.t
In this example, the virtual storage to specify is `default` or `storage-1`.
- `-relative-path` is the relative path in the virtual storage. Usually [beginning with `@hashed`](../repository_storage_types.md#hashed-storage).
- `-relative-path` is the relative path in the virtual storage. Usually [beginning with `@hashed`](../repository_storage_paths.md#hashed-storage).
For example:
```plaintext
@hashed/f5/ca/f5ca38f748a1d6eaf726b8a42fb575c3c71f1864a8143301782de13da2d9202b.git
```
- `-replica-path` is the relative path on physical storage. Can start with [`@cluster` or match `relative_path`](../repository_storage_types.md#gitaly-cluster-storage).
- `-replica-path` is the relative path on physical storage. Can start with [`@cluster` or match `relative_path`](../repository_storage_paths.md#gitaly-cluster-storage).
- `-authoritative-storage` is the storage we want Praefect to treat as the primary. Required if
[per-repository replication](praefect.md#configure-replication-factor) is set as the replication strategy.
- `-replicate-immediately`, available in GitLab 14.6 and later, causes the command to replicate the repository to its secondaries immediately.

View File

@ -8,7 +8,7 @@ info: To determine the technical writer assigned to the Stage/Group associated w
This is a collection of Rake tasks to help you list and migrate
existing projects and their attachments to the new
[hashed storage](../repository_storage_types.md) that GitLab
[hashed storage](../repository_storage_paths.md) that GitLab
uses to organize the Git data.
## List projects and attachments
@ -75,7 +75,7 @@ To have a summary and then a list of projects and their attachments using hashed
## Migrate to hashed storage
WARNING:
In GitLab 13.0, [hashed storage](../repository_storage_types.md#hashed-storage)
In GitLab 13.0, [hashed storage](../repository_storage_paths.md#hashed-storage)
is enabled by default and the legacy storage is deprecated.
GitLab 14.0 eliminates support for legacy storage. If you're on GitLab
13.0 and later, switching new projects to legacy storage is not possible.
@ -129,7 +129,7 @@ You only need the `gitlab:storage:migrate_to_hashed` Rake task to migrate your r
## Rollback from hashed storage to legacy storage
WARNING:
In GitLab 13.0, [hashed storage](../repository_storage_types.md#hashed-storage)
In GitLab 13.0, [hashed storage](../repository_storage_paths.md#hashed-storage)
is enabled by default and the legacy storage is deprecated.
GitLab 14.0 eliminates support for legacy storage. If you're on GitLab
13.0 and later, switching new projects to legacy storage is not possible.

View File

@ -65,7 +65,7 @@ You can run [`git fsck`](https://git-scm.com/docs/git-fsck) using the command li
by default.
- For GitLab Helm chart installations, repositories are stored in the `/home/git/repositories` directory inside the
Gitaly pod by default.
1. [Identify the subdirectory that contains the repository](repository_storage_types.md#from-project-name-to-hashed-path)
1. [Identify the subdirectory that contains the repository](repository_storage_paths.md#from-project-name-to-hashed-path)
that you need to check.
1. Run the check. For example:

View File

@ -22,6 +22,185 @@ For more information on:
- Configuring Gitaly, see [Configure Gitaly](gitaly/configure_gitaly.md).
- Configuring Gitaly Cluster, see [Configure Gitaly Cluster](gitaly/praefect.md).
## Hashed storage
> **Storage name** field [renamed](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/128416) from **Gitaly storage name** and **Relative path** field [renamed](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/128416) from **Gitaly relative path** in GitLab 16.3.
Hashed storage stores projects on disk in a location based on a hash of the project's ID. This makes the folder
structure immutable and eliminates the need to synchronize state from URLs to disk structure. This means that renaming a
group, user, or project:
- Costs only the database transaction.
- Takes effect immediately.
The hash also helps spread the repositories more evenly on the disk. The top-level directory
contains fewer folders than the total number of top-level namespaces.
The hash format is based on the hexadecimal representation of a SHA256, calculated with
`SHA256(project.id)`. The top-level folder uses the first two characters, followed by another folder
with the next two characters. They are both stored in a special `@hashed` folder so they can
co-exist with existing legacy storage projects. For example:
```ruby
# Project's repository:
"@hashed/#{hash[0..1]}/#{hash[2..3]}/#{hash}.git"
# Wiki's repository:
"@hashed/#{hash[0..1]}/#{hash[2..3]}/#{hash}.wiki.git"
```
### Translate hashed storage paths
Troubleshooting problems with the Git repositories, adding hooks, and other tasks require you
to translate between the human-readable project name and the hashed storage path. You can translate:
- From a [project's name to its hashed path](#from-project-name-to-hashed-path).
- From a [hashed path to a project's name](#from-hashed-path-to-project-name).
#### From project name to hashed path
Administrators can look up a project's hashed path from its name or ID using:
- The [Admin Area](../administration/admin_area.md#administering-projects).
- A Rails console.
To look up a project's hash path in the Admin Area:
1. On the left sidebar, select **Search or go to**.
1. Select **Admin Area**.
1. On the left sidebar, select **Overview > Projects** and select the project.
1. Locate the **Relative path** field. The value is similar to:
```plaintext
"@hashed/b1/7e/b17ef6d19c7a5b1ee83b907c595526dcb1eb06db8227d650d5dda0a9f4ce8cd9.git"
```
To look up a project's hash path using a Rails console:
1. Start a [Rails console](operations/rails_console.md#starting-a-rails-console-session).
1. Run a command similar to this example (use either the project's ID or its name):
```ruby
Project.find(16).disk_path
Project.find_by_full_path('group/project').disk_path
```
#### From hashed path to project name
Administrators can look up a project's name from its hashed relative path using:
- A Rails console.
- The `config` file in the `*.git` directory.
To look up a project's name using the Rails console:
1. Start a [Rails console](operations/rails_console.md#starting-a-rails-console-session).
1. Run a command similar to this example:
```ruby
ProjectRepository.find_by(disk_path: '@hashed/b1/7e/b17ef6d19c7a5b1ee83b907c595526dcb1eb06db8227d650d5dda0a9f4ce8cd9').project
```
The quoted string in that command is the directory tree you can find on your GitLab server. For
example, on a default Linux package installation this would be `/var/opt/gitlab/git-data/repositories/@hashed/b1/7e/b17ef6d19c7a5b1ee83b907c595526dcb1eb06db8227d650d5dda0a9f4ce8cd9.git`
with `.git` from the end of the directory name removed.
The output includes the project ID and the project name. For example:
```plaintext
=> #<Project id:16 it/supportteam/ticketsystem>
```
To look up a project's name using the `config` file in the `*.git` directory:
1. Locate the `*.git` directory. This directory is located in `/var/opt/gitlab/git-data/repositories/@hashed/`, where the first four
characters of the hash are the first two directories in the path under `@hashed/`. For example, on a default Linux package installation the
`*.git` directory of the hash `b17ef6d19c7a5b1ee83b907c595526dcb1eb06db8227d650d5dda0a9f4ce8cd9` would be
`/var/opt/gitlab/git-data/repositories/@hashed/b1/7e/b17ef6d19c7a5b1ee83b907c595526dcb1eb06db8227d650d5dda0a9f4ce8cd9.git`.
1. Open the `config` file and locate the `fullpath=` key under `[gitlab]`.
### Hashed object pools
Object pools are repositories used to deduplicate forks of public and internal projects and
contain the objects from the source project. Using `objects/info/alternates`, the source project and
forks use the object pool for shared objects. For more information, see
[How Git object deduplication works in GitLab](../development/git_object_deduplication.md).
Objects are moved from the source project to the object pool when housekeeping is run on the source
project. Object pool repositories are stored similarly to regular repositories in a directory called `@pools` instead of `@hashed`.
```ruby
# object pool paths
"@pools/#{hash[0..1]}/#{hash[2..3]}/#{hash}.git"
```
WARNING:
Do not run `git prune` or `git gc` in object pool repositories, which are stored in the `@pools` directory.
This can cause data loss in the regular repositories that depend on the object pool.
### Group wiki storage
Unlike project wikis that are stored in the `@hashed` directory, group wikis are stored in a directory called `@groups`.
Like project wikis, group wikis follow the hashed storage folder convention, but use a hash of the group ID rather than the project ID.
For example:
```ruby
# group wiki paths
"@groups/#{hash[0..1]}/#{hash[2..3]}/#{hash}.wiki.git"
```
### Gitaly Cluster storage
If Gitaly Cluster is used, Praefect manages storage locations. The internal path used by Praefect for the repository
differs from the hashed path. For more information, see
[Praefect-generated replica paths](gitaly/index.md#praefect-generated-replica-paths-gitlab-150-and-later).
### Object storage support
This table shows the storage types available for each storable object:
| Storable object | Hashed storage | S3 compatible |
|:-----------------|:---------------|:--------------|
| Repository | Yes | - |
| Attachments | Yes | - |
| Avatars | No | - |
| Pages | No | - |
| Docker Registry | No | - |
| CI/CD job logs | No | - |
| CI/CD artifacts | No | Yes |
| CI/CD cache | No | Yes |
| LFS objects | Similar | Yes |
| Repository pools | Yes | - |
Files stored in an S3-compatible endpoint can have the same advantages as
[hashed storage](#hashed-storage), as long as they are not prefixed with
`#{namespace}/#{project_name}`. This is true for CI/CD cache and LFS objects.
#### Avatars
Each file is stored in a directory that matches the `id` assigned to it in the database. The
filename is always `avatar.png` for user avatars. When an avatar is replaced, the `Upload` model is
destroyed and a new one takes place with a different `id`.
#### CI/CD artifacts
CI/CD artifacts are S3-compatible.
#### LFS objects
[LFS Objects in GitLab](../topics/git/lfs/index.md) implement a similar
storage pattern using two characters and two-level folders, following the Git implementation:
```ruby
"shared/lfs-objects/#{oid[0..1]}/#{oid[2..3]}/#{oid[4..-1]}"
# Based on object `oid`: `8909029eb962194cfb326259411b22ae3f4a814b5be4f80651735aeef9f3229c`, path will be:
"shared/lfs-objects/89/09/029eb962194cfb326259411b22ae3f4a814b5be4f80651735aeef9f3229c"
```
LFS objects are also [S3-compatible](lfs/index.md#storing-lfs-objects-in-remote-object-storage).
## Configure where new repositories are stored
After you configure multiple repository storages, you can choose where new repositories are stored:

View File

@ -1,202 +1,8 @@
---
stage: Systems
group: Gitaly
info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://about.gitlab.com/handbook/product/ux/technical-writing/#assignments
redirect_to: 'repository_storage_paths.md'
remove_date: '2023-11-29'
---
# Repository storage types **(FREE SELF)**
This document was moved to [another location](repository_storage_paths.md).
GitLab can be configured to use one or multiple repository storages. These storages are accessed through either:
- [Gitaly](gitaly/index.md).
- [Gitaly Cluster](gitaly/index.md#gitaly-cluster) as virtual storage.
In GitLab:
- Repository storages are configured in:
- `/etc/gitlab/gitlab.rb` by the `git_data_dirs({})` configuration hash for Linux package installations.
- `gitlab.yml` by the `repositories.storages` key for self-compiled installations.
- The `default` repository storage is available in any installations that haven't customized it. By
default, it points to a Gitaly node.
The repository storage types documented here apply to any repository storage defined in
`git_data_dirs({})` or `repositories.storages`.
## Hashed storage
> **Storage name** field [renamed](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/128416) from **Gitaly storage name** and **Relative path** field [renamed](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/128416) from **Gitaly relative path** in GitLab 16.3.
Hashed storage stores projects on disk in a location based on a hash of the project's ID. This makes the folder
structure immutable and eliminates the need to synchronize state from URLs to disk structure. This means that renaming a
group, user, or project:
- Costs only the database transaction.
- Takes effect immediately.
The hash also helps spread the repositories more evenly on the disk. The top-level directory
contains fewer folders than the total number of top-level namespaces.
The hash format is based on the hexadecimal representation of a SHA256, calculated with
`SHA256(project.id)`. The top-level folder uses the first two characters, followed by another folder
with the next two characters. They are both stored in a special `@hashed` folder so they can
co-exist with existing legacy storage projects. For example:
```ruby
# Project's repository:
"@hashed/#{hash[0..1]}/#{hash[2..3]}/#{hash}.git"
# Wiki's repository:
"@hashed/#{hash[0..1]}/#{hash[2..3]}/#{hash}.wiki.git"
```
### Translate hashed storage paths
Troubleshooting problems with the Git repositories, adding hooks, and other tasks requires you
to translate between the human-readable project name and the hashed storage path. You can translate:
- From a [project's name to its hashed path](#from-project-name-to-hashed-path).
- From a [hashed path to a project's name](#from-hashed-path-to-project-name).
#### From project name to hashed path
Administrators can look up a project's hashed path from its name or ID using:
- The [Admin Area](../administration/admin_area.md#administering-projects).
- A Rails console.
To look up a project's hash path in the Admin Area:
1. On the left sidebar, select **Search or go to**.
1. Select **Admin Area**.
1. On the left sidebar, select **Overview > Projects** and select the project.
1. Locate the **Relative path** field. The value is similar to:
```plaintext
"@hashed/b1/7e/b17ef6d19c7a5b1ee83b907c595526dcb1eb06db8227d650d5dda0a9f4ce8cd9.git"
```
To look up a project's hash path using a Rails console:
1. Start a [Rails console](operations/rails_console.md#starting-a-rails-console-session).
1. Run a command similar to this example (use either the project's ID or its name):
```ruby
Project.find(16).disk_path
Project.find_by_full_path('group/project').disk_path
```
#### From hashed path to project name
Administrators can look up a project's name from its hashed relative path using:
- A Rails console.
- The `config` file in the `*.git` directory.
To look up a project's name using the Rails console:
1. Start a [Rails console](operations/rails_console.md#starting-a-rails-console-session).
1. Run a command similar to this example:
```ruby
ProjectRepository.find_by(disk_path: '@hashed/b1/7e/b17ef6d19c7a5b1ee83b907c595526dcb1eb06db8227d650d5dda0a9f4ce8cd9').project
```
The quoted string in that command is the directory tree you can find on your GitLab server. For
example, on a default Linux package installation this would be `/var/opt/gitlab/git-data/repositories/@hashed/b1/7e/b17ef6d19c7a5b1ee83b907c595526dcb1eb06db8227d650d5dda0a9f4ce8cd9.git`
with `.git` from the end of the directory name removed.
The output includes the project ID and the project name. For example:
```plaintext
=> #<Project id:16 it/supportteam/ticketsystem>
```
To look up a project's name using the `config` file in the `*.git` directory:
1. Locate the `*.git` directory. This directory is located in `/var/opt/gitlab/git-data/repositories/@hashed/`, where the first four
characters of the hash are the first two directories in the path under `@hashed/`. For example, on a default Linux package installation the
`*.git` directory of the hash `b17ef6d19c7a5b1ee83b907c595526dcb1eb06db8227d650d5dda0a9f4ce8cd9` would be
`/var/opt/gitlab/git-data/repositories/@hashed/b1/7e/b17ef6d19c7a5b1ee83b907c595526dcb1eb06db8227d650d5dda0a9f4ce8cd9.git`.
1. Open the `config` file and locate the `fullpath=` key under `[gitlab]`.
### Hashed object pools
Object pools are repositories used to deduplicate forks of public and internal projects and
contain the objects from the source project. Using `objects/info/alternates`, the source project and
forks use the object pool for shared objects. For more information, see
[How Git object deduplication works in GitLab](../development/git_object_deduplication.md).
Objects are moved from the source project to the object pool when housekeeping is run on the source
project. Object pool repositories are stored similarly to regular repositories in a directory called `@pools` instead of `@hashed`
```ruby
# object pool paths
"@pools/#{hash[0..1]}/#{hash[2..3]}/#{hash}.git"
```
WARNING:
Do not run `git prune` or `git gc` in object pool repositories, which are stored in the `@pools` directory.
This can cause data loss in the regular repositories that depend on the object pool.
### Group wiki storage
Unlike project wikis that are stored in the `@hashed` directory, group wikis are stored in a directory called `@groups`.
Like project wikis, group wikis follow the hashed storage folder convention, but use a hash of the group ID rather than the project ID.
For example:
```ruby
# group wiki paths
"@groups/#{hash[0..1]}/#{hash[2..3]}/#{hash}.wiki.git"
```
### Gitaly Cluster storage
If Gitaly Cluster is used, Praefect manages storage locations. The internal path used by Praefect for the repository
differs from the hashed path. For more information, see
[Praefect-generated replica paths](gitaly/index.md#praefect-generated-replica-paths-gitlab-150-and-later).
### Object storage support
This table shows which storable objects are supported in each storage type:
| Storable object | Hashed storage | S3 compatible |
|:-----------------|:---------------|:--------------|
| Repository | Yes | - |
| Attachments | Yes | - |
| Avatars | No | - |
| Pages | No | - |
| Docker Registry | No | - |
| CI/CD job logs | No | - |
| CI/CD artifacts | No | Yes |
| CI/CD cache | No | Yes |
| LFS objects | Similar | Yes |
| Repository pools | Yes | - |
Files stored in an S3-compatible endpoint can have the same advantages as
[hashed storage](#hashed-storage), as long as they are not prefixed with
`#{namespace}/#{project_name}`. This is true for CI/CD cache and LFS objects.
#### Avatars
Each file is stored in a directory that matches the `id` assigned to it in the database. The
filename is always `avatar.png` for user avatars. When an avatar is replaced, the `Upload` model is
destroyed and a new one takes place with a different `id`.
#### CI/CD artifacts
CI/CD artifacts are S3-compatible.
#### LFS objects
[LFS Objects in GitLab](../topics/git/lfs/index.md) implement a similar
storage pattern using two characters and two-level folders, following the Git implementation:
```ruby
"shared/lfs-objects/#{oid[0..1]}/#{oid[2..3]}/#{oid[4..-1]}"
# Based on object `oid`: `8909029eb962194cfb326259411b22ae3f4a814b5be4f80651735aeef9f3229c`, path will be:
"shared/lfs-objects/89/09/029eb962194cfb326259411b22ae3f4a814b5be4f80651735aeef9f3229c"
```
LFS objects are also [S3-compatible](lfs/index.md#storing-lfs-objects-in-remote-object-storage).
<!-- This redirect file can be deleted after <2023-11-29>. -->

View File

@ -39,7 +39,7 @@ Prerequisites:
- The [storage name](gitaly/configure_gitaly.md#gitlab-requires-a-default-repository-storage), path to the Gitaly configuration file
(default is `/var/opt/gitlab/gitaly/config.toml` on Linux package instances), and the
[repository relative path](repository_storage_types.md#from-project-name-to-hashed-path) for the repository.
[repository relative path](repository_storage_paths.md#from-project-name-to-hashed-path) for the repository.
To set server hooks for a repository:
@ -76,10 +76,10 @@ To create server hooks for a repository:
1. Go to **Overview > Projects** and select the project you want to add a server hook to.
1. On the page that appears, locate the value of **Relative path**. This path is where server
hooks must be located.
- If you are using [hashed storage](repository_storage_types.md#hashed-storage), see
[Translate hashed storage paths](repository_storage_types.md#translate-hashed-storage-paths) for information on
- If you are using [hashed storage](repository_storage_paths.md#hashed-storage), see
[Translate hashed storage paths](repository_storage_paths.md#translate-hashed-storage-paths) for information on
interpreting the relative path.
- If you are not using [hashed storage](repository_storage_types.md#hashed-storage):
- If you are not using [hashed storage](repository_storage_paths.md#hashed-storage):
- For Linux package installations, the path is usually `/var/opt/gitlab/git-data/repositories/<group>/<project>.git`.
- For self-compiled installations, the path is usually `/home/git/repositories/<group>/<project>.git`.
1. On the file system, create a new directory in the correct location called `custom_hooks`.
@ -109,7 +109,7 @@ To accomplish this, follow the same steps for setting custom repository hooks fo
The location to copy the scripts to depends on where repositories are stored:
- In GitLab 15.2 and earlier, Gitaly Cluster uses the [hashed storage path](repository_storage_types.md#hashed-storage)
- In GitLab 15.2 and earlier, Gitaly Cluster uses the [hashed storage path](repository_storage_paths.md#hashed-storage)
reported by the GitLab application.
- In GitLab 15.3 and later, new repositories are created using
[Praefect-generated replica paths](gitaly/index.md#praefect-generated-replica-paths-gitlab-150-and-later),
@ -169,7 +169,7 @@ subdirectories.
Prerequisites:
- The [storage name and relative path](repository_storage_types.md#from-project-name-to-hashed-path) for the repository.
- The [storage name and relative path](repository_storage_paths.md#from-project-name-to-hashed-path) for the repository.
To remove server hooks, pass an empty tarball to `hook set` to indicate that the repository should contain no hooks. For example:

View File

@ -12589,11 +12589,22 @@ Duo Chat message.
| <a id="aichatmessagecontent"></a>`content` | [`String`](#string) | Content of the message. Can be null for failed responses. |
| <a id="aichatmessagecontenthtml"></a>`contentHtml` | [`String`](#string) | Content of the message in HTML format. Can be null for failed responses. |
| <a id="aichatmessageerrors"></a>`errors` | [`[String!]!`](#string) | Errors that occurred while asynchronously fetching an AI (assistant) response. |
| <a id="aichatmessageextras"></a>`extras` | [`AiMessageExtras`](#aimessageextras) | Extra message metadata. |
| <a id="aichatmessageid"></a>`id` | [`ID`](#id) | UUID of the message. |
| <a id="aichatmessagerequestid"></a>`requestId` | [`ID`](#id) | UUID of the original request message. Shared between chat prompt and response. |
| <a id="aichatmessagerole"></a>`role` | [`AiChatMessageRole!`](#aichatmessagerole) | Message role. |
| <a id="aichatmessagetimestamp"></a>`timestamp` | [`Time!`](#time) | Message timestamp. |
### `AiMessageExtras`
Extra metadata for AI message.
#### Fields
| Name | Type | Description |
| ---- | ---- | ----------- |
| <a id="aimessageextrassources"></a>`sources` | [`[JSON!]`](#json) | Sources used to form the message. |
### `AiMessageType`
#### Fields
@ -12614,6 +12625,7 @@ Duo Chat message.
| ---- | ---- | ----------- |
| <a id="airesponsechunkid"></a>`chunkId` | [`Int`](#int) | Incremental ID for a chunk from a streamed response. Null when it is not a streamed response. |
| <a id="airesponseerrors"></a>`errors` | [`[String!]`](#string) | Errors return by AI API as response. |
| <a id="airesponseextras"></a>`extras` | [`AiMessageExtras`](#aimessageextras) | Extra message metadata. |
| <a id="airesponserequestid"></a>`requestId` | [`String`](#string) | ID of the original request. |
| <a id="airesponseresponsebody"></a>`responseBody` | [`String`](#string) | Response body from AI API. |
| <a id="airesponseresponsebodyhtml"></a>`responseBodyHtml` | [`String`](#string) | Response body HTML. |

View File

@ -0,0 +1,149 @@
---
stage: Verify
group: Pipeline Execution
info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://about.gitlab.com/handbook/product/ux/technical-writing/#assignments
type: howto
---
# Tutorial: Use Buildah in a rootless container with GitLab Runner Operator on OpenShift **(FREE)**
This tutorial teaches you how to successfully build images using the `buildah` tool,
with GitLab Runner deployed using [GitLab Runner Operator](https://gitlab.com/gitlab-org/gl-openshift/gitlab-runner-operator)
on an OpenShift cluster.
This guide is an adaptation of [using Buildah to build images in a rootless OpenShift container](https://github.com/containers/buildah/blob/main/docs/tutorials/05-openshift-rootless-build.md)
documentation for GitLab Runner Operator.
To complete this tutorial, you will:
1. [Configure the Buildah image](#configure-the-buildah-image)
1. [Configure the service account](#configure-the-service-account)
1. [Configure the job](#configure-the-job)
## Prerequisites
- A runner already deployed to a `gitlab-runner` namespace.
## Configure the Buildah image
We start by preparing a custom image based on the `quay.io/buildah/stable:v1.23.1` image.
1. Create the `Containerfile-buildah` file:
```shell
cat > Containerfile-buildah <<EOF
FROM quay.io/buildah/stable:v1.23.1
RUN touch /etc/subgid /etc/subuid \
&& chmod g=u /etc/subgid /etc/subuid /etc/passwd \
&& echo build:10000:65536 > /etc/subuid \
&& echo build:10000:65536 > /etc/subgid
# Use chroot since the default runc does not work when running rootless
RUN echo "export BUILDAH_ISOLATION=chroot" >> /home/build/.bashrc
# Use VFS since fuse does not work
RUN mkdir -p /home/build/.config/containers \
&& (echo '[storage]';echo 'driver = "vfs"') > /home/build/.config/containers/storage.conf
# The buildah container will run as `build` user
USER build
WORKDIR /home/build
EOF
```
1. Build and push the Buildah image to a Container Registry. Let's push to the
[GitLab Container Registry](../../user/packages/container_registry/index.md):
```shell
docker build -f Containerfile-buildah -t registry.example.com/group/project/buildah:1.23.1 .
docker push registry.example.com/group/project/buildah:1.23.1
```
## Configure the service account
For these steps, you need to run the commands in a terminal connected to the OpenShift cluster.
1. Run this command to create a service account named `buildah-sa`:
```shell
oc create -f - <<EOF
apiVersion: v1
kind: ServiceAccount
metadata:
name: buildah-sa
namespace: gitlab-runner
EOF
```
1. Give the created service account the ability to run with `anyuid` [SCC](https://docs.openshift.com/container-platform/4.3/authentication/managing-security-context-constraints.html):
```shell
oc adm policy add-scc-to-user anyuid -z buildah-sa -n gitlab-runner
```
1. Use a [runner configuration template](https://docs.gitlab.com/runner/configuration/configuring_runner_operator.html#customize-configtoml-with-a-configuration-template)
to configure Operator to use the service account we just created. Create a `custom-config.toml` file that contains:
```toml
[[runners]]
[runners.kubernetes]
service_account_overwrite_allowed = "buildah-*"
```
1. Create a `ConfigMap` named `custom-config-toml` from the `custom-config.toml` file:
```shell
oc create configmap custom-config-toml --from-file config.toml=custom-config.toml -n gitlab-runner
```
1. Set the `config` property of the `Runner` by updating its [Custom Resource Definition (CRD) file](https://docs.gitlab.com/runner/install/operator.html#install-gitlab-runner):
```yaml
apiVersion: apps.gitlab.com/v1beta2
kind: Runner
metadata:
name: buildah-runner
spec:
gitlabUrl: https://gitlab.example.com
token: gitlab-runner-secret
config: custom-config-toml
```
## Configure the job
The final step is to set up a GitLab CI/CD configuration file in your project to use
the image we built and the configured service account:
```yaml
build:
stage: build
image: registry.example.com/group/project/buildah:1.23.1
variables:
STORAGE_DRIVER: vfs
BUILDAH_FORMAT: docker
BUILDAH_ISOLATION: chroot
FQ_IMAGE_NAME: "$CI_REGISTRY_IMAGE/test"
KUBERNETES_SERVICE_ACCOUNT_OVERWRITE: "buildah-sa"
before_script:
# Log in to the GitLab container registry
- buildah login -u "$CI_REGISTRY_USER" --password $CI_REGISTRY_PASSWORD $CI_REGISTRY
script:
- buildah images
- buildah build -t $FQ_IMAGE_NAME
- buildah images
- buildah push $FQ_IMAGE_NAME
```
The job should use the image that we built as the value of the `image` keyword.
The `KUBERNETES_SERVICE_ACCOUNT_OVERWRITE` variable should have the value of the
service account name that we created.
Congratulations, you've successfully built an image with Buildah in a rootless container!
## Troubleshooting
There is a [known issue](https://github.com/containers/buildah/issues/4049) with running as non-root.
You might need to use a [workaround](https://docs.gitlab.com/runner/configuration/configuring_runner_operator.html#configure-setfcap)
if you are using an OpenShift runner.

View File

@ -639,32 +639,43 @@ To build Docker images without enabling privileged mode on the runner, you can
use one of these alternatives:
- [`kaniko`](using_kaniko.md).
- [`buildah`](https://github.com/containers/buildah). There is a [known issue](https://github.com/containers/buildah/issues/4049)
with running as non-root, you might need this [workaround](https://docs.gitlab.com/runner/configuration/configuring_runner_operator.html#configure-setfcap)
if you are using OpenShift Runner.
- [`buildah`](#buildah-example).
For example, with `buildah`:
### Buildah example
To use Buildah with GitLab CI/CD, you need [a runner](https://docs.gitlab.com/runner/) with one
of the following executors:
- [Kubernetes](https://docs.gitlab.com/runner/executors/kubernetes.html).
- [Docker](https://docs.gitlab.com/runner/executors/docker.html).
- [Docker Machine](https://docs.gitlab.com/runner/executors/docker_machine.html).
In this example, you use Buildah to:
1. Build a Docker image.
1. Push it to [GitLab Container Registry](../../user/packages/container_registry/index.md).
In the last step, Buildah uses the `Dockerfile` under the
root directory of the project to build the Docker image. Finally, it pushes the image to the
project's Container Registry:
```yaml
# Some details from https://major.io/2019/05/24/build-containers-in-gitlab-ci-with-buildah/
build:
stage: build
image: quay.io/buildah/stable:v1.31.0
image: quay.io/buildah/stable
variables:
# Use vfs with buildah. Docker offers overlayfs as a default, but buildah
# Use vfs with buildah. Docker offers overlayfs as a default, but Buildah
# cannot stack overlayfs on top of another overlayfs filesystem.
STORAGE_DRIVER: vfs
# Write all image metadata in the docker format, not the standard OCI format.
# Newer versions of docker can handle the OCI format, but older versions, like
# the one shipped with Fedora 30, cannot handle the format.
BUILDAH_FORMAT: docker
# You may need this workaround for some errors: https://stackoverflow.com/a/70438141/1233435
BUILDAH_ISOLATION: chroot
FQ_IMAGE_NAME: "$CI_REGISTRY_IMAGE/test"
before_script:
# Log in to the GitLab container registry
- export REGISTRY_AUTH_FILE=$HOME/auth.json
# GitLab Container Registry credentials taken from the
# [predefined CI/CD variables](../variables/index.md#predefined-cicd-variables)
# to authenticate to the registry.
- echo "$CI_REGISTRY_PASSWORD" | buildah login -u "$CI_REGISTRY_USER" --password-stdin $CI_REGISTRY
script:
- buildah images
@ -673,6 +684,9 @@ build:
- buildah push $FQ_IMAGE_NAME
```
If you are using GitLab Runner Operator deployed to an OpenShift cluster, try the
[tutorial for using Buildah to build images in rootless container](buildah_rootless_tutorial.md).
## Use the GitLab Container Registry
After you've built a Docker image, you can push it to the

View File

@ -494,6 +494,30 @@ def find_actual_head_pipeline
end
```
In model associations or scopes, this can be used as in the following example:
```ruby
class Group < Namespace
has_many :users, -> {
allow_cross_joins_across_databases(url: "https://gitlab.com/gitlab-org/gitlab/-/issues/422405")
}, through: :group_members
end
```
WARNING:
Overriding an association can have unintended consequences and may even lead to data loss, as we noticed in [issue 424307](https://gitlab.com/gitlab-org/gitlab/-/issues/424307). Do not override existing ActiveRecord associations to mark a cross-join as allowed, as in the example below.
```ruby
class Group < Namespace
has_many :users, through: :group_members
# DO NOT override an association like this.
def users
super.allow_cross_joins_across_databases(url: "https://gitlab.com/gitlab-org/gitlab/-/issues/422405")
end
end
```
The `url` parameter should point to an issue with a milestone for when we intend
to fix the cross-join. If the cross-join is being used in a migration, we do not
need to fix the code. See <https://gitlab.com/gitlab-org/gitlab/-/issues/340017>

View File

@ -6,16 +6,11 @@ info: To determine the technical writer assigned to the Stage/Group associated w
# Architecture
When developing a feature that requires architectural design, or changing the fundamental design of an existing feature, discuss it with a Frontend Architecture Expert.
When building new features, consider reaching out to relevant stakeholders as early as possible in the process.
A Frontend Architect is an expert who makes high-level Frontend design decisions
and decides on technical standards, including coding standards and frameworks.
Architectural decisions should be accessible to everyone, so document
them in the relevant Merge Request discussion or by updating our documentation
when appropriate.
You can find the Frontend Architecture experts on the [team page](https://about.gitlab.com/company/team/).
Architectural decisions should be accessible to everyone. Document
them in the relevant Merge Request discussions or by updating our documentation
when appropriate by adding an entry to this section.
## Widget Architecture
@ -23,8 +18,3 @@ The [Plan stage](https://about.gitlab.com/handbook/engineering/development/dev/p
is refactoring the right sidebar to consist of **widgets**. They have a specific architecture to be
reusable and to expose an interface that can be used by external Vue applications on the page.
Learn more about the [widget architecture](widgets.md).
## Examples
You can find [documentation about the desired architecture](vue.md) for a new
feature built with Vue.js.

View File

@ -6,9 +6,9 @@ info: To determine the technical writer assigned to the Stage/Group associated w
# Axios
We use [Axios](https://github.com/axios/axios) to communicate with the server in Vue applications and most new code.
In older parts of our codebase using the REST API, we used [Axios](https://github.com/axios/axios) to communicate with the server, but you should not use Axios in new applications. Instead rely on `apollo-client` to query the GraphQL API. For more details, see [our GraphQL documentation](graphql.md).
In order to guarantee all defaults are set you *should not use Axios directly*, you should import Axios from `axios_utils`.
To guarantee all defaults are set you should import Axios from `axios_utils`. Do not use Axios directly.
## CSRF token

View File

@ -1,218 +1,10 @@
---
stage: none
group: unassigned
info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://about.gitlab.com/handbook/product/ux/technical-writing/#assignments
redirect_to: 'design_patterns.md'
remove_date: '2023-12-07'
---
# Design Anti-patterns
This document was moved to [another location](design_patterns.md).
Anti-patterns may seem like good approaches at first, but it has been shown that they bring more ills than benefits. These should
generally be avoided.
Throughout the GitLab codebase, there may be historic uses of these anti-patterns. [Use discretion](https://about.gitlab.com/handbook/engineering/development/principles/#balance-refactoring-and-velocity)
when figuring out whether or not to refactor, when touching code that uses one of these legacy patterns.
NOTE:
For new features, anti-patterns are not necessarily prohibited, but it is **strongly suggested** to find another approach.
## Shared Global Object (Anti-pattern)
A shared global object is an instance of something that can be accessed from anywhere and therefore has no clear owner.
Here's an example of this pattern applied to a Vuex Store:
```javascript
const createStore = () => new Vuex.Store({
actions,
state,
mutations
});
// Notice that we are forcing all references to this module to use the same single instance of the store.
// We are also creating the store at import-time and there is nothing which can automatically dispose of it.
//
// As an alternative, we should export the `createStore` and let the client manage the
// lifecycle and instance of the store.
export default createStore();
```
### What problems do Shared Global Objects cause?
Shared Global Objects are convenient because they can be accessed from anywhere. However,
the convenience does not always outweigh their heavy cost:
- **No ownership.** There is no clear owner to these objects and therefore they assume a non-deterministic
and permanent lifecycle. This can be especially problematic for tests.
- **No access control.** When Shared Global Objects manage some state, this can create some very buggy and difficult
coupling situations because there is no access control to this object.
- **Possible circular references.** Shared Global Objects can also create some circular referencing situations since submodules
of the Shared Global Object can reference modules that reference itself (see
[this MR for an example](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/33366)).
Here are some historic examples where this pattern was identified to be problematic:
- [Reference to global Vuex store in IDE](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/36401)
- [Docs update to discourage singleton Vuex store](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/36952)
### When could the Shared Global Object pattern be actually appropriate?
Shared Global Objects solve the problem of making something globally accessible. This pattern
could be appropriate:
- When a responsibility is truly global and should be referenced across the application
(for example, an application-wide Event Bus).
Even in these scenarios, consider avoiding the Shared Global Object pattern because the
side-effects can be notoriously difficult to reason with.
### References
For more information, see [Global Variables Are Bad on the C2 wiki](https://wiki.c2.com/?GlobalVariablesAreBad).
## Singleton (Anti-pattern)
The classic [Singleton pattern](https://en.wikipedia.org/wiki/Singleton_pattern) is an approach to ensure that only one
instance of a thing exists.
Here's an example of this pattern:
```javascript
class MyThing {
constructor() {
// ...
}
// ...
}
MyThing.instance = null;
export const getThingInstance = () => {
if (MyThing.instance) {
return MyThing.instance;
}
const instance = new MyThing();
MyThing.instance = instance;
return instance;
};
```
### What problems do Singletons cause?
It is a big assumption that only one instance of a thing should exist. More often than not,
a Singleton is misused and causes very tight coupling amongst itself and the modules that reference it.
Here are some historic examples where this pattern was identified to be problematic:
- [Test issues caused by singleton class in IDE](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/30398#note_331174190)
- [Implicit Singleton created by module's shared variables](https://gitlab.com/gitlab-org/gitlab-vscode-extension/-/merge_requests/97#note_417515776)
- [Complexity caused by Singletons](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/29461#note_324585814)
Here are some ills that Singletons often produce:
1. **Non-deterministic tests.** Singletons encourage non-deterministic tests because the single instance is shared across
individual tests, often causing the state of one test to bleed into another.
1. **High coupling.** Under the hood, clients of a singleton class all share a single specific
instance of an object, which means this pattern inherits all the [problems of Shared Global Object](#what-problems-do-shared-global-objects-cause)
such as no clear ownership and no access control. This leads to high coupling situations that can
be buggy and difficult to untangle.
1. **Infectious.** Singletons are infectious, especially when they manage state. Consider the component
[RepoEditor](https://gitlab.com/gitlab-org/gitlab/-/blob/27ad6cb7b76430fbcbaf850df68c338d6719ed2b/app%2Fassets%2Fjavascripts%2Fide%2Fcomponents%2Frepo_editor.vue#L0-1)
used in the Web IDE. This component interfaces with a Singleton [Editor](https://gitlab.com/gitlab-org/gitlab/-/blob/862ad57c44ec758ef3942ac2e7a2bd40a37a9c59/app%2Fassets%2Fjavascripts%2Fide%2Flib%2Feditor.js#L21)
which manages some state for working with Monaco. Because of the Singleton nature of the Editor class,
the component `RepoEditor` is now forced to be a Singleton as well. Multiple instances of this component
would cause production issues because no one truly owns the instance of `Editor`.
### Why is the Singleton pattern popular in other languages like Java?
This is because of the limitations of languages like Java where everything has to be wrapped
in a class. In JavaScript we have things like object and function literals where we can solve
many problems with a module that exports utility functions.
### When could the Singleton pattern be actually appropriate?
Singletons solve the problem of enforcing there to be only 1 instance of a thing. It's possible
that a Singleton could be appropriate in the following rare cases:
- We need to manage some resource that **MUST** have just 1 instance (that is, some hardware restriction).
- There is a real [cross-cutting concern](https://en.wikipedia.org/wiki/Cross-cutting_concern) (for example, logging) and a Singleton provides the simplest API.
Even in these scenarios, consider avoiding the Singleton pattern.
### What alternatives are there to the Singleton pattern?
#### Utility Functions
When no state needs to be managed, we can export utility functions from a module without
messing with any class instantiation.
```javascript
// bad - Singleton
export class ThingUtils {
static create() {
if(this.instance) {
return this.instance;
}
this.instance = new ThingUtils();
return this.instance;
}
bar() { /* ... */ }
fuzzify(id) { /* ... */ }
}
// good - Utility functions
export const bar = () => { /* ... */ };
export const fuzzify = (id) => { /* ... */ };
```
#### Dependency Injection
[Dependency Injection](https://en.wikipedia.org/wiki/Dependency_injection) is an approach which breaks
coupling by declaring a module's dependencies to be injected from outside the module (for example, through constructor parameters, a bona-fide Dependency Injection framework, and even in Vue `provide/inject`).
```javascript
// bad - Vue component coupled to Singleton
export default {
created() {
this.mediator = MyFooMediator.getInstance();
},
};
// good - Vue component declares dependency
export default {
inject: ['mediator']
};
```
```javascript
// bad - We're not sure where the singleton is in its lifecycle so we init it here.
export class Foo {
constructor() {
Bar.getInstance().init();
}
stuff() {
return Bar.getInstance().doStuff();
}
}
// good - Let's receive this dependency as a constructor argument.
// It's also not our responsibility to manage the lifecycle.
export class Foo {
constructor(bar) {
this.bar = bar;
}
stuff() {
return this.bar.doStuff();
}
}
```
In this example, the lifecycle and implementation details of `mediator` are all managed
**outside** the component (most likely the page entrypoint).
<!-- This redirect file can be deleted after <2023-12-07>. -->
<!-- Redirects that point to other docs in the same project expire in three months. -->
<!-- Before deletion, see: https://docs.gitlab.com/ee/development/documentation/redirects.html -->

View File

@ -6,12 +6,226 @@ info: To determine the technical writer assigned to the Stage/Group associated w
# Design Patterns
The following design patterns are suggested approaches for solving common problems. Use discretion when evaluating
if a certain pattern makes sense in your situation. Just because it is a pattern, doesn't mean it is a good one for your problem.
This page covers suggested design patterns and also anti-patterns.
NOTE:
When adding a design pattern to this document, be sure to clearly state the **problem it solves**.
When adding a design anti-pattern, clearly state **the problem it prevents**.
## TBD
## Patterns
Stay tuned!
The following design patterns are suggested approaches for solving common problems. Use discretion when evaluating
if a certain pattern makes sense in your situation. Just because it is a pattern, doesn't mean it is a good one for your problem.
## Anti-patterns
Anti-patterns may seem like good approaches at first, but it has been shown that they bring more ills than benefits. These should
generally be avoided.
Throughout the GitLab codebase, there may be historic uses of these anti-patterns. [Use discretion](https://about.gitlab.com/handbook/engineering/development/principles/#balance-refactoring-and-velocity)
when figuring out whether or not to refactor, when touching code that uses one of these legacy patterns.
NOTE:
For new features, anti-patterns are not necessarily prohibited, but it is **strongly suggested** to find another approach.
### Shared Global Object
A shared global object is an instance of something that can be accessed from anywhere and therefore has no clear owner.
Here's an example of this pattern applied to a Vuex Store:
```javascript
const createStore = () => new Vuex.Store({
actions,
state,
mutations
});
// Notice that we are forcing all references to this module to use the same single instance of the store.
// We are also creating the store at import-time and there is nothing which can automatically dispose of it.
//
// As an alternative, we should export the `createStore` and let the client manage the
// lifecycle and instance of the store.
export default createStore();
```
#### What problems do Shared Global Objects cause?
Shared Global Objects are convenient because they can be accessed from anywhere. However,
the convenience does not always outweigh their heavy cost:
- **No ownership.** There is no clear owner to these objects and therefore they assume a non-deterministic
and permanent lifecycle. This can be especially problematic for tests.
- **No access control.** When Shared Global Objects manage some state, this can create some very buggy and difficult
coupling situations because there is no access control to this object.
- **Possible circular references.** Shared Global Objects can also create some circular referencing situations since submodules
of the Shared Global Object can reference modules that reference itself (see
[this MR for an example](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/33366)).
Here are some historic examples where this pattern was identified to be problematic:
- [Reference to global Vuex store in IDE](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/36401)
- [Docs update to discourage singleton Vuex store](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/36952)
#### When could the Shared Global Object pattern be actually appropriate?
Shared Global Objects solve the problem of making something globally accessible. This pattern
could be appropriate:
- When a responsibility is truly global and should be referenced across the application
(for example, an application-wide Event Bus).
Even in these scenarios, consider avoiding the Shared Global Object pattern because the
side-effects can be notoriously difficult to reason with.
#### References
For more information, see [Global Variables Are Bad on the C2 wiki](https://wiki.c2.com/?GlobalVariablesAreBad).
### Singleton
The classic [Singleton pattern](https://en.wikipedia.org/wiki/Singleton_pattern) is an approach to ensure that only one
instance of a thing exists.
Here's an example of this pattern:
```javascript
class MyThing {
constructor() {
// ...
}
// ...
}
MyThing.instance = null;
export const getThingInstance = () => {
if (MyThing.instance) {
return MyThing.instance;
}
const instance = new MyThing();
MyThing.instance = instance;
return instance;
};
```
#### What problems do Singletons cause?
It is a big assumption that only one instance of a thing should exist. More often than not,
a Singleton is misused and causes very tight coupling amongst itself and the modules that reference it.
Here are some historic examples where this pattern was identified to be problematic:
- [Test issues caused by singleton class in IDE](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/30398#note_331174190)
- [Implicit Singleton created by module's shared variables](https://gitlab.com/gitlab-org/gitlab-vscode-extension/-/merge_requests/97#note_417515776)
- [Complexity caused by Singletons](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/29461#note_324585814)
Here are some ills that Singletons often produce:
1. **Non-deterministic tests.** Singletons encourage non-deterministic tests because the single instance is shared across
individual tests, often causing the state of one test to bleed into another.
1. **High coupling.** Under the hood, clients of a singleton class all share a single specific
instance of an object, which means this pattern inherits all the [problems of Shared Global Object](#what-problems-do-shared-global-objects-cause)
such as no clear ownership and no access control. This leads to high coupling situations that can
be buggy and difficult to untangle.
1. **Infectious.** Singletons are infectious, especially when they manage state. Consider the component
[RepoEditor](https://gitlab.com/gitlab-org/gitlab/-/blob/27ad6cb7b76430fbcbaf850df68c338d6719ed2b/app%2Fassets%2Fjavascripts%2Fide%2Fcomponents%2Frepo_editor.vue#L0-1)
used in the Web IDE. This component interfaces with a Singleton [Editor](https://gitlab.com/gitlab-org/gitlab/-/blob/862ad57c44ec758ef3942ac2e7a2bd40a37a9c59/app%2Fassets%2Fjavascripts%2Fide%2Flib%2Feditor.js#L21)
which manages some state for working with Monaco. Because of the Singleton nature of the Editor class,
the component `RepoEditor` is now forced to be a Singleton as well. Multiple instances of this component
would cause production issues because no one truly owns the instance of `Editor`.
#### Why is the Singleton pattern popular in other languages like Java?
This is because of the limitations of languages like Java where everything has to be wrapped
in a class. In JavaScript we have things like object and function literals where we can solve
many problems with a module that exports utility functions.
#### When could the Singleton pattern be actually appropriate?
Singletons solve the problem of enforcing there to be only 1 instance of a thing. It's possible
that a Singleton could be appropriate in the following rare cases:
- We need to manage some resource that **MUST** have just 1 instance (that is, some hardware restriction).
- There is a real [cross-cutting concern](https://en.wikipedia.org/wiki/Cross-cutting_concern) (for example, logging) and a Singleton provides the simplest API.
Even in these scenarios, consider avoiding the Singleton pattern.
#### What alternatives are there to the Singleton pattern?
##### Utility Functions
When no state needs to be managed, we can export utility functions from a module without
messing with any class instantiation.
```javascript
// bad - Singleton
export class ThingUtils {
static create() {
if(this.instance) {
return this.instance;
}
this.instance = new ThingUtils();
return this.instance;
}
bar() { /* ... */ }
fuzzify(id) { /* ... */ }
}
// good - Utility functions
export const bar = () => { /* ... */ };
export const fuzzify = (id) => { /* ... */ };
```
##### Dependency Injection
[Dependency Injection](https://en.wikipedia.org/wiki/Dependency_injection) is an approach which breaks
coupling by declaring a module's dependencies to be injected from outside the module (for example, through constructor parameters, a bona-fide Dependency Injection framework, and even in Vue `provide/inject`).
```javascript
// bad - Vue component coupled to Singleton
export default {
created() {
this.mediator = MyFooMediator.getInstance();
},
};
// good - Vue component declares dependency
export default {
inject: ['mediator']
};
```
```javascript
// bad - We're not sure where the singleton is in its lifecycle so we init it here.
export class Foo {
constructor() {
Bar.getInstance().init();
}
stuff() {
return Bar.getInstance().doStuff();
}
}
// good - Let's receive this dependency as a constructor argument.
// It's also not our responsibility to manage the lifecycle.
export class Foo {
constructor(bar) {
this.bar = bar;
}
stuff() {
return this.bar.doStuff();
}
}
```
In this example, the lifecycle and implementation details of `mediator` are all managed
**outside** the component (most likely the page entrypoint).

View File

@ -0,0 +1,11 @@
---
stage: none
group: unassigned
info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://about.gitlab.com/handbook/product/ux/technical-writing/#assignments
---
# Tech Stack
For an exhaustive list of all the technology that we use, simply check our [latest `package.json` file](https://gitlab.com/gitlab-org/gitlab/-/blob/master/package.json?ref_type=heads).
Each navigation item in this section is a guide for that specific technology.

View File

@ -56,7 +56,7 @@ they are still not 100% standardized. You can see them below:
CI Artifacts and LFS Objects behave differently in CE and EE. In CE they inherit the `GitlabUploader`
while in EE they inherit the `ObjectStorage` and store files in an S3 API compatible object store.
In the case of Issues/MR/Notes Markdown attachments, there is a different approach using the [Hashed Storage](../administration/repository_storage_types.md) layout,
In the case of Issues/MR/Notes Markdown attachments, there is a different approach using the [Hashed Storage](../administration/repository_storage_paths.md) layout,
instead of basing the path into a mutable variable `:project_path_with_namespace`, it's possible to use the
hash of the project ID instead, if project migrates to the new approach (introduced in 10.2).

View File

@ -46,7 +46,7 @@ reliable decide if an object is no longer needed.
### Git alternates in GitLab: pool repositories
GitLab organizes this object borrowing by [creating special **pool repositories**](../administration/repository_storage_types.md)
GitLab organizes this object borrowing by [creating special **pool repositories**](../administration/repository_storage_paths.md)
which are hidden from the user. We then use Git
alternates to let a collection of project repositories borrow from a
single pool repository. We call such a collection of project
@ -101,7 +101,7 @@ are as follows:
### Assumptions
- All repositories in a pool must use [hashed storage](../administration/repository_storage_types.md).
- All repositories in a pool must use [hashed storage](../administration/repository_storage_paths.md).
This is so that we don't have to ever worry about updating paths in
`object/info/alternates` files.
- All repositories in a pool must be on the same Gitaly storage shard.

View File

@ -1225,7 +1225,7 @@ specs, so created repositories accumulate in this directory over the
lifetime of the process. Deleting them is expensive, but this could lead to
pollution unless carefully managed.
To avoid this, [hashed storage](../../administration/repository_storage_types.md)
To avoid this, [hashed storage](../../administration/repository_storage_paths.md)
is enabled in the test suite. This means that repositories are given a unique
path that depends on their project's ID. Because the project IDs are not reset
between specs, each spec gets its own repository on disk,

Binary file not shown.

After

Width:  |  Height:  |  Size: 126 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 59 KiB

79
doc/operations/tracing.md Normal file
View File

@ -0,0 +1,79 @@
---
stage: Analytics
group: Observability
info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://about.gitlab.com/handbook/product/ux/technical-writing/#assignments
---
# Distributed tracing **(ULTIMATE SAAS EXPERIMENT)**
This feature is an [Experiment](../policy/experiment-beta-support.md). If you find a bug,
[open an issue in our issue tracker](https://gitlab.com/gitlab-org/opstrace/opstrace/-/issues/).
> Introduced in GitLab 16.3 [with flags](../administration/feature_flags.md) named `observability_group_tab` and `observability_tracing`. Disabled by default.
FLAG:
On GitLab.com, by default this feature is not available. To make it available,
an administrator can [enable the feature flags](../administration/feature_flags.md) named `observability_group_tab` and `observability_tracing`.
The feature is not ready for production use.
With distributed tracing you can inspect how a request moves through different services and systems,
the timing of each operation, and any errors or logs as they occur.
Tracing is particularly useful in the context of microservice applications, which group multiple independent services collaborating to fulfill user requests.
## Configure distributed tracing for a project
To configure distributed tracing:
1. [Create an access token and enable tracing.](#create-an-access-token-and-enable-tracing)
1. [Configure your application to use the OpenTelemetry exporter.](#configure-your-application-to-use-the-opentelemetry-exporter)
### Create an access token and enable tracing
Prerequisites:
- You must have at least the Maintainer role for the project.
To enable tracing in a project:
1. On the left sidebar, select **Search or go to** and find your group.
1. Select **Settings > Access Tokens**.
1. Create an access token with the following scopes: `read_api`, `read_observability`, `write_observability`.
1. Copy the value of the access token.
1. Navigate to your project.
1. Select **Monitor > Tracing**.
1. Select **Enable**.
### Configure your application to use the OpenTelemetry exporter
Next, configure your application to send traces to GitLab.
To do this, set the following environment variables:
```shell
OTEL_EXPORTER = "otlphttp"
OTEL_EXPORTER_OTLP_TRACES_ENDPOINT = "https://observe.gitlab.com/v3/<namespace-id>/<gitlab-project-id>/ingest/traces"
OTEL_EXPORTER_OTLP_TRACES_HEADERS = "PRIVATE-TOKEN=<gitlab-access-token>"
```
Use the following values:
- `namespace-id`: The top-level namespace ID where your project is located.
- `gitlab-project-id`: The project ID.
- `gitlab-access-token`: The access token you [created previously](#create-an-access-token-and-enable-tracing).
When your application is configured, run it, and the OpenTelemetry exporter attempts to send
traces to GitLab.
## View your traces
If your traces are exported successfully, you can see them in the project.
To view the list of traces:
1. On the left sidebar, select **Search or go to** and find your project.
1. Select **Monitor > Traces**.
To see the details of a trace, select it from the list.
![list of traces](img/tracing_list_v16_3.png)

View File

@ -21,6 +21,7 @@ Use CI/CD pipelines to automatically build, test, and deploy your code.
| [Find CI/CD examples and templates](../ci/examples/index.md#cicd-examples) | Use these examples and templates to set up CI/CD for your use case. | |
| <i class="fa fa-youtube-play youtube" aria-hidden="true"></i> [Understand CI/CD rules](https://www.youtube.com/watch?v=QjQc-zeL16Q) (8m 56s) | Learn more about how to use CI/CD rules. | |
| [Use Auto DevOps to deploy an application](../topics/autodevops/cloud_deployments/auto_devops_with_gke.md) | Deploy an application to Google Kubernetes Engine (GKE). | |
| [Using Buildah in a rootless container with GitLab Runner Operator on OpenShift](../ci/docker/buildah_rootless_tutorial.md) | Learn how to set up GitLab Runner Operator on OpenShift to build Docker images with Buildah in a rootless container. | |
## Configure GitLab Runner

View File

@ -299,34 +299,6 @@ Before upgrading to GitLab 15, see [GitLab 15 changes](versions/gitlab_15_change
Before upgrading to GitLab 14, see [GitLab 14 changes](versions/gitlab_14_changes.md).
### PostgreSQL segmentation fault issue
If you run GitLab with external PostgreSQL, particularly AWS RDS, ensure you upgrade PostgreSQL
to patch levels to a minimum of 12.7 or 13.3 before upgrading to GitLab 14.8 or later.
[In 14.8](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/75511)
for GitLab Enterprise Edition and [in 15.1](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/87983)
for GitLab Community Edition a GitLab feature called Loose Foreign Keys was enabled.
After it was enabled, we have had reports of unplanned PostgreSQL restarts caused
by a database engine bug that causes a segmentation fault.
Read more [in the issue](https://gitlab.com/gitlab-org/gitlab/-/issues/364763).
### LFS objects import and mirror issue in GitLab 14.6.0 to 14.7.2
When Geo is enabled, LFS objects fail to be saved for imported or mirrored projects.
[This bug](https://gitlab.com/gitlab-org/gitlab/-/issues/352368) was fixed in GitLab 14.8.0 and backported into 14.7.3.
### Maintenance mode issue in GitLab 13.9 to 14.4
When [Maintenance mode](../administration/maintenance_mode/index.md) is enabled, users cannot sign in with SSO, SAML, or LDAP.
Users who were signed in before Maintenance mode was enabled, continue to be signed in. If the administrator who enabled Maintenance mode loses their session, then they can't disable Maintenance mode via the UI. In that case, you can [disable Maintenance mode via the API or Rails console](../administration/maintenance_mode/index.md#disable-maintenance-mode).
[This bug](https://gitlab.com/gitlab-org/gitlab/-/issues/329261) was fixed in GitLab 14.5.0 and backported into 14.4.3 and 14.3.5.
## Miscellaneous
- [Managing PostgreSQL extensions](../install/postgresql_extensions.md)

View File

@ -18,9 +18,18 @@ For more information about upgrading GitLab Helm Chart, see [the release notes f
entries from the `ci_job_artifacts` database table. This could potentially run for multiple minutes, especially if the table has a lot of
traffic and the migration is unable to acquire a lock. It is advised to let this process finish as restarting may result in data loss.
- If you run external PostgreSQL, particularly AWS RDS,
[check you have a PostgreSQL bug fix](../index.md#postgresql-segmentation-fault-issue)
to avoid the database crashing.
- If you run GitLab with external PostgreSQL, particularly AWS RDS, ensure you
upgrade PostgreSQL to patch levels to a minimum of 12.7 or 13.3 before
upgrading to GitLab 14.8 or later.
[In 14.8](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/75511)
for GitLab Enterprise Edition and [in 15.0](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/87983)
for GitLab Community Edition a GitLab feature called Loose Foreign Keys was enabled.
After it was enabled, we have had reports of unplanned PostgreSQL restarts caused
by a database engine bug that causes a segmentation fault.
For more information, see [issue 364763](https://gitlab.com/gitlab-org/gitlab/-/issues/364763).
- Upgrading to patch level 14.10.3 or later might encounter a one-hour timeout due to a long running database data change,
if it was not completed while running GitLab 14.9.
@ -85,9 +94,18 @@ For more information about upgrading GitLab Helm Chart, see [the release notes f
end
```
- If you run external PostgreSQL, particularly AWS RDS,
[check you have a PostgreSQL bug fix](../index.md#postgresql-segmentation-fault-issue)
to avoid the database crashing.
- If you run GitLab with external PostgreSQL, particularly AWS RDS, ensure you
upgrade PostgreSQL to patch levels to a minimum of 12.7 or 13.3 before
upgrading to GitLab 14.8 or later.
[In 14.8](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/75511)
for GitLab Enterprise Edition and [in 15.0](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/87983)
for GitLab Community Edition a GitLab feature called Loose Foreign Keys was enabled.
After it was enabled, we have had reports of unplanned PostgreSQL restarts caused
by a database engine bug that causes a segmentation fault.
For more information, see [issue 364763](https://gitlab.com/gitlab-org/gitlab/-/issues/364763).
### Geo installations **(PREMIUM SELF)**
@ -142,13 +160,21 @@ that may remain stuck permanently in a **pending** state.
[batched migration](../background_migrations.md#batched-background-migrations) named
`BackfillNamespaceIdForNamespaceRoute`. You can [ignore](https://gitlab.com/gitlab-org/gitlab/-/issues/357822)
this. Retry it after you upgrade to version 14.9.x.
- If you run external PostgreSQL, particularly AWS RDS,
[check you have a PostgreSQL bug fix](../index.md#postgresql-segmentation-fault-issue)
to avoid the database crashing.
- If you run GitLab with external PostgreSQL, particularly AWS RDS, ensure you
upgrade PostgreSQL to patch levels to a minimum of 12.7 or 13.3 before
upgrading to GitLab 14.8 or later.
[In 14.8](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/75511)
for GitLab Enterprise Edition and [in 15.0](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/87983)
for GitLab Community Edition a GitLab feature called Loose Foreign Keys was enabled.
After it was enabled, we have had reports of unplanned PostgreSQL restarts caused
by a database engine bug that causes a segmentation fault.
For more information, see [issue 364763](https://gitlab.com/gitlab-org/gitlab/-/issues/364763).
## 14.7.0
- See [LFS objects import and mirror issue in GitLab 14.6.0 to 14.7.2](../index.md#lfs-objects-import-and-mirror-issue-in-gitlab-1460-to-1472).
- If upgrading from a version earlier than 14.6.5, 14.7.4, or 14.8.2, review the [Critical Security Release: 14.8.2, 14.7.4, and 14.6.5](https://about.gitlab.com/releases/2022/02/25/critical-security-release-gitlab-14-8-2-released/) blog post.
Updating to 14.7.4 or later resets runner registration tokens for your groups and projects.
- GitLab 14.7 introduced a change where Gitaly expects persistent files in the `/tmp` directory.
@ -175,6 +201,9 @@ that may remain stuck permanently in a **pending** state.
### Geo installations **(PREMIUM SELF)**
- LFS objects import and mirror issue in GitLab 14.6.0 to 14.7.2.
When Geo is enabled, LFS objects fail to be saved for imported or mirrored projects.
[This bug](https://gitlab.com/gitlab-org/gitlab/-/issues/352368) was fixed in GitLab 14.8.0 and backported into 14.7.3.
- There is [an issue in GitLab 14.2 through 14.7](https://gitlab.com/gitlab-org/gitlab/-/issues/299819#note_822629467)
that affects Geo when the GitLab-managed object storage replication is used, causing blob object types to fail synchronization.
@ -189,12 +218,14 @@ that may remain stuck permanently in a **pending** state.
## 14.6.0
- See [LFS objects import and mirror issue in GitLab 14.6.0 to 14.7.2](../index.md#lfs-objects-import-and-mirror-issue-in-gitlab-1460-to-1472).
- If upgrading from a version earlier than 14.6.5, 14.7.4, or 14.8.2, review the [Critical Security Release: 14.8.2, 14.7.4, and 14.6.5](https://about.gitlab.com/releases/2022/02/25/critical-security-release-gitlab-14-8-2-released/) blog post.
Updating to 14.6.5 or later resets runner registration tokens for your groups and projects.
### Geo installations **(PREMIUM SELF)**
- LFS objects import and mirror issue in GitLab 14.6.0 to 14.7.2.
When Geo is enabled, LFS objects fail to be saved for imported or mirrored projects.
[This bug](https://gitlab.com/gitlab-org/gitlab/-/issues/352368) was fixed in GitLab 14.8.0 and backported into 14.7.3.
- There is [an issue in GitLab 14.2 through 14.7](https://gitlab.com/gitlab-org/gitlab/-/issues/299819#note_822629467)
that affects Geo when the GitLab-managed object storage replication is used, causing blob object types to fail synchronization.
@ -306,7 +337,17 @@ or [init scripts](../upgrading_from_source.md#configure-sysv-init-script) by [fo
- Git 2.33.x and later is required. We recommend you use the
[Git version provided by Gitaly](../../install/installation.md#git).
- See [Maintenance mode issue in GitLab 13.9 to 14.4](../index.md#maintenance-mode-issue-in-gitlab-139-to-144).
- When [Maintenance mode](../../administration/maintenance_mode/index.md) is
enabled, users cannot sign in with SSO, SAML, or LDAP.
Users who were signed in before Maintenance mode was enabled, continue to be
signed in. If the administrator who enabled Maintenance mode loses their
session, then they can't disable Maintenance mode via the UI. In that case,
you can
[disable Maintenance mode via the API or Rails console](../../administration/maintenance_mode/index.md#disable-maintenance-mode).
[This bug](https://gitlab.com/gitlab-org/gitlab/-/issues/329261) was fixed in
GitLab 14.5.0 and backported into 14.4.3 and 14.3.5.
- After enabling database load balancing by default in 14.4.0, we found an issue where
[cron jobs would not work if the connection to PostgreSQL was severed](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/73716),
as Sidekiq would continue using a bad connection. Geo and other features that rely on
@ -467,7 +508,17 @@ for how to proceed.
NOTE:
When using Rails to execute these background migrations synchronously, make sure that the machine running the process has sufficient resources to handle the task. If the process gets terminated, it's likely due to insufficient memory available. If your SSH session times out after a while, it might be necessary to run the previous code by using a terminal multiplexer like `screen` or `tmux`.
- See [Maintenance mode issue in GitLab 13.9 to 14.4](../index.md#maintenance-mode-issue-in-gitlab-139-to-144).
- When [Maintenance mode](../../administration/maintenance_mode/index.md) is
enabled, users cannot sign in with SSO, SAML, or LDAP.
Users who were signed in before Maintenance mode was enabled, continue to be
signed in. If the administrator who enabled Maintenance mode loses their
session, then they can't disable Maintenance mode via the UI. In that case,
you can
[disable Maintenance mode via the API or Rails console](../../administration/maintenance_mode/index.md#disable-maintenance-mode).
[This bug](https://gitlab.com/gitlab-org/gitlab/-/issues/329261) was fixed in
GitLab 14.5.0 and backported into 14.4.3 and 14.3.5.
- You may see the following error when setting up two factor authentication (2FA) for accounts
that authenticate using an LDAP password:
@ -568,7 +619,17 @@ for how to proceed.
sudo -u git -H bundle exec rake db:migrate RAILS_ENV=production
```
- See [Maintenance mode issue in GitLab 13.9 to 14.4](../index.md#maintenance-mode-issue-in-gitlab-139-to-144).
- When [Maintenance mode](../../administration/maintenance_mode/index.md) is
enabled, users cannot sign in with SSO, SAML, or LDAP.
Users who were signed in before Maintenance mode was enabled, continue to be
signed in. If the administrator who enabled Maintenance mode loses their
session, then they can't disable Maintenance mode via the UI. In that case,
you can
[disable Maintenance mode via the API or Rails console](../../administration/maintenance_mode/index.md#disable-maintenance-mode).
[This bug](https://gitlab.com/gitlab-org/gitlab/-/issues/329261) was fixed in
GitLab 14.5.0 and backported into 14.4.3 and 14.3.5.
- GitLab 14.2.0 includes a
[background migration `BackfillDraftStatusOnMergeRequests`](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/67687)
that may remain stuck permanently in a **pending** state when the instance lacks records that match the migration's target.
@ -648,7 +709,17 @@ for how to proceed.
background while GitLab is in use. GitLab instances upgraded directly from 14.0 to 14.5 or later must
run the migration in the foreground and therefore take a lot longer to complete.
- See [Maintenance mode issue in GitLab 13.9 to 14.4](../index.md#maintenance-mode-issue-in-gitlab-139-to-144).
- When [Maintenance mode](../../administration/maintenance_mode/index.md) is
enabled, users cannot sign in with SSO, SAML, or LDAP.
Users who were signed in before Maintenance mode was enabled, continue to be
signed in. If the administrator who enabled Maintenance mode loses their
session, then they can't disable Maintenance mode via the UI. In that case,
you can
[disable Maintenance mode via the API or Rails console](../../administration/maintenance_mode/index.md#disable-maintenance-mode).
[This bug](https://gitlab.com/gitlab-org/gitlab/-/issues/329261) was fixed in
GitLab 14.5.0 and backported into 14.4.3 and 14.3.5.
- If you encounter the error, `I18n::InvalidLocale: :en is not a valid locale`, when starting the application, follow the [patching](https://about.gitlab.com/handbook/support/workflows/patching_an_instance.html) process. Use [123475](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/123475) as the `mr_iid`.
@ -734,7 +805,17 @@ Other issues:
and this code was completely removed in GitLab 14.0. If you plan to upgrade from
**GitLab 13.2 or older** directly to 14.0, this is [unsupported](../index.md#upgrading-to-a-new-major-version).
You should instead follow a [supported upgrade path](../index.md#upgrade-paths).
- See [Maintenance mode issue in GitLab 13.9 to 14.4](../index.md#maintenance-mode-issue-in-gitlab-139-to-144).
- When [Maintenance mode](../../administration/maintenance_mode/index.md) is
enabled, users cannot sign in with SSO, SAML, or LDAP.
Users who were signed in before Maintenance mode was enabled, continue to be
signed in. If the administrator who enabled Maintenance mode loses their
session, then they can't disable Maintenance mode via the UI. In that case,
you can
[disable Maintenance mode via the API or Rails console](../../administration/maintenance_mode/index.md#disable-maintenance-mode).
[This bug](https://gitlab.com/gitlab-org/gitlab/-/issues/329261) was fixed in
GitLab 14.5.0 and backported into 14.4.3 and 14.3.5.
- **In GitLab 13.12.2 and later**, users with expired passwords can no longer authenticate with API and Git using tokens because of
the [Insufficient Expired Password Validation](https://about.gitlab.com/releases/2021/06/01/security-release-gitlab-13-12-2-released/#insufficient-expired-password-validation)
security fix. If your users get authentication issues following the upgrade, check that their password is not expired:

View File

@ -665,9 +665,6 @@ A [license caching issue](https://gitlab.com/gitlab-org/gitlab/-/issues/376706)
## 15.1.0
- If you run external PostgreSQL, particularly AWS RDS,
[check you have a PostgreSQL bug fix](../index.md#postgresql-segmentation-fault-issue)
to avoid the database crashing.
- In GitLab 15.1.0, we are switching Rails `ActiveSupport::Digest` to use SHA256 instead of MD5.
This affects ETag key generation for resources such as raw Snippet file
downloads. To ensure consistent ETag key generation across multiple
@ -707,9 +704,19 @@ A [license caching issue](https://gitlab.com/gitlab-org/gitlab/-/issues/376706)
## 15.0.0
- Elasticsearch 6.8 [is no longer supported](../../integration/advanced_search/elasticsearch.md#version-requirements). Before you upgrade to GitLab 15.0, [update Elasticsearch to any 7.x version](../../integration/advanced_search/elasticsearch.md#upgrade-to-a-new-elasticsearch-major-version).
- If you run external PostgreSQL, particularly AWS RDS,
[check you have a PostgreSQL bug fix](../index.md#postgresql-segmentation-fault-issue)
to avoid the database crashing.
- If you run GitLab with external PostgreSQL, particularly AWS RDS, ensure you
upgrade PostgreSQL to patch levels to a minimum of 12.7 or 13.3 before
upgrading to GitLab 14.8 or later.
[In 14.8](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/75511)
for GitLab Enterprise Edition and [in 15.0](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/87983)
for GitLab Community Edition a GitLab feature called Loose Foreign Keys was enabled.
After it was enabled, we have had reports of unplanned PostgreSQL restarts caused
by a database engine bug that causes a segmentation fault.
For more information, see [issue 364763](https://gitlab.com/gitlab-org/gitlab/-/issues/364763).
- The use of encrypted S3 buckets with storage-specific configuration is no longer supported after [removing support for using `background_upload`](../deprecations.md#background-upload-for-object-storage).
- The [certificate-based Kubernetes integration (DEPRECATED)](../../user/infrastructure/clusters/index.md#certificate-based-kubernetes-integration-deprecated) is disabled by default, but you can be re-enable it through the [`certificate_based_clusters` feature flag](../../administration/feature_flags.md#how-to-enable-and-disable-features-behind-flags) until GitLab 16.0.
- When you use the GitLab Helm Chart project with a custom `serviceAccount`, ensure it has `get` and `list` permissions for the `serviceAccount` and `secret` resources.

View File

@ -307,6 +307,21 @@ Make sure the ACS URL points to `https://gitlab.example.com/users/auth/saml/call
If the ACS URL is correct, and you still have errors, review the other
Troubleshooting sections.
#### 422 error with non-allowed email
You might get an 422 error that states "Email is not allowed for sign-up. Please use your regular email address."
This message might indicate that you must add or remove a domain from your domain allowlist or denylist settings.
To implement this workaround:
1. On the left sidebar, select **Search or go to**.
1. Select **Admin Area**.
1. Select **Settings** > **General**.
1. Expand **Sign-up restrictions**.
1. Add or remove a domain as appropriate to **Allowed domains for sign-ups** and **Denied domains for sign-ups**.
1. Select **Save changes**.
### User is blocked when signing in through SAML **(FREE SELF)**
The following are the most likely reasons that a user is blocked when signing in through SAML:

View File

@ -187,7 +187,7 @@ To remove a fork relationship:
1. In the **Remove fork relationship** section, select **Remove fork relationship**.
1. To confirm, enter the project path and select **Confirm**.
When you unlink a fork that uses a [hashed storage pool](../../../administration/repository_storage_types.md#hashed-object-pools)
When you unlink a fork that uses a [hashed storage pool](../../../administration/repository_storage_paths.md#hashed-object-pools)
to share objects with another repository:
- All objects are copied from the pool into your fork.

View File

@ -26,7 +26,7 @@ you begin. The best way to back up a repository is to
The size of a repository is determined by computing the accumulated size of all files in the repository.
It is similar to executing `du --summarize --bytes` on your repository's
[hashed storage path](../../../administration/repository_storage_types.md).
[hashed storage path](../../../administration/repository_storage_paths.md).
## Purge files from repository history

View File

@ -49,5 +49,3 @@ module Gitlab
end
end
end
Gitlab::HookData::UserBuilder.prepend_mod_with('Gitlab::HookData::UserBuilder')

View File

@ -5336,6 +5336,9 @@ msgstr ""
msgid "Analytics|A visualization with that name already exists."
msgstr ""
msgid "Analytics|Add a visualization"
msgstr ""
msgid "Analytics|Add visualizations"
msgstr ""
@ -5381,13 +5384,16 @@ msgstr ""
msgid "Analytics|Create dashboard %{dashboardSlug}"
msgstr ""
msgid "Analytics|Create your dashboard"
msgstr ""
msgid "Analytics|Custom dashboards"
msgstr ""
msgid "Analytics|Dashboard Title"
msgid "Analytics|Dashboard not found"
msgstr ""
msgid "Analytics|Dashboard not found"
msgid "Analytics|Dashboard title"
msgstr ""
msgid "Analytics|Dashboard was saved successfully"
@ -5411,6 +5417,12 @@ msgstr ""
msgid "Analytics|Edit"
msgstr ""
msgid "Analytics|Edit your dashboard"
msgstr ""
msgid "Analytics|Enter a dashboard title"
msgstr ""
msgid "Analytics|Enter a visualization name"
msgstr ""
@ -5471,18 +5483,21 @@ msgstr ""
msgid "Analytics|Resulting Data"
msgstr ""
msgid "Analytics|Save"
msgstr ""
msgid "Analytics|Save and add to Dashboard"
msgstr ""
msgid "Analytics|Save new visualization"
msgstr ""
msgid "Analytics|Save your dashboard"
msgstr ""
msgid "Analytics|Select a measurement"
msgstr ""
msgid "Analytics|Select a visualization from the sidebar to get started."
msgstr ""
msgid "Analytics|Select a visualization type"
msgstr ""
@ -17705,9 +17720,6 @@ msgstr ""
msgid "Email the pipeline status to a list of recipients."
msgstr ""
msgid "Email updates (optional)"
msgstr ""
msgid "Email:"
msgstr ""
@ -23599,9 +23611,6 @@ msgstr ""
msgid "I want to use GitLab CI with my existing repository"
msgstr ""
msgid "I'd like to receive updates about GitLab via email"
msgstr ""
msgid "I'm signing up for GitLab because:"
msgstr ""
@ -29061,6 +29070,9 @@ msgstr ""
msgid "Merge conflicts"
msgstr ""
msgid "Merge date & time could not be determined"
msgstr ""
msgid "Merge details"
msgstr ""
@ -35506,9 +35518,6 @@ msgstr ""
msgid "ProductAnalytics|For the product analytics dashboard to start showing you some data, you need to add the analytics tracking code to your project."
msgstr ""
msgid "ProductAnalytics|Go back"
msgstr ""
msgid "ProductAnalytics|How many sessions a user has"
msgstr ""
@ -38447,9 +38456,6 @@ msgstr ""
msgid "Receive notifications about your own activity"
msgstr ""
msgid "Receive product marketing emails"
msgstr ""
msgid "Recent"
msgstr ""

View File

@ -148,7 +148,7 @@
"gettext-parser": "^6.0.0",
"graphql": "^15.7.2",
"graphql-tag": "^2.11.0",
"gridstack": "^9.1.0",
"gridstack": "^9.1.1",
"highlight.js": "^11.8.0",
"immer": "^9.0.15",
"ipaddr.js": "^1.9.1",

View File

@ -1,4 +1,4 @@
FROM registry.gitlab.com/gitlab-org/gitlab-development-kit/asdf-bootstrapped-verify:main@sha256:14fa752a80df21f840fc48f4be8561bee21b78886ac718652582fdd788d34c32
FROM registry.gitlab.com/gitlab-org/gitlab-development-kit/asdf-bootstrapped-verify:main@sha256:af7e6e7a9d6338ca7045e878b9717a1c8feb507dbd1e69db7ef407af4074f27d
ENV GITLAB_LICENSE_MODE=test \
GDK_KILL_CONFIRM=true

View File

@ -6,12 +6,12 @@ module QA
module Artifact
class Show < QA::Page::Base
view 'app/views/projects/artifacts/_tree_directory.html.haml' do
element :directory_name_link
element 'directory-name-link'
end
def go_to_directory(name, retry_attempts = 1)
retry_on_exception(max_attempts: retry_attempts, reload: true, sleep_interval: 10) do
click_element(:directory_name_link, directory_name: name)
click_element('directory-name-link', directory_name: name)
end
end
end

View File

@ -8,26 +8,27 @@ module QA
include Component::CiBadgeLink
view 'app/assets/javascripts/jobs/components/log/log.vue' do
element :job_log_content
element 'job-log-content'
end
view 'app/assets/javascripts/jobs/components/job/sidebar/stages_dropdown.vue' do
element :pipeline_path, required: true
element 'pipeline-path', required: true
end
view 'app/assets/javascripts/jobs/components/job/sidebar/sidebar_header.vue' do
element :retry_button
element 'retry-button'
end
view 'app/assets/javascripts/jobs/components/job/sidebar/artifacts_block.vue' do
element :browse_artifacts_button
element 'browse-artifacts-button'
element 'artifacts-unlocked-message-content'
element 'artifacts-locked-message-content'
end
def successful?(timeout: 60)
raise "Timed out waiting for the build trace to load" unless loaded?
raise "Timed out waiting for the status to be a valid completed state" unless completed?(timeout: timeout)
job_log = find_element(:job_log_content).text
QA::Runtime::Logger.debug(" \n\n ------- Job log: ------- \n\n #{job_log} \n -------")
passed?
@ -38,28 +39,27 @@ module QA
result = ''
wait_until(reload: false, max_duration: wait, sleep_interval: 1) do
result = find_element(:job_log_content).text
result.include?('Job')
result = job_log.include?('Job') ? job_log : ''
result.present?
end
result
end
def has_browse_button?
has_element? :browse_artifacts_button
has_element?('browse-artifacts-button')
end
def click_browse_button
click_element :browse_artifacts_button
click_element('browse-artifacts-button')
end
def retry!
click_element :retry_button
click_element 'retry-button'
end
def has_job_log?
has_element? :job_log_content
def has_job_log?(wait: 1)
has_element?('job-log-content', wait: wait)
end
def has_status?(status, wait: 30)
@ -69,20 +69,28 @@ module QA
end
def has_locked_artifact?
has_element? :artifacts_locked_message_content
has_element?('artifacts-locked-message-content')
end
def has_unlocked_artifact?
has_element? :artifacts_unlocked_message_content
has_element?('artifacts-unlocked-message-content')
end
def go_to_pipeline
click_element('pipeline-path')
end
private
def loaded?(wait: 60)
wait_until(reload: true, max_duration: wait, sleep_interval: 1) do
has_element?(:job_log_content, wait: 1)
has_job_log?
end
end
def job_log
find_element('job-log-content').text
end
end
end
end

View File

@ -174,7 +174,7 @@ module QA
has_upload_menu_item?
# Use for stability, WebIDE inside an iframe is finnicky, webdriver sometimes moves too fast
Support::Waiter.wait_until(max_duration: 20, retry_on_exception: true) do
Support::Waiter.wait_until(max_duration: 60, retry_on_exception: true) do
click_upload_menu_item
enter_file_input(file_path)
end

View File

@ -70,7 +70,7 @@ module QA
Page::Project::Job::Show.perform do |job|
expect(job).to be_successful(timeout: 200)
job.click_element(:pipeline_path)
job.go_to_pipeline
end
Page::Project::Pipeline::Show.perform do |pipeline|

View File

@ -59,7 +59,7 @@ module QA
Page::Project::Job::Show.perform do |job|
expect(job).to be_successful(timeout: 800)
job.click_element(:pipeline_path)
job.go_to_pipeline
end
Page::Project::Pipeline::Show.perform do |pipeline|

View File

@ -101,7 +101,7 @@ module QA
Page::Project::Job::Show.perform do |job|
expect(job).to be_successful(timeout: 800)
job.click_element(:pipeline_path)
job.go_to_pipeline
end
Page::Project::Pipeline::Show.perform do |pipeline|

View File

@ -47,7 +47,7 @@ module QA
Page::Project::Job::Show.perform do |job|
expect(job).to be_successful(timeout: 600)
job.click_element(:pipeline_path)
job.go_to_pipeline
end
Page::Project::Pipeline::Show.perform do |pipeline|

View File

@ -149,11 +149,10 @@ RSpec.describe Profiles::NotificationsController do
it 'updates only permitted attributes' do
sign_in(user)
put :update, params: { user: { notification_email: 'new@example.com', email_opted_in: true, notified_of_own_activity: true, admin: true } }
put :update, params: { user: { notification_email: 'new@example.com', notified_of_own_activity: true, admin: true } }
user.reload
expect(user.notification_email).to eq('new@example.com')
expect(user.email_opted_in).to eq(true)
expect(user.notified_of_own_activity).to eq(true)
expect(user.admin).to eq(false)
expect(controller).to set_flash[:notice].to('Notification settings saved')

View File

@ -12,14 +12,6 @@ RSpec.describe 'User visits the notifications tab', :js, feature_category: :user
visit(profile_notifications_path)
end
it 'turns on the receive product marketing emails setting' do
expect(page).to have_content('Notifications')
expect do
check 'Receive product marketing emails'
end.to change { user.reload.email_opted_in }.to(true)
end
it 'changes the project notifications setting' do
expect(page).to have_content('Notifications')

View File

@ -1,13 +1,13 @@
import { GlEmptyState } from '@gitlab/ui';
import { shallowMount } from '@vue/test-utils';
import EmptyStateWithAnyIssues from '~/service_desk/components/empty_state_with_any_issues.vue';
import EmptyStateWithAnyIssues from '~/issues/service_desk/components/empty_state_with_any_issues.vue';
import {
noSearchResultsTitle,
noSearchResultsDescription,
infoBannerUserNote,
noOpenIssuesTitle,
noClosedIssuesTitle,
} from '~/service_desk/constants';
} from '~/issues/service_desk/constants';
describe('EmptyStateWithAnyIssues component', () => {
let wrapper;

View File

@ -1,7 +1,11 @@
import { GlEmptyState, GlLink } from '@gitlab/ui';
import { mountExtended } from 'helpers/vue_test_utils_helper';
import EmptyStateWithoutAnyIssues from '~/service_desk/components/empty_state_without_any_issues.vue';
import { infoBannerTitle, noIssuesSignedOutButtonText, learnMore } from '~/service_desk/constants';
import EmptyStateWithoutAnyIssues from '~/issues/service_desk/components/empty_state_without_any_issues.vue';
import {
infoBannerTitle,
noIssuesSignedOutButtonText,
learnMore,
} from '~/issues/service_desk/constants';
describe('EmptyStateWithoutAnyIssues component', () => {
let wrapper;

View File

@ -1,7 +1,7 @@
import { shallowMount } from '@vue/test-utils';
import { GlLink, GlButton } from '@gitlab/ui';
import InfoBanner from '~/service_desk/components/info_banner.vue';
import { infoBannerAdminNote, enableServiceDesk } from '~/service_desk/constants';
import InfoBanner from '~/issues/service_desk/components/info_banner.vue';
import { infoBannerAdminNote, enableServiceDesk } from '~/issues/service_desk/constants';
describe('InfoBanner', () => {
let wrapper;

View File

@ -18,14 +18,14 @@ import { issuableListTabs } from '~/vue_shared/issuable/list/constants';
import { TYPENAME_USER } from '~/graphql_shared/constants';
import { convertToGraphQLId, getIdFromGraphQLId } from '~/graphql_shared/utils';
import { getSortKey, getSortOptions } from '~/issues/list/utils';
import { STATUS_CLOSED, STATUS_OPEN, STATUS_ALL } from '~/service_desk/constants';
import getServiceDeskIssuesQuery from 'ee_else_ce/service_desk/queries/get_service_desk_issues.query.graphql';
import getServiceDeskIssuesCountsQuery from 'ee_else_ce/service_desk/queries/get_service_desk_issues_counts.query.graphql';
import setSortingPreferenceMutation from '~/service_desk/queries/set_sorting_preference.mutation.graphql';
import ServiceDeskListApp from '~/service_desk/components/service_desk_list_app.vue';
import InfoBanner from '~/service_desk/components/info_banner.vue';
import EmptyStateWithAnyIssues from '~/service_desk/components/empty_state_with_any_issues.vue';
import EmptyStateWithoutAnyIssues from '~/service_desk/components/empty_state_without_any_issues.vue';
import { STATUS_CLOSED, STATUS_OPEN, STATUS_ALL } from '~/issues/service_desk/constants';
import getServiceDeskIssuesQuery from 'ee_else_ce/issues/service_desk/queries/get_service_desk_issues.query.graphql';
import getServiceDeskIssuesCountsQuery from 'ee_else_ce/issues/service_desk/queries/get_service_desk_issues_counts.query.graphql';
import setSortingPreferenceMutation from '~/issues/service_desk/queries/set_sorting_preference.mutation.graphql';
import ServiceDeskListApp from '~/issues/service_desk/components/service_desk_list_app.vue';
import InfoBanner from '~/issues/service_desk/components/info_banner.vue';
import EmptyStateWithAnyIssues from '~/issues/service_desk/components/empty_state_with_any_issues.vue';
import EmptyStateWithoutAnyIssues from '~/issues/service_desk/components/empty_state_without_any_issues.vue';
import { createAlert, VARIANT_INFO } from '~/alert';
import {
TOKEN_TYPE_ASSIGNEE,

View File

@ -16,10 +16,10 @@ describe('Artifacts block', () => {
});
const findArtifactRemoveElt = () => wrapper.findByTestId('artifacts-remove-timeline');
const findJobLockedElt = () => wrapper.findByTestId('job-locked-message');
const findJobLockedElt = () => wrapper.findByTestId('artifacts-locked-message-content');
const findKeepBtn = () => wrapper.findByTestId('keep-artifacts');
const findDownloadBtn = () => wrapper.findByTestId('download-artifacts');
const findBrowseBtn = () => wrapper.findByTestId('browse-artifacts');
const findBrowseBtn = () => wrapper.findByTestId('browse-artifacts-button');
const findArtifactsHelpLink = () => wrapper.findByTestId('artifacts-help-link');
const findPopover = () => wrapper.findComponent(GlPopover);

View File

@ -0,0 +1,76 @@
import * as Sentry from '@sentry/browser';
import { dispatchSnowplowEvent } from '~/tracking/dispatch_snowplow_event';
import getStandardContext from '~/tracking/get_standard_context';
import { extraContext, servicePingContext } from './mock_data';
jest.mock('@sentry/browser');
jest.mock('~/tracking/get_standard_context');
const category = 'Incident Management';
const action = 'view_incident_details';
describe('dispatchSnowplowEvent', () => {
const snowplowMock = jest.fn();
global.window.snowplow = snowplowMock;
const mockStandardContext = { some: 'context' };
getStandardContext.mockReturnValue(mockStandardContext);
beforeEach(() => {
snowplowMock.mockClear();
Sentry.captureException.mockClear();
});
it('calls snowplow trackStructEvent with correct arguments', () => {
const data = {
label: 'Show Incident',
property: 'click_event',
value: '12',
context: extraContext,
extra: { namespace: 'GitLab' },
};
dispatchSnowplowEvent(category, action, data);
expect(snowplowMock).toHaveBeenCalledWith('trackStructEvent', {
category,
action,
label: data.label,
property: data.property,
value: Number(data.value),
context: [mockStandardContext, data.context],
});
});
it('throws an error if no category is provided', () => {
expect(() => {
dispatchSnowplowEvent(undefined, 'some-action', {});
}).toThrow('Tracking: no category provided for tracking.');
});
it('handles an array of contexts', () => {
const data = {
context: [extraContext, servicePingContext],
extra: { namespace: 'GitLab' },
};
dispatchSnowplowEvent(category, action, data);
expect(snowplowMock).toHaveBeenCalledWith('trackStructEvent', {
category,
action,
context: [mockStandardContext, ...data.context],
});
});
it('handles Sentry error capturing', () => {
snowplowMock.mockImplementation(() => {
throw new Error('some error');
});
dispatchSnowplowEvent(category, action, {});
expect(Sentry.captureException).toHaveBeenCalledTimes(1);
});
});

View File

@ -10,6 +10,7 @@ import {
} from '~/tracking/constants';
import * as utils from '~/tracking/utils';
import { Tracker } from '~/tracking/tracker';
import { extraContext } from './mock_data';
jest.mock('~/api', () => ({
trackInternalEvent: jest.fn(),
@ -22,11 +23,11 @@ jest.mock('~/tracking/utils', () => ({
Tracker.enabled = jest.fn();
const event = 'TestEvent';
describe('InternalEvents', () => {
describe('track_event', () => {
it('track_event calls API.trackInternalEvent with correct arguments', () => {
const event = 'TestEvent';
InternalEvents.track_event(event);
expect(API.trackInternalEvent).toHaveBeenCalledTimes(1);
@ -36,42 +37,65 @@ describe('InternalEvents', () => {
it('track_event calls tracking.event functions with correct arguments', () => {
const trackingSpy = mockTracking(GITLAB_INTERNAL_EVENT_CATEGORY, undefined, jest.spyOn);
const event = 'TestEvent';
InternalEvents.track_event(event);
InternalEvents.track_event(event, { context: extraContext });
expect(trackingSpy).toHaveBeenCalledTimes(1);
expect(trackingSpy).toHaveBeenCalledWith(GITLAB_INTERNAL_EVENT_CATEGORY, event, {
context: {
schema: SERVICE_PING_SCHEMA,
data: {
event_name: event,
data_source: 'redis_hll',
context: [
{
schema: SERVICE_PING_SCHEMA,
data: {
event_name: event,
data_source: 'redis_hll',
},
},
},
extraContext,
],
});
});
});
describe('mixin', () => {
let wrapper;
const Component = {
template: `
<div>
<button data-testid="button1" @click="handleButton1Click">Button 1</button>
<button data-testid="button2" @click="handleButton2Click">Button 2</button>
</div>
`,
methods: {
handleButton1Click() {
this.track_event(event);
},
handleButton2Click() {
this.track_event(event, extraContext);
},
},
mixins: [InternalEvents.mixin()],
};
beforeEach(() => {
const Component = {
render() {},
mixins: [InternalEvents.mixin()],
};
wrapper = shallowMountExtended(Component);
});
it('this.track_event function calls InternalEvent`s track function with an event', () => {
const event = 'TestEvent';
it('this.track_event function calls InternalEvent`s track function with an event', async () => {
const trackEventSpy = jest.spyOn(InternalEvents, 'track_event');
wrapper.vm.track_event(event);
await wrapper.findByTestId('button1').trigger('click');
expect(trackEventSpy).toHaveBeenCalledTimes(1);
expect(trackEventSpy).toHaveBeenCalledWith(event);
expect(trackEventSpy).toHaveBeenCalledWith(event, {});
});
it("this.track_event function calls InternalEvent's track function with an event and data", async () => {
const data = extraContext;
const trackEventSpy = jest.spyOn(InternalEvents, 'track_event');
await wrapper.findByTestId('button2').trigger('click');
expect(trackEventSpy).toHaveBeenCalledTimes(1);
expect(trackEventSpy).toHaveBeenCalledWith(event, data);
});
});

View File

@ -0,0 +1,17 @@
export const extraContext = {
schema: 'iglu:com.gitlab/design_management_context/jsonschema/1-0-0',
data: {
'design-version-number': '1.0.0',
'design-is-current-version': '1.0.0',
'internal-object-referrer': 'https://gitlab.com',
'design-collection-owner': 'GitLab',
},
};
export const servicePingContext = {
schema: 'iglu:com.gitlab/gitlab_service_ping/jsonschema/1-0-0',
data: {
event_name: 'track_incident_event',
data_source: 'redis_hll',
},
};

View File

@ -68,92 +68,92 @@ RSpec.describe UnnestedInFilters::Rewriter do
describe '#rewrite' do
let(:recorded_queries) { ActiveRecord::QueryRecorder.new { rewriter.rewrite.load } }
let(:relation) { User.where(state: :active, user_type: %i(support_bot alert_bot)).limit(2) }
let(:users_select) { 'SELECT "users".*' }
let(:users_select_with_ignored_columns) { 'SELECT ("users"."\w+", )+("users"."\w+")' }
let(:expected_query) do
<<~SQL
SELECT
"users".*
FROM
unnest('{1,2}'::smallint[]) AS "user_types"("user_type"),
LATERAL (
SELECT
"users".*
FROM
"users"
WHERE
"users"."state" = 'active' AND
(users."user_type" = "user_types"."user_type")
LIMIT 2
) AS users
LIMIT 2
SQL
let(:users_unnest) do
'FROM unnest\(\'{1\,2}\'::smallint\[\]\) AS "user_types"\("user_type"\)\, LATERAL \('
end
let(:users_where) do
'FROM
"users"
WHERE
"users"."state" = \'active\' AND
\(users."user_type" = "user_types"."user_type"\)
LIMIT 2\)
AS users
LIMIT 2'
end
let(:expected_query_regexp) do
Regexp.new(
"(#{users_select}|#{users_select_with_ignored_columns})
#{users_unnest}(#{users_select}|#{users_select_with_ignored_columns})
#{users_where}".squish
)
end
subject(:issued_query) { recorded_queries.occurrences.each_key.first }
it 'changes the query' do
expect(issued_query.gsub(/\s/, '')).to start_with(expected_query.gsub(/\s/, ''))
expect(issued_query).to match(expected_query_regexp)
end
context 'when the relation has a subquery' do
let(:relation) { User.where(state: User.select(:state), user_type: %i(support_bot alert_bot)).limit(1) }
let(:expected_query) do
<<~SQL
SELECT
"users".*
FROM
unnest(ARRAY(SELECT "users"."state" FROM "users")::character varying[]) AS "states"("state"),
unnest('{1,2}'::smallint[]) AS "user_types"("user_type"),
LATERAL (
SELECT
"users".*
FROM
"users"
WHERE
(users."state" = "states"."state") AND
(users."user_type" = "user_types"."user_type")
LIMIT 1
) AS users
LIMIT 1
SQL
let(:users_unnest) do
'FROM
unnest\(ARRAY\(SELECT "users"."state" FROM "users"\)::character varying\[\]\) AS "states"\("state"\)\,
unnest\(\'{1\,2}\'::smallint\[\]\) AS "user_types"\("user_type"\)\,
LATERAL \('
end
let(:users_where) do
'FROM
"users"
WHERE
\(users."state" = "states"."state"\) AND
\(users."user_type" = "user_types"."user_type"\)
LIMIT 1\)
AS users
LIMIT 1'
end
it 'changes the query' do
expect(issued_query.gsub(/\s/, '')).to start_with(expected_query.gsub(/\s/, ''))
expect(issued_query).to match(expected_query_regexp)
end
end
context 'when there is an order' do
let(:relation) { User.where(state: %w(active blocked banned)).order(order).limit(2) }
let(:expected_query) do
<<~SQL
SELECT
"users".*
FROM
unnest('{active,blocked,banned}'::charactervarying[]) AS "states"("state"),
LATERAL (
SELECT
"users".*
FROM
"users"
WHERE
(users."state" = "states"."state")
ORDER BY
"users"."user_type" DESC
LIMIT 2
) AS users
ORDER BY
"users"."user_type" DESC
LIMIT 2
SQL
let(:users_unnest) do
'FROM
unnest\(\'{active\,blocked\,banned}\'::character varying\[\]\) AS "states"\("state"\)\,
LATERAL \('
end
let(:users_where) do
'FROM
"users"
WHERE
\(users."state" = "states"."state"\)
ORDER BY
"users"."user_type" DESC
LIMIT 2\)
AS users
ORDER BY
"users"."user_type" DESC
LIMIT 2'
end
context 'when the order is an Arel node' do
let(:order) { { user_type: :desc } }
it 'changes the query' do
expect(issued_query.gsub(/\s/, '')).to start_with(expected_query.gsub(/\s/, ''))
expect(issued_query).to match(expected_query_regexp)
end
end
@ -171,7 +171,7 @@ RSpec.describe UnnestedInFilters::Rewriter do
end
it 'changes the query' do
expect(issued_query.gsub(/\s/, '')).to start_with(expected_query.gsub(/\s/, ''))
expect(issued_query).to match(expected_query_regexp)
end
end
end
@ -179,85 +179,82 @@ RSpec.describe UnnestedInFilters::Rewriter do
context 'when the combined attributes include the primary key' do
let(:relation) { User.where(user_type: %i(support_bot alert_bot)).order(id: :desc).limit(2) }
let(:expected_query) do
<<~SQL
SELECT
"users".*
FROM
"users"
WHERE
"users"."id" IN (
SELECT
"users"."id"
FROM
unnest('{1,2}' :: smallint []) AS "user_types"("user_type"),
LATERAL (
SELECT
"users"."user_type",
"users"."id"
FROM
"users"
WHERE
(users."user_type" = "user_types"."user_type")
ORDER BY
"users"."id" DESC
LIMIT
2
) AS users
ORDER BY
"users"."id" DESC
LIMIT
2
)
ORDER BY
let(:users_where) do
'FROM
"users"
WHERE
"users"."id" IN
\(SELECT
"users"."id"
FROM
unnest\(\'{1\,2}\'::smallint\[\]\) AS "user_types"\("user_type"\)\,
LATERAL
\(SELECT
"users"."user_type"\,
"users"."id"
FROM
"users"
WHERE
\(users."user_type" = "user_types"."user_type"\)
ORDER BY
"users"."id" DESC
LIMIT 2\)
AS users
ORDER BY
"users"."id" DESC
LIMIT
2
SQL
LIMIT 2\)
ORDER BY
"users"."id" DESC
LIMIT 2'
end
let(:expected_query_regexp) do
Regexp.new("(#{users_select}|#{users_select_with_ignored_columns}) #{users_where}".squish)
end
it 'changes the query' do
expect(issued_query.gsub(/\s/, '')).to start_with(expected_query.gsub(/\s/, ''))
expect(issued_query).to match(expected_query_regexp)
end
end
context 'when a join table is receiving an IN list query' do
let(:relation) { User.joins(:status).where(status: { message: %w[foo bar] }).order(id: :desc).limit(2) }
let(:expected_query) do
<<~SQL
SELECT
"users".*
FROM
"users"
WHERE
"users"."id" IN (
SELECT
"users"."id"
FROM
LATERAL (
SELECT
message,
"users"."id"
FROM
"users"
INNER JOIN "user_statuses" "status" ON "status"."user_id" = "users"."id"
WHERE
"status"."message" IN ('foo', 'bar')
ORDER BY
"users"."id" DESC
LIMIT 2) AS users
ORDER BY
"users"."id" DESC
LIMIT 2)
ORDER BY
let(:users_where) do
'FROM
"users"
WHERE
"users"."id" IN
\(SELECT
"users"."id"
FROM
LATERAL
\(SELECT
message,
"users"."id"
FROM
"users"
INNER JOIN "user_statuses" "status" ON "status"."user_id" = "users"."id"
WHERE
"status"."message" IN \(\'foo\'\, \'bar\'\)
ORDER BY
"users"."id" DESC
LIMIT 2\)
AS users
ORDER BY
"users"."id" DESC
LIMIT 2
SQL
LIMIT 2\)
ORDER BY
"users"."id" DESC
LIMIT 2'
end
let(:expected_query_regexp) do
Regexp.new("(#{users_select}|#{users_select_with_ignored_columns}) #{users_where}".squish)
end
it 'does not rewrite the in statement for the joined table' do
expect(issued_query.gsub(/\s/, '')).to start_with(expected_query.gsub(/\s/, ''))
expect(issued_query).to match(expected_query_regexp)
end
end

Some files were not shown because too many files have changed in this diff Show More