Add latest changes from gitlab-org/gitlab@master

This commit is contained in:
GitLab Bot 2023-03-22 09:08:32 +00:00
parent 7137270698
commit a48f9b5872
43 changed files with 616 additions and 646 deletions

View File

@ -123,7 +123,6 @@ Lint/AmbiguousOperatorPrecedence:
- 'spec/lib/gitlab/regex_spec.rb'
- 'spec/lib/gitlab/search/abuse_validators/no_abusive_term_length_validator_spec.rb'
- 'spec/lib/gitlab/slash_commands/deploy_spec.rb'
- 'spec/lib/gitlab/url_blocker_spec.rb'
- 'spec/mailers/notify_spec.rb'
- 'spec/models/appearance_spec.rb'
- 'spec/models/ci/build_spec.rb'

View File

@ -611,7 +611,6 @@ Lint/UnusedMethodArgument:
- 'spec/lib/gitlab/sidekiq_middleware/duplicate_jobs/server_spec.rb'
- 'spec/lib/gitlab/sidekiq_middleware/worker_context/server_spec.rb'
- 'spec/lib/gitlab/sidekiq_middleware_spec.rb'
- 'spec/lib/gitlab/url_blocker_spec.rb'
- 'spec/migrations/20211018152654_schedule_remove_duplicate_vulnerabilities_findings3_spec.rb'
- 'spec/migrations/20211116111644_schedule_remove_occurrence_pipelines_and_duplicate_vulnerabilities_findings_spec.rb'
- 'spec/migrations/20211207135331_schedule_recalculate_uuid_on_vulnerabilities_occurrences4_spec.rb'

View File

@ -2078,7 +2078,6 @@ RSpec/ContextWording:
- 'spec/lib/gitlab/tree_summary_spec.rb'
- 'spec/lib/gitlab/untrusted_regexp/ruby_syntax_spec.rb'
- 'spec/lib/gitlab/untrusted_regexp_spec.rb'
- 'spec/lib/gitlab/url_blocker_spec.rb'
- 'spec/lib/gitlab/url_sanitizer_spec.rb'
- 'spec/lib/gitlab/usage/metric_definition_spec.rb'
- 'spec/lib/gitlab/usage/metric_spec.rb'

View File

@ -4731,7 +4731,6 @@ RSpec/MissingFeatureCategory:
- 'spec/lib/gitlab/untrusted_regexp/ruby_syntax_spec.rb'
- 'spec/lib/gitlab/untrusted_regexp_spec.rb'
- 'spec/lib/gitlab/uploads_transfer_spec.rb'
- 'spec/lib/gitlab/url_blocker_spec.rb'
- 'spec/lib/gitlab/url_blockers/domain_allowlist_entry_spec.rb'
- 'spec/lib/gitlab/url_blockers/ip_allowlist_entry_spec.rb'
- 'spec/lib/gitlab/url_blockers/url_allowlist_spec.rb'

View File

@ -218,7 +218,6 @@ Style/StringConcatenation:
- 'spec/lib/gitlab/themes_spec.rb'
- 'spec/lib/gitlab/throttle_spec.rb'
- 'spec/lib/gitlab/tree_summary_spec.rb'
- 'spec/lib/gitlab/url_blocker_spec.rb'
- 'spec/lib/gitlab/utils_spec.rb'
- 'spec/lib/gitlab/visibility_level_spec.rb'
- 'spec/lib/gitlab/wiki_pages/front_matter_parser_spec.rb'

View File

@ -1 +1 @@
c4030b2e8f984424f37f2310a7266bb690c8e435
82b151a8899eb49d04600ec17c84042736949f48

View File

@ -27,11 +27,8 @@ module FinderWithGroupHierarchy
# we can preset root group for all of them to optimize permission checks
Group.preset_root_ancestor_for(groups)
# Preloading the max access level for the given groups to avoid N+1 queries
# during the access check.
if !skip_authorization && current_user && Feature.enabled?(:preload_max_access_levels_for_labels_finder, group)
Preloaders::UserMaxAccessLevelInGroupsPreloader.new(groups, current_user).execute
end
preload_associations(groups) if !skip_authorization && current_user && Feature.enabled?(
:preload_max_access_levels_for_labels_finder, group)
groups_user_can_read_items(groups).map(&:id)
end
@ -77,4 +74,10 @@ module FinderWithGroupHierarchy
groups.select { |group| authorized_to_read_item?(group) }
end
end
# Warms the per-group max access level cache for +current_user+ in a single
# query, so the later authorization checks don't issue one query per group.
def preload_associations(groups)
  Preloaders::UserMaxAccessLevelInGroupsPreloader.new(groups, current_user).execute
end
end
FinderWithGroupHierarchy.prepend_mod_with('FinderWithGroupHierarchy')

View File

@ -11,6 +11,11 @@ class LabelsFinder < UnionFinder
# @param current_user [User, nil] user the finder results are scoped to
# @param params [Hash] finder options; supports `:preload_parent_association`
#   (default: +true+) to toggle the `with_preloaded_container` scope in #execute.
def initialize(current_user, params = {})
  @current_user = current_user
  @params = params

  # Preload container records (project, group) by default, in some cases we invoke
  # the LabelsPreloader on the loaded records to prevent all N+1 queries.
  # In that case we disable the default with_preloaded_container scope because it
  # interferes with the LabelsPreloader.
  @preload_parent_association = params.fetch(:preload_parent_association, true)
end
def execute(skip_authorization: false)
@ -19,7 +24,9 @@ class LabelsFinder < UnionFinder
items = with_title(items)
items = by_subscription(items)
items = by_search(items)
sort(items.with_preloaded_container)
items = items.with_preloaded_container if @preload_parent_association
sort(items)
end
private

View File

@ -13,5 +13,11 @@ module Resolvers
required: false,
description: 'Include only group level labels.',
default_value: false
before_connection_authorization do |nodes, current_user|
if Feature.enabled?(:preload_max_access_levels_for_labels_finder)
Preloaders::LabelsPreloader.new(nodes, current_user).preload_all
end
end
end
end

View File

@ -17,6 +17,12 @@ module Resolvers
description: 'Include labels from ancestor groups.',
default_value: false
before_connection_authorization do |nodes, current_user|
if Feature.enabled?(:preload_max_access_levels_for_labels_finder)
Preloaders::LabelsPreloader.new(nodes, current_user).preload_all
end
end
def resolve(**args)
return Label.none if parent.nil?
@ -24,6 +30,13 @@ module Resolvers
# LabelsFinder uses `search` param, so we transform `search_term` into `search`
args[:search] = args.delete(:search_term)
# Optimization:
# Rely on the LabelsPreloader rather than the default parent record preloading in the
# finder because LabelsPreloader preloads more associations which are required for the
# permission check.
args[:preload_parent_association] = false if Feature.disabled?(:preload_max_access_levels_for_labels_finder)
LabelsFinder.new(current_user, parent_param.merge(args)).execute
end

View File

@ -1,150 +0,0 @@
# frozen_string_literal: true

module Clusters
  module Applications
    # DEPRECATED for removal in %14.0
    # See https://gitlab.com/groups/gitlab-org/-/epics/4280
    #
    # Record for a GitLab-managed Knative installation on a Kubernetes
    # cluster. Builds the Helm install/uninstall commands and schedules
    # follow-up workers after state transitions.
    class Knative < ApplicationRecord
      VERSION = '0.10.0'
      REPOSITORY = 'https://charts.gitlab.io'
      METRICS_CONFIG = 'https://gitlab.com/gitlab-org/charts/knative/-/raw/v0.9.0/vendor/istio-metrics.yml'
      # Delay before polling for the ingress gateway external IP address.
      FETCH_IP_ADDRESS_DELAY = 30.seconds
      # YAML file listing the Knative/Istio CRD API groups removed on uninstall.
      API_GROUPS_PATH = 'config/knative/api_groups.yml'

      self.table_name = 'clusters_applications_knative'

      include ::Clusters::Concerns::ApplicationCore
      include ::Clusters::Concerns::ApplicationStatus
      include ::Clusters::Concerns::ApplicationVersion
      include ::Clusters::Concerns::ApplicationData
      include AfterCommitQueue

      # Keep the concern's implementation reachable; it is wrapped below with
      # an RBAC guard.
      alias_method :original_set_initial_status, :set_initial_status

      # Knative is only supported on RBAC-enabled clusters, so skip status
      # initialization otherwise.
      def set_initial_status
        return unless cluster&.platform_kubernetes_rbac?

        original_set_initial_status
      end

      state_machine :status do
        # Once installed, poll (after a delay) for the ingress external IP.
        after_transition any => [:installed] do |application|
          application.run_after_commit do
            ClusterWaitForIngressIpAddressWorker.perform_in(
              FETCH_IP_ADDRESS_DELAY, application.name, application.id)
          end
        end

        # (Re)configure Istio after every install or update.
        after_transition any => [:installed, :updated] do |application|
          application.run_after_commit do
            ClusterConfigureIstioWorker.perform_async(application.cluster_id)
          end
        end
      end

      attribute :version, default: VERSION

      validates :hostname, presence: true, hostname: true

      scope :for_cluster, -> (cluster) { where(cluster: cluster) }

      # Helm chart reference installed by #install_command.
      def chart
        'knative/knative'
      end

      # Helm values passed to the chart; only the serving domain is configured.
      def values
        { "domain" => hostname }.to_yaml
      end

      # Pages domains usable as serverless domains for this installation.
      def available_domains
        PagesDomain.instance_serverless
      end

      def find_available_domain(pages_domain_id)
        available_domains.find_by(id: pages_domain_id)
      end

      # Pre-installed applications are not managed by GitLab and must not be
      # uninstalled through it.
      def allowed_to_uninstall?
        !pre_installed?
      end

      def install_command
        helm_command_module::InstallCommand.new(
          name: name,
          version: VERSION,
          rbac: cluster.platform_kubernetes_rbac?,
          chart: chart,
          files: files,
          repository: REPOSITORY,
          postinstall: install_knative_metrics
        )
      end

      # Re-enqueue the ingress IP poller, unless the address (or hostname) is
      # already known or the app is not installed yet.
      def schedule_status_update
        return unless installed?
        return if external_ip
        return if external_hostname

        ClusterWaitForIngressIpAddressWorker.perform_async(name, id)
      end

      # The Istio ingress gateway service fronting Knative traffic.
      def ingress_service
        cluster.kubeclient.get_service('istio-ingressgateway', Clusters::Kubernetes::ISTIO_SYSTEM_NAMESPACE)
      end

      def uninstall_command
        helm_command_module::DeleteCommand.new(
          name: name,
          rbac: cluster.platform_kubernetes_rbac?,
          files: files,
          predelete: delete_knative_services_and_metrics,
          postdelete: delete_knative_istio_leftovers
        )
      end

      private

      # kubectl commands run before the Helm delete.
      def delete_knative_services_and_metrics
        delete_knative_services + delete_knative_istio_metrics
      end

      # Delete every Knative service (ksvc) in each managed namespace.
      def delete_knative_services
        cluster.kubernetes_namespaces.map do |kubernetes_namespace|
          Gitlab::Kubernetes::KubectlCmd.delete("ksvc", "--all", "-n", kubernetes_namespace.namespace)
        end
      end

      # kubectl commands run after the Helm delete.
      def delete_knative_istio_leftovers
        delete_knative_namespaces + delete_knative_and_istio_crds
      end

      def delete_knative_namespaces
        [
          Gitlab::Kubernetes::KubectlCmd.delete("--ignore-not-found", "ns", "knative-serving"),
          Gitlab::Kubernetes::KubectlCmd.delete("--ignore-not-found", "ns", "knative-build")
        ]
      end

      def delete_knative_and_istio_crds
        api_groups.map do |group|
          Gitlab::Kubernetes::KubectlCmd.delete_crds_from_group(group)
        end
      end

      # returns an array of CRDs to be postdelete since helm does not
      # manage the CRDs it creates.
      def api_groups
        @api_groups ||= YAML.safe_load(File.read(Rails.root.join(API_GROUPS_PATH)))
      end

      # Relied on application_prometheus which is now removed
      def install_knative_metrics
        []
      end

      # Relied on application_prometheus which is now removed
      def delete_knative_istio_metrics
        []
      end
    end
  end
end

View File

@ -14,8 +14,7 @@ module Clusters
APPLICATIONS = {
Clusters::Applications::Helm.application_name => Clusters::Applications::Helm,
Clusters::Applications::Ingress.application_name => Clusters::Applications::Ingress,
Clusters::Applications::Runner.application_name => Clusters::Applications::Runner,
Clusters::Applications::Knative.application_name => Clusters::Applications::Knative
Clusters::Applications::Runner.application_name => Clusters::Applications::Runner
}.freeze
DEFAULT_ENVIRONMENT = '*'
KUBE_INGRESS_BASE_DOMAIN = 'KUBE_INGRESS_BASE_DOMAIN'
@ -54,7 +53,6 @@ module Clusters
has_one_cluster_application :helm
has_one_cluster_application :ingress
has_one_cluster_application :runner
has_one_cluster_application :knative
has_many :kubernetes_namespaces
has_many :metrics_dashboard_annotations, class_name: 'Metrics::Dashboard::Annotation', inverse_of: :cluster
@ -272,10 +270,6 @@ module Clusters
!!application_ingress&.available?
end
def application_knative_available?
!!application_knative&.available?
end
def integration_prometheus_available?
!!integration_prometheus&.available?
end

View File

@ -11,4 +11,8 @@ class GroupLabel < Label
def subject_foreign_key
'group_id'
end
# Prefer the already-loaded group when the :group association has been
# preloaded; otherwise fall back to the generic parent_container lookup.
def preloaded_parent_container
  return group if association(:group).loaded?

  parent_container
end
end

View File

@ -20,25 +20,31 @@ module Preloaders
def preload_all
ActiveRecord::Associations::Preloader.new(
records: labels,
associations: { parent_container: :route }
).call
ActiveRecord::Associations::Preloader.new(
records: labels.select { |l| l.is_a? ProjectLabel },
records: project_labels,
associations: { project: [:project_feature, namespace: :route] }
).call
ActiveRecord::Associations::Preloader.new(
records: labels.select { |l| l.is_a? GroupLabel },
records: group_labels,
associations: { group: :route }
).call
Preloaders::UserMaxAccessLevelInProjectsPreloader.new(project_labels.map(&:project), user).execute
labels.each do |label|
label.lazy_subscription(user)
label.lazy_subscription(user, project) if project.present?
end
end
private
# Memoized subset of +labels+ that are GroupLabel records.
def group_labels
  @group_labels ||= labels.grep(GroupLabel)
end
# Memoized subset of +labels+ that are ProjectLabel records.
def project_labels
  @project_labels ||= labels.grep(ProjectLabel)
end
end
end

View File

@ -23,6 +23,10 @@ class ProjectLabel < Label
super(project, target_project: target_project, format: format, full: full)
end
# Prefer the already-loaded project when the :project association has been
# preloaded; otherwise fall back to the generic parent_container lookup.
def preloaded_parent_container
  return project if association(:project).loaded?

  parent_container
end
private
def title_must_not_exist_at_group_level

View File

@ -1,5 +1,5 @@
# frozen_string_literal: true
class GroupLabelPolicy < BasePolicy
delegate { @subject.parent_container }
delegate { @subject.preloaded_parent_container }
end

View File

@ -1,5 +1,5 @@
# frozen_string_literal: true
class ProjectLabelPolicy < BasePolicy
delegate { @subject.parent_container }
delegate { @subject.preloaded_parent_container }
end

View File

@ -17,6 +17,8 @@
# validates :ftp_url, addressable_url: { schemes: %w(ftp) }
#
# validates :git_url, addressable_url: { schemes: %w(http https ssh git) }
#
# validates :smtp_address, addressable_url: { schemes: :none }
# end
#
# This validator can also block urls pointing to localhost or the local network to
@ -25,7 +27,7 @@
# Configuration options:
# * <tt>message</tt> - A custom error message, used when the URL is blank. (default is: "must be a valid URL").
# * <tt>blocked_message</tt> - A custom error message, used when the URL is blocked. Default: +'is blocked: %{exception_message}'+.
# * <tt>schemes</tt> - Array of URI schemes. Default: +['http', 'https']+
# * <tt>schemes</tt> - Array of URI schemes or `:none`. Default: +['http', 'https']+
# * <tt>allow_localhost</tt> - Allow urls pointing to +localhost+. Default: +true+
# * <tt>allow_local_network</tt> - Allow urls pointing to private network addresses. Default: +true+
# * <tt>allow_blank</tt> - Allow urls to be +blank+. Default: +false+

View File

@ -1,7 +1,5 @@
---
table_name: clusters_applications_knative
classes:
- Clusters::Applications::Knative
feature_categories:
- kubernetes_management
description: "(Deprecated) A GitLab managed Knative installation in a Kubernetes cluster"

View File

@ -0,0 +1,29 @@
# frozen_string_literal: true

# Blocks until the batched background migration that copied
# design_user_mentions.note_id into note_id_convert_to_bigint has finished,
# so the follow-up column-swap migration can run safely.
# Runs only on GitLab.com / dev / test (non-JH) — see #should_run?.
class EnsureDesignUserMentionsNoteIdBigintBackfillIsFinishedForGitlabDotCom < Gitlab::Database::Migration[2.1]
  include Gitlab::Database::MigrationHelpers::ConvertToBigint

  restrict_gitlab_migration gitlab_schema: :gitlab_main

  disable_ddl_transaction!

  def up
    return unless should_run?

    ensure_batched_background_migration_is_finished(
      job_class_name: 'CopyColumnUsingBackgroundMigrationJob',
      table_name: 'design_user_mentions',
      column_name: 'id',
      job_arguments: [['note_id'], ['note_id_convert_to_bigint']]
    )
  end

  def down
    # no-op
  end

  private

  def should_run?
    com_or_dev_or_test_but_not_jh?
  end
end

View File

@ -0,0 +1,68 @@
# frozen_string_literal: true

# Swaps design_user_mentions.note_id (integer) with the backfilled
# note_id_convert_to_bigint column: builds replacement indexes and FK
# concurrently, then renames columns/indexes/constraints under a table lock.
# The swap is symmetric, so #down performs the same operation in reverse.
# Runs only on GitLab.com / dev / test (non-JH) — see #should_run?.
class SwapDesignUserMentionsNoteIdToBigintForGitlabDotCom < Gitlab::Database::Migration[2.1]
  include Gitlab::Database::MigrationHelpers::ConvertToBigint

  disable_ddl_transaction!

  TABLE_NAME = 'design_user_mentions'

  def up
    return unless should_run?

    swap
  end

  def down
    return unless should_run?

    swap
  end

  def swap
    # This will replace the existing design_user_mentions_on_design_id_and_note_id_unique_index
    add_concurrent_index TABLE_NAME, [:design_id, :note_id_convert_to_bigint], unique: true,
      name: 'design_um_on_design_id_and_note_id_convert_to_bigint_unique'

    # This will replace the existing index_design_user_mentions_on_note_id
    add_concurrent_index TABLE_NAME, :note_id_convert_to_bigint, unique: true,
      name: 'index_design_user_mentions_on_note_id_convert_to_bigint'

    # This will replace the existing fk_rails_8de8c6d632
    add_concurrent_foreign_key TABLE_NAME, :notes, column: :note_id_convert_to_bigint,
      name: 'fk_design_user_mentions_note_id_convert_to_bigint',
      on_delete: :cascade

    with_lock_retries(raise_on_exhaustion: true) do
      # Take an exclusive lock so no writes interleave with the renames below.
      execute "LOCK TABLE notes, #{TABLE_NAME} IN ACCESS EXCLUSIVE MODE"

      # Three-way rename swaps note_id and note_id_convert_to_bigint.
      execute "ALTER TABLE #{TABLE_NAME} RENAME COLUMN note_id TO note_id_tmp"
      execute "ALTER TABLE #{TABLE_NAME} RENAME COLUMN note_id_convert_to_bigint TO note_id"
      execute "ALTER TABLE #{TABLE_NAME} RENAME COLUMN note_id_tmp TO note_id_convert_to_bigint"

      # Reset the copy-trigger function so it picks up the swapped columns.
      function_name = Gitlab::Database::UnidirectionalCopyTrigger
        .on_table(TABLE_NAME, connection: connection)
        .name(:note_id, :note_id_convert_to_bigint)
      execute "ALTER FUNCTION #{quote_table_name(function_name)} RESET ALL"

      # Swap defaults
      change_column_default TABLE_NAME, :note_id, nil
      change_column_default TABLE_NAME, :note_id_convert_to_bigint, 0

      # Replace the old indexes with the concurrently-built ones, keeping the
      # original names.
      execute 'DROP INDEX IF EXISTS design_user_mentions_on_design_id_and_note_id_unique_index'
      rename_index TABLE_NAME, 'design_um_on_design_id_and_note_id_convert_to_bigint_unique',
        'design_user_mentions_on_design_id_and_note_id_unique_index'

      execute 'DROP INDEX IF EXISTS index_design_user_mentions_on_note_id'
      rename_index TABLE_NAME, 'index_design_user_mentions_on_note_id_convert_to_bigint',
        'index_design_user_mentions_on_note_id'

      # Same for the foreign key: drop the old one, rename the new one to the
      # original constraint name.
      execute "ALTER TABLE #{TABLE_NAME} DROP CONSTRAINT IF EXISTS fk_rails_8de8c6d632"
      rename_constraint(TABLE_NAME, 'fk_design_user_mentions_note_id_convert_to_bigint', 'fk_rails_8de8c6d632')
    end
  end

  def should_run?
    com_or_dev_or_test_but_not_jh?
  end
end

View File

@ -0,0 +1 @@
16d266b15451c20c4512b3ee2673af997301b7a92b4eacb4db74e025231c9f43

View File

@ -0,0 +1 @@
b07b58c96f5f61e63619edc645384c15341feb217a521cdf8d90f37bc261addb

View File

@ -15367,11 +15367,11 @@ ALTER SEQUENCE design_management_versions_id_seq OWNED BY design_management_vers
CREATE TABLE design_user_mentions (
id bigint NOT NULL,
design_id integer NOT NULL,
note_id integer NOT NULL,
note_id_convert_to_bigint integer DEFAULT 0 NOT NULL,
mentioned_users_ids integer[],
mentioned_projects_ids integer[],
mentioned_groups_ids integer[],
note_id_convert_to_bigint bigint DEFAULT 0 NOT NULL
note_id bigint NOT NULL
);
CREATE SEQUENCE design_user_mentions_id_seq

View File

@ -6,21 +6,23 @@ info: To determine the technical writer assigned to the Stage/Group associated w
# Configure Gitaly **(FREE SELF)**
The Gitaly service itself is configured by using a [TOML configuration file](reference.md).
Configure Gitaly in one of two ways:
To change Gitaly settings:
::Tabs
**For Omnibus GitLab**
:::TabTitle Linux package (Omnibus)
1. Edit `/etc/gitlab/gitlab.rb` and add or change the
[Gitaly settings](https://gitlab.com/gitlab-org/omnibus-gitlab/blob/1dd07197c7e5ae23626aad5a4a070a800b670380/files/gitlab-config-template/gitlab.rb.template#L1622-1676).
1. Save the file and [reconfigure GitLab](../restart_gitlab.md#omnibus-gitlab-reconfigure).
**For installations from source**
:::TabTitle Self-compiled (source)
1. Edit `/home/git/gitaly/config.toml` and add or change the [Gitaly settings](https://gitlab.com/gitlab-org/gitaly/blob/master/config.toml.example).
1. Save the file and [restart GitLab](../restart_gitlab.md#installations-from-source).
::EndTabs
The following configuration options are also available:
- Enabling [TLS support](#enable-tls-support).
@ -138,7 +140,11 @@ Gitaly and GitLab use two shared secrets for authentication:
- _Gitaly token_: used to authenticate gRPC requests to Gitaly
- _GitLab Shell token_: used for authentication callbacks from GitLab Shell to the GitLab internal API
**For Omnibus GitLab**
Configure authentication in one of two ways:
::Tabs
:::TabTitle Linux package (Omnibus)
To configure the _Gitaly token_, edit `/etc/gitlab/gitlab.rb`:
@ -167,7 +173,7 @@ Edit `/etc/gitlab/gitlab.rb`:
gitlab_shell['secret_token'] = 'shellsecret'
```
**For installations from source**
:::TabTitle Self-compiled (source)
1. Copy `/home/git/gitlab/.gitlab_shell_secret` from the Gitaly client to the same path on the
Gitaly servers (and any other Gitaly clients).
@ -189,9 +195,15 @@ Edit `/etc/gitlab/gitlab.rb`:
1. Save the file and [restart GitLab](../restart_gitlab.md#installations-from-source).
::EndTabs
#### Configure Gitaly server
**For Omnibus GitLab**
Configure Gitaly server in one of two ways:
::Tabs
:::TabTitle Linux package (Omnibus)
1. Edit `/etc/gitlab/gitlab.rb`:
@ -294,7 +306,7 @@ Updates to example must be made at:
- For GitLab 15.3 and later, run `sudo /opt/gitlab/embedded/bin/gitaly check /var/opt/gitlab/gitaly/config.toml`.
- For GitLab 15.2 and earlier, run `sudo /opt/gitlab/embedded/bin/gitaly-hooks check /var/opt/gitlab/gitaly/config.toml`.
**For installations from source**
:::TabTitle Self-compiled (source)
1. Edit `/home/git/gitaly/config.toml`:
@ -345,6 +357,8 @@ Updates to example must be made at:
- For GitLab 15.3 and later, run `sudo /opt/gitlab/embedded/bin/gitaly check /var/opt/gitlab/gitaly/config.toml`.
- For GitLab 15.2 and earlier, run `sudo /opt/gitlab/embedded/bin/gitaly-hooks check /var/opt/gitlab/gitaly/config.toml`.
::EndTabs
WARNING:
If directly copying repository data from a GitLab server to Gitaly, ensure that the metadata file,
default path `/var/opt/gitlab/git-data/repositories/.gitaly-metadata`, is not included in the transfer.
@ -381,7 +395,11 @@ You can't define Gitaly servers with some as a local Gitaly server
server (with `gitaly_address`) unless you use
[mixed configuration](#mixed-configuration).
**For Omnibus GitLab**
Configure Gitaly clients in one of two ways:
::Tabs
:::TabTitle Linux package (Omnibus)
1. Edit `/etc/gitlab/gitlab.rb`:
@ -415,7 +433,7 @@ server (with `gitaly_address`) unless you use
sudo gitlab-ctl tail gitaly
```
**For installations from source**
:::TabTitle Self-compiled (source)
1. Edit `/home/git/gitlab/config/gitlab.yml`:
@ -451,6 +469,8 @@ server (with `gitaly_address`) unless you use
tail -f /home/git/gitlab/log/gitaly.log
```
::EndTabs
When you tail the Gitaly logs on your Gitaly server, you should see requests coming in. One sure way
to trigger a Gitaly request is to clone a repository from GitLab over HTTP or HTTPS.
@ -532,9 +552,11 @@ Disabling Gitaly on the GitLab instance makes sense only when you run GitLab in
Gitaly runs on a separate machine from the GitLab instance. Disabling Gitaly on all machines in the cluster is not
a valid configuration (some machines must act as Gitaly servers).
To disable Gitaly on a GitLab server:
Disable Gitaly on a GitLab server in one of two ways:
**For Omnibus GitLab**
::Tabs
:::TabTitle Linux package (Omnibus)
1. Edit `/etc/gitlab/gitlab.rb`:
@ -544,7 +566,7 @@ To disable Gitaly on a GitLab server:
1. Save the file and [reconfigure GitLab](../restart_gitlab.md#omnibus-gitlab-reconfigure).
**For installations from source**
:::TabTitle Self-compiled (source)
1. Edit `/etc/default/gitlab`:
@ -554,6 +576,8 @@ To disable Gitaly on a GitLab server:
1. Save the file and [restart GitLab](../restart_gitlab.md#installations-from-source).
::EndTabs
## Enable TLS support
Gitaly supports TLS encryption. To communicate with a Gitaly instance that listens for secure
@ -585,9 +609,11 @@ If you use a load balancer, it must be able to negotiate HTTP/2 using the ALPN T
### Configure Gitaly with TLS
To configure Gitaly with TLS:
Configure Gitaly with TLS in one of two ways:
**For Omnibus GitLab**
::Tabs
:::TabTitle Linux package (Omnibus)
1. Create certificates for Gitaly servers.
1. On the Gitaly clients, copy the certificates (or their certificate authority) into
@ -651,7 +677,7 @@ To configure Gitaly with TLS:
1. Saving the file.
1. [Reconfiguring GitLab](../restart_gitlab.md#omnibus-gitlab-reconfigure).
**For installations from source**
:::TabTitle Self-compiled (source)
1. Create certificates for Gitaly servers.
1. On the Gitaly clients, copy the certificates into the system trusted certificates:
@ -727,6 +753,8 @@ To configure Gitaly with TLS:
1. Saving the file.
1. [Restarting GitLab](../restart_gitlab.md#installations-from-source).
::EndTabs
### Observe type of Gitaly connections
For information on observing the type of Gitaly connections being served, see the
@ -761,10 +789,11 @@ requests, the default setting of having just one active `gitaly-ruby` sidecar mi
If you see `ResourceExhausted` errors from Gitaly, it's very likely that you have not enough
`gitaly-ruby` capacity.
You can increase the number of `gitaly-ruby` processes on your Gitaly server with the following
settings:
Increase the number of `gitaly-ruby` processes on your Gitaly server in one of two ways:
**For Omnibus GitLab**
::Tabs
:::TabTitle Linux package (Omnibus)
1. Edit `/etc/gitlab/gitlab.rb`:
@ -783,7 +812,7 @@ settings:
1. Save the file, and then [reconfigure GitLab](../restart_gitlab.md#omnibus-gitlab-reconfigure).
**For installations from source**
:::TabTitle Self-compiled (source)
1. Edit `/home/git/gitaly/config.toml`:
@ -794,6 +823,8 @@ settings:
1. Save the file and [restart GitLab](../restart_gitlab.md#installations-from-source).
::EndTabs
## Limit RPC concurrency
WARNING:
@ -1025,7 +1056,11 @@ WARNING:
Background repository optimization is an experimental feature and may place significant load on the host while running.
Make sure to schedule this during off-peak hours and keep the duration short (for example, 30-60 minutes).
**For Omnibus GitLab**
Configure background repository optimization in one of two ways:
::Tabs
:::TabTitle Linux package (Omnibus)
Edit `/etc/gitlab/gitlab.rb` and add:
@ -1042,7 +1077,7 @@ gitaly['configuration'] = {
}
```
**For installations from source**
:::TabTitle Self-compiled (source)
Edit `/home/git/gitaly/config.toml` and add:
@ -1054,6 +1089,8 @@ duration = '30m'
storages = ["default"]
```
::EndTabs
## Rotate Gitaly authentication token
Rotating credentials in a production environment often requires downtime, causes outages, or both.
@ -1515,10 +1552,14 @@ By default, Gitaly doesn't sign commits made using GitLab UI. For example, commi
- Web IDE.
- Merge requests.
You can configure Gitaly to sign commits made using GitLab UI. The commits show as unverified and signed by an unknown user. Support for improvements is
proposed in issue [19185](https://gitlab.com/gitlab-org/gitlab/-/issues/19185).
You can configure Gitaly to sign commits made with the GitLab UI. The commits show as unverified and signed by an unknown
user. Support for improvements is proposed in [issue 19185](https://gitlab.com/gitlab-org/gitlab/-/issues/19185).
**For Omnibus GitLab**
Configure Gitaly to sign commits made with the GitLab UI in one of two ways:
::Tabs
:::TabTitle Linux package (Omnibus)
1. [Create a GPG key](../../user/project/repository/gpg_signed_commits/index.md#create-a-gpg-key)
and export it. For optimal performance, consider using an EdDSA key.
@ -1542,7 +1583,7 @@ proposed in issue [19185](https://gitlab.com/gitlab-org/gitlab/-/issues/19185).
1. Save the file and [reconfigure GitLab](../restart_gitlab.md#omnibus-gitlab-reconfigure).
**For installations from source**
:::TabTitle Self-compiled (source)
1. [Create a GPG key](../../user/project/repository/gpg_signed_commits/index.md#create-a-gpg-key)
and export it. For optimal performance, consider using an EdDSA key.
@ -1561,6 +1602,8 @@ proposed in issue [19185](https://gitlab.com/gitlab-org/gitlab/-/issues/19185).
1. Save the file and [restart GitLab](../restart_gitlab.md#installations-from-source).
::EndTabs
## Generate configuration using an external command
> [Introduced](https://gitlab.com/gitlab-org/gitaly/-/issues/4828) in GitLab 15.10.
@ -1583,9 +1626,11 @@ require 'json'
JSON.generate({"gitlab": {"http_settings": {"password": `aws get-secret-value --secret-id ...`}}})
```
You must then make the script path known to Gitaly.
You must then make the script path known to Gitaly in one of two ways:
**For Omnibus GitLab**
::Tabs
:::TabTitle Linux package (Omnibus)
Edit `/etc/gitlab/gitlab.rb` and configure the `config_command`:
@ -1595,7 +1640,7 @@ gitaly['configuration'] = {
}
```
**For installations from source**
:::TabTitle Self-compiled (source)
Edit `/home/git/gitaly/config.toml` and configure `config_command`:
@ -1603,6 +1648,8 @@ Edit `/home/git/gitaly/config.toml` and configure `config_command`:
config_command = "/path/to/config_command"
```
::EndTabs
After configuration, Gitaly executes the command on startup and parses its
standard output as JSON. The resulting configuration is then merged back into
the other Gitaly configuration.

View File

@ -975,6 +975,10 @@ Use lowercase for **personal access token**.
Do not use **please**. For details, see the [Microsoft style guide](https://learn.microsoft.com/en-us/style-guide/a-z-word-list-term-collections/p/please).
## Premium
Use **Premium**, in uppercase, for the subscription tier.
## prerequisites
Use **prerequisites** when documenting the steps before a task. Do not use **requirements**.
@ -1320,6 +1324,10 @@ For example:
See also [**enter**](#enter).
## Ultimate
Use **Ultimate**, in uppercase, for the subscription tier.
## units of measurement
Use a space between the number and the unit of measurement. For example, **128 GB**.

View File

@ -418,7 +418,7 @@ To disable the Elasticsearch integration:
1. Expand **Advanced Search**.
1. Clear the **Pause Elasticsearch indexing** checkbox.
## Zero downtime reindexing
## Zero-downtime reindexing
The idea behind this reindexing method is to leverage the [Elasticsearch reindex API](https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-reindex.html)
and Elasticsearch index alias feature to perform the operation. We set up an index alias which connects to a
@ -427,6 +427,10 @@ the writes to the `primary` index. Then, we create another index and invoke the
index data onto the new index. After the reindexing job is complete, we switch to the new index by connecting the
index alias to it which becomes the new `primary` index. At the end, we resume the writes and typical operation resumes.
### Using zero-downtime reindexing
You can use zero-downtime reindexing to configure index settings or mappings that cannot be changed without creating a new index and copying existing data. You should not use zero-downtime reindexing to fix missing data. Zero-downtime reindexing does not add data to the search cluster if the data is not already indexed. You must complete all [advanced search migrations](#advanced-search-migrations) before you start reindexing.
### Trigger the reindex via the advanced search administration
> - [Introduced](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/34069) in GitLab 13.2.
@ -662,7 +666,7 @@ For basic guidance on choosing a cluster configuration you may refer to [Elastic
- Generally, you want to use at least a 2-node cluster configuration with one replica, which allows you to have resilience. If your storage usage is growing quickly, you may want to plan horizontal scaling (adding more nodes) beforehand.
- It's not recommended to use HDD storage with the search cluster, because it takes a hit on performance. It's better to use SSD storage (NVMe or SATA SSD drives for example).
- You should not use [coordinating-only nodes](https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-node.html#coordinating-only-node) with large instances. Coordinating-only nodes are smaller than [data nodes](https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-node.html#data-node), which can impact performance and advanced search migrations.
- You should not use [coordinating-only nodes](https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-node.html#coordinating-only-node) with large instances. Coordinating-only nodes are smaller than [data nodes](https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-node.html#data-node), which can impact performance and [advanced search migrations](#advanced-search-migrations).
- You can use the [GitLab Performance Tool](https://gitlab.com/gitlab-org/quality/performance) to benchmark search performance with different search cluster sizes and configurations.
- `Heap size` should be set to no more than 50% of your physical RAM. Additionally, it shouldn't be set to more than the threshold for zero-based compressed oops. The exact threshold varies, but 26 GB is safe on most systems, but can also be as large as 30 GB on some systems. See [Heap size settings](https://www.elastic.co/guide/en/elasticsearch/reference/current/important-settings.html#heap-size-settings) and [Setting JVM options](https://www.elastic.co/guide/en/elasticsearch/reference/current/jvm-options.html) for more details.
- Number of CPUs (CPU cores) per node usually corresponds to the `Number of Elasticsearch shards` setting described below.

View File

@ -12,7 +12,8 @@ module Gitlab
class << self
# Validates the given url according to the constraints specified by arguments.
#
# ports - Raises error if the given URL port does is not between given ports.
# schemes - Array of URI schemes or `:none` if scheme must not be set.
# ports - Raises error if the given URL port is not between given ports.
# allow_localhost - Raises error if URL resolves to a localhost IP address and argument is false.
# allow_local_network - Raises error if URL resolves to a link-local address and argument is false.
# allow_object_storage - Avoid raising an error if URL resolves to an object storage endpoint and argument is true.
@ -229,6 +230,12 @@ module Gitlab
end
def validate_scheme(scheme, schemes)
if schemes == :none
return if scheme.nil?
raise BlockedUrlError, "No scheme allowed but got `#{scheme}://`"
end
if scheme.blank? || (schemes.any? && schemes.exclude?(scheme))
raise BlockedUrlError, "Only allowed schemes are #{schemes.join(', ')}"
end

View File

@ -101,10 +101,5 @@ FactoryBot.define do
factory :clusters_applications_runner, class: 'Clusters::Applications::Runner' do
cluster factory: %i(cluster with_installed_helm provided_by_gcp)
end
factory :clusters_applications_knative, class: 'Clusters::Applications::Knative' do
hostname { 'example.com' }
cluster factory: %i(cluster with_installed_helm provided_by_gcp)
end
end
end

View File

@ -94,7 +94,6 @@ FactoryBot.define do
application_helm factory: %i(clusters_applications_helm installed)
application_ingress factory: %i(clusters_applications_ingress installed)
application_runner factory: %i(clusters_applications_runner installed)
application_knative factory: %i(clusters_applications_knative installed)
end
trait :with_domain do

View File

@ -337,7 +337,7 @@ describe('AppComponent', () => {
});
});
it('shows appropriate error alert if request forbidden to leave group', () => {
it('shows appropriate error alert if request forbids to leave group', () => {
const message = 'Failed to leave the group. Please make sure you are not the only owner.';
jest.spyOn(vm.service, 'leaveGroup').mockRejectedValue({ status: HTTP_STATUS_FORBIDDEN });
jest.spyOn(vm.store, 'removeGroup');

View File

@ -48,6 +48,67 @@ RSpec.describe Resolvers::GroupLabelsResolver do
end
end
describe 'association preloading', :saas do
let(:params) do
{
include_ancestor_groups: true,
include_descendant_groups: true,
only_group_labels: false
}
end
before do
group.add_developer(current_user)
stub_feature_flags(preload_max_access_levels_for_labels_finder: flag_enabled)
# warmup
resolve_labels(group, params).to_a
end
context 'when the preload_max_access_levels_for_labels_finder FF is on' do
let(:flag_enabled) { true }
it 'prevents N+1 queries' do
control = Gitlab::WithRequestStore.with_request_store do
ActiveRecord::QueryRecorder.new { resolve_labels(group, params).to_a }
end
another_project = create(:project, :private, group: sub_subgroup)
another_subgroup = create(:group, :private, parent: group)
create(:label, project: another_project, name: 'another project feature')
create(:group_label, group: another_subgroup, name: 'another group feature')
expect do
Gitlab::WithRequestStore.with_request_store do
resolve_labels(group, params).to_a
end
end.not_to exceed_query_limit(control.count)
end
end
context 'when the preload_max_access_levels_for_labels_finder FF is off' do
let(:flag_enabled) { false }
it 'creates N+1 queries' do
control = Gitlab::WithRequestStore.with_request_store do
ActiveRecord::QueryRecorder.new { resolve_labels(group, params).to_a }
end
another_project = create(:project, :private, group: sub_subgroup)
another_subgroup = create(:group, :private, parent: group)
create(:label, project: another_project, name: 'another project feature')
create(:group_label, group: another_subgroup, name: 'another group feature')
expect do
Gitlab::WithRequestStore.with_request_store do
resolve_labels(group, params).to_a
end
end.to exceed_query_limit(control.count)
end
end
end
context 'at group level' do
before_all do
group.add_developer(current_user)

View File

@ -48,6 +48,66 @@ RSpec.describe Resolvers::LabelsResolver do
end
end
describe 'association preloading' do
let_it_be(:project) { create(:project, :private, group: sub_subgroup) }
let(:params) do
{
include_ancestor_groups: true
}
end
before do
group.add_developer(current_user)
stub_feature_flags(preload_max_access_levels_for_labels_finder: flag_enabled)
# warmup
resolve_labels(project, params).to_a
end
context 'when the preload_max_access_levels_for_labels_finder FF is on' do
let(:flag_enabled) { true }
it 'prevents N+1 queries' do
control = Gitlab::WithRequestStore.with_request_store do
ActiveRecord::QueryRecorder.new { resolve_labels(project, params).to_a }
end
another_project = create(:project, :private, group: subgroup)
another_subgroup = create(:group, :private, parent: group)
create(:label, project: another_project, name: 'another project feature')
create(:group_label, group: another_subgroup, name: 'another group feature')
expect do
Gitlab::WithRequestStore.with_request_store do
resolve_labels(project, params).to_a
end
end.not_to exceed_query_limit(control.count)
end
end
context 'when the preload_max_access_levels_for_labels_finder FF is off' do
let(:flag_enabled) { false }
it 'creates N+1 queries' do
control = Gitlab::WithRequestStore.with_request_store do
ActiveRecord::QueryRecorder.new { resolve_labels(project, params).to_a }
end
another_project = create(:project, :private, group: subgroup)
create(:label, project: another_project, name: 'another project feature')
create(:group_label, group: subgroup, name: 'another group feature')
expect do
Gitlab::WithRequestStore.with_request_store do
resolve_labels(project, params).to_a
end
end.to exceed_query_limit(control.count)
end
end
end
context 'with a parent project' do
before_all do
group.add_developer(current_user)

View File

@ -1,89 +0,0 @@
# frozen_string_literal: true
require 'spec_helper'
# Specs for the Kubeclient pod resource GitLab generates to run a Helm
# command (an install command here) inside the cluster.
RSpec.describe Gitlab::Kubernetes::Helm::Pod do
describe '#generate' do
using RSpec::Parameterized::TableSyntax
# Each row pins the expected helm-install image version and the extra env
# vars for a Helm major version (Helm 2 needs TILLER_NAMESPACE; Helm 3
# has no extra env var).
where(:helm_major_version, :expected_helm_version, :expected_command_env) do
2 | '2.17.0' | [:TILLER_NAMESPACE]
3 | '3.2.4' | nil
end
with_them do
let(:cluster) { create(:cluster, helm_major_version: helm_major_version) }
let(:app) { create(:clusters_applications_knative, cluster: cluster) }
let(:command) { app.install_command }
let(:namespace) { Gitlab::Kubernetes::Helm::NAMESPACE }
# No service account by default; overridden in the context further down.
let(:service_account_name) { nil }
subject { described_class.new(command, namespace, service_account_name: service_account_name) }
context 'with a command' do
it 'generates a Kubeclient::Resource' do
expect(subject.generate).to be_a_kind_of(Kubeclient::Resource)
end
it 'generates the appropriate metadata' do
metadata = subject.generate.metadata
expect(metadata.name).to eq("install-#{app.name}")
expect(metadata.namespace).to eq('gitlab-managed-apps')
expect(metadata.labels['gitlab.org/action']).to eq('install')
expect(metadata.labels['gitlab.org/application']).to eq(app.name)
end
it 'generates a container spec' do
spec = subject.generate.spec
expect(spec.containers.count).to eq(1)
end
it 'generates the appropriate specifications for the container' do
container = subject.generate.spec.containers.first
expect(container.name).to eq('helm')
expect(container.image).to eq("registry.gitlab.com/gitlab-org/cluster-integration/helm-install-image/releases/#{expected_helm_version}-kube-1.13.12-alpine-3.12")
expect(container.env.map(&:name)).to include(:HELM_VERSION, :COMMAND_SCRIPT, *expected_command_env)
expect(container.command).to match_array(["/bin/sh"])
expect(container.args).to match_array(["-c", "$(COMMAND_SCRIPT)"])
end
it 'includes a never restart policy' do
spec = subject.generate.spec
expect(spec.restartPolicy).to eq('Never')
end
# The configuration volume carries the generated values.yaml into the pod.
it 'includes volumes for the container' do
container = subject.generate.spec.containers.first
expect(container.volumeMounts.first['name']).to eq('configuration-volume')
expect(container.volumeMounts.first['mountPath']).to eq("/data/helm/#{app.name}/config")
end
it 'includes a volume inside the specification' do
spec = subject.generate.spec
expect(spec.volumes.first['name']).to eq('configuration-volume')
end
it 'mounts configMap specification in the volume' do
volume = subject.generate.spec.volumes.first
expect(volume.configMap['name']).to eq("values-content-configuration-#{app.name}")
expect(volume.configMap['items'].first['key']).to eq(:'values.yaml')
expect(volume.configMap['items'].first['path']).to eq(:'values.yaml')
end
it 'has no serviceAccountName' do
spec = subject.generate.spec
expect(spec.serviceAccountName).to be_nil
end
context 'with a service_account_name' do
let(:service_account_name) { 'sa' }
it 'uses the serviceAccountName provided' do
spec = subject.generate.spec
expect(spec.serviceAccountName).to eq(service_account_name)
end
end
end
end
end
end

View File

@ -2,7 +2,7 @@
require 'spec_helper'
RSpec.describe Gitlab::UrlBlocker, :stub_invalid_dns_only do
RSpec.describe Gitlab::UrlBlocker, :stub_invalid_dns_only, feature_category: :shared do
include StubRequests
let(:schemes) { %w[http https] }
@ -21,7 +21,7 @@ RSpec.describe Gitlab::UrlBlocker, :stub_invalid_dns_only do
end
end
shared_context 'instance configured to deny all requests' do
shared_context 'when instance configured to deny all requests' do
before do
allow(Gitlab::CurrentSettings).to receive(:current_application_settings?).and_return(true)
stub_application_setting(deny_all_requests_except_allowed: true)
@ -30,7 +30,7 @@ RSpec.describe Gitlab::UrlBlocker, :stub_invalid_dns_only do
shared_examples 'a URI denied by `deny_all_requests_except_allowed`' do
context 'when instance setting is enabled' do
include_context 'instance configured to deny all requests'
include_context 'when instance configured to deny all requests'
it 'blocks the request' do
expect { subject }.to raise_error(described_class::BlockedUrlError)
@ -81,7 +81,7 @@ RSpec.describe Gitlab::UrlBlocker, :stub_invalid_dns_only do
end
shared_examples 'a URI exempt from `deny_all_requests_except_allowed`' do
include_context 'instance configured to deny all requests'
include_context 'when instance configured to deny all requests'
it 'does not block the request' do
expect { subject }.not_to raise_error
@ -256,7 +256,7 @@ RSpec.describe Gitlab::UrlBlocker, :stub_invalid_dns_only do
end
context 'when domain is too long' do
let(:import_url) { 'https://example' + 'a' * 1024 + '.com' }
let(:import_url) { "https://example#{'a' * 1024}.com" }
it 'raises an error' do
expect { subject }.to raise_error(described_class::BlockedUrlError)
@ -285,7 +285,7 @@ RSpec.describe Gitlab::UrlBlocker, :stub_invalid_dns_only do
end
end
context 'DNS rebinding protection with IP allowed' do
context 'when DNS rebinding protection with IP allowed' do
let(:import_url) { 'http://a.192.168.0.120.3times.127.0.0.1.1time.repeat.rebind.network:9121/scrape?target=unix:///var/opt/gitlab/redis/redis.socket&amp;check-keys=*' }
before do
@ -302,7 +302,7 @@ RSpec.describe Gitlab::UrlBlocker, :stub_invalid_dns_only do
it_behaves_like 'a URI exempt from `deny_all_requests_except_allowed`'
end
context 'disabled DNS rebinding protection' do
context 'with disabled DNS rebinding protection' do
let(:options) { { dns_rebind_protection: false, schemes: schemes } }
context 'when URI is internal' do
@ -366,6 +366,32 @@ RSpec.describe Gitlab::UrlBlocker, :stub_invalid_dns_only do
end
end
end
describe 'options' do
describe 'schemes' do
context 'with :none' do
let(:schemes) { :none }
context 'when URL has no scheme' do
let(:import_url) { '1.1.1.1' }
it_behaves_like 'validates URI and hostname' do
let(:expected_uri) { import_url }
let(:expected_hostname) { nil }
end
end
context 'when URL has a scheme' do
let(:import_url) { 'http://1.1.1.1' }
it 'raises an error' do
expect { subject }
.to raise_error(described_class::BlockedUrlError, "No scheme allowed but got `http://`")
end
end
end
end
end
end
describe '#blocked_url?' do
@ -394,6 +420,11 @@ RSpec.describe Gitlab::UrlBlocker, :stub_invalid_dns_only do
expect(described_class.blocked_url?('https://gitlab.com/foo/foo.git', schemes: ['http'])).to be true
end
it 'return true when schemes is :none' do
expect(described_class.blocked_url?('gitlab.com', schemes: :none)).to be false
expect(described_class.blocked_url?('smtp://gitlab.com', schemes: :none)).to be true
end
it 'returns true for bad protocol on configured web/SSH host and ports' do
web_url = "javascript://#{Gitlab.host_with_port}/t.git%0aalert(1)"
expect(described_class.blocked_url?(web_url, schemes: schemes)).to be true
@ -483,7 +514,7 @@ RSpec.describe Gitlab::UrlBlocker, :stub_invalid_dns_only do
expect(described_class.blocked_url?('https://gitlab.com/foo/foo.git', schemes: schemes)).to be false
end
context 'when allow_local_network is' do
describe 'allow_local_network' do
let(:shared_address_space_ips) { ['100.64.0.0', '100.64.127.127', '100.64.255.255'] }
let(:local_ips) do
@ -564,11 +595,11 @@ RSpec.describe Gitlab::UrlBlocker, :stub_invalid_dns_only do
end
end
context 'true (default)' do
context 'when true (default)' do
it_behaves_like 'allows local requests', { allow_localhost: true, allow_local_network: true, schemes: %w[http https] }
end
context 'false' do
context 'when false' do
it 'blocks urls from private networks' do
local_ips.each do |ip|
stub_domain_resolv(fake_domain, ip) do
@ -721,14 +752,14 @@ RSpec.describe Gitlab::UrlBlocker, :stub_invalid_dns_only do
end
end
context 'when dns_rebinding_setting is' do
context 'enabled' do
describe 'dns_rebinding_setting' do
context 'when enabled' do
let(:dns_rebind_value) { true }
it_behaves_like 'allowlists the domain'
end
context 'disabled' do
context 'when disabled' do
let(:dns_rebind_value) { false }
it_behaves_like 'allowlists the domain'
@ -768,8 +799,8 @@ RSpec.describe Gitlab::UrlBlocker, :stub_invalid_dns_only do
end
end
context 'when enforce_user is' do
context 'false (default)' do
describe 'enforce_user' do
context 'when false (default)' do
it 'does not block urls with a non-alphanumeric username' do
expect(described_class).not_to be_blocked_url('ssh://-oProxyCommand=whoami@example.com/a', schemes: ['ssh'])
@ -781,7 +812,7 @@ RSpec.describe Gitlab::UrlBlocker, :stub_invalid_dns_only do
end
end
context 'true' do
context 'when true' do
it 'blocks urls with a non-alphanumeric username' do
aggregate_failures do
expect(described_class).to be_blocked_url('ssh://-oProxyCommand=whoami@example.com/a', enforce_user: true, schemes: ['ssh'])
@ -849,7 +880,7 @@ RSpec.describe Gitlab::UrlBlocker, :stub_invalid_dns_only do
end
end
def stub_domain_resolv(domain, ip, port = 80, &block)
def stub_domain_resolv(domain, ip, port = 80)
address = instance_double(Addrinfo,
ip_address: ip,
ipv4_private?: true,

View File

@ -0,0 +1,35 @@
# frozen_string_literal: true
require 'spec_helper'
require_migration!
# Verifies the guard migration that blocks deployment until the
# design_user_mentions note_id -> bigint background backfill has finished,
# and that the guard only applies to GitLab.com/dev/test instances.
RSpec.describe EnsureDesignUserMentionsNoteIdBigintBackfillIsFinishedForGitlabDotCom, feature_category: :database do
describe '#up' do
# Arguments identifying the batched background migration being awaited.
let(:migration_arguments) do
{
job_class_name: 'CopyColumnUsingBackgroundMigrationJob',
table_name: 'design_user_mentions',
column_name: 'id',
job_arguments: [['note_id'], ['note_id_convert_to_bigint']]
}
end
it 'ensures the migration is completed for GitLab.com, dev, or test' do
expect_next_instance_of(described_class) do |instance|
expect(instance).to receive(:com_or_dev_or_test_but_not_jh?).and_return(true)
expect(instance).to receive(:ensure_batched_background_migration_is_finished).with(migration_arguments)
end
migrate!
end
it 'skips the check for other instances' do
expect_next_instance_of(described_class) do |instance|
expect(instance).to receive(:com_or_dev_or_test_but_not_jh?).and_return(false)
expect(instance).not_to receive(:ensure_batched_background_migration_is_finished)
end
migrate!
end
end
end

View File

@ -0,0 +1,66 @@
# frozen_string_literal: true
require 'spec_helper'
require_migration!
# Verifies the column swap (integer note_id <-> bigint note_id_convert_to_bigint)
# on design_user_mentions, which only runs on GitLab.com/dev/test instances.
RSpec.describe SwapDesignUserMentionsNoteIdToBigintForGitlabDotCom, feature_category: :database do
describe '#up' do
before do
# As we call `schema_migrate_down!` before each example, and for this
# migration `#down` is the same as `#up`, we need to ensure we start
# from the expected state.
connection = described_class.new.connection
connection.execute('ALTER TABLE design_user_mentions ALTER COLUMN note_id TYPE integer')
connection.execute('ALTER TABLE design_user_mentions ALTER COLUMN note_id_convert_to_bigint TYPE bigint')
end
# rubocop: disable RSpec/AnyInstanceOf
it 'swaps the integer and bigint columns for GitLab.com, dev, or test' do
allow_any_instance_of(described_class).to receive(:com_or_dev_or_test_but_not_jh?).and_return(true)
user_mentions = table(:design_user_mentions)
disable_migrations_output do
reversible_migration do |migration|
migration.before -> {
user_mentions.reset_column_information
expect(user_mentions.columns.find { |c| c.name == 'note_id' }.sql_type).to eq('integer')
expect(user_mentions.columns.find { |c| c.name == 'note_id_convert_to_bigint' }.sql_type).to eq('bigint')
}
migration.after -> {
user_mentions.reset_column_information
expect(user_mentions.columns.find { |c| c.name == 'note_id' }.sql_type).to eq('bigint')
expect(user_mentions.columns.find { |c| c.name == 'note_id_convert_to_bigint' }.sql_type).to eq('integer')
}
end
end
end
it 'is a no-op for other instances' do
allow_any_instance_of(described_class).to receive(:com_or_dev_or_test_but_not_jh?).and_return(false)
user_mentions = table(:design_user_mentions)
disable_migrations_output do
reversible_migration do |migration|
# Column types are expected to be unchanged before and after.
migration.before -> {
user_mentions.reset_column_information
expect(user_mentions.columns.find { |c| c.name == 'note_id' }.sql_type).to eq('integer')
expect(user_mentions.columns.find { |c| c.name == 'note_id_convert_to_bigint' }.sql_type).to eq('bigint')
}
migration.after -> {
user_mentions.reset_column_information
expect(user_mentions.columns.find { |c| c.name == 'note_id' }.sql_type).to eq('integer')
expect(user_mentions.columns.find { |c| c.name == 'note_id_convert_to_bigint' }.sql_type).to eq('bigint')
}
end
end
end
# rubocop: enable RSpec/AnyInstanceOf
end
end

View File

@ -1,235 +0,0 @@
# frozen_string_literal: true
require 'spec_helper'
# Specs for the Knative cluster-application record: installability rules,
# the workers it schedules on install/update, its Helm install/update/delete
# commands, and its serverless domain lookups.
RSpec.describe Clusters::Applications::Knative do
let(:knative) { create(:clusters_applications_knative) }
before do
# Stub the workers so state transitions below don't enqueue real jobs;
# the examples assert on these stubs via have_received.
allow(ClusterWaitForIngressIpAddressWorker).to receive(:perform_in)
allow(ClusterWaitForIngressIpAddressWorker).to receive(:perform_async)
allow(ClusterConfigureIstioWorker).to receive(:perform_async)
end
include_examples 'cluster application core specs', :clusters_applications_knative
include_examples 'cluster application status specs', :clusters_applications_knative
include_examples 'cluster application helm specs', :clusters_applications_knative
include_examples 'cluster application version specs', :clusters_applications_knative
include_examples 'cluster application initial status specs'
describe 'default values' do
it { expect(subject.version).to eq(described_class::VERSION) }
end
describe 'when cloud run is enabled' do
let(:cluster) { create(:cluster, :provided_by_gcp, :cloud_run_enabled) }
let(:knative_cloud_run) { create(:clusters_applications_knative, cluster: cluster) }
it { expect(knative_cloud_run).to be_not_installable }
end
describe 'when rbac is not enabled' do
let(:cluster) { create(:cluster, :provided_by_gcp, :rbac_disabled) }
let(:knative_no_rbac) { create(:clusters_applications_knative, cluster: cluster) }
it { expect(knative_no_rbac).to be_not_installable }
end
describe 'make_installed with external_ip' do
before do
application.make_installed!
end
let(:application) { create(:clusters_applications_knative, :installing) }
it 'schedules a ClusterWaitForIngressIpAddressWorker' do
expect(ClusterWaitForIngressIpAddressWorker).to have_received(:perform_in)
.with(Clusters::Applications::Knative::FETCH_IP_ADDRESS_DELAY, 'knative', application.id)
end
end
describe 'configuring istio ingress gateway' do
context 'after installed' do
let(:application) { create(:clusters_applications_knative, :installing) }
before do
application.make_installed!
end
it 'schedules a ClusterConfigureIstioWorker' do
expect(ClusterConfigureIstioWorker).to have_received(:perform_async).with(application.cluster_id)
end
end
context 'after updated' do
let(:application) { create(:clusters_applications_knative, :updating) }
before do
application.make_installed!
end
it 'schedules a ClusterConfigureIstioWorker' do
expect(ClusterConfigureIstioWorker).to have_received(:perform_async).with(application.cluster_id)
end
end
end
describe '#can_uninstall?' do
subject { knative.can_uninstall? }
it { is_expected.to be_truthy }
end
describe '#schedule_status_update with external_ip' do
let(:application) { create(:clusters_applications_knative, :installed) }
before do
application.schedule_status_update
end
it 'schedules a ClusterWaitForIngressIpAddressWorker' do
expect(ClusterWaitForIngressIpAddressWorker).to have_received(:perform_async)
.with('knative', application.id)
end
context 'when the application is not installed' do
let(:application) { create(:clusters_applications_knative, :installing) }
it 'does not schedule a ClusterWaitForIngressIpAddressWorker' do
expect(ClusterWaitForIngressIpAddressWorker).not_to have_received(:perform_async)
end
end
context 'when there is already an external_ip' do
let(:application) { create(:clusters_applications_knative, :installed, external_ip: '111.222.222.111') }
it 'does not schedule a ClusterWaitForIngressIpAddressWorker' do
expect(ClusterWaitForIngressIpAddressWorker).not_to have_received(:perform_in)
end
end
context 'when there is already an external_hostname' do
let(:application) { create(:clusters_applications_knative, :installed, external_hostname: 'localhost.localdomain') }
it 'does not schedule a ClusterWaitForIngressIpAddressWorker' do
expect(ClusterWaitForIngressIpAddressWorker).not_to have_received(:perform_in)
end
end
end
# Shared expectations for both #install_command and #update_command below.
shared_examples 'a command' do
it 'is an instance of Helm::InstallCommand' do
expect(subject).to be_an_instance_of(Gitlab::Kubernetes::Helm::V3::InstallCommand)
end
it 'is initialized with knative arguments' do
expect(subject.name).to eq('knative')
expect(subject.chart).to eq('knative/knative')
expect(subject.files).to eq(knative.files)
end
it 'does not install metrics for prometheus' do
expect(subject.postinstall).to be_empty
end
end
describe '#install_command' do
subject { knative.install_command }
it 'is initialized with latest version' do
expect(subject.version).to eq('0.10.0')
end
it_behaves_like 'a command'
end
describe '#update_command' do
let!(:current_installed_version) { knative.version = '0.1.0' }
subject { knative.update_command }
it 'is initialized with current version' do
expect(subject.version).to eq(current_installed_version)
end
it_behaves_like 'a command'
end
describe '#uninstall_command' do
subject { knative.uninstall_command }
it { is_expected.to be_an_instance_of(Gitlab::Kubernetes::Helm::V3::DeleteCommand) }
it "removes knative deployed services before uninstallation" do
# Create two namespaces on the cluster; each should get a ksvc cleanup
# command in the predelete script.
2.times do |i|
cluster_project = create(:cluster_project, cluster: knative.cluster)
create(:cluster_kubernetes_namespace,
cluster: cluster_project.cluster,
cluster_project: cluster_project,
project: cluster_project.project,
namespace: "namespace_#{i}")
end
remove_namespaced_services_script = [
"kubectl delete ksvc --all -n #{knative.cluster.kubernetes_namespaces.first.namespace}",
"kubectl delete ksvc --all -n #{knative.cluster.kubernetes_namespaces.second.namespace}"
]
expect(subject.predelete).to match_array(remove_namespaced_services_script)
end
it "initializes command with all necessary postdelete script" do
api_groups = YAML.safe_load(File.read(Rails.root.join(Clusters::Applications::Knative::API_GROUPS_PATH)))
remove_knative_istio_leftovers_script = [
"kubectl delete --ignore-not-found ns knative-serving",
"kubectl delete --ignore-not-found ns knative-build"
]
# Postdelete = namespace cleanup plus one CRD cleanup per API group.
full_delete_commands_size = api_groups.size + remove_knative_istio_leftovers_script.size
expect(subject.postdelete).to include(*remove_knative_istio_leftovers_script)
expect(subject.postdelete.size).to eq(full_delete_commands_size)
expect(subject.postdelete[2]).to include("kubectl api-resources -o name --api-group #{api_groups[0]} | xargs -r kubectl delete --ignore-not-found crd")
expect(subject.postdelete[3]).to include("kubectl api-resources -o name --api-group #{api_groups[1]} | xargs -r kubectl delete --ignore-not-found crd")
end
end
describe '#files' do
let(:application) { knative }
let(:values) { subject[:'values.yaml'] }
subject { application.files }
it 'includes knative specific keys in the values.yaml file' do
expect(values).to include('domain')
end
end
describe 'validations' do
it { is_expected.to validate_presence_of(:hostname) }
end
describe '#available_domains' do
let!(:domain) { create(:pages_domain, :instance_serverless) }
it 'returns all instance serverless domains' do
expect(PagesDomain).to receive(:instance_serverless).and_call_original
domains = subject.available_domains
expect(domains.length).to eq(1)
expect(domains).to include(domain)
end
end
describe '#find_available_domain' do
let!(:domain) { create(:pages_domain, :instance_serverless) }
it 'returns the domain scoped to available domains' do
expect(subject).to receive(:available_domains).and_call_original
expect(subject.find_available_domain(domain.id)).to eq(domain)
end
end
end

View File

@ -714,10 +714,9 @@ feature_category: :kubernetes_management do
let!(:helm) { create(:clusters_applications_helm, cluster: cluster) }
let!(:ingress) { create(:clusters_applications_ingress, cluster: cluster) }
let!(:runner) { create(:clusters_applications_runner, cluster: cluster) }
let!(:knative) { create(:clusters_applications_knative, cluster: cluster) }
it 'returns a list of created applications' do
is_expected.to contain_exactly(helm, ingress, runner, knative)
is_expected.to contain_exactly(helm, ingress, runner)
end
end
@ -1425,36 +1424,6 @@ feature_category: :kubernetes_management do
end
end
describe '#knative_pre_installed?' do
subject(:knative_pre_installed?) { cluster.knative_pre_installed? }
before do
allow(cluster).to receive(:provider).and_return(provider)
end
context 'without provider' do
let(:provider) {}
it { is_expected.to eq(false) }
end
context 'with provider' do
let(:provider) { instance_double(Clusters::Providers::Aws, knative_pre_installed?: knative_pre_installed?) }
context 'with knative_pre_installed? set to true' do
let(:knative_pre_installed?) { true }
it { is_expected.to eq(true) }
end
context 'with knative_pre_installed? set to false' do
let(:knative_pre_installed?) { false }
it { is_expected.to eq(false) }
end
end
end
describe '#platform_kubernetes_active?' do
subject(:platform_kubernetes_active?) { cluster.platform_kubernetes_active? }
@ -1574,34 +1543,4 @@ feature_category: :kubernetes_management do
end
end
end
describe '#application_knative_available?' do
subject(:application_knative_available?) { cluster.application_knative_available? }
before do
allow(cluster).to receive(:application_knative).and_return(application_knative)
end
context 'without application_knative' do
let(:application_knative) {}
it { is_expected.to eq(false) }
end
context 'with application_knative' do
let(:application_knative) { instance_double(Clusters::Applications::Knative, available?: available?) }
context 'with available? set to true' do
let(:available?) { true }
it { is_expected.to eq(true) }
end
context 'with available? set to false' do
let(:available?) { false }
it { is_expected.to eq(false) }
end
end
end
end

View File

@ -56,4 +56,39 @@ RSpec.describe GroupLabel do
end
end
end
describe '#preloaded_parent_container' do
let_it_be(:label) { create(:group_label) }
before do
label.reload # ensure associations are not loaded
end
context 'when group is loaded' do
it 'does not invoke a DB query' do
label.group
count = ActiveRecord::QueryRecorder.new { label.preloaded_parent_container }.count
expect(count).to eq(0)
expect(label.preloaded_parent_container).to eq(label.group)
end
end
context 'when parent_container is loaded' do
it 'does not invoke a DB query' do
label.parent_container
count = ActiveRecord::QueryRecorder.new { label.preloaded_parent_container }.count
expect(count).to eq(0)
expect(label.preloaded_parent_container).to eq(label.parent_container)
end
end
context 'when none of them are loaded' do
it 'invokes a DB query' do
count = ActiveRecord::QueryRecorder.new { label.preloaded_parent_container }.count
expect(count).to eq(1)
end
end
end
end

View File

@ -18,14 +18,24 @@ RSpec.describe Preloaders::LabelsPreloader do
context 'project labels' do
let_it_be(:projects) { create_list(:project, 3, :public, :repository) }
let_it_be(:labels) { projects.each { |p| create(:label, project: p) } }
let_it_be(:labels) { projects.map { |p| create(:label, project: p) } }
it_behaves_like 'an efficient database query'
it 'preloads the max access level', :request_store do
labels_with_preloaded_data
query_count = ActiveRecord::QueryRecorder.new do
projects.first.team.max_member_access_for_user_ids([user.id])
end.count
expect(query_count).to eq(0)
end
end
context 'group labels' do
let_it_be(:groups) { create_list(:group, 3) }
let_it_be(:labels) { groups.each { |g| create(:group_label, group: g) } }
let_it_be(:labels) { groups.map { |g| create(:group_label, group: g) } }
it_behaves_like 'an efficient database query'
end

View File

@ -119,4 +119,39 @@ RSpec.describe ProjectLabel do
end
end
end
describe '#preloaded_parent_container' do
let_it_be(:label) { create(:label) }
before do
label.reload # ensure associations are not loaded
end
context 'when project is loaded' do
it 'does not invoke a DB query' do
label.project
count = ActiveRecord::QueryRecorder.new { label.preloaded_parent_container }.count
expect(count).to eq(0)
expect(label.preloaded_parent_container).to eq(label.project)
end
end
context 'when parent_container is loaded' do
it 'does not invoke a DB query' do
label.parent_container
count = ActiveRecord::QueryRecorder.new { label.preloaded_parent_container }.count
expect(count).to eq(0)
expect(label.preloaded_parent_container).to eq(label.parent_container)
end
end
context 'when none of them are loaded' do
it 'invokes a DB query' do
count = ActiveRecord::QueryRecorder.new { label.preloaded_parent_container }.count
expect(count).to eq(1)
end
end
end
end

View File

@ -1,19 +0,0 @@
# frozen_string_literal: true
require 'spec_helper'
# Exercises the shared "querying a GraphQL type with labels" examples against
# the `group` GraphQL type, using group labels.
RSpec.describe 'getting group label information', feature_category: :team_planning do
include GraphqlHelpers
let_it_be(:group) { create(:group, :public) }
# Inputs consumed by the shared examples: which factory builds the labels
# and the attributes to create them with.
let_it_be(:label_factory) { :group_label }
let_it_be(:label_attrs) { { group: group } }
it_behaves_like 'querying a GraphQL type with labels' do
let(:path_prefix) { ['group'] }
def make_query(fields)
graphql_query_for('group', { full_path: group.full_path }, fields)
end
end
end