Add latest changes from gitlab-org/gitlab@master
This commit is contained in:
parent 7014c82806
commit f45c9dd1a8
@@ -865,12 +865,6 @@ Migration/ReferToIndexByName:
    - !ruby/regexp /\Adb\/(post_)?migrate\/20200[1-7].*\.rb\z/
    - !ruby/regexp /\Aee\/db\/geo\/(post_)?migrate\/201.*\.rb\z/

Migration/CreateTableWithForeignKeys:
  # Disable this cop for all the existing migrations
  Exclude:
    - !ruby/regexp /\Adb\/(post_)?migrate\/201.*\.rb\z/
    - !ruby/regexp /\Adb\/(post_)?migrate\/2022[1-11].*\.rb\z/

Migration/PreventIndexCreation:
  Exclude:
    - !ruby/regexp /\Adb\/(post_)?migrate\/201.*\.rb\z/
@@ -3703,7 +3703,6 @@ Layout/LineLength:
    - 'spec/lib/gitlab/import_export/uploads_manager_spec.rb'
    - 'spec/lib/gitlab/import_export/version_checker_spec.rb'
    - 'spec/lib/gitlab/import_sources_spec.rb'
    - 'spec/lib/gitlab/instrumentation/redis_interceptor_spec.rb'
    - 'spec/lib/gitlab/issuable_metadata_spec.rb'
    - 'spec/lib/gitlab/issues/rebalancing/state_spec.rb'
    - 'spec/lib/gitlab/jira/dvcs_spec.rb'
@@ -102,7 +102,6 @@ Lint/AmbiguousOperatorPrecedence:
    - 'spec/lib/gitlab/database/batch_count_spec.rb'
    - 'spec/lib/gitlab/database/consistency_checker_spec.rb'
    - 'spec/lib/gitlab/graphql/tracers/metrics_tracer_spec.rb'
    - 'spec/lib/gitlab/instrumentation/redis_interceptor_spec.rb'
    - 'spec/lib/gitlab/issues/rebalancing/state_spec.rb'
    - 'spec/lib/gitlab/kroki_spec.rb'
    - 'spec/lib/gitlab/memory/instrumentation_spec.rb'
@@ -3588,7 +3588,6 @@ RSpec/FeatureCategory:
    - 'spec/lib/gitlab/instrumentation/rate_limiting_gates_spec.rb'
    - 'spec/lib/gitlab/instrumentation/redis_base_spec.rb'
    - 'spec/lib/gitlab/instrumentation/redis_cluster_validator_spec.rb'
    - 'spec/lib/gitlab/instrumentation/redis_interceptor_spec.rb'
    - 'spec/lib/gitlab/instrumentation/redis_spec.rb'
    - 'spec/lib/gitlab/internal_post_receive/response_spec.rb'
    - 'spec/lib/gitlab/issuable/clone/attributes_rewriter_spec.rb'
@@ -2334,7 +2334,6 @@ RSpec/NamedSubject:
    - 'spec/lib/gitlab/rack_attack_spec.rb'
    - 'spec/lib/gitlab/reactive_cache_set_cache_spec.rb'
    - 'spec/lib/gitlab/redis/boolean_spec.rb'
    - 'spec/lib/gitlab/redis/cross_slot_spec.rb'
    - 'spec/lib/gitlab/redis/db_load_balancing_spec.rb'
    - 'spec/lib/gitlab/redis/multi_store_spec.rb'
    - 'spec/lib/gitlab/redis/queues_spec.rb'
@@ -2517,7 +2517,6 @@ Style/InlineDisableAnnotation:
    - 'lib/gitlab/import_export/project/relation_factory.rb'
    - 'lib/gitlab/import_sources.rb'
    - 'lib/gitlab/instrumentation/redis_cluster_validator.rb'
    - 'lib/gitlab/instrumentation/redis_interceptor.rb'
    - 'lib/gitlab/internal_events.rb'
    - 'lib/gitlab/issuable/clone/copy_resource_events_service.rb'
    - 'lib/gitlab/issues/rebalancing/state.rb'
@@ -2556,7 +2555,6 @@ Style/InlineDisableAnnotation:
    - 'lib/gitlab/pagination/offset_pagination.rb'
    - 'lib/gitlab/pagination_delegate.rb'
    - 'lib/gitlab/patch/action_cable_subscription_adapter_identifier.rb'
    - 'lib/gitlab/patch/node_loader.rb'
    - 'lib/gitlab/patch/prependable.rb'
    - 'lib/gitlab/patch/redis_cache_store.rb'
    - 'lib/gitlab/patch/sidekiq_cron_poller.rb'
@@ -2573,7 +2571,6 @@ Style/InlineDisableAnnotation:
    - 'lib/gitlab/rack_attack.rb'
    - 'lib/gitlab/rack_attack/request.rb'
    - 'lib/gitlab/rack_attack/store.rb'
    - 'lib/gitlab/redis/cross_slot.rb'
    - 'lib/gitlab/redis/hll.rb'
    - 'lib/gitlab/redis/multi_store.rb'
    - 'lib/gitlab/reference_extractor.rb'
@@ -2942,9 +2939,7 @@ Style/InlineDisableAnnotation:
    - 'spec/lib/gitlab/pagination/keyset/order_spec.rb'
    - 'spec/lib/gitlab/pagination/keyset/simple_order_builder_spec.rb'
    - 'spec/lib/gitlab/patch/database_config_spec.rb'
    - 'spec/lib/gitlab/patch/node_loader_spec.rb'
    - 'spec/lib/gitlab/quick_actions/dsl_spec.rb'
    - 'spec/lib/gitlab/redis/cross_slot_spec.rb'
    - 'spec/lib/gitlab/redis/multi_store_spec.rb'
    - 'spec/lib/gitlab/search/abuse_detection_spec.rb'
    - 'spec/lib/gitlab/shard_health_cache_spec.rb'
Gemfile (5 changes)
@@ -288,8 +288,9 @@ gem 'js_regex', '~> 3.8' # rubocop:todo Gemfile/MissingFeatureCategory
gem 'device_detector' # rubocop:todo Gemfile/MissingFeatureCategory

# Redis
gem 'redis', '~> 4.8.0' # rubocop:todo Gemfile/MissingFeatureCategory
gem 'redis-namespace', '~> 1.10.0' # rubocop:todo Gemfile/MissingFeatureCategory
gem 'redis-namespace', '~> 1.10.0', feature_category: :redis
gem 'redis', '~> 5.0.0', feature_category: :redis
gem 'redis-clustering', '~> 5.0.0', feature_category: :redis
gem 'connection_pool', '~> 2.4' # rubocop:todo Gemfile/MissingFeatureCategory

# Redis session store
@@ -523,9 +523,11 @@
{"name":"recaptcha","version":"5.12.3","platform":"ruby","checksum":"37d1894add9e70a54d0c6c7f0ecbeedffbfa7d075acfbd4c509818dfdebdb7ee"},
{"name":"recursive-open-struct","version":"1.1.3","platform":"ruby","checksum":"a3538a72552fcebcd0ada657bdff313641a4a5fbc482c08cfb9a65acb1c9de5a"},
{"name":"redcarpet","version":"3.6.0","platform":"ruby","checksum":"8ad1889c0355ff4c47174af14edd06d62f45a326da1da6e8a121d59bdcd2e9e9"},
{"name":"redis","version":"4.8.0","platform":"ruby","checksum":"2000cf5014669c9dc821704b6d322a35a9a33852a95208911d9175d63b448a44"},
{"name":"redis","version":"5.0.8","platform":"ruby","checksum":"3b770ea597850b26d6a9718fa184241e53e6c8a7ae0486ee8bfaefd29f26f3d8"},
{"name":"redis-actionpack","version":"5.4.0","platform":"ruby","checksum":"f10cf649ab05914716d63334d7f709221ecc883b87cf348f90ecfe0c35ea3540"},
{"name":"redis-client","version":"0.19.0","platform":"ruby","checksum":"6ed9af23ff5aa87cf4d59439db77082b4cae5a0abbdd114ec5420bd63456324d"},
{"name":"redis-cluster-client","version":"0.7.5","platform":"ruby","checksum":"12fd1c9eda17157a5cd2ce46afba13a024c28d24922092299a8daa9f46e4e78a"},
{"name":"redis-clustering","version":"5.0.8","platform":"ruby","checksum":"8e2f3de3b1a700668eeac59125636e01be6ecd985e635a4d5649c47d71f6e166"},
{"name":"redis-namespace","version":"1.10.0","platform":"ruby","checksum":"2c1c6ea7c6c5e343e75b9bee3aa4c265e364a5b9966507397467af2bb3758d94"},
{"name":"redis-rack","version":"3.0.0","platform":"ruby","checksum":"abb50b82ae10ad4d11ca2e4901bfc2b98256cdafbbd95f80c86fc9e001478380"},
{"name":"redis-store","version":"1.10.0","platform":"ruby","checksum":"f258894f9f7e82834308a3d86242294f0cff2c9db9ae66e5cb4c553a5ec8b09e"},
Gemfile.lock (11 changes)
@@ -1385,13 +1385,19 @@ GEM
      json
    recursive-open-struct (1.1.3)
    redcarpet (3.6.0)
    redis (4.8.0)
    redis (5.0.8)
      redis-client (>= 0.17.0)
    redis-actionpack (5.4.0)
      actionpack (>= 5, < 8)
      redis-rack (>= 2.1.0, < 4)
      redis-store (>= 1.1.0, < 2)
    redis-client (0.19.0)
      connection_pool
    redis-cluster-client (0.7.5)
      redis-client (~> 0.12)
    redis-clustering (5.0.8)
      redis (= 5.0.8)
      redis-cluster-client (>= 0.7.0)
    redis-namespace (1.10.0)
      redis (>= 4)
    redis-rack (3.0.0)
@@ -2069,8 +2075,9 @@ DEPENDENCIES
  rbtrace (~> 0.4)
  re2 (= 2.7.0)
  recaptcha (~> 5.12)
  redis (~> 4.8.0)
  redis (~> 5.0.0)
  redis-actionpack (~> 5.4.0)
  redis-clustering (~> 5.0.0)
  redis-namespace (~> 1.10.0)
  request_store (~> 1.5.1)
  responders (~> 3.0)
@@ -22,8 +22,7 @@ module Types
          type: Types::CustomEmojiType.connection_type,
          null: true,
          resolver: Resolvers::CustomEmojiResolver,
          description: 'Custom emoji in this namespace.',
          alpha: { milestone: '13.6' }
          description: 'Custom emoji in this namespace.'

    field :share_with_group_lock,
          type: GraphQL::Types::Boolean,
@@ -44,8 +44,8 @@ module Types
    mount_mutation Mutations::Clusters::AgentTokens::Create
    mount_mutation Mutations::Clusters::AgentTokens::Revoke
    mount_mutation Mutations::Commits::Create, calls_gitaly: true
    mount_mutation Mutations::CustomEmoji::Create, alpha: { milestone: '13.6' }
    mount_mutation Mutations::CustomEmoji::Destroy, alpha: { milestone: '13.6' }
    mount_mutation Mutations::CustomEmoji::Create
    mount_mutation Mutations::CustomEmoji::Destroy
    mount_mutation Mutations::CustomerRelations::Contacts::Create
    mount_mutation Mutations::CustomerRelations::Contacts::Update
    mount_mutation Mutations::CustomerRelations::Organizations::Create
@@ -84,7 +84,7 @@ class ActiveSession
      )

      Gitlab::Instrumentation::RedisClusterValidator.allow_cross_slot_commands do
        Gitlab::Redis::CrossSlot::Pipeline.new(redis).pipelined do |pipeline|
        redis.pipelined do |pipeline|
          pipeline.setex(
            key_name(user.id, session_private_id),
            expiry,
@@ -21,7 +21,7 @@ module LimitedCapacity

    def register(jid, max_jids)
      with_redis do |redis|
        redis.eval(LUA_REGISTER_SCRIPT, keys: [counter_key], argv: [jid, max_jids])
        redis.eval(LUA_REGISTER_SCRIPT, keys: [counter_key], argv: [jid.to_s, max_jids.to_i])
      end.present?
    end

@@ -59,7 +59,7 @@ module LimitedCapacity
    end

    def remove_job_keys(redis, keys)
      redis.srem?(counter_key, keys)
      redis.srem?(counter_key, keys) if keys.present?
    end

    def with_redis(&block)
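
The two hunks above follow a pattern that recurs throughout this commit: values handed to the client are coerced to the exact type the command expects, and empty inputs are guarded before the call. A minimal sketch of the idea (the stricter argument handling of redis-rb v5's underlying redis-client is an assumption inferred from these changes, not something the diff states):

```ruby
# Sketch only, assuming redis-rb v5 rejects argument types that v4 silently stringified.
jid = 42          # may arrive as an Integer
max_jids = '25'   # may arrive as a String

argv = [jid.to_s, max_jids.to_i]  # coerce explicitly, as the diff does
# redis.eval(LUA_REGISTER_SCRIPT, keys: [counter_key], argv: argv)

keys = []
# redis.srem?(counter_key, keys) if keys.present?  # guard empty key lists before SREM
```
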
@@ -21,12 +21,6 @@ end
# :nocov:
# rubocop:enable Gitlab/NoCodeCoverageComment

Redis::Client.prepend(Gitlab::Instrumentation::RedisInterceptor)
Redis::Cluster::NodeLoader.prepend(Gitlab::Patch::NodeLoader)
Redis::Cluster::SlotLoader.prepend(Gitlab::Patch::SlotLoader)
Redis::Cluster::CommandLoader.prepend(Gitlab::Patch::CommandLoader)
Redis::Cluster.prepend(Gitlab::Patch::RedisCluster)

# this only instruments `RedisClient` used in `Sidekiq.redis`
RedisClient.register(Gitlab::Instrumentation::RedisClientMiddleware)
RedisClient.prepend(Gitlab::Patch::RedisClient)
@@ -25,7 +25,17 @@ raise "Do not configure cable.yml with a Redis Cluster as ActionCable only works
# https://github.com/rails/rails/blob/bb5ac1623e8de08c1b7b62b1368758f0d3bb6379/actioncable/lib/action_cable/subscription_adapter/redis.rb#L18
ActionCable::SubscriptionAdapter::Redis.redis_connector = lambda do |config|
  args = config.except(:adapter, :channel_prefix)
    .merge(instrumentation_class: ::Gitlab::Instrumentation::Redis::ActionCable)
    .merge(custom: { instrumentation_class: "ActionCable" })

  if args[:url]
    # cable.yml uses the url key but unix sockets needs to be passed into Redis
    # under the `path` key. We do a simple reassignment to resolve that.
    uri = URI.parse(args[:url])
    if uri.scheme == 'unix'
      args[:path] = uri.path
      args.delete(:url)
    end
  end

  ::Redis.new(args)
end
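
A worked example of the `url`-to-`path` reassignment in the initializer above (the socket path is illustrative):

```ruby
require 'uri'

args = { url: 'unix:/var/run/redis/redis.sock' }
uri = URI.parse(args[:url])
uri.scheme # => "unix"

if uri.scheme == 'unix'
  args[:path] = uri.path # => "/var/run/redis/redis.sock"
  args.delete(:url)
end
args # => { path: "/var/run/redis/redis.sock" }
```
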
@@ -6,7 +6,7 @@ Peek::Adapters::Redis.prepend ::Gitlab::PerformanceBar::RedisAdapterWhenPeekEnabled
Peek.singleton_class.prepend ::Gitlab::PerformanceBar::WithTopLevelWarnings

Rails.application.config.peek.adapter = :redis, {
  client: ::Redis.new(Gitlab::Redis::Cache.params),
  client: Gitlab::Redis::Cache.redis,
  expires_in: 5.minutes
}

@@ -29,12 +29,12 @@ cookie_key = if Rails.env.development?
    "_gitlab_session"
  end

store = Gitlab::Redis::Sessions.store(namespace: Gitlab::Redis::Sessions::SESSION_NAMESPACE)
::Redis::Store::Factory.prepend(Gitlab::Patch::RedisStoreFactory)

Rails.application.configure do
  config.session_store(
    :redis_store, # Using the cookie_store would enable session replay attacks.
    redis_store: store,
    redis_server: Gitlab::Redis::Sessions.params.merge(namespace: Gitlab::Redis::Sessions::SESSION_NAMESPACE),
    key: cookie_key,
    secure: Gitlab.config.gitlab.https,
    httponly: true,
@@ -28,7 +28,7 @@ def enable_semi_reliable_fetch_mode?
end

# Custom Queues configuration
queues_config_hash = Gitlab::Redis::Queues.redis_client_params
queues_config_hash = Gitlab::Redis::Queues.params

enable_json_logs = Gitlab.config.sidekiq.log_format != 'text'

@@ -27,6 +27,10 @@
reporter: exampleuser
stage: stage
issue_url: https://gitlab.com/gitlab-org/gitlab/-/issues/000000
impact: # Can be one of: [critical, high, medium, low]
scope: # Can be one or a combination of: [instance, group, project]
resolution_role: # Can be one of: [Admin, Owner, Maintainer, Developer]
manual_task: # Can be true or false. Use this to denote whether a resolution action must be performed manually (true), or if it can be automated by using the API or other automation (false).
body: | # (required) Don't change this line.
  <!-- START OF BODY COMMENT
@@ -5,5 +5,5 @@ description: |
  that could have been created with project import.
feature_category: security_policy_management
introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/127212
queued_migration_version: 20230721095222
queued_migration_version: 20230721095223
milestone: '16.5'
@@ -0,0 +1,23 @@
# frozen_string_literal: true

class DeleteOrphansApprovalProjectRules2 < Gitlab::Database::Migration[2.1]
  restrict_gitlab_migration gitlab_schema: :gitlab_main

  PROJECT_MIGRATION = 'DeleteOrphansApprovalProjectRules2'
  INTERVAL = 2.minutes

  def up
    queue_batched_background_migration(
      PROJECT_MIGRATION,
      :approval_project_rules,
      :id,
      job_interval: INTERVAL,
      batch_size: 500,
      sub_batch_size: 100
    )
  end

  def down
    delete_batched_background_migration(PROJECT_MIGRATION, :approval_project_rules, :id, [])
  end
end
@@ -1,22 +1,12 @@
# frozen_string_literal: true

class DeleteOrphansScanFindingLicenseScanningApprovalRules2 < Gitlab::Database::Migration[2.1]
class DeleteOrphansApprovalMergeRequestRules2 < Gitlab::Database::Migration[2.1]
  restrict_gitlab_migration gitlab_schema: :gitlab_main

  MERGE_REQUEST_MIGRATION = 'DeleteOrphansApprovalMergeRequestRules2'
  PROJECT_MIGRATION = 'DeleteOrphansApprovalProjectRules2'
  INTERVAL = 2.minutes

  def up
    queue_batched_background_migration(
      PROJECT_MIGRATION,
      :approval_project_rules,
      :id,
      job_interval: INTERVAL,
      batch_size: 500,
      sub_batch_size: 100
    )

    queue_batched_background_migration(
      MERGE_REQUEST_MIGRATION,
      :approval_merge_request_rules,
@@ -28,7 +18,6 @@ class DeleteOrphansScanFindingLicenseScanningApprovalRules2 < Gitlab::Database::
  end

  def down
    delete_batched_background_migration(PROJECT_MIGRATION, :approval_project_rules, :id, [])
    delete_batched_background_migration(MERGE_REQUEST_MIGRATION, :approval_merge_request_rules, :id, [])
  end
end
@@ -0,0 +1 @@
cc90683b1e4cf2f23917058f4f8537d4b91bcace568016c6f57adae5845ce68c
@@ -124,7 +124,7 @@ To make sure your configuration is correct:
1. Run in the console:

   ```ruby
   redis = Redis.new(Gitlab::Redis::SharedState.params)
   redis = Gitlab::Redis::SharedState.redis
   redis.info
   ```

@@ -59,7 +59,8 @@ To create a system hook:
1. On the left sidebar, at the bottom, select **Admin Area**.
1. Select **System Hooks**.
1. Select **Add new webhook**.
1. Provide the **URL** and **Secret Token**.
1. Enter the **URL**.
1. Optional. Enter a [**Secret Token**](../user/project/integrations/webhooks.md#validate-payloads-by-using-a-secret-token).
1. Select the checkbox next to each optional **Trigger** you want to enable.
1. Select **Enable SSL verification**, if desired.
1. Select **Add system hook**.
@@ -2707,10 +2707,6 @@ Input type: `CreateContainerRegistryProtectionRuleInput`

### `Mutation.createCustomEmoji`

NOTE:
**Introduced** in 13.6.
**Status**: Experiment.

Input type: `CreateCustomEmojiInput`

#### Arguments
@@ -3702,10 +3698,6 @@ Input type: `DestroyContainerRepositoryTagsInput`

### `Mutation.destroyCustomEmoji`

NOTE:
**Introduced** in 13.6.
**Status**: Experiment.

Input type: `DestroyCustomEmojiInput`

#### Arguments
@@ -19887,10 +19879,6 @@ four standard [pagination arguments](#connection-pagination-arguments):

Custom emoji in this namespace.

NOTE:
**Introduced** in 13.6.
**Status**: Experiment.

Returns [`CustomEmojiConnection`](#customemojiconnection).

This field returns a [connection](#connections). It accepts the
@@ -110,26 +110,26 @@ To enable and disable them, run on the GitLab Rails console:

```ruby
# To enable it for the instance:
Feature.enable(:<dev_flag_name>, type: :gitlab_com_derisk)
Feature.enable(:<dev_flag_name>)

# To disable it for the instance:
Feature.disable(:<dev_flag_name>, type: :gitlab_com_derisk)
Feature.disable(:<dev_flag_name>)

# To enable for a specific project:
Feature.enable(:<dev_flag_name>, Project.find(<project id>), type: :gitlab_com_derisk)
Feature.enable(:<dev_flag_name>, Project.find(<project id>))

# To disable for a specific project:
Feature.disable(:<dev_flag_name>, Project.find(<project id>), type: :gitlab_com_derisk)
Feature.disable(:<dev_flag_name>, Project.find(<project id>))
```

To check a `gitlab_com_derisk` feature flag's state:

```ruby
# Check if the feature flag is enabled
Feature.enabled?(:dev_flag_name, type: :gitlab_com_derisk)
Feature.enabled?(:dev_flag_name)

# Check if the feature flag is disabled
Feature.disabled?(:dev_flag_name, type: :gitlab_com_derisk)
Feature.disabled?(:dev_flag_name)
```

### `wip` type
@@ -154,13 +154,13 @@ Once the feature is complete, the feature flag type can be changed to the `gitla

```ruby
# Check if feature flag is enabled
Feature.enabled?(:my_wip_flag, project, type: :wip)
Feature.enabled?(:my_wip_flag, project)

# Check if feature flag is disabled
Feature.disabled?(:my_wip_flag, project, type: :wip)
Feature.disabled?(:my_wip_flag, project)

# Push feature flag to Frontend
push_frontend_feature_flag(:my_wip_flag, project, type: :wip)
push_frontend_feature_flag(:my_wip_flag, project)
```

### `beta` type
@@ -187,13 +187,13 @@ Providing a flag in this case allows engineers and customers to disable the new

```ruby
# Check if feature flag is enabled
Feature.enabled?(:my_beta_flag, project, type: :beta)
Feature.enabled?(:my_beta_flag, project)

# Check if feature flag is disabled
Feature.disabled?(:my_beta_flag, project, type: :beta)
Feature.disabled?(:my_beta_flag, project)

# Push feature flag to Frontend
push_frontend_feature_flag(:my_beta_flag, project, type: :beta)
push_frontend_feature_flag(:my_beta_flag, project)
```

### `ops` type
@@ -219,13 +219,13 @@ instance/group/project/user setting.

```ruby
# Check if feature flag is enabled
Feature.enabled?(:my_ops_flag, project, type: :ops)
Feature.enabled?(:my_ops_flag, project)

# Check if feature flag is disabled
Feature.disabled?(:my_ops_flag, project, type: :ops)
Feature.disabled?(:my_ops_flag, project)

# Push feature flag to Frontend
push_frontend_feature_flag(:my_ops_flag, project, type: :ops)
push_frontend_feature_flag(:my_ops_flag, project)
```

### `experiment` type
@@ -261,9 +261,9 @@ During development (`RAILS_ENV=development`) or testing (`RAILS_ENV=test`) all f

This process is meant to ensure consistent feature flag usage in the codebase. All feature flags **must**:

- Be known. Only use feature flags that are explicitly defined.
- Be known. Only use feature flags that are explicitly defined (except for feature flags of the types `experiment`, `worker` and `undefined`).
- Not be defined twice. They have to be defined either in FOSS or EE, but not both.
- Use a valid and consistent `type:` across all invocations.
- For feature flags that don't have a definition file, use a valid and consistent `type:` across all invocations.
- Have an owner.

All feature flags known to GitLab are self-documented in YAML files stored in:
@@ -302,24 +302,43 @@ Only feature flags that have a YAML definition file can be used when running the

```shell
$ bin/feature-flag my_feature_flag
>> Specify the group introducing the feature flag, like `group::project management`:
?> group::cloud connector
>> Specify the feature flag type
?> beta
You picked the type 'beta'

>> URL of the MR introducing the feature flag (enter to skip):
?> https://gitlab.com/gitlab-org/gitlab/-/merge_requests/38602
>> Specify the group label to which the feature flag belongs, from the following list:
1. group::group1
2. group::group2
?> 2
You picked the group 'group::group2'

>> Open this URL and fill in the rest of the details:
https://gitlab.com/gitlab-org/gitlab/-/issues/new?issue%5Btitle%5D=%5BFeature+flag%5D+Rollout+of+%60test-flag%60&issuable_template=Feature+Flag+Roll+Out
>> URL of the original feature issue (enter to skip):
?> https://gitlab.com/gitlab-org/gitlab/-/issues/435435

>> URL of the MR introducing the feature flag (enter to skip and let Danger provide a suggestion directly in the MR):
?> https://gitlab.com/gitlab-org/gitlab/-/merge_requests/141023

>> Username of the feature flag DRI (enter to skip):
?> bob

>> Is this an EE only feature (enter to skip):
?> [Return]

>> Press any key and paste the issue content that we copied to your clipboard! 🚀
?> [Return automatically opens the "New issue" page where you only have to paste the issue content]

>> URL of the rollout issue (enter to skip):
?> https://gitlab.com/gitlab-org/gitlab/-/issues/232533
create config/feature_flags/development/my_feature_flag.yml
?> https://gitlab.com/gitlab-org/gitlab/-/issues/437162

create config/feature_flags/beta/my_feature_flag.yml
---
name: my_feature_flag
introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/38602
rollout_issue_url: https://gitlab.com/gitlab-org/gitlab/-/issues/232533
group: group::cloud connector
type: development
feature_issue_url: https://gitlab.com/gitlab-org/gitlab/-/issues/435435
introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/141023
rollout_issue_url: https://gitlab.com/gitlab-org/gitlab/-/issues/437162
milestone: '16.9'
group: group::composition analysis
type: beta
default_enabled: false
```
@@ -405,17 +424,15 @@ by `default_enabled:` in YAML definition.
If feature flag does not have a YAML definition an error will be raised
in development or test environment, while returning `false` on production.

If not specified, the default feature flag type for `Feature.enabled?` and `Feature.disabled?`
is `type: development`. For all other feature flag types, you must specify the `type:`:
For feature flags that don't have a definition file (only allowed for the `experiment`, `worker` and `undefined` types),
you need to pass their `type:` when calling `Feature.enabled?` and `Feature.disabled?`:

```ruby
if Feature.enabled?(:feature_flag, project, type: :ops)
  # execute code if ops feature flag is enabled
else
  # execute code if ops feature flag is disabled
if Feature.enabled?(:experiment_feature_flag, project, type: :experiment)
  # execute code if feature flag is enabled
end

if Feature.disabled?(:my_feature_flag, project, type: :ops)
if Feature.disabled?(:worker_feature_flag, project, type: :worker)
  # execute code if feature flag is disabled
end
```
@@ -492,12 +509,12 @@ so checking for `gon.features.vim_bindings` would not work.
See the [Vue guide](../fe_guide/vue.md#accessing-feature-flags) for details about
how to access feature flags in a Vue component.

If not specified, the default feature flag type for `push_frontend_feature_flag`
is `type: development`. For all other feature flag types, you must specify the `type:`:
For feature flags that don't have a definition file (only allowed for the `experiment`, `worker` and `undefined` types),
you need to pass their `type:` when calling `push_frontend_feature_flag`:

```ruby
before_action do
  push_frontend_feature_flag(:vim_bindings, project, type: :ops)
  push_frontend_feature_flag(:vim_bindings, project, type: :experiment)
end
```

@@ -81,10 +81,10 @@ Developers are highly encouraged to use [hash-tags](https://redis.io/docs/refere
where appropriate to facilitate future adoption of Redis Cluster in more Redis types. For example, the Namespace model uses hash-tags
for its [config cache keys](https://gitlab.com/gitlab-org/gitlab/-/blob/1a12337058f260d38405886d82da5e8bb5d8da0b/app/models/namespace.rb#L786).

To perform multi-key commands, developers may use the [`Gitlab::Redis::CrossSlot::Pipeline`](https://gitlab.com/gitlab-org/gitlab/-/blob/master/lib/gitlab/redis/cross_slot.rb) wrapper.
To perform multi-key commands, developers may use the [`.pipelined`](https://github.com/redis-rb/redis-cluster-client#interfaces) method which splits and sends commands to each node and aggregates replies.
However, this does not work for [transactions](https://redis.io/docs/interact/transactions/) as Redis Cluster does not support cross-slot transactions.

For `Rails.cache`, we handle the `MGET` command found in `read_multi_get` by [patching it](https://gitlab.com/gitlab-org/gitlab/-/blob/c2bad2aac25e2f2778897bd4759506a72b118b15/lib/gitlab/patch/redis_cache_store.rb#L10) to use the `Gitlab::Redis::CrossSlot::Pipeline` wrapper.
For `Rails.cache`, we handle the `MGET` command found in `read_multi_get` by [patching it](https://gitlab.com/gitlab-org/gitlab/-/blob/c2bad2aac25e2f2778897bd4759506a72b118b15/lib/gitlab/patch/redis_cache_store.rb#L10) to use the `.pipelined` method.
The minimum size of the pipeline is set to 1000 commands and it can be adjusted by using the `GITLAB_REDIS_CLUSTER_PIPELINE_BATCH_LIMIT` environment variable.

## Redis in structured logging
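
A minimal sketch of the `.pipelined` behavior described in that doc hunk, assuming a local Redis Cluster reachable on port 7000 (connection details are illustrative):

```ruby
require 'redis-clustering'

redis = Redis::Cluster.new(nodes: %w[redis://127.0.0.1:7000])

# The cluster client groups these commands by key slot, sends each group to the
# node that owns the slot, and returns the replies in submission order.
replies = redis.pipelined do |pipeline|
  pipeline.set('{user1}:name', 'alice') # hash-tag pins related keys to one slot
  pipeline.set('{user2}:name', 'bob')   # different slot, possibly another node
end
```
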
@@ -147,8 +147,8 @@ module Gitlab
        # Don't use multistore if redis.foo configuration is not provided
        return super if config_fallback?

        primary_store = ::Redis.new(params)
        secondary_store = ::Redis.new(config_fallback.params)
        primary_store = init_redis(params)
        secondary_store = init_redis(config_fallback.params)

        MultiStore.new(primary_store, secondary_store, store_name)
      end
@@ -109,26 +109,31 @@ You can define URL variables directly using the REST API.

## Configure your webhook receiver endpoint

Webhook receiver endpoints should be fast and stable.
Slow and unstable receivers can be [disabled automatically](#failing-webhooks) to ensure system reliability. Webhooks that fail might lead to [duplicate events](#webhook-fails-or-multiple-webhook-requests-are-triggered).
Slow and unstable receivers might be [disabled automatically](#auto-disabled-webhooks) to ensure system reliability. Webhooks that fail might lead to [duplicate events](#webhook-fails-or-multiple-webhook-requests-are-triggered).

Endpoints should follow these best practices:

- **Respond quickly with a `200` or `201` status response.** Avoid any significant processing of webhooks in the same request.
  Instead, implement a queue to handle webhooks after they are received. The timeout limit for webhooks is [10 seconds on GitLab.com](../../../user/gitlab_com/index.md#other-limits).
  Instead, implement a queue to handle webhooks after they are received. Webhook receivers that do not respond before the [timeout limit](#webhook-timeout-limits) might be [disabled automatically](#auto-disabled-webhooks) on GitLab.com.
- **Be prepared to handle duplicate events.** In [some circumstances](#webhook-fails-or-multiple-webhook-requests-are-triggered), the same event may be sent twice. To mitigate this issue, ensure your endpoint is
  reliably fast and stable.
- **Keep the response headers and body minimal.**
  GitLab does not examine the response headers or body. GitLab stores them so you can examine them later in the logs to help diagnose problems. You should limit the number and size of headers returned. You can also respond to the webhook request with an empty body.
- Only return client error status responses (in the `4xx` range) to
  indicate that the webhook has been misconfigured. Responses in this range can lead to your webhooks being [automatically disabled](#failing-webhooks). For example, if your receiver
  indicate that the webhook has been misconfigured. Responses in this range might cause your webhooks to be [disabled automatically](#auto-disabled-webhooks). For example, if your receiver
  only supports push events, you can return `400` if sent an issue
  payload, as that is an indication that the hook has been set up
  incorrectly. Alternatively, you can ignore unrecognized event
  payloads.
- Never return `500` server error status responses if the event has been handled as this can cause the webhook to be [temporarily disabled](#failing-webhooks).
- Never return `500` server error status responses if the event has been handled. These responses might cause the webhook to be [disabled automatically](#auto-disabled-webhooks).
- Invalid HTTP responses are treated as failed requests.

## Failing webhooks
## Webhook timeout limits

For GitLab.com, the timeout limit for webhooks is [10 seconds](../../../user/gitlab_com/index.md#other-limits).
For GitLab self-managed, an administrator can [change the webhook timeout limit](../../../administration/instance_limits.md#webhook-timeout).

## Auto-disabled webhooks

> - [Introduced](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/60837) for project webhooks in GitLab 13.12 [with a flag](../../../administration/feature_flags.md) named `web_hooks_disable_failed`. Disabled by default.
> - [Generally available](https://gitlab.com/gitlab-org/gitlab/-/issues/329849) for project webhooks in GitLab 15.7. Feature flag `web_hooks_disable_failed` removed.
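
A minimal receiver sketch that follows the practices listed in that doc hunk (Sinatra and the in-process queue are illustrative stand-ins, not part of the GitLab documentation):

```ruby
require 'sinatra'
require 'json'

JOBS = Queue.new # stand-in for a real background job queue

post '/gitlab-webhook' do
  event = JSON.parse(request.body.read)

  # Misconfigured hook: answer in the 4xx range rather than 5xx.
  halt 400 unless event['object_kind'] == 'push'

  JOBS << event # defer heavy processing past the response
  status 200    # acknowledge fast, with a minimal (empty) body
  ''
end
```
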
@@ -139,9 +144,10 @@ FLAG:
On self-managed GitLab, by default this feature is not available. To make it available, an administrator can [enable the feature flag](../../../administration/feature_flags.md) named `auto_disabling_web_hooks`.
On GitLab.com, this feature is available.

Project or group webhooks that fail four consecutive times are automatically disabled.
Project or group webhooks that fail four consecutive times are disabled automatically.

Project or group webhooks that return response codes in the `5xx` range are understood to be failing
Project or group webhooks that return response codes in the `5xx` range or experience a
[timeout](#webhook-timeout-limits) or other HTTP errors are understood to be failing
intermittently and are temporarily disabled. These webhooks are initially disabled
for one minute, which is extended on each subsequent failure up to a maximum of 24 hours.
@@ -432,7 +438,7 @@ To view the table:

1. In your project or group, on the left sidebar, select **Settings > Webhooks**.
1. Scroll down to the webhooks.
1. Each [failing webhook](#failing-webhooks) has a badge listing it as:
1. Each [auto-disabled webhook](#auto-disabled-webhooks) has a badge listing it as:

   - **Failed to connect** if it's misconfigured and must be manually re-enabled.
   - **Fails to connect** if it's temporarily disabled and is automatically
@@ -472,9 +478,7 @@ Missing intermediate certificates are common causes of verification failure.

### Webhook fails or multiple webhook requests are triggered

If you're receiving multiple webhook requests, the webhook might have timed out.

GitLab expects a response in [10 seconds](../../../user/gitlab_com/index.md#other-limits). On self-managed GitLab instances, you can [change the webhook timeout limit](../../../administration/instance_limits.md#webhook-timeout).
If you're receiving multiple webhook requests, the webhook might have [timed out](#webhook-timeout-limits).

### Webhook is not triggered

@@ -482,5 +486,5 @@ GitLab expects a response in [10 seconds](../../../user/gitlab_com/index.md#othe

If a webhook is not triggered, check that:

- The webhook was not [automatically disabled](#failing-webhooks).
- The webhook was not [disabled automatically](#auto-disabled-webhooks).
- The GitLab instance is not in [Silent Mode](../../../administration/silent_mode/index.md).
@@ -31,7 +31,7 @@ module Gitlab

      Gitlab::Redis::SharedState.with do |redis|
        redis.multi do |r|
          r.zadd(key, time, ip)
          r.zadd(key, time, ip) if ip
          r.zremrangebyscore(key, 0, time - config.unique_ips_limit_time_window)
          r.zcard(key)
        end.last
@@ -14,7 +14,7 @@ module Gitlab
        ttl_jitter = 2.hours.to_i

        Gitlab::Instrumentation::RedisClusterValidator.allow_cross_slot_commands do
          Gitlab::Redis::CrossSlot::Pipeline.new(redis).pipelined do |pipeline|
          redis.pipelined do |pipeline|
            keys.each { |key| pipeline.expire(key, ttl_duration + rand(-ttl_jitter..ttl_jitter)) }
          end
        end
@@ -25,7 +25,7 @@ module Gitlab
      end

      def redis
        @redis ||= ::Redis.new(Gitlab::Redis::Cache.params)
        @redis ||= Gitlab::Redis::Cache.redis
      end
    end
  end
@@ -99,7 +99,7 @@ module Gitlab
      def store_in_cache
        with_redis do |redis|
          redis.pipelined do |p|
            p.mapped_hmset(cache_key, { sha: sha, status: status, ref: ref })
            p.mapped_hmset(cache_key, { sha: sha.to_s, status: status.to_s, ref: ref.to_s })
            p.expire(cache_key, STATUS_KEY_TTL)
          end
        end
@@ -139,7 +139,7 @@ module Gitlab
        key = cache_key_for(raw_key)

        with_redis do |redis|
          redis.sismember(key, value)
          redis.sismember(key, value || value.to_s)
        end
      end

@@ -162,7 +162,7 @@ module Gitlab
      def self.write_multiple(mapping, key_prefix: nil, timeout: TIMEOUT)
        with_redis do |redis|
          Gitlab::Instrumentation::RedisClusterValidator.allow_cross_slot_commands do
            Gitlab::Redis::CrossSlot::Pipeline.new(redis).pipelined do |pipeline|
            redis.pipelined do |pipeline|
              mapping.each do |raw_key, value|
                key = cache_key_for("#{key_prefix}#{raw_key}")

@@ -0,0 +1,182 @@
# frozen_string_literal: true

module Gitlab
  module Cleanup
    module OrphanJobArtifactFinalObjects
      class GenerateList
        include Gitlab::Utils::StrongMemoize

        UnsupportedProviderError = Class.new(StandardError)

        DEFAULT_FILENAME = 'orphan_job_artifact_final_objects.csv'
        LAST_PAGE_MARKER_REDIS_KEY = 'orphan-job-artifact-objects-cleanup-last-page-marker'

        PAGINATORS = {
          google: Gitlab::Cleanup::OrphanJobArtifactFinalObjects::Paginators::Google,
          aws: Gitlab::Cleanup::OrphanJobArtifactFinalObjects::Paginators::Aws
        }.freeze

        def initialize(provider: nil, filename: nil, force_restart: false, logger: Gitlab::AppLogger)
          @paginator = determine_paginator!(provider)
          @force_restart = force_restart
          @logger = logger
          @filename = filename || DEFAULT_FILENAME
        end

        def run!
          log_info('Looking for orphan job artifact objects under the `@final` directories')

          initialize_file

          each_final_object do |object|
            log_orphan_object(object) if object.orphan?
          end

          log_info("Done. All orphan objects are listed in #{filename}.")
        ensure
          file.close
        end

        private

        attr_reader :paginator, :file, :filename, :force_restart, :logger

        def initialize_file
          # If the file already exists, and this is not a force restart,
          # new entries will be appended to it. Otherwise, force restart will
          # cause a truncation of the existing file.
          mode = force_restart ? 'w+' : 'a+'
          @file = File.open(filename, mode)
        end

        def determine_paginator!(provided_provider)
          # provider can be nil if user didn't specify it when running the clean up task.
          # In this case, we automatically determine the provider based on the object storage configuration.
          provider = provided_provider
          provider ||= configuration.connection.provider
          klass = PAGINATORS.fetch(provider.downcase.to_sym)
          klass.new(bucket_prefix: bucket_prefix)
        rescue KeyError
          msg = if provided_provider.present?
                  "The provided provider is unsupported. Please select from #{PAGINATORS.keys.join(', ')}."
                else
                  <<-MSG.strip_heredoc
                    The provider found in the object storage configuration is unsupported.
                    Please re-run the task and specify a provider from #{PAGINATORS.keys.join(', ')},
                    whichever is compatible with your provider's object storage API."
                  MSG
                end

          raise UnsupportedProviderError, msg
        end

        def each_final_object
          each_batch do |files|
            files.each_file_this_page do |fog_file|
              object = ::Gitlab::Cleanup::OrphanJobArtifactFinalObjects::JobArtifactObject.new(
                fog_file,
                bucket_prefix: bucket_prefix
              )

              # We still need to check here if the object is in the final location because
              # if the provider does not support filtering objects by glob pattern, we will
              # then receive all job artifact objects here, even the ones not in the @final directory.
              yield object if object.in_final_location?
            end
          end
        end

        def each_batch
          next_marker = resume_from_last_page_marker

          loop do
            batch = fetch_batch(next_marker)
            yield batch

            break if paginator.last_page?(batch)

            next_marker = paginator.get_next_marker(batch)
            save_last_page_marker(next_marker)
          end

          clear_last_page_marker
        end

        def fetch_batch(marker)
          page_name = marker ? "marker: #{marker}" : "first page"
          log_info("Loading page (#{page_name})")

          # We are using files.all instead of files.each because we want to track the
          # current page token so that we can resume from it if ever the task is abruptly interrupted.
          artifacts_directory.files.all(
            paginator.filters(marker)
          )
        end

        def resume_from_last_page_marker
          if force_restart
            log_info("Force restarted. Will not resume from last known page marker.")
            nil
          else
            get_last_page_marker
          end
        end

        def get_last_page_marker
          Gitlab::Redis::SharedState.with do |redis|
            marker = redis.get(LAST_PAGE_MARKER_REDIS_KEY)
            log_info("Resuming from last page marker: #{marker}") if marker
            marker
          end
        end

        def save_last_page_marker(marker)
          Gitlab::Redis::SharedState.with do |redis|
            # Set TTL to 1 day (86400 seconds)
            redis.set(LAST_PAGE_MARKER_REDIS_KEY, marker, ex: 86400)
          end
        end

        def clear_last_page_marker
          Gitlab::Redis::SharedState.with do |redis|
            redis.del(LAST_PAGE_MARKER_REDIS_KEY)
          end
        end

        def connection
          ::Fog::Storage.new(configuration['connection'].symbolize_keys)
        end

        def configuration
          Gitlab.config.artifacts.object_store
        end

        def bucket
          configuration.remote_directory
        end

        def bucket_prefix
          configuration.bucket_prefix
        end

        def artifacts_directory
          connection.directories.new(key: bucket)
        end
        strong_memoize_attr :artifacts_directory

        def log_orphan_object(object)
          add_orphan_object_to_list(object)
          log_info("Found orphan object #{object.path} (#{object.size} bytes)")
        end

        def add_orphan_object_to_list(object)
          file.puts([object.path, object.size].join(','))
        end

        def log_info(msg)
          logger.info(msg)
        end
      end
    end
  end
end
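
A hypothetical invocation of the class added above, from a Rails console (argument values are illustrative; only the keyword names come from the constructor shown):

```ruby
generator = Gitlab::Cleanup::OrphanJobArtifactFinalObjects::GenerateList.new(
  provider: 'google',      # or nil to auto-detect from the object storage config
  filename: 'orphans.csv', # defaults to DEFAULT_FILENAME when omitted
  force_restart: false     # true truncates the file and ignores the saved page marker
)
generator.run! # appends a "path,size" row for each orphan object found
```
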
@@ -1,161 +0,0 @@
# frozen_string_literal: true

module Gitlab
  module Cleanup
    class OrphanJobArtifactFinalObjectsCleaner
      include Gitlab::Utils::StrongMemoize

      UnsupportedProviderError = Class.new(StandardError)

      PAGINATORS = {
        google: Gitlab::Cleanup::OrphanJobArtifactFinalObjects::Paginators::Google,
        aws: Gitlab::Cleanup::OrphanJobArtifactFinalObjects::Paginators::Aws
      }.freeze

      LAST_PAGE_MARKER_REDIS_KEY = 'orphan-job-artifact-objects-cleanup-last-page-marker'

      def initialize(provider: nil, dry_run: true, force_restart: false, logger: Gitlab::AppLogger)
        @paginator = determine_paginator!(provider)
        @dry_run = dry_run
        @force_restart = force_restart
        @logger = logger
      end

      def run!
        log_info('Looking for orphan job artifact objects under the `@final` directories')

        each_final_object do |object|
          next unless object.orphan?

          object.delete unless dry_run
          log_info("Delete #{object.path} (#{object.size} bytes)")
        end

        log_info("Done.")
      end

      private

      attr_reader :paginator, :dry_run, :force_restart, :logger

      def determine_paginator!(provided_provider)
        # provider can be nil if user didn't specify it when running the clean up task.
        # In this case, we automatically determine the provider based on the object storage configuration.
        provider = provided_provider
        provider ||= configuration.connection.provider
        klass = PAGINATORS.fetch(provider.downcase.to_sym)
        klass.new(bucket_prefix: bucket_prefix)
      rescue KeyError
        msg = if provided_provider.present?
                "The provided provider is unsupported. Please select from #{PAGINATORS.keys.join(', ')}."
              else
                <<-MSG.strip_heredoc
                  The provider found in the object storage configuration is unsupported.
                  Please re-run the task and specify a provider from #{PAGINATORS.keys.join(', ')},
                  whichever is compatible with your provider's object storage API."
                MSG
              end

        raise UnsupportedProviderError, msg
      end

      def each_final_object
        each_batch do |files|
          files.each_file_this_page do |fog_file|
            object = ::Gitlab::Cleanup::OrphanJobArtifactFinalObjects::JobArtifactObject.new(
              fog_file,
              bucket_prefix: bucket_prefix
            )

            # We still need to check here if the object is in the final location because
            # if the provider does not support filtering objects by glob pattern, we will
            # then receive all job artifact objects here, even the ones not in the @final directory.
            yield object if object.in_final_location?
          end
        end
      end

      def each_batch
        next_marker = resume_from_last_page_marker

        loop do
          batch = fetch_batch(next_marker)
          yield batch

          break if paginator.last_page?(batch)

          next_marker = paginator.get_next_marker(batch)
          save_last_page_marker(next_marker)
        end

        clear_last_page_marker
      end

      def fetch_batch(marker)
        page_name = marker ? "marker: #{marker}" : "first page"
        log_info("Loading page (#{page_name})")

        # We are using files.all instead of files.each because we want to track the
        # current page token so that we can resume from it if ever the task is abruptly interrupted.
        artifacts_directory.files.all(
          paginator.filters(marker)
        )
      end

      def resume_from_last_page_marker
        if force_restart
          log_info("Force restarted. Will not resume from last known page marker.")
          nil
        else
          get_last_page_marker
        end
      end

      def get_last_page_marker
        Gitlab::Redis::SharedState.with do |redis|
          marker = redis.get(LAST_PAGE_MARKER_REDIS_KEY)
          log_info("Resuming from last page marker: #{marker}") if marker
          marker
        end
      end

      def save_last_page_marker(marker)
        Gitlab::Redis::SharedState.with do |redis|
          # Set TTL to 1 day (86400 seconds)
          redis.set(LAST_PAGE_MARKER_REDIS_KEY, marker, ex: 86400)
        end
      end

      def clear_last_page_marker
        Gitlab::Redis::SharedState.with do |redis|
          redis.del(LAST_PAGE_MARKER_REDIS_KEY)
        end
      end

      def connection
        ::Fog::Storage.new(configuration['connection'].symbolize_keys)
      end

      def configuration
        Gitlab.config.artifacts.object_store
      end

      def bucket
        configuration.remote_directory
      end

      def bucket_prefix
        configuration.bucket_prefix
      end

      def artifacts_directory
        connection.directories.new(key: bucket)
      end
      strong_memoize_attr :artifacts_directory

      def log_info(msg)
        logger.info("#{'[DRY RUN] ' if dry_run}#{msg}")
      end
    end
  end
end
@@ -197,7 +197,9 @@ module Gitlab
        record_hit_ratio(results)

        results.map! do |result|
          Gitlab::Json.parse(gzip_decompress(result), symbolize_names: true) unless result.nil?
          unless result.nil?
            Gitlab::Json.parse(gzip_decompress(result.force_encoding(Encoding::UTF_8)), symbolize_names: true)
          end
        end

        file_paths.zip(results).to_h
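
The `force_encoding` guard above (and the identical one in the diff-lines cache further down) appears to compensate for replies coming back binary-encoded after the client upgrade; that interpretation is an assumption, illustrated here:

```ruby
# Illustrative only: re-tag binary bytes as UTF-8 before handing them to
# gzip decompression and JSON parsing.
raw = "compressed-bytes".b                   # reply tagged ASCII-8BIT (binary)
raw.encoding                                 # => #<Encoding:ASCII-8BIT>
raw.force_encoding(Encoding::UTF_8).encoding # => #<Encoding:UTF-8>
```
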
@@ -16,7 +16,7 @@ module Gitlab
      def write_multiple(mapping)
        with_redis do |redis|
          Gitlab::Instrumentation::RedisClusterValidator.allow_cross_slot_commands do
            Gitlab::Redis::CrossSlot::Pipeline.new(redis).pipelined do |pipelined|
            redis.pipelined do |pipelined|
              mapping.each do |raw_key, value|
                key = cache_key_for(raw_key)

@@ -42,7 +42,7 @@ module Gitlab
        with_redis do |redis|
          Gitlab::Instrumentation::RedisClusterValidator.allow_cross_slot_commands do
            if Gitlab::Redis::ClusterUtil.cluster?(redis)
              Gitlab::Redis::CrossSlot::Pipeline.new(redis).pipelined do |pipeline|
              redis.pipelined do |pipeline|
                keys.each { |key| pipeline.get(key) }
              end
            else
@@ -54,7 +54,7 @@ module Gitlab
        content.map! do |lines|
          next unless lines

          Gitlab::Json.parse(gzip_decompress(lines)).map! do |line|
          Gitlab::Json.parse(gzip_decompress(lines.force_encoding(Encoding::UTF_8))).map! do |line|
            Gitlab::Diff::Line.safe_init_from_hash(line)
          end
        end
@@ -17,7 +17,7 @@ module Gitlab

      Gitlab::Instrumentation::RedisClusterValidator.allow_cross_slot_commands do
        with_redis do |redis|
          Gitlab::Redis::CrossSlot::Pipeline.new(redis).pipelined do |pipeline|
          redis.pipelined do |pipeline|
            keys.each_with_index do |key, i|
              pipeline.set(redis_shared_state_key(key), etags[i], ex: EXPIRY_TIME, nx: only_if_missing)
            end
@@ -60,6 +60,7 @@ module Gitlab

    def self.cancel(key, uuid)
      return unless key.present?
      return unless uuid.present?

      Gitlab::Redis::SharedState.with do |redis|
        redis.eval(LUA_CANCEL_SCRIPT, keys: [ensure_prefixed_key(key)], argv: [uuid])

@@ -110,7 +111,7 @@ module Gitlab
    # false if the lease is taken by a different UUID or inexistent.
    def renew
      Gitlab::Redis::SharedState.with do |redis|
        result = redis.eval(LUA_RENEW_SCRIPT, keys: [@redis_shared_state_key], argv: [@uuid, @timeout])
        result = redis.eval(LUA_RENEW_SCRIPT, keys: [@redis_shared_state_key], argv: [@uuid, @timeout.to_i])
        result == @uuid
      end
    end
@@ -1,7 +1,7 @@
# frozen_string_literal: true

require 'rails'
require 'redis'
require 'redis-clustering'

module Gitlab
  module Instrumentation
@@ -230,7 +230,7 @@ module Gitlab
      end

      def key_slot(key)
        ::Redis::Cluster::KeySlotConverter.convert(extract_hash_tag(key))
        ::RedisClient::Cluster::KeySlotConverter.convert(extract_hash_tag(key))
      end

      # This is almost identical to Redis::Cluster::Command#extract_hash_tag,
@@ -1,55 +0,0 @@
# frozen_string_literal: true

module Gitlab
  module Instrumentation
    module RedisInterceptor
      include RedisHelper

      def call(command)
        instrument_call([command], instrumentation_class) do
          super
        end
      end

      def call_pipeline(pipeline)
        instrument_call(pipeline.commands, instrumentation_class, true) do
          super
        end
      end

      def write(command)
        measure_write_size(command, instrumentation_class) if ::RequestStore.active?
        super
      end

      def read
        result = super
        measure_read_size(result, instrumentation_class) if ::RequestStore.active?
        result
      end

      def ensure_connected
        super do
          instrument_reconnection_errors do
            yield
          end
        end
      end

      def instrument_reconnection_errors
        yield
      rescue ::Redis::BaseConnectionError => ex
        instrumentation_class.instance_count_connection_exception(ex)

        raise ex
      end

      # That's required so it knows which GitLab Redis instance
      # it's interacting with in order to categorize accordingly.
      #
      def instrumentation_class
        @options[:instrumentation_class] # rubocop:disable Gitlab/ModuleWithInstanceVariables
      end
    end
  end
end
@@ -100,7 +100,7 @@ module Gitlab
      def refresh_keys_expiration
        with_redis do |redis|
          Gitlab::Instrumentation::RedisClusterValidator.allow_cross_slot_commands do
            Gitlab::Redis::CrossSlot::Pipeline.new(redis).pipelined do |pipeline|
            redis.pipelined do |pipeline|
              pipeline.expire(issue_ids_key, REDIS_EXPIRY_TIME)
              pipeline.expire(current_index_key, REDIS_EXPIRY_TIME)
              pipeline.expire(current_project_key, REDIS_EXPIRY_TIME)
@@ -11,7 +11,7 @@ module Gitlab

        data = Gitlab::Redis::Cache.with do |r|
          Gitlab::Instrumentation::RedisClusterValidator.allow_cross_slot_commands do
            Gitlab::Redis::CrossSlot::Pipeline.new(r).pipelined do |pipeline|
            r.pipelined do |pipeline|
              subjects.each do |subject|
                new(subject).read(pipeline)
              end
@@ -1,19 +0,0 @@
# frozen_string_literal: true

module Gitlab
  module Patch
    module CommandLoader
      extend ActiveSupport::Concern

      class_methods do
        # Shuffle the node list to spread out initial connection creation amongst all nodes
        #
        # The input is a Redis::Cluster::Node instance which is an Enumerable.
        # `super` receives an Array of Redis::Client instead of a Redis::Cluster::Node
        def load(nodes)
          super(nodes.to_a.shuffle)
        end
      end
    end
  end
end
@@ -1,52 +0,0 @@
# frozen_string_literal: true

# Patch to address https://gitlab.com/gitlab-com/gl-infra/scalability/-/issues/2212#note_1287996694
# It uses hostname instead of IP address if the former is present in `CLUSTER NODES` output.
if Gem::Version.new(Redis::VERSION) > Gem::Version.new('4.8.1')
  raise 'New version of redis detected, please remove or update this patch'
end

module Gitlab
  module Patch
    module NodeLoader
      extend ActiveSupport::Concern

      class_methods do
        # Shuffle the node list to spread out initial connection creation amongst all nodes
        #
        # The input is a Redis::Cluster::Node instance which is an Enumerable.
        # `super` receives an Array of Redis::Client instead of a Redis::Cluster::Node
        def load_flags(nodes)
          super(nodes.to_a.shuffle)
        end
      end

      def self.prepended(base)
        base.class_eval do
          # monkey-patches https://github.com/redis/redis-rb/blob/v4.8.0/lib/redis/cluster/node_loader.rb#L23
          def self.fetch_node_info(node)
            node.call(%i[cluster nodes]).split("\n").map(&:split).to_h do |arr|
              [
                extract_host_identifier(arr[1]),
                (arr[2].split(',') & %w[master slave]).first # rubocop:disable Naming/InclusiveLanguage
              ]
            end
          end

          # Since `CLUSTER SLOT` uses the preferred endpoint determined by
          # the `cluster-preferred-endpoint-type` config value, we will prefer hostname over IP address.
          # See https://redis.io/commands/cluster-nodes/ for details on the output format.
          #
          # @param [String] Address info matching the format: <ip:port@cport[,hostname[,auxiliary_field=value]*]>
          def self.extract_host_identifier(node_address)
            ip_chunk, hostname, _auxiliaries = node_address.split(',')
            return ip_chunk.split('@').first if hostname.blank?

            port = ip_chunk.split('@').first.split(':')[1]
            "#{hostname}:#{port}"
          end
        end
      end
    end
  end
end
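
A worked example of the removed `extract_host_identifier` parsing, following the `<ip:port@cport[,hostname[,auxiliary_field=value]*]>` format documented in the patch (inputs are illustrative; `blank?` is replaced with plain Ruby so the sketch runs outside Rails):

```ruby
def extract_host_identifier(node_address)
  ip_chunk, hostname, _auxiliaries = node_address.split(',')
  return ip_chunk.split('@').first if hostname.nil? || hostname.empty?

  port = ip_chunk.split('@').first.split(':')[1]
  "#{hostname}:#{port}"
end

extract_host_identifier('10.0.0.1:6379@16379')                 # => "10.0.0.1:6379"
extract_host_identifier('10.0.0.1:6379@16379,redis-1.example') # => "redis-1.example:6379"
```
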
@@ -20,7 +20,7 @@ module Gitlab
          delete_count = 0
          redis.with do |conn|
            entries.each_slice(pipeline_batch_size) do |subset|
              delete_count += Gitlab::Redis::CrossSlot::Pipeline.new(conn).pipelined do |pipeline|
              delete_count += conn.pipelined do |pipeline|
                subset.each { |entry| pipeline.del(entry) }
              end.sum
            end
@@ -58,7 +58,7 @@ module Gitlab

  def pipeline_mget(conn, keys)
    keys.each_slice(pipeline_batch_size).flat_map do |subset|
-     Gitlab::Redis::CrossSlot::Pipeline.new(conn).pipelined do |p|
+     conn.pipelined do |p|
        subset.each { |key| p.get(key) }
      end
    end

@@ -1,21 +0,0 @@
-# frozen_string_literal: true
-
-# Patch to expose the `find_node_key` method for cross-slot pipelining.
-# In redis v5.0.x, cross-slot pipelining is implemented via redis-cluster-client,
-# at which point this patch should be removed since it is no longer needed.
-# Gitlab::Redis::CrossSlot and its usage should be removed as well.
-if Gem::Version.new(Redis::VERSION) != Gem::Version.new('4.8.0')
-  raise 'New version of redis detected, please remove or update this patch'
-end
-
-module Gitlab
-  module Patch
-    module RedisCluster
-      # _find_node_key exposes a private function of the same name in Redis::Cluster.
-      # See https://github.com/redis/redis-rb/blob/v4.8.0/lib/redis/cluster.rb#L282
-      def _find_node_key(command)
-        find_node_key(command)
-      end
-    end
-  end
-end

@@ -0,0 +1,16 @@
+# frozen_string_literal: true
+
+module Gitlab
+  module Patch
+    module RedisStoreFactory
+      def create
+        # rubocop:disable Gitlab/ModuleWithInstanceVariables -- patched code references @options in redis-store
+        opt = @options
+        # rubocop:enable Gitlab/ModuleWithInstanceVariables
+        return Gitlab::Redis::ClusterStore.new(opt) if opt[:nodes]
+
+        super
+      end
+    end
+  end
+end

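# Sketch (hypothetical call, not in the commit): redis-store's factory builds a
# store from @options, so with the patch above prepended, a :nodes key diverts
# creation to the cluster store while everything else falls through to `super`:
#
#   ::Redis::Store::Factory.create(nodes: [{ host: 'node1' }, { host: 'node2' }])
#   # => Gitlab::Redis::ClusterStore
#   ::Redis::Store::Factory.create(host: 'localhost', port: 6379)
#   # => regular Redis::Store
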
@@ -1,19 +0,0 @@
-# frozen_string_literal: true
-
-module Gitlab
-  module Patch
-    module SlotLoader
-      extend ActiveSupport::Concern
-
-      class_methods do
-        # Shuffle the node list to spread out initial connection creation amongst all nodes
-        #
-        # The input is a Redis::Cluster::Node instance which is an Enumerable.
-        # `super` receives an Array of Redis::Client instead of a Redis::Cluster::Node
-        def load(nodes)
-          super(nodes.to_a.shuffle)
-        end
-      end
-    end
-  end
-end

@@ -0,0 +1,81 @@
+# frozen_string_literal: true
+
+require 'redis-clustering'
+require 'redis/store/ttl'
+require 'redis/store/interface'
+require 'redis/store/namespace'
+require 'redis/store/serialization'
+
+module Gitlab
+  module Redis
+    class ClusterStore < ::Redis::Cluster
+      include ::Redis::Store::Interface
+
+      def initialize(options = {})
+        orig_options = options.dup
+
+        @serializer = orig_options.key?(:serializer) ? orig_options.delete(:serializer) : Marshal
+
+        unless orig_options[:marshalling].nil?
+          # `marshalling` is only used here; it might not be supported in `super`
+          @serializer = orig_options.delete(:marshalling) ? Marshal : nil
+        end
+
+        _remove_unsupported_options(options)
+        super(options)
+
+        _extend_marshalling
+        _extend_namespace orig_options
+      end
+
+      # copies ::Redis::Store::Ttl implementation in a redis-v5 compatible manner
+      def set(key, value, options = nil)
+        ttl = get_ttl(options)
+        if ttl
+          setex(key, ttl.to_i, value, raw: true)
+        else
+          super(key, value)
+        end
+      end
+
+      # copies ::Redis::Store::Ttl implementation in a redis-v5 compatible manner
+      def setnx(key, value, options = nil)
+        ttl = get_ttl(options)
+        if ttl
+          multi do |m|
+            m.setnx(key, value)
+            m.expire(key, ttl)
+          end
+        else
+          super(key, value)
+        end
+      end
+
+      private
+
+      def get_ttl(options)
+        # https://github.com/redis-store/redis-store/blob/v1.10.0/lib/redis/store/ttl.rb#L37
+        options[:expire_after] || options[:expires_in] || options[:expire_in] if options
+      end
+
+      def _remove_unsupported_options(options)
+        # Unsupported keywords should be removed to avoid errors
+        # https://github.com/redis-rb/redis-client/blob/v0.13.0/lib/redis_client/config.rb#L21
+        options.delete(:raw)
+        options.delete(:serializer)
+        options.delete(:marshalling)
+        options.delete(:namespace)
+        options.delete(:scheme)
+      end
+
+      def _extend_marshalling
+        extend ::Redis::Store::Serialization unless @serializer.nil?
+      end
+
+      def _extend_namespace(options)
+        @namespace = options[:namespace]
+        extend ::Redis::Store::Namespace
+      end
+    end
+  end
+end

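# Sketch of the TTL shim above (argument values invented):
#
#   store = Gitlab::Redis::ClusterStore.new(nodes: ['redis://node1:6379'])
#   store.set('session:abc', payload, expires_in: 120)    # issues SETEX session:abc 120 <payload>
#   store.setnx('session:abc', payload, expire_after: 60) # SETNX + EXPIRE inside a MULTI
#   store.set('session:abc', payload)                     # no TTL option, plain SET via super
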
@@ -13,14 +13,14 @@
  if obj.is_a?(MultiStore)
    cluster?(obj.primary_store) || cluster?(obj.secondary_store)
  else
-   obj.respond_to?(:_client) && obj._client.is_a?(::Redis::Cluster)
+   obj.is_a?(::Redis::Cluster)
  end
end

def batch_unlink(keys, redis)
  expired_count = 0
  keys.each_slice(1000) do |subset|
-   expired_count += Gitlab::Redis::CrossSlot::Pipeline.new(redis).pipelined do |pipeline|
+   expired_count += redis.pipelined do |pipeline|
      subset.each { |key| pipeline.unlink(key) }
    end.sum
  end

@@ -30,7 +30,7 @@ module Gitlab
  # Redis cluster alternative to mget
  def batch_get(keys, redis)
    keys.each_slice(1000).flat_map do |subset|
-     Gitlab::Redis::CrossSlot::Pipeline.new(redis).pipelined do |pipeline|
+     redis.pipelined do |pipeline|
        subset.map { |key| pipeline.get(key) }
      end
    end

@@ -0,0 +1,53 @@
+# frozen_string_literal: true
+
+module Gitlab
+  module Redis
+    module CommandBuilder
+      extend self
+
+      # Ref: https://github.com/redis-rb/redis-client/blob/v0.19.1/lib/redis_client/command_builder.rb
+      # We modify the command builder to convert nil to strings, as this behaviour was present in
+      # https://github.com/redis/redis-rb/blob/v4.8.0/lib/redis/connection/command_helper.rb#L20
+      #
+      # Note that we only adopt the Ruby 3.x-compatible logic in .generate;
+      # Symbol#name is defined in Ruby 3.
+      def generate(args, kwargs = nil)
+        command = args.flat_map do |element|
+          case element
+          when Hash
+            element.flatten
+          else
+            element
+          end
+        end
+
+        kwargs&.each do |key, value|
+          if value
+            if value == true
+              command << key.name
+            else
+              command << key.name << value
+            end
+          end
+        end
+
+        command.map! do |element|
+          case element
+          when String
+            element
+          when Symbol
+            element.name
+          when Integer, Float, NilClass
+            element.to_s
+          else
+            raise TypeError, "Unsupported command argument type: #{element.class}"
+          end
+        end
+
+        raise ArgumentError, "can't issue an empty redis command" if command.empty?
+
+        command
+      end
+    end
+  end
+end

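# Worked example of the builder above (inputs invented):
#
#   Gitlab::Redis::CommandBuilder.generate([:set, 'key', nil])
#   # => ["set", "key", ""]              nil is stringified, matching redis-rb v4.8
#   Gitlab::Redis::CommandBuilder.generate([:set, 'key', 'v'], ex: 60)
#   # => ["set", "key", "v", "ex", "60"] kwargs append the name, then the value
#   Gitlab::Redis::CommandBuilder.generate([:set, 'key', 'v'], nx: true)
#   # => ["set", "key", "v", "nx"]       a true value appends the bare flag
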
@@ -1,141 +0,0 @@
-# frozen_string_literal: true
-
-module Gitlab
-  module Redis
-    module CrossSlot
-      class Router
-        attr_reader :node_mapping, :futures, :node_sequence, :cmd_queue
-
-        delegate :respond_to_missing?, to: :@redis
-
-        # This map contains redis-rb methods which do not map directly to a
-        # standard Redis command. It is used to transform unsupported commands
-        # to standard commands in order to find the node key.
-        #
-        # Redis::Cluster::Command only contains details of commands which the Redis Server
-        # returns. Hence, commands like mapped_hmget and hscan_each internally will call the
-        # base command, hmget and hscan respectively.
-        #
-        # See https://github.com/redis/redis-rb/blob/v4.8.0/lib/redis/cluster/command.rb
-        UNSUPPORTED_CMD_MAPPING = {
-          # Internally, redis-rb calls the supported Redis command and transforms the output.
-          # See https://github.com/redis/redis-rb/blob/v4.8.0/lib/redis/commands/hashes.rb#L104
-          mapped_hmget: :hmget
-        }.freeze
-
-        # Initializes the CrossSlot::Router
-        # @param {::Redis}
-        def initialize(redis)
-          @redis = redis
-          @node_mapping = {}
-          @futures = {}
-          @node_sequence = []
-          @cmd_queue = []
-        end
-
-        # For now we intercept every redis.call and return a Gitlab-Future object.
-        # This method groups every command to a node for fan-out. Commands are grouped using the first key.
-        #
-        # rubocop:disable Style/MissingRespondToMissing
-        def method_missing(cmd, *args, **kwargs, &blk)
-          # Note that we can re-map the command without affecting execution as it is
-          # solely for finding the node key. The original cmd will be executed.
-          node = @redis._client._find_node_key([UNSUPPORTED_CMD_MAPPING.fetch(cmd, cmd)] + args)
-
-          @node_mapping[node] ||= []
-          @futures[node] ||= []
-
-          @node_sequence << node
-          @node_mapping[node] << [cmd, args, kwargs || {}, blk]
-          f = Future.new
-          @futures[node] << f
-          @cmd_queue << [f, cmd, args, kwargs || {}, blk]
-          f
-        end
-        # rubocop:enable Style/MissingRespondToMissing
-      end
-
-      # Wraps over redis-rb's Future in
-      # https://github.com/redis/redis-rb/blob/v4.8.0/lib/redis/pipeline.rb#L244
-      class Future
-        def set(future, is_val = false)
-          @redis_future = future
-          @is_val = is_val
-        end
-
-        def value
-          return @redis_future if @is_val
-
-          @redis_future.value
-        end
-      end
-
-      # Pipeline allows cross-slot pipelined commands to be called. The fan-out logic is implemented in
-      # https://github.com/redis-rb/redis-cluster-client/blob/master/lib/redis_client/cluster/pipeline.rb
-      # which is available in redis-rb v5.0.
-      #
-      # This file can be deprecated after redis-rb v4.8.0 is upgraded to v5.0.
-      class Pipeline
-        # Initializes the CrossSlot::Pipeline
-        # @param {::Redis}
-        def initialize(redis)
-          @redis = redis
-        end
-
-        # pipelined is used in place of ::Redis `.pipelined` when running in a cluster context
-        # where cross-slot operations may happen.
-        def pipelined(&block)
-          # Directly call .pipelined and defer the pipeline execution to MultiStore.
-          # MultiStore could wrap over 0, 1, or 2 Redis Cluster clients; handling it here
-          # will not work for 2 clients since the key-slot topology can differ.
-          if use_cross_slot_pipelining?
-            router = Router.new(@redis)
-            yield router
-            execute_commands(router)
-          else
-            # use redis-rb's pipelined method
-            @redis.pipelined(&block)
-          end
-        end
-
-        private
-
-        def use_cross_slot_pipelining?
-          !@redis.instance_of?(::Gitlab::Redis::MultiStore) && @redis._client.instance_of?(::Redis::Cluster)
-        end
-
-        def execute_commands(router)
-          router.node_mapping.each do |node_key, commands|
-            # TODO: possibly use Threads to speed this up, but for now `n` is 3-5, which is small.
-            @redis.pipelined do |p|
-              commands.each_with_index do |command, idx|
-                future = router.futures[node_key][idx]
-                cmd, args, kwargs, blk = command
-                future.set(p.public_send(cmd, *args, **kwargs, &blk)) # rubocop:disable GitlabSecurity/PublicSend
-              end
-            end
-          end
-
-          router.node_sequence.map do |node_key|
-            router.futures[node_key].shift.value
-          end
-        rescue ::Redis::CommandError => err
-          if err.message.start_with?('MOVED', 'ASK')
-            Gitlab::ErrorTracking.log_exception(err)
-            return execute_commands_sequentially(router)
-          end
-
-          raise
-        end
-
-        def execute_commands_sequentially(router)
-          router.cmd_queue.map do |command|
-            future, cmd, args, kwargs, blk = command
-            future.set(@redis.public_send(cmd, *args, **kwargs, &blk), true) # rubocop:disable GitlabSecurity/PublicSend
-            future.value
-          end
-        end
-      end
-    end
-  end
-end

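# Note (not part of the commit): the deleted Router grouped queued commands by
# the node that owns each command's first key, ran one real pipeline per node,
# then returned results in the original @node_sequence order. Roughly, for a
# two-node cluster:
#
#   pipeline.get('{user:1}name')  # slot of {user:1} -> queued for node A
#   pipeline.get('{user:2}name')  # slot of {user:2} -> queued for node B
#   # execute_commands then pipelined each node's queue and stitched the
#   # replies back together via the per-node Future objects.
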
@@ -432,16 +432,6 @@ module Gitlab

  # rubocop:disable GitlabSecurity/PublicSend
  def send_command(redis_instance, command_name, *args, **kwargs, &block)
-   # Run wrapped pipeline for each instance individually so that the fan-out is distinct.
-   # If both primary and secondary are Redis Clusters, the slot-node distribution could
-   # be different.
-   #
-   # We ignore args and kwargs since `pipelined` does not accept arguments.
-   # See https://github.com/redis/redis-rb/blob/v4.8.0/lib/redis.rb#L164
-   if command_name.to_s == 'pipelined' && redis_instance._client.instance_of?(::Redis::Cluster)
-     return Gitlab::Redis::CrossSlot::Pipeline.new(redis_instance).pipelined(&block)
-   end
-
    if block
      # Make sure that block is wrapped and executed only on the redis instance that is executing the block
      redis_instance.send(command_name, *args, **kwargs) do |*params|

@@ -462,7 +452,7 @@ module Gitlab
  end

  def redis_store?(pool)
-   pool.with { |c| c.instance_of?(Gitlab::Redis::MultiStore) || c.is_a?(::Redis) }
+   pool.with { |c| c.instance_of?(Gitlab::Redis::MultiStore) || c.is_a?(::Redis) || c.is_a?(::Redis::Cluster) }
  end

  def validate_stores!

@@ -20,7 +20,7 @@ module Gitlab
  CommandExecutionError = Class.new(StandardError)

  class << self
-   delegate :params, :url, :store, :encrypted_secrets, :redis_client_params, to: :new
+   delegate :params, :url, :store, :encrypted_secrets, to: :new

    def with
      pool.with { |redis| yield redis }

@@ -90,7 +90,17 @@ module Gitlab
    end

    def redis
-     ::Redis.new(params)
+     init_redis(params)
    end

+   private
+
+   def init_redis(config)
+     if config[:nodes].present?
+       ::Redis::Cluster.new(config.merge({ concurrency: { model: :none } }))
+     else
+       ::Redis.new(config)
+     end
+   end
  end

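# Sketch of the two init_redis branches above (hosts invented):
#
#   init_redis(url: 'redis://localhost:6379')
#   # => ::Redis.new(...)           plain single-instance client
#   init_redis(nodes: [{ host: 'node1', port: 6379 }, { host: 'node2', port: 6379 }])
#   # => ::Redis::Cluster.new(...)  cluster client, with connection locking
#   #    disabled via concurrency: { model: :none }
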
@@ -99,13 +109,8 @@ module Gitlab
  end

  def params
    redis_store_options
  end

  # redis_client_params modifies redis_store_options to be compatible with redis-client
  # TODO: when redis-rb is updated to v5, there is no need to support 2 types of config format
  def redis_client_params
    options = redis_store_options
    options[:command_builder] = CommandBuilder

    # avoid passing classes into options as Sidekiq scrubs the options with Marshal.dump + Marshal.load
    # ref https://github.com/sidekiq/sidekiq/blob/v7.1.6/lib/sidekiq/redis_connection.rb#L37

@@ -114,14 +119,14 @@ module Gitlab
  # we use strings to look up Gitlab::Instrumentation::Redis.storage_hash as a bypass
  options[:custom] = { instrumentation_class: self.class.store_name }

- # TODO: add support for cluster when upgrading to redis-rb v5.y.z;
- # support to be added in https://gitlab.com/gitlab-org/gitlab/-/merge_requests/134862
+ # we do not need cluster support,
+ # as Sidekiq workload should not and does not run in a Redis Cluster
  if options[:sentinels]
    # name is required in RedisClient::SentinelConfig
    # https://github.com/redis-rb/redis-client/blob/1ab081c1d0e47df5d55e011c9390c70b2eef6731/lib/redis_client/sentinel_config.rb#L17
    options[:name] = options[:host]
    options.except(:scheme, :instrumentation_class, :host, :port)
  elsif options[:cluster]
    options[:nodes] = options[:cluster].map { |c| c.except(:scheme) }
    options.except(:scheme, :instrumentation_class, :cluster)
  else
    # remove disallowed keys as seen in
    # https://github.com/redis-rb/redis-client/blob/1ab081c1d0e47df5d55e011c9390c70b2eef6731/lib/redis_client/config.rb#L21

@@ -134,7 +139,7 @@ module Gitlab
  end

  def db
-   redis_store_options[:db]
+   redis_store_options[:db] || 0
  end

  def sentinels

@@ -156,7 +161,7 @@ module Gitlab
  end

  def store(extras = {})
-   ::Redis::Store::Factory.create(redis_store_options.merge(extras))
+   ::Redis::Store::Factory.create(params.merge(extras))
  end

  def encrypted_secrets

@@ -182,7 +187,11 @@ module Gitlab
  final_config = parse_extra_config(decrypted_config)

  result = if final_config[:cluster].present?
             final_config[:db] = 0 # Redis Cluster only supports db 0
             final_config[:cluster] = final_config[:cluster].map do |node|
               next node unless node.is_a?(String)

               ::Redis::Store::Factory.extract_host_options_from_uri(node)
             end
             final_config
           else
             parse_redis_url(final_config)

@@ -86,6 +86,8 @@ module Gitlab

  full_key = cache_key(key)

+ hash = standardize_hash(hash)
+
  with do |redis|
    results = redis.pipelined do |pipeline|
      # Set each hash key to the provided value

@@ -62,7 +62,7 @@ module Gitlab

  with do |redis|
    redis.multi do |multi|
-     multi.sismember(full_key, value)
+     multi.sismember(full_key, value.to_s)
      multi.exists?(full_key) # rubocop:disable CodeReuse/ActiveRecord
    end
  end

@@ -75,7 +75,7 @@ module Gitlab
  argv = []
  job_wal_locations.each do |connection_name, location|
    diff = pg_wal_lsn_diff(connection_name)
-   argv += [connection_name, diff || '', location]
+   argv += [connection_name, diff ? diff.to_f : '', location]
  end

  with_redis { |r| r.eval(UPDATE_WAL_COOKIE_SCRIPT, keys: [cookie_key], argv: argv) }

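# Note on the diff.to_f change above (a plausible motivation, not stated in
# the commit): pg_wal_lsn_diff returns a BigDecimal, whose default string form
# is scientific notation, so stringifying it for the EVAL argv without the
# cast would yield "0.31e3" rather than "310.0":
#
#   require 'bigdecimal'
#   BigDecimal('310').to_s  # => "0.31e3"
#   BigDecimal('310').to_f  # => 310.0
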
@@ -63,28 +63,28 @@ namespace :gitlab do
    end
  end

- desc 'GitLab | Cleanup | Clean orphan job artifact files stored in the @final directory in object storage'
- task :orphan_job_artifact_final_objects, [:provider] => :gitlab_environment do |_, args|
+ desc 'GitLab | Cleanup | Generate a CSV file of orphan job artifact objects stored in the @final directory'
+ task :list_orphan_job_artifact_final_objects, [:provider] => :gitlab_environment do |_, args|
    warn_user_is_not_gitlab

    force_restart = ENV['FORCE_RESTART'].present?
+   filename = ENV['FILENAME']

    begin
-     cleaner = Gitlab::Cleanup::OrphanJobArtifactFinalObjectsCleaner.new(
+     generator = Gitlab::Cleanup::OrphanJobArtifactFinalObjects::GenerateList.new(
        provider: args.provider,
        force_restart: force_restart,
-       dry_run: dry_run?,
+       filename: filename,
        logger: logger
      )

-     cleaner.run!
+     generator.run!

-     if dry_run?
-       logger.info "To clean up all orphan files that were found, run this command with DRY_RUN=false".color(:yellow)
-     end
-   rescue Gitlab::Cleanup::OrphanJobArtifactFinalObjectsCleaner::UnsupportedProviderError => e
+   # TODO: Add back the log message here to instruct which rake task to run to process the
+   # generated CSV file and actually delete the orphan objects.
+   rescue Gitlab::Cleanup::OrphanJobArtifactFinalObjects::GenerateList::UnsupportedProviderError => e
      abort %(#{e.message}

-     Usage: rake "gitlab:cleanup:orphan_job_artifact_final_objects[provider]")
+     Usage: rake "gitlab:cleanup:list_orphan_job_artifact_final_objects[provider]")
    end
  end

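# Invocation sketch for the renamed task (FILENAME comes from ENV['FILENAME']
# above; the values here are invented):
#
#   FILENAME=orphans.csv bundle exec rake "gitlab:cleanup:list_orphan_job_artifact_final_objects[aws]"
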
@@ -63,7 +63,7 @@
    "@gitlab/svgs": "3.83.0",
    "@gitlab/ui": "^74.2.0",
    "@gitlab/visual-review-tools": "1.7.3",
-   "@gitlab/web-ide": "^0.0.1-dev-20240206230318",
+   "@gitlab/web-ide": "^0.0.1-dev-20240208022507",
    "@mattiasbuelens/web-streams-adapter": "^0.1.0",
    "@rails/actioncable": "7.0.8",
    "@rails/ujs": "7.0.8",

@@ -92,7 +92,7 @@ module QA
    end
  end

- it 'publishes a nuget package at the project endpoint and installs it from the group endpoint', testcase: params[:testcase] do
+ it 'publishes a nuget package at the project endpoint and installs it from the group endpoint', :blocking, testcase: params[:testcase] do
    Flow::Login.sign_in

    nuget_upload_yaml = ERB.new(read_fixture('package_managers/nuget', 'nuget_upload_package.yaml.erb')).result(binding)

@@ -76,7 +76,7 @@ module QA
  end

  context 'when at the project level' do
-   it 'publishes and installs a pypi package', testcase: 'https://gitlab.com/gitlab-org/gitlab/-/quality/test_cases/348015' do
+   it 'publishes and installs a pypi package', :blocking, testcase: 'https://gitlab.com/gitlab-org/gitlab/-/quality/test_cases/348015' do
      Page::Project::Menu.perform(&:go_to_package_registry)

      Page::Project::Packages::Index.perform do |index|

@@ -109,7 +109,8 @@ RSpec.describe ProductAnalyticsTracking, :snowplow, feature_category: :product_a
  end

  it 'tracks total Redis counters' do
-   expect(Gitlab::Usage::Metrics::Instrumentations::TotalCountMetric).to receive(:redis_key).twice # total and 7d
+   expect(Gitlab::Usage::Metrics::Instrumentations::TotalCountMetric).to receive(:redis_key)
+     .twice.and_call_original # total and 7d

    get :index
  end

@@ -20,6 +20,7 @@ RSpec.describe 'Work item', :js, feature_category: :team_planning do

  context 'for signed in user' do
    before do
+     stub_const("AutocompleteSources::ExpiresIn::AUTOCOMPLETE_EXPIRES_IN", 0)
      project.add_developer(user)
      sign_in(user)
      visit work_items_path

@@ -0,0 +1,8 @@
+# redis://[:password@]host[:port][/db-number][?option=value]
+# more details: http://www.iana.org/assignments/uri-schemes/prov/redis
+development:
+  url: redis://:mynewpassword@development-host:6379/99
+test:
+  url: redis://:mynewpassword@test-host:6379/99
+production:
+  url: redis://:mynewpassword@production-host:6379/99

@@ -27,7 +27,7 @@ RSpec.describe 'ActionCableSubscriptionAdapterIdentifier override' do

    sub = ActionCable.server.pubsub.send(:redis_connection)

-   expect(sub.connection[:id]).to eq('unix:///home/localuser/redis/redis.socket/0')
+   expect(sub.connection[:id]).to eq('/home/localuser/redis/redis.socket/0')
    expect(ActionCable.server.config.cable[:id]).to be_nil
  end
end

@@ -15,7 +15,7 @@ RSpec.describe 'Session initializer for GitLab' do

  describe 'config#session_store' do
    it 'initialized as a redis_store with a proper servers configuration' do
-     expect(subject).to receive(:session_store).with(:redis_store, a_hash_including(redis_store: kind_of(::Redis::Store)))
+     expect(subject).to receive(:session_store).with(:redis_store, a_hash_including(redis_server: Gitlab::Redis::Sessions.params.merge(namespace: Gitlab::Redis::Sessions::SESSION_NAMESPACE)))

      load_session_store
    end

@@ -5,6 +5,8 @@ require 'spec_helper'
RSpec.describe ClickHouse::MigrationSupport::ExclusiveLock, feature_category: :database do
  include ExclusiveLeaseHelpers

+ let(:worker_id) { 1 }
+
  let(:worker_class) do
    # This worker will be active longer than the ClickHouse worker TTL
    Class.new do

@@ -81,7 +83,7 @@ RSpec.describe ClickHouse::MigrationSupport::ExclusiveLock, feature_category: :d
  end

  around do |example|
-   described_class.register_running_worker(worker_class, anything) do
+   described_class.register_running_worker(worker_class, worker_id) do
      example.run
    end
  end

@@ -4,7 +4,7 @@ require 'spec_helper'

RSpec.describe Gitlab::BackgroundMigration::Redis::BackfillProjectPipelineStatusTtl,
  :clean_gitlab_redis_cache, feature_category: :redis do
- let(:redis) { ::Redis.new(::Gitlab::Redis::Cache.params) }
+ let(:redis) { ::Gitlab::Redis::Cache.redis }
  let(:keys) { ["cache:gitlab:project:1:pipeline_status", "cache:gitlab:project:2:pipeline_status"] }
  let(:invalid_keys) { ["cache:gitlab:project:pipeline_status:1", "cache:gitlab:project:pipeline_status:2"] }

@@ -0,0 +1,239 @@
+# frozen_string_literal: true
+
+require 'spec_helper'
+
+RSpec.describe Gitlab::Cleanup::OrphanJobArtifactFinalObjects::GenerateList, :orphan_final_artifacts_cleanup, :clean_gitlab_redis_shared_state, feature_category: :build_artifacts do
+  describe '#run!' do
+    let(:generator) do
+      described_class.new(
+        provider: specified_provider,
+        force_restart: force_restart,
+        filename: filename
+      )
+    end
+
+    let(:filename) { 'orphan_objects.csv' }
+    let(:force_restart) { false }
+    let(:remote_directory) { 'artifacts' }
+    let(:bucket_prefix) { nil }
+
+    subject(:run) { generator.run! }
+
+    before do
+      stub_const('Gitlab::Cleanup::OrphanJobArtifactFinalObjects::Paginators::BasePaginator::BATCH_SIZE', 2)
+
+      Gitlab.config.artifacts.object_store.tap do |config|
+        config[:remote_directory] = remote_directory
+        config[:bucket_prefix] = bucket_prefix
+      end
+
+      allow(Gitlab::AppLogger).to receive(:info)
+    end
+
+    after do
+      File.delete(filename) if File.file?(filename)
+    end
+
+    shared_examples_for 'handling supported provider' do
+      let(:fog_connection) do
+        stub_object_storage_uploader(
+          config: Gitlab.config.artifacts.object_store,
+          uploader: JobArtifactUploader,
+          direct_upload: true
+        )
+      end
+
+      let!(:orphan_final_object_1) { create_fog_file }
+      let!(:orphan_final_object_2) { create_fog_file }
+      let!(:orphan_non_final_object) { create_fog_file(final: false) }
+
+      let!(:non_orphan_final_object_1) do
+        create_fog_file.tap do |file|
+          create(:ci_job_artifact, file_final_path: path_without_bucket_prefix(file.key))
+        end
+      end
+
+      let!(:non_orphan_final_object_2) do
+        create_fog_file.tap do |file|
+          create(:ci_job_artifact, file_final_path: path_without_bucket_prefix(file.key))
+        end
+      end
+
+      shared_context 'when resuming from marker' do
+        let(:dummy_error) { Class.new(StandardError) }
+
+        before do
+          fetch_counter = 0
+
+          allow(generator).to receive(:fetch_batch).and_wrap_original do |m, *args|
+            raise dummy_error if fetch_counter == 1
+
+            fetch_counter += 1
+            m.call(*args)
+          end
+        end
+      end
+
+      shared_examples_for 'listing orphan final job artifact objects' do
+        it 'lists paths and sizes of all orphan objects to the generated file' do
+          run
+
+          expect_start_log_message
+          expect_first_page_loading_log_message
+          expect_found_orphan_artifact_object_log_message(orphan_final_object_1)
+          expect_found_orphan_artifact_object_log_message(orphan_final_object_2)
+          expect_no_found_orphan_artifact_object_log_message(orphan_non_final_object)
+          expect_no_found_orphan_artifact_object_log_message(non_orphan_final_object_1)
+          expect_no_found_orphan_artifact_object_log_message(non_orphan_final_object_2)
+          expect_done_log_message(filename)
+
+          expect_orphans_list_to_contain_exactly(filename, [
+            orphan_final_object_1,
+            orphan_final_object_2
+          ])
+        end
+
+        context 'when interrupted in the middle of processing pages' do
+          include_context 'when resuming from marker'
+
+          let!(:orphan_final_object_3) { create_fog_file }
+          let!(:orphan_final_object_4) { create_fog_file }
+          let!(:orphan_final_object_5) { create_fog_file }
+
+          before do
+            # To better test that the file still contains the orphan objects
+            # from the previous execution, we want to only have orphan final objects
+            # in the storage for now. This is because we can't guarantee load order,
+            # but we want to be sure that there is an orphan object loaded in the first
+            # execution. Now, we will only have 5 objects in the storage, and they
+            # are orphan_final_object_1 to 5.
+            # rubocop:disable Rails/SaveBang -- The destroy method here is not ActiveRecord
+            orphan_non_final_object.destroy
+            non_orphan_final_object_1.destroy
+            non_orphan_final_object_2.destroy
+            # rubocop:enable Rails/SaveBang
+          end
+
+          it 'resumes from last known page marker on the next run' do
+            expect { generator.run! }.to raise_error(dummy_error)
+            saved_marker = fetch_saved_marker
+
+            new_generator = described_class.new(
+              provider: specified_provider,
+              force_restart: false,
+              filename: filename
+            )
+
+            new_generator.run!
+
+            expect_resuming_from_marker_log_message(saved_marker)
+
+            # Given we can't guarantee the order of the objects because
+            # of random path generation, we can't tell which page they will
+            # fall in, so we will just ensure that they
+            # were all logged in the end.
+            expect_found_orphan_artifact_object_log_message(orphan_final_object_1)
+            expect_found_orphan_artifact_object_log_message(orphan_final_object_2)
+            expect_found_orphan_artifact_object_log_message(orphan_final_object_3)
+            expect_found_orphan_artifact_object_log_message(orphan_final_object_4)
+            expect_found_orphan_artifact_object_log_message(orphan_final_object_5)
+
+            expect_orphans_list_to_contain_exactly(filename, [
+              orphan_final_object_1,
+              orphan_final_object_2,
+              orphan_final_object_3,
+              orphan_final_object_4,
+              orphan_final_object_5
+            ])
+          end
+
+          context 'and force_restart is true' do
+            it 'starts from the first page on the next run' do
+              expect { generator.run! }.to raise_error(dummy_error)
+
+              # Given the batch size is 2, and we only have 5 objects right now in the storage,
+              # and they are all orphans, we expect the file to have 2 entries.
+              expect_orphans_list_to_have_number_of_entries(2)
+
+              # Before we re-run, we want to delete some of the objects so we can
+              # test that the file indeed was truncated first before adding in new entries.
+              # Let's delete 4 objects.
+              # rubocop:disable Rails/SaveBang -- The destroy method here is not ActiveRecord
+              orphan_final_object_2.destroy
+              orphan_final_object_3.destroy
+              orphan_final_object_4.destroy
+              orphan_final_object_5.destroy
+              # rubocop:enable Rails/SaveBang
+
+              new_generator = described_class.new(
+                provider: specified_provider,
+                force_restart: true,
+                filename: filename
+              )
+
+              new_generator.run!
+
+              expect_no_resuming_from_marker_log_message
+
+              # Now we should have only 1 entry. Given the force restart, the file
+              # should have been truncated first, before new entries are added.
+              expect_orphans_list_to_have_number_of_entries(1)
+            end
+          end
+        end
+      end
+
+      context 'when not configured to use bucket_prefix' do
+        let(:remote_directory) { 'artifacts' }
+        let(:bucket_prefix) { nil }
+
+        it_behaves_like 'listing orphan final job artifact objects'
+      end
+
+      context 'when configured to use bucket_prefix' do
+        let(:remote_directory) { 'main-bucket' }
+        let(:bucket_prefix) { 'my/artifacts' }
+
+        it_behaves_like 'listing orphan final job artifact objects'
+      end
+    end
+
+    context 'when defaulting to provider in the object store configuration' do
+      let(:specified_provider) { nil }
+
+      it_behaves_like 'handling supported provider'
+
+      context 'and the configured provider is not supported' do
+        before do
+          allow(Gitlab.config.artifacts.object_store.connection).to receive(:provider).and_return('somethingelse')
+        end
+
+        it 'raises an error' do
+          expect { run }.to raise_error(
+            described_class::UnsupportedProviderError,
+            /The provider found in the object storage configuration is unsupported/
+          )
+        end
+      end
+    end
+
+    context 'when provider is specified' do
+      context 'and provider is supported' do
+        let(:specified_provider) { 'aws' }
+
+        it_behaves_like 'handling supported provider'
+      end
+
+      context 'and provider is not supported' do
+        let(:specified_provider) { 'somethingelse' }
+
+        it 'raises an error' do
+          expect { run }.to raise_error(
+            described_class::UnsupportedProviderError,
+            /The provided provider is unsupported/
+          )
+        end
+      end
+    end
+  end
+end

@@ -1,263 +0,0 @@
-# frozen_string_literal: true
-
-require 'spec_helper'
-
-RSpec.describe Gitlab::Cleanup::OrphanJobArtifactFinalObjectsCleaner, :orphan_final_artifacts_cleanup, :clean_gitlab_redis_shared_state, feature_category: :build_artifacts do
-  describe '#run!' do
-    let(:cleaner) do
-      described_class.new(
-        provider: specified_provider,
-        force_restart: force_restart,
-        dry_run: dry_run
-      )
-    end
-
-    let(:dry_run) { true }
-    let(:force_restart) { false }
-    let(:remote_directory) { 'artifacts' }
-    let(:bucket_prefix) { nil }
-
-    subject(:run) { cleaner.run! }
-
-    before do
-      stub_const('Gitlab::Cleanup::OrphanJobArtifactFinalObjects::Paginators::BasePaginator::BATCH_SIZE', 2)
-
-      Rake.application.rake_require 'tasks/gitlab/cleanup'
-
-      Gitlab.config.artifacts.object_store.tap do |config|
-        config[:remote_directory] = remote_directory
-        config[:bucket_prefix] = bucket_prefix
-      end
-
-      allow(Gitlab::AppLogger).to receive(:info)
-    end
-
-    shared_examples_for 'cleaning up orphan final job artifact objects' do
-      let(:fog_connection) do
-        stub_object_storage_uploader(
-          config: Gitlab.config.artifacts.object_store,
-          uploader: JobArtifactUploader,
-          direct_upload: true
-        )
-      end
-
-      let!(:orphan_final_object_1) { create_fog_file }
-      let!(:orphan_final_object_2) { create_fog_file }
-      let!(:orphan_non_final_object) { create_fog_file(final: false) }
-
-      let!(:non_orphan_final_object_1) do
-        create_fog_file.tap do |file|
-          create(:ci_job_artifact, file_final_path: path_without_bucket_prefix(file.key))
-        end
-      end
-
-      let!(:non_orphan_final_object_2) do
-        create_fog_file.tap do |file|
-          create(:ci_job_artifact, file_final_path: path_without_bucket_prefix(file.key))
-        end
-      end
-
-      shared_context 'when resuming from marker' do
-        let(:dummy_error) { Class.new(StandardError) }
-
-        before do
-          fetch_counter = 0
-
-          allow(cleaner).to receive(:fetch_batch).and_wrap_original do |m, *args|
-            raise dummy_error if fetch_counter == 1
-
-            fetch_counter += 1
-            m.call(*args)
-          end
-        end
-      end
-
-      shared_examples_for 'handling dry run mode' do
-        context 'when on dry run (which is default)' do
-          it 'logs orphan objects to delete but does not delete them' do
-            run
-
-            expect_start_log_message
-            expect_first_page_loading_log_message
-            expect_page_loading_via_marker_log_message(times: 3)
-            expect_delete_log_message(orphan_final_object_1)
-            expect_delete_log_message(orphan_final_object_2)
-            expect_no_delete_log_message(orphan_non_final_object)
-            expect_no_delete_log_message(non_orphan_final_object_1)
-            expect_no_delete_log_message(non_orphan_final_object_2)
-            expect_done_log_message
-
-            expect_object_to_exist(orphan_final_object_1)
-            expect_object_to_exist(orphan_final_object_2)
-            expect_object_to_exist(orphan_non_final_object)
-            expect_object_to_exist(non_orphan_final_object_1)
-            expect_object_to_exist(non_orphan_final_object_2)
-          end
-
-          context 'when interrupted in the middle of processing pages' do
-            include_context 'when resuming from marker'
-
-            it 'resumes from last known page marker on the next run' do
-              expect { cleaner.run! }.to raise_error(dummy_error)
-              saved_marker = fetch_saved_marker
-
-              new_cleaner = described_class.new(
-                provider: specified_provider,
-                force_restart: false,
-                dry_run: true
-              )
-
-              new_cleaner.run!
-
-              expect_resuming_from_marker_log_message(saved_marker)
-
-              # Given we can't guarantee the order of the objects because
-              # of random path generation, we can't tell which page they will
-              # fall in, so we will just ensure that they
-              # were all logged in the end.
-              expect_delete_log_message(orphan_final_object_1)
-              expect_delete_log_message(orphan_final_object_2)
-
-              # Ensure that they were not deleted because this is just a dry run.
-              expect_object_to_exist(orphan_final_object_1)
-              expect_object_to_exist(orphan_final_object_2)
-            end
-
-            context 'and force_restart is true' do
-              it 'starts from the first page on the next run' do
-                expect { cleaner.run! }.to raise_error(dummy_error)
-
-                new_cleaner = described_class.new(
-                  provider: specified_provider,
-                  force_restart: true,
-                  dry_run: true
-                )
-
-                new_cleaner.run!
-
-                expect_no_resuming_from_marker_log_message
-
-                # Ensure that they were not deleted because this is just a dry run.
-                expect_object_to_exist(orphan_final_object_1)
-                expect_object_to_exist(orphan_final_object_2)
-              end
-            end
-          end
-        end
-
-        context 'when dry run is set to false' do
-          let(:dry_run) { false }
-
-          it 'logs orphan objects to delete and deletes them' do
-            expect_object_to_exist(orphan_final_object_1)
-            expect_object_to_exist(orphan_final_object_2)
-
-            run
-
-            expect_start_log_message
-            expect_first_page_loading_log_message
-            expect_page_loading_via_marker_log_message(times: 3)
-            expect_delete_log_message(orphan_final_object_1)
-            expect_delete_log_message(orphan_final_object_2)
-            expect_no_delete_log_message(orphan_non_final_object)
-            expect_no_delete_log_message(non_orphan_final_object_1)
-            expect_no_delete_log_message(non_orphan_final_object_2)
-            expect_done_log_message
-
-            expect_object_to_be_deleted(orphan_final_object_1)
-            expect_object_to_be_deleted(orphan_final_object_2)
-            expect_object_to_exist(orphan_non_final_object)
-            expect_object_to_exist(non_orphan_final_object_1)
-            expect_object_to_exist(non_orphan_final_object_2)
-          end
-
-          context 'when interrupted in the middle of processing pages' do
-            include_context 'when resuming from marker'
-
-            it 'resumes from last known page marker on the next run' do
-              expect { cleaner.run! }.to raise_error(dummy_error)
-              saved_marker = fetch_saved_marker
-
-              new_cleaner = described_class.new(
-                provider: specified_provider,
-                force_restart: false,
-                dry_run: false
-              )
-
-              new_cleaner.run!
-
-              expect_resuming_from_marker_log_message(saved_marker)
-
-              # Given we can't guarantee the order of the objects because
-              # of random path generation, we can't tell which page they will
-              # fall in, so we will just ensure that they
-              # were all logged in the end.
-              expect_delete_log_message(orphan_final_object_1)
-              expect_delete_log_message(orphan_final_object_2)
-
-              # Ensure that they were deleted because this is not a dry run.
-              expect_object_to_be_deleted(orphan_final_object_1)
-              expect_object_to_be_deleted(orphan_final_object_2)
-            end
-
-            context 'and force_restart is true' do
-              it 'starts from the first page on the next run' do
-                expect { cleaner.run! }.to raise_error(dummy_error)
-
-                new_cleaner = described_class.new(
-                  provider: specified_provider,
-                  force_restart: true,
-                  dry_run: false
-                )
-
-                new_cleaner.run!
-
-                expect_no_resuming_from_marker_log_message
-
-                # Ensure that they were deleted because this is not a dry run.
-                expect_object_to_be_deleted(orphan_final_object_1)
-                expect_object_to_be_deleted(orphan_final_object_2)
-              end
-            end
-          end
-        end
-      end
-
-      context 'when not configured to use bucket_prefix' do
-        let(:remote_directory) { 'artifacts' }
-        let(:bucket_prefix) { nil }
-
-        it_behaves_like 'handling dry run mode'
-      end
-
-      context 'when configured to use bucket_prefix' do
-        let(:remote_directory) { 'main-bucket' }
-        let(:bucket_prefix) { 'my/artifacts' }
-
-        it_behaves_like 'handling dry run mode'
-      end
-    end
-
-    context 'when defaulting to provider in the object store configuration' do
-      let(:specified_provider) { nil }
-
-      it_behaves_like 'cleaning up orphan final job artifact objects'
-    end
-
-    context 'when provider is specified' do
-      context 'and provider is supported' do
-        let(:specified_provider) { 'aws' }
-
-        it_behaves_like 'cleaning up orphan final job artifact objects'
-      end
-
-      context 'and provider is not supported' do
-        let(:specified_provider) { 'somethingelse' }
-
-        it 'raises an error' do
-          expect { run }.to raise_error(described_class::UnsupportedProviderError)
-        end
-      end
-    end
-  end
-end

@@ -28,7 +28,7 @@ RSpec.describe Gitlab::Database::Migrations::RunnerBackoff::Communicator, :clean

  it 'reads from Redis' do
    recorder = RedisCommands::Recorder.new { subject }
-   expect(recorder.log).to include([:exists, 'gitlab:exclusive_lease:gitlab/database/migration/runner/backoff'])
+   expect(recorder.log).to include(['exists', 'gitlab:exclusive_lease:gitlab/database/migration/runner/backoff'])
  end

  context 'with runner_migrations_backoff disabled' do

@@ -33,7 +33,7 @@ RSpec.describe Gitlab::DiscussionsDiff::HighlightCache, :clean_gitlab_redis_cach

  mapping.each do |key, value|
    full_key = described_class.cache_key_for(key)
-   found_key = Gitlab::Redis::Cache.with { |r| r.get(full_key) }
+   found_key = Gitlab::Redis::Cache.with { |r| r.get(full_key).force_encoding("UTF-8") }

    expect(described_class.gzip_decompress(found_key)).to eq(value.to_json)
  end

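# Note on the force_encoding change above (a plausible reading, not stated in
# the commit): Redis returns the gzipped payload as binary-encoded bytes, so
# without the cast a string comparison can fail on encoding alone:
#
#   data = Gitlab::Redis::Cache.with { |r| r.get(full_key) }
#   data.encoding                  # => #<Encoding:ASCII-8BIT>
#   data.force_encoding("UTF-8")   # same bytes, now tagged as UTF-8
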
@@ -23,9 +23,9 @@ RSpec.describe Gitlab::ExternalAuthorization::Cache, :clean_gitlab_redis_cache d
describe '#load' do
  it 'reads stored info from redis' do
    freeze_time do
-     set_in_redis(:access, false)
+     set_in_redis(:access, false.to_s)
      set_in_redis(:reason, 'Access denied for now')
-     set_in_redis(:refreshed_at, Time.now)
+     set_in_redis(:refreshed_at, Time.now.to_s)

      access, reason, refreshed_at = cache.load

@@ -9,10 +9,9 @@ RSpec.describe Gitlab::Instrumentation::RedisClientMiddleware, :request_store, f
  include RedisHelpers

  let_it_be(:redis_store_class) { define_helper_redis_store_class }
- let_it_be(:redis_client) { RedisClient.new(redis_store_class.redis_client_params) }

  before do
-   redis_client.call("flushdb")
+   redis_store_class.with(&:flushdb)
  end

  describe 'read and write' do

@@ -24,27 +23,30 @@ RSpec.describe Gitlab::Instrumentation::RedisClientMiddleware, :request_store, f
  # The response is 1001, so 4 bytes. Exercise counting an integer reply.
  [[:set, 'foobar', 1000]] | [:incr, 'foobar'] | (4 + 6) | 4

- # Exercise counting empty multi bulk reply. Returns an empty hash `{}`
- [] | [:hgetall, 'foobar'] | (7 + 6) | 2
+ # Exercise counting empty multi bulk reply.
+ [] | [:hgetall, 'foobar'] | (7 + 6) | 0

  # Hgetall response length is combined length of keys and values in the
  # hash. Exercises counting of a multi bulk reply
- # Returns `{"field"=>"hello world"}`, 5 for field, 11 for hello world, 8 for {, }, 4 "s, =, >
- [[:hset, 'myhash', 'field', 'hello world']] | [:hgetall, 'myhash'] | (7 + 6) | (5 + 11 + 8)
+ [[:hset, 'myhash', 'field', 'hello world']] | [:hgetall, 'myhash'] | (7 + 6) | (5 + 11)

  # Exercise counting of a bulk reply
  [[:set, 'foo', 'bar' * 100]] | [:get, 'foo'] | (3 + 3) | (3 * 100)

- # Nested array response: [['foo', 0.0], ['bar', 1.0]]. Returns scores as float.
- [[:zadd, 'myset', 0, 'foo'],
-  [:zadd, 'myset', 1, 'bar']] | [:zrange, 'myset', 0, -1, 'withscores'] | (6 + 5 + 1 + 2 + 10) | (3 + 3 + 3 + 3)
+ # Nested array response: [['foo', 0], ['bar', 1.1000000000000001]] due to Redis precision
+ # See https://github.com/redis/redis/issues/1499
+ [[:zadd, 'myset', 0, 'foo'],
+  [:zadd, 'myset', 1.1, 'bar']] | [:zrange, 'myset', 0, -1, 'withscores'] | (6 + 5 + 1 + 2 + 10) | (3 + 1 + 3 + 18)
end

with_them do
  it 'counts bytes read and written' do
-   setup.each { |cmd| redis_client.call(*cmd) }
-   RequestStore.clear!
-   redis_client.call(*command)
+   redis_store_class.with do |redis|
+     setup.each { |cmd| redis.call(cmd) }
+     RequestStore.clear!
+     redis.call(command)
+   end

    expect(Gitlab::Instrumentation::Redis.read_bytes).to eq(expect_read)
    expect(Gitlab::Instrumentation::Redis.write_bytes).to eq(expect_write)

@@ -58,35 +60,48 @@ RSpec.describe Gitlab::Instrumentation::RedisClientMiddleware, :request_store, f
  it 'counts successful requests' do
    expect(instrumentation_class).to receive(:instance_count_request).with(1).and_call_original

-   redis_client.call(:get, 'foobar')
+   redis_store_class.with { |redis| redis.call(:get, 'foobar') }
  end

  it 'counts successful pipelined requests' do
    expect(instrumentation_class).to receive(:instance_count_request).with(2).and_call_original
    expect(instrumentation_class).to receive(:instance_count_pipelined_request).with(2).and_call_original

-   redis_client.pipelined do |pipeline|
-     pipeline.call(:get, '{foobar}buz')
-     pipeline.call(:get, '{foobar}baz')
+   redis_store_class.with do |redis|
+     redis.pipelined do |pipeline|
+       pipeline.call(:get, '{foobar}buz')
+       pipeline.call(:get, '{foobar}baz')
+     end
    end
  end

  context 'when encountering exceptions' do
-   before do
-     allow(redis_client.instance_variable_get(:@raw_connection)).to receive(:call).and_raise(
-       RedisClient::Error)
+   where(:case_name, :exception, :exception_counter) do
+     'generic exception' | Redis::CommandError.new | :instance_count_exception
+     'moved redirection' | Redis::CommandError.new("MOVED 123 127.0.0.1:6380") | :instance_count_cluster_redirection
+     'ask redirection'   | Redis::CommandError.new("ASK 123 127.0.0.1:6380") | :instance_count_cluster_redirection
    end

-   it 'counts exception' do
-     expect(instrumentation_class).to receive(:instance_count_exception)
-       .with(instance_of(RedisClient::Error)).and_call_original
-     expect(instrumentation_class).to receive(:log_exception)
-       .with(instance_of(RedisClient::Error)).and_call_original
-     expect(instrumentation_class).to receive(:instance_count_request).and_call_original
+   with_them do
+     before do
+       redis_store_class.with do |redis|
+         # We need to go 1 layer deeper to stub _client as we monkey-patch Redis::Client
+         # with the interceptor. Stubbing `redis` will skip the instrumentation_class.
+         allow(redis._client.instance_variable_get(:@raw_connection)).to receive(:call).and_raise(exception)
+       end
+     end

-     expect do
-       redis_client.call(:auth, 'foo', 'bar')
-     end.to raise_error(RedisClient::Error)
+     it 'counts exception' do
+       expect(instrumentation_class).to receive(exception_counter)
+         .with(instance_of(Redis::CommandError)).and_call_original
+       expect(instrumentation_class).to receive(:log_exception)
+         .with(instance_of(Redis::CommandError)).and_call_original
+       expect(instrumentation_class).to receive(:instance_count_request).and_call_original
+
+       expect do
+         redis_store_class.with { |redis| redis.call(:auth, 'foo', 'bar') }
+       end.to raise_exception(Redis::CommandError)
+     end
    end
  end

@@ -99,7 +114,7 @@ RSpec.describe Gitlab::Instrumentation::RedisClientMiddleware, :request_store, f
  expect(instrumentation_class).to receive(:increment_cross_slot_request_count).and_call_original
  expect(instrumentation_class).not_to receive(:increment_allowed_cross_slot_request_count).and_call_original

- redis_client.call(:mget, 'foo', 'bar')
+ redis_store_class.with { |redis| redis.call(:mget, 'foo', 'bar') }
end

it 'does not count allowed cross-slot requests' do

@@ -107,7 +122,7 @@ RSpec.describe Gitlab::Instrumentation::RedisClientMiddleware, :request_store, f
  expect(instrumentation_class).to receive(:increment_allowed_cross_slot_request_count).and_call_original

  Gitlab::Instrumentation::RedisClusterValidator.allow_cross_slot_commands do
-   redis_client.call(:mget, 'foo', 'bar')
+   redis_store_class.with { |redis| redis.call(:mget, 'foo', 'bar') }
  end
end

@@ -116,7 +131,7 @@ RSpec.describe Gitlab::Instrumentation::RedisClientMiddleware, :request_store, f
  expect(instrumentation_class).not_to receive(:increment_allowed_cross_slot_request_count).and_call_original

  Gitlab::Instrumentation::RedisClusterValidator.allow_cross_slot_commands do
-   redis_client.call(:mget, 'bar')
+   redis_store_class.with { |redis| redis.call(:get, 'bar') }
  end
end

@@ -124,7 +139,7 @@ RSpec.describe Gitlab::Instrumentation::RedisClientMiddleware, :request_store, f
    expect(instrumentation_class).not_to receive(:increment_cross_slot_request_count).and_call_original
    expect(instrumentation_class).not_to receive(:increment_allowed_cross_slot_request_count).and_call_original

-   redis_client.call(:mget, '{foo}bar', '{foo}baz')
+   redis_store_class.with { |redis| redis.call(:mget, '{foo}bar', '{foo}baz') }
  end
end

@@ -135,7 +150,7 @@ RSpec.describe Gitlab::Instrumentation::RedisClientMiddleware, :request_store, f

  it 'still runs cross-slot validation' do
    expect do
-     redis_client.call('mget', 'foo', 'bar')
+     redis_store_class.with { |redis| redis.mget('foo', 'bar') }
    end.to raise_error(instance_of(Gitlab::Instrumentation::RedisClusterValidator::CrossSlotError))
  end
end

@@ -157,7 +172,7 @@ RSpec.describe Gitlab::Instrumentation::RedisClientMiddleware, :request_store, f
    expect(instrumentation_class).to receive(:instance_observe_duration).with(a_value > 0)
      .and_call_original

-   redis_client.call(*command)
+   redis_store_class.with { |redis| redis.call(*command) }
  end
end

@@ -166,17 +181,21 @@ RSpec.describe Gitlab::Instrumentation::RedisClientMiddleware, :request_store, f
  expect(instrumentation_class).to receive(:instance_observe_duration).twice.with(a_value > 0)
    .and_call_original

- redis_client.pipelined do |pipeline|
-   pipeline.call(:get, '{foobar}buz')
-   pipeline.call(:get, '{foobar}baz')
+ redis_store_class.with do |redis|
+   redis.pipelined do |pipeline|
+     pipeline.call(:get, '{foobar}buz')
+     pipeline.call(:get, '{foobar}baz')
+   end
  end
end

it 'raises error when keys are not from the same slot' do
  expect do
-   redis_client.pipelined do |pipeline|
-     pipeline.call(:get, 'foo')
-     pipeline.call(:get, 'bar')
+   redis_store_class.with do |redis|
+     redis.pipelined do |pipeline|
+       pipeline.call(:get, 'foo')
+       pipeline.call(:get, 'bar')
+     end
    end
  end.to raise_error(instance_of(Gitlab::Instrumentation::RedisClusterValidator::CrossSlotError))
end

@@ -200,11 +219,11 @@ RSpec.describe Gitlab::Instrumentation::RedisClientMiddleware, :request_store, f

  with_them do
    it 'skips requests we do not want in the apdex' do
-     setup.each { |cmd| redis_client.call(*cmd) }
+     setup.each { |cmd| redis_store_class.with { |redis| redis.call(*cmd) } }

      expect(instrumentation_class).not_to receive(:instance_observe_duration)

-     redis_client.call(*command)
+     redis_store_class.with { |redis| redis.call(*command) }
    end
  end

@@ -212,10 +231,12 @@ RSpec.describe Gitlab::Instrumentation::RedisClientMiddleware, :request_store, f
  it 'skips requests that have blocking commands' do
    expect(instrumentation_class).not_to receive(:instance_observe_duration)

-   redis_client.pipelined do |pipeline|
-     pipeline.call(:get, '{foobar}buz')
-     pipeline.call(:rpush, '{foobar}baz', 1)
-     pipeline.call(:brpop, '{foobar}baz', 0)
+   redis_store_class.with do |redis|
+     redis.pipelined do |pipeline|
+       pipeline.call(:get, '{foobar}buz')
+       pipeline.call(:rpush, '{foobar}baz', 1)
+       pipeline.call(:brpop, '{foobar}baz', 0)
+     end
    end
  end
end

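# Note (standard Redis Cluster behaviour, not specific to this commit): the
# {foobar} braces used throughout these specs are key hashtags. Only the text
# inside the braces is CRC16-hashed, so '{foobar}buz' and '{foobar}baz' always
# map to the same slot, which is why those pipelines pass cross-slot
# validation while bare 'foo' and 'bar' do not.
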
@@ -1,261 +0,0 @@
# frozen_string_literal: true

require 'spec_helper'
require 'rspec-parameterized'
require 'support/helpers/rails_helpers'

RSpec.describe Gitlab::Instrumentation::RedisInterceptor, :request_store, feature_category: :scalability do
  using RSpec::Parameterized::TableSyntax
  include RedisHelpers

  let_it_be(:redis_store_class) { define_helper_redis_store_class }

  before do
    redis_store_class.with(&:flushdb)
  end

  describe 'read and write' do
    where(:setup, :command, :expect_write, :expect_read) do
      # The response is 'OK', the request size is the combined size of array
      # elements. Exercise counting of a status reply.
      [] | [:set, 'foo', 'bar'] | 3 + 3 + 3 | 2

      # The response is 1001, so 4 bytes. Exercise counting an integer reply.
      [[:set, 'foobar', 1000]] | [:incr, 'foobar'] | 4 + 6 | 4

      # Exercise counting empty multi bulk reply
      [] | [:hgetall, 'foobar'] | 7 + 6 | 0

      # Hgetall response length is combined length of keys and values in the
      # hash. Exercises counting of a multi bulk reply
      [[:hset, 'myhash', 'field', 'hello world']] | [:hgetall, 'myhash'] | 7 + 6 | 5 + 11

      # Exercise counting of a bulk reply
      [[:set, 'foo', 'bar' * 100]] | [:get, 'foo'] | 3 + 3 | 3 * 100

      # Nested array response: [['foo', 0], ['bar', 1]]
      [[:zadd, 'myset', 0, 'foo'], [:zadd, 'myset', 1, 'bar']] | [:zrange, 'myset', 0, -1, 'withscores'] | 6 + 5 + 1 + 2 + 10 | 3 + 1 + 3 + 1
    end

    with_them do
      it 'counts bytes read and written' do
        redis_store_class.with do |redis|
          setup.each { |cmd| redis.call(cmd) }
          RequestStore.clear!
          redis.call(command)
        end

        expect(Gitlab::Instrumentation::Redis.read_bytes).to eq(expect_read)
        expect(Gitlab::Instrumentation::Redis.write_bytes).to eq(expect_write)
      end
    end
  end

  describe 'counting' do
    let(:instrumentation_class) { redis_store_class.instrumentation_class }

    it 'counts successful requests' do
      expect(instrumentation_class).to receive(:instance_count_request).with(1).and_call_original

      redis_store_class.with { |redis| redis.call(:get, 'foobar') }
    end

    it 'counts successful pipelined requests' do
      expect(instrumentation_class).to receive(:instance_count_request).with(2).and_call_original
      expect(instrumentation_class).to receive(:instance_count_pipelined_request).with(2).and_call_original

      redis_store_class.with do |redis|
        redis.pipelined do |pipeline|
          pipeline.call(:get, '{foobar}buz')
          pipeline.call(:get, '{foobar}baz')
        end
      end
    end

    context 'when encountering connection exceptions within process' do
      before do
        redis_store_class.with do |redis|
          allow(redis._client).to receive(:write).and_call_original
        end
      end

      it 'counts connection exceptions' do
        redis_store_class.with do |redis|
          expect(redis._client).to receive(:write).with([:get, 'foobar']).and_raise(::Redis::ConnectionError)
        end

        expect(instrumentation_class).to receive(:instance_count_connection_exception)
          .with(instance_of(Redis::ConnectionError)).and_call_original

        redis_store_class.with { |redis| redis.call(:get, 'foobar') }
      end
    end

    context 'when encountering exceptions' do
      where(:case_name, :exception, :exception_counter) do
        'generic exception' | Redis::CommandError | :instance_count_exception
        'moved redirection' | Redis::CommandError.new("MOVED 123 127.0.0.1:6380") | :instance_count_cluster_redirection
        'ask redirection' | Redis::CommandError.new("ASK 123 127.0.0.1:6380") | :instance_count_cluster_redirection
      end

      with_them do
        before do
          redis_store_class.with do |redis|
            # We need to go 1 layer deeper to stub _client as we monkey-patch Redis::Client
            # with the interceptor. Stubbing `redis` will skip the instrumentation_class.
            allow(redis._client).to receive(:process).and_raise(exception)
          end
        end

        it 'counts exception' do
          expect(instrumentation_class).to receive(exception_counter)
            .with(instance_of(Redis::CommandError)).and_call_original
          expect(instrumentation_class).to receive(:log_exception)
            .with(instance_of(Redis::CommandError)).and_call_original
          expect(instrumentation_class).to receive(:instance_count_request).and_call_original

          expect do
            redis_store_class.with { |redis| redis.call(:auth, 'foo', 'bar') }
          end.to raise_exception(Redis::CommandError)

          expect(Thread.current[:redis_client_error_count]).to eq(0)
        end
      end
    end

    context 'in production environment' do
      before do
        stub_rails_env('production') # to avoid raising CrossSlotError
      end

      it 'counts disallowed cross-slot requests' do
        expect(instrumentation_class).to receive(:increment_cross_slot_request_count).and_call_original
        expect(instrumentation_class).not_to receive(:increment_allowed_cross_slot_request_count).and_call_original

        redis_store_class.with { |redis| redis.call(:mget, 'foo', 'bar') }
      end

      it 'does not count allowed cross-slot requests' do
        expect(instrumentation_class).not_to receive(:increment_cross_slot_request_count).and_call_original
        expect(instrumentation_class).to receive(:increment_allowed_cross_slot_request_count).and_call_original

        Gitlab::Instrumentation::RedisClusterValidator.allow_cross_slot_commands do
          redis_store_class.with { |redis| redis.call(:mget, 'foo', 'bar') }
        end
      end

      it 'does not count allowed non-cross-slot requests' do
        expect(instrumentation_class).not_to receive(:increment_cross_slot_request_count).and_call_original
        expect(instrumentation_class).not_to receive(:increment_allowed_cross_slot_request_count).and_call_original

        Gitlab::Instrumentation::RedisClusterValidator.allow_cross_slot_commands do
          redis_store_class.with { |redis| redis.call(:get, 'bar') }
        end
      end

      it 'skips count for non-cross-slot requests' do
        expect(instrumentation_class).not_to receive(:increment_cross_slot_request_count).and_call_original
        expect(instrumentation_class).not_to receive(:increment_allowed_cross_slot_request_count).and_call_original

        redis_store_class.with { |redis| redis.call(:mget, '{foo}bar', '{foo}baz') }
      end
    end

    context 'without active RequestStore' do
      before do
        ::RequestStore.end!
      end

      it 'still runs cross-slot validation' do
        expect do
          redis_store_class.with { |redis| redis.mget('foo', 'bar') }
        end.to raise_error(instance_of(Gitlab::Instrumentation::RedisClusterValidator::CrossSlotError))
      end
    end
  end

  describe 'latency' do
    let(:instrumentation_class) { redis_store_class.instrumentation_class }

    describe 'commands in the apdex' do
      where(:command) do
        [
          [[:get, 'foobar']],
          [%w[GET foobar]]
        ]
      end

      with_them do
        it 'measures requests we want in the apdex' do
          expect(instrumentation_class).to receive(:instance_observe_duration).with(a_value > 0)
            .and_call_original

          redis_store_class.with { |redis| redis.call(*command) }
        end
      end

      context 'with pipelined commands' do
        it 'measures requests that do not have blocking commands' do
          expect(instrumentation_class).to receive(:instance_observe_duration).twice.with(a_value > 0)
            .and_call_original

          redis_store_class.with do |redis|
            redis.pipelined do |pipeline|
              pipeline.call(:get, '{foobar}buz')
              pipeline.call(:get, '{foobar}baz')
            end
          end
        end

        it 'raises error when keys are not from the same slot' do
          expect do
            redis_store_class.with do |redis|
              redis.pipelined do |pipeline|
                pipeline.call(:get, 'foo')
                pipeline.call(:get, 'bar')
              end
            end
          end.to raise_error(instance_of(Gitlab::Instrumentation::RedisClusterValidator::CrossSlotError))
        end
      end
    end

    describe 'commands not in the apdex' do
      where(:setup, :command) do
        [['rpush', 'foobar', 1]] | ['brpop', 'foobar', 0]
        [['rpush', 'foobar', 1]] | ['blpop', 'foobar', 0]
        [['rpush', '{abc}foobar', 1]] | ['brpoplpush', '{abc}foobar', '{abc}bazqux', 0]
        [['rpush', '{abc}foobar', 1]] | ['brpoplpush', '{abc}foobar', '{abc}bazqux', 0]
        [['zadd', 'foobar', 1, 'a']] | ['bzpopmin', 'foobar', 0]
        [['zadd', 'foobar', 1, 'a']] | ['bzpopmax', 'foobar', 0]
        [['xadd', 'mystream', 1, 'myfield', 'mydata']] | ['xread', 'block', 1, 'streams', 'mystream', '0-0']
        [['xadd', 'foobar', 1, 'myfield', 'mydata'], ['xgroup', 'create', 'foobar', 'mygroup', 0]] | ['xreadgroup', 'group', 'mygroup', 'myconsumer', 'block', 1, 'streams', 'foobar', '0-0']
        [] | ['command']
      end

      with_them do
        it 'skips requests we do not want in the apdex' do
          redis_store_class.with { |redis| setup.each { |cmd| redis.call(*cmd) } }

          expect(instrumentation_class).not_to receive(:instance_observe_duration)

          redis_store_class.with { |redis| redis.call(*command) }
        end
      end

      context 'with pipelined commands' do
        it 'skips requests that have blocking commands' do
          expect(instrumentation_class).not_to receive(:instance_observe_duration)

          redis_store_class.with do |redis|
            redis.pipelined do |pipeline|
              pipeline.call(:get, '{foobar}buz')
              pipeline.call(:rpush, '{foobar}baz', 1)
              pipeline.call(:brpop, '{foobar}baz', 0)
            end
          end
        end
      end
    end
  end
end

@@ -1,80 +0,0 @@
# frozen_string_literal: true

require 'spec_helper'

RSpec.describe Gitlab::Patch::NodeLoader, feature_category: :redis do
  using RSpec::Parameterized::TableSyntax

  describe '#fetch_node_info' do
    let(:redis) { double(:redis) } # rubocop:disable RSpec/VerifiedDoubles

    # rubocop:disable Naming/InclusiveLanguage
    where(:case_name, :args, :value) do
      [
        [
          'when only ip address is present',
          "07c37df 127.0.0.1:30004@31004 slave e7d1eec 0 1426238317239 4 connected
          67ed2db 127.0.0.1:30002@31002 master - 0 1426238316232 2 connected 5461-10922
          292f8b3 127.0.0.1:30003@31003 master - 0 1426238318243 3 connected 10923-16383
          6ec2392 127.0.0.1:30005@31005 slave 67ed2db 0 1426238316232 5 connected
          824fe11 127.0.0.1:30006@31006 slave 292f8b3 0 1426238317741 6 connected
          e7d1eec 127.0.0.1:30001@31001 myself,master - 0 0 1 connected 0-5460",
          {
            '127.0.0.1:30004' => 'slave', '127.0.0.1:30002' => 'master', '127.0.0.1:30003' => 'master',
            '127.0.0.1:30005' => 'slave', '127.0.0.1:30006' => 'slave', '127.0.0.1:30001' => 'master'
          }
        ],
        [
          'when hostname is present',
          "07c37df 127.0.0.1:30004@31004,host1 slave e7d1eec 0 1426238317239 4 connected
          67ed2db 127.0.0.1:30002@31002,host2 master - 0 1426238316232 2 connected 5461-10922
          292f8b3 127.0.0.1:30003@31003,host3 master - 0 1426238318243 3 connected 10923-16383
          6ec2392 127.0.0.1:30005@31005,host4 slave 67ed2db 0 1426238316232 5 connected
          824fe11 127.0.0.1:30006@31006,host5 slave 292f8b3 0 1426238317741 6 connected
          e7d1eec 127.0.0.1:30001@31001,host6 myself,master - 0 0 1 connected 0-5460",
          {
            'host1:30004' => 'slave', 'host2:30002' => 'master', 'host3:30003' => 'master',
            'host4:30005' => 'slave', 'host5:30006' => 'slave', 'host6:30001' => 'master'
          }
        ],
        [
          'when auxiliary fields are present',
          "07c37df 127.0.0.1:30004@31004,,shard-id=69bc slave e7d1eec 0 1426238317239 4 connected
          67ed2db 127.0.0.1:30002@31002,,shard-id=114f master - 0 1426238316232 2 connected 5461-10922
          292f8b3 127.0.0.1:30003@31003,,shard-id=fdb3 master - 0 1426238318243 3 connected 10923-16383
          6ec2392 127.0.0.1:30005@31005,,shard-id=114f slave 67ed2db 0 1426238316232 5 connected
          824fe11 127.0.0.1:30006@31006,,shard-id=fdb3 slave 292f8b3 0 1426238317741 6 connected
          e7d1eec 127.0.0.1:30001@31001,,shard-id=69bc myself,master - 0 0 1 connected 0-5460",
          {
            '127.0.0.1:30004' => 'slave', '127.0.0.1:30002' => 'master', '127.0.0.1:30003' => 'master',
            '127.0.0.1:30005' => 'slave', '127.0.0.1:30006' => 'slave', '127.0.0.1:30001' => 'master'
          }
        ],
        [
          'when hostname and auxiliary fields are present',
          "07c37df 127.0.0.1:30004@31004,host1,shard-id=69bc slave e7d1eec 0 1426238317239 4 connected
          67ed2db 127.0.0.1:30002@31002,host2,shard-id=114f master - 0 1426238316232 2 connected 5461-10922
          292f8b3 127.0.0.1:30003@31003,host3,shard-id=fdb3 master - 0 1426238318243 3 connected 10923-16383
          6ec2392 127.0.0.1:30005@31005,host4,shard-id=114f slave 67ed2db 0 1426238316232 5 connected
          824fe11 127.0.0.1:30006@31006,host5,shard-id=fdb3 slave 292f8b3 0 1426238317741 6 connected
          e7d1eec 127.0.0.1:30001@31001,host6,shard-id=69bc myself,master - 0 0 1 connected 0-5460",
          {
            'host1:30004' => 'slave', 'host2:30002' => 'master', 'host3:30003' => 'master',
            'host4:30005' => 'slave', 'host5:30006' => 'slave', 'host6:30001' => 'master'
          }
        ]
      ]
    end
    # rubocop:enable Naming/InclusiveLanguage

    with_them do
      before do
        allow(redis).to receive(:call).with([:cluster, :nodes]).and_return(args)
      end

      it do
        expect(Redis::Cluster::NodeLoader.load_flags([redis])).to eq(value)
      end
    end
  end
end

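As context for the table above: the second field of each CLUSTER NODES line is ip:port@cport, optionally followed by a comma-separated hostname and auxiliary key=value fields, and the hostname, when present, replaces the IP in the node key. A hypothetical standalone helper that reproduces the expected values (node_key is illustrative, not the actual Gitlab::Patch::NodeLoader code, and it assumes IPv4 addresses):

  # "ip:port@cport[,hostname[,aux=value...]]" -> "host:port"
  def node_key(address_field)
    address, hostname, _aux = address_field.split(',', 3)
    host, port = address.split('@').first.split(':')
    host = hostname unless hostname.nil? || hostname.empty?
    "#{host}:#{port}"
  end

  node_key('127.0.0.1:30004@31004')                     # => "127.0.0.1:30004"
  node_key('127.0.0.1:30004@31004,host1')               # => "host1:30004"
  node_key('127.0.0.1:30004@31004,,shard-id=69bc')      # => "127.0.0.1:30004"
  node_key('127.0.0.1:30004@31004,host1,shard-id=69bc') # => "host1:30004"
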
@@ -13,6 +13,8 @@ RSpec.describe Gitlab::Patch::RedisCacheStore, :use_clean_rails_redis_caching, f
    cache.write('{user1}:x', 1)
    cache.write('{user1}:y', 2)
    cache.write('{user1}:z', 3)

    cache.instance_variable_set(:@pipeline_batch_size, nil)
  end

  describe '#read_multi_mget' do

@@ -34,7 +36,7 @@ RSpec.describe Gitlab::Patch::RedisCacheStore, :use_clean_rails_redis_caching, f
    end

    context 'when reading large amount of keys' do
      let(:input_size) { 2000 }
      let(:input_size) { 2100 }
      let(:chunk_size) { 1000 }

      shared_examples 'read large amount of keys' do

@@ -45,10 +47,11 @@ RSpec.describe Gitlab::Patch::RedisCacheStore, :use_clean_rails_redis_caching, f
          ::Gitlab::Redis::ClusterUtil.cluster?(redis.default_store)

        if normal_cluster || multistore_cluster
          expect_next_instances_of(Gitlab::Redis::CrossSlot::Pipeline, 2) do |pipeline|
            obj = instance_double(::Redis)
            expect(pipeline).to receive(:pipelined).and_yield(obj)
            expect(obj).to receive(:get).exactly(chunk_size).times
          times = (input_size.to_f / chunk_size).ceil
          expect(redis).to receive(:pipelined).exactly(times).times.and_call_original

          expect_next_instances_of(::Redis::PipelinedConnection, times) do |p|
            expect(p).to receive(:get).at_most(chunk_size).times
          end
        else
          expect(redis).to receive(:mget).and_call_original

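The ceil arithmetic above is the whole chunking story: reading 2100 keys with a pipeline batch size of 1000 means (2100 / 1000.0).ceil == 3 pipelined round trips, the last one only 100 keys deep. A rough standalone sketch of that batching pattern (redis here is assumed to be any connected client; this is not the patched Rails cache store itself):

  keys = Array.new(2100) { |i| "cache:key#{i}" }
  batch_size = 1000

  # each_slice yields 1000, 1000, then 100 keys => three pipelined calls
  values = keys.each_slice(batch_size).flat_map do |slice|
    redis.pipelined do |pipeline|
      slice.each { |key| pipeline.get(key) }
    end
  end
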
@@ -6,7 +6,7 @@ RSpec.describe Gitlab::Patch::RedisClient, feature_category: :redis do
  include RedisHelpers

  let_it_be(:redis_store_class) { define_helper_redis_store_class }
  let_it_be(:redis_client) { RedisClient.new(redis_store_class.redis_client_params) }
  let_it_be(:redis_client) { RedisClient.new(redis_store_class.params) }

  before do
    Thread.current[:redis_client_error_count] = 1

@@ -0,0 +1,29 @@
# frozen_string_literal: true

require 'spec_helper'

RSpec.describe Gitlab::Patch::RedisStoreFactory, feature_category: :redis do
  describe '#create' do
    let(:params) { { host: 'localhost' } }

    subject(:factory_create) { ::Redis::Store::Factory.create(params) } # rubocop:disable Rails/SaveBang -- redis-store does not implement create!

    context 'when using standalone Redis' do
      it 'does not create ClusterStore' do
        expect(Gitlab::Redis::ClusterStore).not_to receive(:new)

        factory_create
      end
    end

    context 'when using a Redis Cluster' do
      let(:params) { { nodes: ["redis://localhost:6001", "redis://localhost:6002"] } }

      it 'creates a ClusterStore' do
        expect(Gitlab::Redis::ClusterStore).to receive(:new).with(params.merge({ raw: false }))

        factory_create
      end
    end
  end
end

@@ -102,7 +102,7 @@ RSpec.describe Gitlab::RackAttack::Store, :clean_gitlab_redis_rate_limiting, fea
  before do
    broken_redis = Redis.new(
      url: 'redis://127.0.0.0:0',
      instrumentation_class: Gitlab::Redis::RateLimiting.instrumentation_class
      custom: { instrumentation_class: Gitlab::Redis::RateLimiting.instrumentation_class }
    )
    allow(Gitlab::Redis::RateLimiting).to receive(:with).and_yield(broken_redis)
  end

@@ -0,0 +1,116 @@
# frozen_string_literal: true

require 'spec_helper'

# This spec only runs if a Redis Cluster is configured for Gitlab::Redis::Cache.
# ::Redis::Cluster fetches the cluster details from the server on `initialize` and will raise
# an error if the cluster is not found.
#
# An example would be the following in config/redis.yml, assuming gdk is set up with redis-cluster:
# test:
#   cache:
#     cluster:
#       - "redis://127.0.0.1:6003"
#       - "redis://127.0.0.1:6004"
#       - "redis://127.0.0.1:6005"
RSpec.describe Gitlab::Redis::ClusterStore, :clean_gitlab_redis_cache,
  feature_category: :redis, if: ::Gitlab::Redis::Cache.params[:nodes] do
  let(:params) { ::Gitlab::Redis::Cache.params }

  subject(:store) { ::Redis::Store::Factory.create(params) } # rubocop:disable Rails/SaveBang -- not a rails method

  describe '.new' do
    it 'initialises a cluster store' do
      expect(store).to be_instance_of(::Gitlab::Redis::ClusterStore)
    end

    it 'extends Serialization by default' do
      expect(store.is_a?(::Redis::Store::Serialization)).to eq(true)
    end

    it 'sets a default serializer when left empty' do
      expect(store.instance_variable_get(:@serializer)).to eq(Marshal)
    end

    context 'when serializer field is defined' do
      let(:params) { ::Gitlab::Redis::Cache.params.merge(serializer: Class) }

      it 'sets serializer according to the options' do
        expect(store.instance_variable_get(:@serializer)).to eq(Class)
      end
    end

    context 'when marshalling field is defined' do
      let(:params) { ::Gitlab::Redis::Cache.params.merge(marshalling: true, serializer: Class) }

      it 'overrides serializer with Marshal' do
        expect(store.instance_variable_get(:@serializer)).to eq(Marshal)
      end
    end
    context 'when marshalling field is false' do
      let(:params) { ::Gitlab::Redis::Cache.params.merge(marshalling: false, serializer: Class) }

      it 'sets the serializer to nil' do
        expect(store.instance_variable_get(:@serializer)).to eq(nil)
      end
    end

    context 'when namespace is defined' do
      let(:params) { ::Gitlab::Redis::Cache.params.merge(namespace: 'testing') }

      it 'extends namespace' do
        expect(store.is_a?(::Redis::Store::Namespace)).to eq(true)
      end

      it 'writes keys with namespace' do
        store.set('testkey', 1)

        ::Gitlab::Redis::Cache.with do |conn|
          expect(conn.exists('testing:testkey')).to eq(1)
        end
      end
    end
  end

  describe '#set' do
    context 'when ttl is added' do
      it 'writes the key and sets a ttl' do
        expect(store.set('test', 1, expire_after: 100)).to eq('OK')

        expect(store.ttl('test')).to be > 95
        expect(store.get('test')).to eq(1)
      end
    end

    context 'when there is no ttl' do
      it 'sets the key' do
        expect(store.set('test', 1)).to eq('OK')

        expect(store.get('test')).to eq(1)
        expect(store.ttl('test')).to eq(-1)
      end
    end
  end

  describe '#setnx' do
    context 'when ttl is added' do
      it 'writes the key if not exists and sets a ttl' do
        expect(store.setnx('test', 1, expire_after: 100)).to eq([true, true])
        expect(store.ttl('test')).to be > 95
        expect(store.get('test')).to eq(1)
        expect(store.setnx('test', 1, expire_after: 100)).to eq([false, true])
      end
    end

    context 'when there is no ttl' do
      it 'writes the key if not exists' do
        expect(store.setnx('test', 1)).to eq(true)
        expect(store.setnx('test', 1)).to eq(false)

        expect(store.get('test')).to eq(1)
        expect(store.ttl('test')).to eq(-1)
      end
    end
  end
end

@@ -5,10 +5,14 @@ require 'spec_helper'
RSpec.describe Gitlab::Redis::ClusterUtil, feature_category: :scalability do
  using RSpec::Parameterized::TableSyntax

  let(:router_stub) { instance_double(::RedisClient::Cluster::Router) }

  before do
    allow(::RedisClient::Cluster::Router).to receive(:new).and_return(router_stub)
  end

  describe '.cluster?' do
    context 'when MultiStore' do
      let(:redis_cluster) { instance_double(::Redis::Cluster) }

      where(:pri_store, :sec_store, :expected_val) do
        :cluster | :cluster | true
        :cluster | :single | true

@@ -17,10 +21,7 @@ RSpec.describe Gitlab::Redis::ClusterUtil, feature_category: :scalability do
      end

      before do
        # stub all initialiser steps in Redis::Cluster.new to avoid connecting to a Redis Cluster node
        allow(::Redis::Cluster).to receive(:new).and_return(redis_cluster)
        allow(redis_cluster).to receive(:is_a?).with(::Redis::Cluster).and_return(true)
        allow(redis_cluster).to receive(:id).and_return(1)
        allow(router_stub).to receive(:node_keys).and_return([])

        allow(Gitlab::Redis::MultiStore).to receive(:same_redis_store?).and_return(false)
        skip_default_enabled_yaml_check

@@ -28,8 +29,8 @@ RSpec.describe Gitlab::Redis::ClusterUtil, feature_category: :scalability do

      with_them do
        it 'returns expected value' do
          primary_redis = pri_store == :cluster ? ::Redis.new(cluster: ['redis://localhost:6000']) : ::Redis.new
          secondary_redis = sec_store == :cluster ? ::Redis.new(cluster: ['redis://localhost:6000']) : ::Redis.new
          primary_redis = pri_store == :cluster ? Redis::Cluster.new(nodes: ['redis://localhost:6000']) : Redis.new
          secondary_redis = sec_store == :cluster ? Redis::Cluster.new(nodes: ['redis://localhost:6000']) : Redis.new
          primary_pool = ConnectionPool.new { primary_redis }
          secondary_pool = ConnectionPool.new { secondary_redis }
          multistore = Gitlab::Redis::MultiStore.new(primary_pool, secondary_pool, 'teststore')

@@ -48,16 +49,8 @@ RSpec.describe Gitlab::Redis::ClusterUtil, feature_category: :scalability do
    end

    context 'when is Redis::Cluster' do
      let(:redis_cluster) { instance_double(::Redis::Cluster) }

      before do
        # stub all initialiser steps in Redis::Cluster.new to avoid connecting to a Redis Cluster node
        allow(::Redis::Cluster).to receive(:new).and_return(redis_cluster)
        allow(redis_cluster).to receive(:is_a?).with(::Redis::Cluster).and_return(true)
      end

      it 'returns true' do
        expect(described_class.cluster?(::Redis.new(cluster: ['redis://localhost:6000']))).to be_truthy
        expect(described_class.cluster?(Redis::Cluster.new(nodes: ['redis://localhost:6000']))).to be_truthy
      end
    end
  end

@@ -0,0 +1,57 @@
# frozen_string_literal: true

require 'spec_helper'

# references specs in https://github.com/redis-rb/redis-client/blob/master/test/redis_client/command_builder_test.rb
# we add `handles nil arguments` to test our own added logic
RSpec.describe Gitlab::Redis::CommandBuilder, feature_category: :redis do
  describe '.generate' do
    def call(*args, **kwargs)
      described_class.generate(args, kwargs)
    end

    it 'handles nil arguments' do
      expect(call("a", nil)).to eq(["a", ""])
    end

    it 'handles positional arguments' do
      expect(call("a", "b", "c")).to eq(%w[a b c])
    end

    it 'handles arrays' do
      expect(call("a", %w[b c])).to eq(%w[a b c])
    end

    it 'handles hashes' do
      expect(call("a", { "b" => "c" })).to eq(%w[a b c])
    end

    it 'handles symbols' do
      expect(call(:a, { b: :c }, :d)).to eq(%w[a b c d])
    end

    it 'handles numerics' do
      expect(call(1, 2.3)).to eq(["1", "2.3"])
    end

    it 'handles kwargs booleans' do
      expect(call(ttl: nil, ex: false, withscores: true)).to eq(["withscores"])
    end

    it 'handles kwargs values' do
      expect(call(ttl: 42)).to eq(%w[ttl 42])
    end

    it 'handles nil kwargs' do
      expect(call(%i[a b c])).to eq(%w[a b c])
    end

    it 'raises error on unsupported types' do
      expect { call(hash: {}) }.to raise_error(TypeError)
    end

    it 'raises error on empty commands' do
      expect { call }.to raise_error(ArgumentError)
    end
  end
end

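Only the 'handles nil arguments' example covers logic GitLab layers on top; the remaining examples mirror RedisClient::CommandBuilder's upstream behaviour. A minimal sketch of the idea under test, assuming the class simply coerces nil positional arguments to empty strings before delegating (an illustration, not the actual Gitlab::Redis::CommandBuilder source):

  module CommandBuilderSketch
    def self.generate(args, kwargs = {})
      # RedisClient::CommandBuilder raises TypeError on nil, so map nil to ""
      RedisClient::CommandBuilder.generate(args.map { |arg| arg.nil? ? '' : arg }, kwargs)
    end
  end

  CommandBuilderSketch.generate(['a', nil]) # => ["a", ""]
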
@@ -1,134 +0,0 @@
# frozen_string_literal: true

require 'spec_helper'

RSpec.describe Gitlab::Redis::CrossSlot, feature_category: :redis do
  include RedisHelpers

  let_it_be(:redis_store_class) { define_helper_redis_store_class }

  before do
    redis_store_class.with(&:flushdb)
  end

  describe '.pipelined' do
    context 'when using redis client' do
      before do
        redis_store_class.with { |redis| redis.set('a', 1) }
      end

      it 'performs redis-rb pipelined' do
        expect(Gitlab::Redis::CrossSlot::Router).not_to receive(:new)

        expect(
          Gitlab::Instrumentation::RedisClusterValidator.allow_cross_slot_commands do
            redis_store_class.with do |redis|
              described_class::Pipeline.new(redis).pipelined do |p|
                p.get('a')
                p.set('b', 1)
              end
            end
          end
        ).to eq(%w[1 OK])
      end
    end

    context 'when using with MultiStore' do
      let_it_be(:primary_db) { 1 }
      let_it_be(:secondary_db) { 2 }
      let_it_be(:primary_store) { create_redis_store(redis_store_class.params, db: primary_db, serializer: nil) }
      let_it_be(:secondary_store) { create_redis_store(redis_store_class.params, db: secondary_db, serializer: nil) }
      let_it_be(:primary_pool) { ConnectionPool.new { primary_store } }
      let_it_be(:secondary_pool) { ConnectionPool.new { secondary_store } }
      let_it_be(:multistore) { Gitlab::Redis::MultiStore.new(primary_pool, secondary_pool, 'testing') }

      before do
        primary_store.set('a', 1)
        secondary_store.set('a', 1)
        skip_default_enabled_yaml_check
      end

      it 'performs multistore pipelined' do
        expect(Gitlab::Redis::CrossSlot::Router).not_to receive(:new)

        expect(
          Gitlab::Instrumentation::RedisClusterValidator.allow_cross_slot_commands do
            multistore.with_borrowed_connection do
              described_class::Pipeline.new(multistore).pipelined do |p|
                p.get('a')
                p.set('b', 1)
              end
            end
          end
        ).to eq(%w[1 OK])
      end
    end

    context 'when using Redis::Cluster' do
      # Only stub redis client internals since the CI pipeline does not run a Redis Cluster
      let(:redis) { double(:redis) } # rubocop:disable RSpec/VerifiedDoubles
      let(:client) { double(:client) } # rubocop:disable RSpec/VerifiedDoubles
      let(:pipeline) { double(:pipeline) } # rubocop:disable RSpec/VerifiedDoubles

      let(:arguments) { %w[a b c d] }

      subject do
        described_class::Pipeline.new(redis).pipelined do |p|
          arguments.each { |key| p.get(key) }
        end
      end

      before do
        allow(redis).to receive(:_client).and_return(client)
        allow(redis).to receive(:pipelined).and_yield(pipeline)
        allow(client).to receive(:instance_of?).with(::Redis::Cluster).and_return(true)
      end

      it 'fans out and fans in commands to separate shards' do
        # simulate fan-out to 3 shards with random order
        expect(client).to receive(:_find_node_key).exactly(4).times.and_return(3, 2, 1, 3)

        arguments.each do |key|
          f = double('future') # rubocop:disable RSpec/VerifiedDoubles
          expect(pipeline).to receive(:get).with(key).and_return(f)
          expect(f).to receive(:value).and_return(key)
        end

        expect(subject).to eq(arguments)
      end

      shared_examples 'fallback on cross-slot' do |redirection|
        context 'when redis cluster undergoing slot migration' do
          before do
            allow(pipeline).to receive(:get).and_raise(::Redis::CommandError.new("#{redirection} 1 127.0.0.1:7001"))
          end

          it 'logs error and executes sequentially' do
            expect(client).to receive(:_find_node_key).exactly(4).times.and_return(3, 2, 1, 3)
            expect(Gitlab::ErrorTracking).to receive(:log_exception).with(an_instance_of(::Redis::CommandError))

            arguments.each do |key|
              expect(redis).to receive(:get).with(key).and_return(key)
            end

            subject
          end
        end
      end

      it_behaves_like 'fallback on cross-slot', 'MOVED'
      it_behaves_like 'fallback on cross-slot', 'ASK'

      context 'when receiving non-MOVED/ASK command errors' do
        before do
          allow(pipeline).to receive(:get).and_raise(::Redis::CommandError.new)
          allow(client).to receive(:_find_node_key).exactly(4).times.and_return(3, 2, 1, 3)
        end

        it 'raises error' do
          expect { subject }.to raise_error(::Redis::CommandError)
        end
      end
    end
  end
end

@@ -58,6 +58,7 @@ RSpec.describe Gitlab::Redis::MultiStore, feature_category: :redis do
  context 'when primary_store is not a ::Redis instance' do
    before do
      allow(primary_store).to receive(:is_a?).with(::Redis).and_return(false)
      allow(primary_store).to receive(:is_a?).with(::Redis::Cluster).and_return(false)
    end

    it 'fails with exception' do

@@ -69,6 +70,7 @@ RSpec.describe Gitlab::Redis::MultiStore, feature_category: :redis do
  context 'when secondary_store is not a ::Redis instance' do
    before do
      allow(secondary_store).to receive(:is_a?).with(::Redis).and_return(false)
      allow(secondary_store).to receive(:is_a?).with(::Redis::Cluster).and_return(false)
    end

    it 'fails with exception' do

@@ -618,35 +620,6 @@ RSpec.describe Gitlab::Redis::MultiStore, feature_category: :redis do
    end
  end

  context 'when either store is an instance of ::Redis::Cluster' do
    let(:pipeline) { double }
    let(:client) { double }

    before do
      allow(client).to receive(:instance_of?).with(::Redis::Cluster).and_return(true)
      allow(pipeline).to receive(:pipelined)
      multi_store.with_borrowed_connection do
        allow(multi_store.default_store).to receive(:_client).and_return(client)
      end
    end

    it 'calls cross-slot pipeline within multistore' do
      if name == :pipelined
        # we intentionally exclude `.and_call_original` since primary_store/secondary_store
        # may not be running on a proper Redis Cluster.
        multi_store.with_borrowed_connection do
          expect(Gitlab::Redis::CrossSlot::Pipeline).to receive(:new)
            .with(multi_store.default_store)
            .exactly(:once)
            .and_return(pipeline)
          expect(Gitlab::Redis::CrossSlot::Pipeline).not_to receive(:new).with(multi_store.non_default_store)
        end
      end

      subject
    end
  end

  context 'when with_readonly_pipeline is used' do
    it 'calls the default store only' do
      expect(primary_store).to receive(:send).and_call_original

@@ -8,9 +8,9 @@ RSpec.describe Gitlab::Redis::Sessions do
  describe '#store' do
    subject(:store) { described_class.store(namespace: described_class::SESSION_NAMESPACE) }

    # Check that Gitlab::Redis::Sessions is configured as RedisStore.
    # Check that Gitlab::Redis::Sessions is configured as RedisStore or ClusterStore
    it 'instantiates an instance of Redis::Store' do
      expect(store).to be_instance_of(::Redis::Store)
      expect([::Redis::Store, ::Gitlab::Redis::ClusterStore].include?(store.class)).to eq(true)
    end
  end
end

@@ -0,0 +1,17 @@
# frozen_string_literal: true

require 'spec_helper'
require_migration!

RSpec.describe DeleteOrphansApprovalProjectRules2, feature_category: :security_policy_management do
  describe '#up' do
    it 'schedules background migration for project approval rules' do
      migrate!

      expect(described_class::PROJECT_MIGRATION).to have_scheduled_batched_migration(
        table_name: :approval_project_rules,
        column_name: :id,
        interval: described_class::INTERVAL)
    end
  end
end

@@ -1,22 +0,0 @@
# frozen_string_literal: true

require 'spec_helper'
require_migration!

RSpec.describe DeleteOrphansScanFindingLicenseScanningApprovalRules2, feature_category: :security_policy_management do
  describe '#up' do
    it 'schedules background migration for both levels of approval rules' do
      migrate!

      expect(described_class::MERGE_REQUEST_MIGRATION).to have_scheduled_batched_migration(
        table_name: :approval_merge_request_rules,
        column_name: :id,
        interval: described_class::INTERVAL)

      expect(described_class::PROJECT_MIGRATION).to have_scheduled_batched_migration(
        table_name: :approval_project_rules,
        column_name: :id,
        interval: described_class::INTERVAL)
    end
  end
end

@@ -0,0 +1,17 @@
# frozen_string_literal: true

require 'spec_helper'
require_migration!

RSpec.describe DeleteOrphansApprovalMergeRequestRules2, feature_category: :security_policy_management do
  describe '#up' do
    it 'schedules background migration for merge request approval rules' do
      migrate!

      expect(described_class::MERGE_REQUEST_MIGRATION).to have_scheduled_batched_migration(
        table_name: :approval_merge_request_rules,
        column_name: :id,
        interval: described_class::INTERVAL)
    end
  end
end

@@ -539,15 +539,15 @@ end

Rack::Test::UploadedFile.prepend(TouchRackUploadedFile)

# Monkey-patch to enable ActiveSupport::Notifications for Redis commands
# Inject middleware to enable ActiveSupport::Notifications for Redis commands
module RedisCommands
  module Instrumentation
    def process(commands, &block)
      ActiveSupport::Notifications.instrument('redis.process_commands', commands: commands) do
        super(commands, &block)
    def call(command, redis_config)
      ActiveSupport::Notifications.instrument('redis.process_commands', commands: command) do
        super(command, redis_config)
      end
    end
  end
end

Redis::Client.prepend(RedisCommands::Instrumentation)
RedisClient.register(RedisCommands::Instrumentation)

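The replacement relies on redis-client's middleware API instead of reopening the client: a module defining call (and, for pipelines, call_pipelined) is registered globally via RedisClient.register and then wraps every command. A minimal sketch of the shape (the timing middleware below is illustrative, not GitLab code):

  module TimingMiddleware
    def call(command, redis_config)
      start = Process.clock_gettime(Process::CLOCK_MONOTONIC)
      super
    ensure
      elapsed = Process.clock_gettime(Process::CLOCK_MONOTONIC) - start
      puts "#{command.first}: #{(elapsed * 1_000).round(2)}ms"
    end

    def call_pipelined(commands, redis_config)
      start = Process.clock_gettime(Process::CLOCK_MONOTONIC)
      super
    ensure
      elapsed = Process.clock_gettime(Process::CLOCK_MONOTONIC) - start
      puts "pipeline of #{commands.size} commands: #{(elapsed * 1_000).round(2)}ms"
    end
  end

  RedisClient.register(TimingMiddleware)
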
@@ -60,8 +60,8 @@ module DnsHelpers
  def permit_redis!
    # https://github.com/redis-rb/redis-client/blob/v0.11.2/lib/redis_client/ruby_connection.rb#L51 uses Socket.tcp that
    # calls Addrinfo.getaddrinfo internally.
    hosts = Gitlab::Redis::ALL_CLASSES.map do |redis_instance|
      redis_instance.redis_client_params[:host]
    hosts = Gitlab::Redis::ALL_CLASSES.flat_map do |redis_instance|
      redis_instance.params[:host] || redis_instance.params[:nodes]&.map { |n| n[:host] }
    end.uniq.compact

    hosts.each do |host|

@@ -36,8 +36,8 @@ module OrphanFinalArtifactsCleanupHelpers
    expect_log_message("Looking for orphan job artifact objects")
  end

  def expect_done_log_message
    expect_log_message("Done")
  def expect_done_log_message(filename)
    expect_log_message("Done. All orphan objects are listed in #{filename}.")
  end

  def expect_first_page_loading_log_message

@@ -56,24 +56,43 @@ module OrphanFinalArtifactsCleanupHelpers
    expect(Gitlab::AppLogger).not_to have_received(:info).with(a_string_including("Resuming"))
  end

  def expect_delete_log_message(fog_file)
    expect_log_message("Delete #{fog_file.key} (#{fog_file.content_length} bytes)")
  def expect_found_orphan_artifact_object_log_message(fog_file)
    expect_log_message("Found orphan object #{fog_file.key} (#{fog_file.content_length} bytes)")
  end

  def expect_no_delete_log_message(fog_file)
    expect_no_log_message("Delete #{fog_file.key} (#{fog_file.content_length} bytes)")
  def expect_no_found_orphan_artifact_object_log_message(fog_file)
    expect_no_log_message("Found orphan object #{fog_file.key} (#{fog_file.content_length} bytes)")
  end

  def expect_log_message(message, times: 1)
    message = "[DRY RUN] #{message}" if dry_run
    expect(Gitlab::AppLogger).to have_received(:info).with(a_string_including(message)).exactly(times).times
  end

  def expect_no_log_message(message)
    message = "[DRY RUN] #{message}" if dry_run
    expect(Gitlab::AppLogger).not_to have_received(:info).with(a_string_including(message))
  end

  def expect_orphan_objects_list_to_include(lines, fog_file)
    expect(lines).to include([fog_file.key, fog_file.content_length].join(','))
  end

  def expect_orphan_objects_list_not_to_include(lines, fog_file)
    expect(lines).not_to include([fog_file.key, fog_file.content_length].join(','))
  end

  def expect_orphans_list_to_contain_exactly(filename, fog_files)
    lines = File.readlines(filename).map(&:strip)
    expected_objects = fog_files.map { |f| [f.key, f.content_length].join(',') }

    # Given we can't guarantee order of which object will be listed first,
    # we just use match_array.
    expect(lines).to match_array(expected_objects)
  end

  def expect_orphans_list_to_have_number_of_entries(count)
    expect(File.readlines(filename).count).to eq(count)
  end

  def fetch_saved_marker
    Gitlab::Redis::SharedState.with do |redis|
      redis.get(described_class::LAST_PAGE_MARKER_REDIS_KEY)

@@ -14,7 +14,7 @@ module ExceedRedisCallLimitHelpers
  end

  def verify_commands_count(command, expected, block)
    @actual = build_recorder(block).by_command(command).count
    @actual = build_recorder(block).by_command(command.to_s).count

    @actual > expected
  end

@@ -6297,7 +6297,6 @@
- './spec/lib/gitlab/instrumentation/rate_limiting_gates_spec.rb'
- './spec/lib/gitlab/instrumentation/redis_base_spec.rb'
- './spec/lib/gitlab/instrumentation/redis_cluster_validator_spec.rb'
- './spec/lib/gitlab/instrumentation/redis_interceptor_spec.rb'
- './spec/lib/gitlab/instrumentation/redis_spec.rb'
- './spec/lib/gitlab/internal_post_receive/response_spec.rb'
- './spec/lib/gitlab/issuable/clone/attributes_rewriter_spec.rb'

@@ -25,7 +25,7 @@ RSpec.shared_examples Gitlab::BitbucketImport::ObjectImporter do

      expect(Gitlab::JobWaiter).to receive(:notify).with(waiter_key, anything)

      worker.perform(project_id, {}, waiter_key)
      worker.class.perform_inline(project_id, {}, waiter_key)
    end
  end

@@ -49,7 +49,7 @@ RSpec.shared_examples Gitlab::BitbucketImport::ObjectImporter do
      expect(Gitlab::BitbucketImport::Logger).to receive(:info).twice
      expect_next(worker.importer_class, project, kind_of(Hash)).to receive(:execute)

      worker.perform(project_id, {}, waiter_key)
      worker.class.perform_inline(project_id, {}, waiter_key)
    end

    it_behaves_like 'notifies the waiter'

@@ -62,7 +62,7 @@ RSpec.shared_examples Gitlab::BitbucketImport::ObjectImporter do
    it 'tracks the error' do
      expect(Gitlab::Import::ImportFailureService).to receive(:track).once

      worker.perform(project_id, {}, waiter_key)
      worker.class.perform_inline(project_id, {}, waiter_key)
    end
  end

@@ -74,7 +74,7 @@ RSpec.shared_examples Gitlab::BitbucketImport::ObjectImporter do
    it 'tracks the error and raises the error' do
      expect(Gitlab::Import::ImportFailureService).to receive(:track).once

      expect { worker.perform(project_id, {}, waiter_key) }.to raise_error(StandardError)
      expect { worker.class.perform_inline(project_id, {}, waiter_key) }.to raise_error(StandardError)
    end
  end
end

@@ -85,7 +85,7 @@ RSpec.shared_examples Gitlab::BitbucketImport::ObjectImporter do
    it 'does not call the importer' do
      expect_next(worker.importer_class).not_to receive(:execute)

      worker.perform(project_id, {}, waiter_key)
      worker.class.perform_inline(project_id, {}, waiter_key)
    end

    it_behaves_like 'notifies the waiter'

@@ -19,6 +19,10 @@ RSpec.shared_examples Gitlab::BitbucketServerImport::ObjectImporter do
    let(:project_id) { project_id }
    let(:waiter_key) { 'key' }

    before do
      allow(Gitlab::JobWaiter).to receive(:notify).with(waiter_key, anything, ttl: Gitlab::Import::JOB_WAITER_TTL)
    end

    shared_examples 'notifies the waiter' do
      specify do
        allow_next(worker.importer_class).to receive(:execute)

@@ -80,18 +80,17 @@ RSpec.shared_examples "redis_shared_examples" do

    context 'with new format' do
      it_behaves_like 'redis store' do
        let(:config_file_name) { config_new_format_host }
        # use new format host without sentinel details as `.to_s` checks `config` which
        # tries to resolve master/replica details with an actual sentinel instance.
        # https://github.com/redis-rb/redis-client/blob/v0.18.0/lib/redis_client/sentinel_config.rb#L128
        let(:config_file_name) { "spec/fixtures/config/redis_new_format_host_standalone.yml" }
        let(:host) { "development-host:#{redis_port}" }
      end
    end
  end

  describe '.redis_client_params' do
    # .redis_client_params wraps over `.redis_store_options` by modifying its outputs
    # to be compatible with `RedisClient`. We test for compatibility in this block while
    # the contents of redis_store_options are tested in the `.params` block.

    subject { described_class.new(rails_env).redis_client_params }
  describe '.params' do
    subject { described_class.new(rails_env).params }

    let(:rails_env) { 'development' }
    let(:config_file_name) { config_old_format_socket }

@@ -103,56 +102,6 @@ RSpec.shared_examples "redis_shared_examples" do
      end
    end

    context 'when url is host based' do
      context 'with old format' do
        let(:config_file_name) { config_old_format_host }

        it 'does not raise ArgumentError for invalid keywords' do
          expect { RedisClient.config(**subject) }.not_to raise_error
        end

        it_behaves_like 'instrumentation_class in custom key'
      end

      context 'with new format' do
        let(:config_file_name) { config_new_format_host }

        where(:rails_env, :host) do
          [
            %w[development development-host],
            %w[test test-host],
            %w[production production-host]
          ]
        end

        with_them do
          it 'does not raise ArgumentError for invalid keywords in SentinelConfig' do
            expect(subject[:name]).to eq(host)
            expect { RedisClient.sentinel(**subject) }.not_to raise_error
          end

          it_behaves_like 'instrumentation_class in custom key'
        end
      end
    end

    context 'when url contains unix socket reference' do
      let(:config_file_name) { config_old_format_socket }

      it 'does not raise ArgumentError for invalid keywords' do
        expect { RedisClient.config(**subject) }.not_to raise_error
      end

      it_behaves_like 'instrumentation_class in custom key'
    end
  end

  describe '.params' do
    subject { described_class.new(rails_env).params }

    let(:rails_env) { 'development' }
    let(:config_file_name) { config_old_format_socket }

    it 'withstands mutation' do
      params1 = described_class.params
      params2 = described_class.params

@@ -251,10 +200,16 @@ RSpec.shared_examples "redis_shared_examples" do

      with_them do
        it 'returns hash with host, port, db, and password' do
          is_expected.to include(host: host, password: 'mynewpassword', port: redis_port, db: redis_database)
          is_expected.to include(name: host, password: 'mynewpassword', db: redis_database)
          is_expected.not_to have_key(:url)
        end

        it 'does not raise ArgumentError for invalid keywords in SentinelConfig' do
          expect { RedisClient.sentinel(**subject) }.not_to raise_error
        end
      end

      it_behaves_like 'instrumentation_class in custom key'
    end

    context 'with redis cluster format' do

@@ -272,13 +227,19 @@ RSpec.shared_examples "redis_shared_examples" do
      it 'returns hash with cluster and password' do
        is_expected.to include(
          password: 'myclusterpassword',
          cluster: [
          nodes: [
            { host: "#{host}1", port: redis_port },
            { host: "#{host}2", port: redis_port }
          ]
        )
        is_expected.not_to have_key(:url)
      end

      it 'does not raise ArgumentError for invalid keywords in ClusterConfig' do
        expect { RedisClient::ClusterConfig.new(**subject) }.not_to raise_error
      end

      it_behaves_like 'instrumentation_class in custom key'
    end
  end
end

@@ -13,7 +13,7 @@ RSpec.describe RedisCommands::Recorder, :use_clean_rails_redis_caching do
    it 'records Redis commands' do
      recorder = described_class.new { cache.read('key1') }

      expect(recorder.log).to include([:get, 'cache:gitlab:key1'])
      expect(recorder.log).to include(['get', 'cache:gitlab:key1'])
    end
  end

@@ -35,10 +35,10 @@ RSpec.describe RedisCommands::Recorder, :use_clean_rails_redis_caching do
        cache.delete('key1')
      end

      expect(recorder.log).to include([:set, 'cache:gitlab:key1', anything, anything, anything])
      expect(recorder.log).to include([:get, 'cache:gitlab:key1'])
      expect(recorder.log).to include([:get, 'cache:gitlab:key2'])
      expect(recorder.log).to include([:del, 'cache:gitlab:key1'])
      expect(recorder.log).to include(['set', 'cache:gitlab:key1', anything, anything, anything])
      expect(recorder.log).to include(['get', 'cache:gitlab:key1'])
      expect(recorder.log).to include(['get', 'cache:gitlab:key2'])
      expect(recorder.log).to include(['del', 'cache:gitlab:key1'])
    end

    it 'does not record commands before the call' do

@@ -48,8 +48,8 @@ RSpec.describe RedisCommands::Recorder, :use_clean_rails_redis_caching do
        cache.read('key1')
      end

      expect(recorder.log).not_to include([:set, anything, anything])
      expect(recorder.log).to include([:get, 'cache:gitlab:key1'])
      expect(recorder.log).not_to include(['set', anything, anything])
      expect(recorder.log).to include(['get', 'cache:gitlab:key1'])
    end

    it 'refreshes recording after reinitialization' do

@@ -68,15 +68,15 @@ RSpec.describe RedisCommands::Recorder, :use_clean_rails_redis_caching do
        cache.read('key4')
      end

      expect(recorder1.log).to include([:get, 'cache:gitlab:key2'])
      expect(recorder1.log).not_to include([:get, 'cache:gitlab:key1'])
      expect(recorder1.log).not_to include([:get, 'cache:gitlab:key3'])
      expect(recorder1.log).not_to include([:get, 'cache:gitlab:key4'])
      expect(recorder1.log).to include(['get', 'cache:gitlab:key2'])
      expect(recorder1.log).not_to include(['get', 'cache:gitlab:key1'])
      expect(recorder1.log).not_to include(['get', 'cache:gitlab:key3'])
      expect(recorder1.log).not_to include(['get', 'cache:gitlab:key4'])

      expect(recorder2.log).to include([:get, 'cache:gitlab:key4'])
      expect(recorder2.log).not_to include([:get, 'cache:gitlab:key1'])
      expect(recorder2.log).not_to include([:get, 'cache:gitlab:key2'])
      expect(recorder2.log).not_to include([:get, 'cache:gitlab:key3'])
      expect(recorder2.log).to include(['get', 'cache:gitlab:key4'])
      expect(recorder2.log).not_to include(['get', 'cache:gitlab:key1'])
      expect(recorder2.log).not_to include(['get', 'cache:gitlab:key2'])
      expect(recorder2.log).not_to include(['get', 'cache:gitlab:key3'])
    end
  end

@@ -91,10 +91,10 @@ RSpec.describe RedisCommands::Recorder, :use_clean_rails_redis_caching do
        cache.delete('key2')
      end

      expect(recorder.log).to include([:set, 'cache:gitlab:key1', anything, anything, anything])
      expect(recorder.log).to include([:get, 'cache:gitlab:key1'])
      expect(recorder.log).not_to include([:get, 'cache:gitlab:key2'])
      expect(recorder.log).not_to include([:del, 'cache:gitlab:key2'])
      expect(recorder.log).to include(['set', 'cache:gitlab:key1', anything, anything, anything])
      expect(recorder.log).to include(['get', 'cache:gitlab:key1'])
      expect(recorder.log).not_to include(['get', 'cache:gitlab:key2'])
      expect(recorder.log).not_to include(['del', 'cache:gitlab:key2'])
    end
  end

@@ -107,7 +107,7 @@ RSpec.describe RedisCommands::Recorder, :use_clean_rails_redis_caching do
        cache.delete('key2')
      end

      expect(recorder.by_command(:del)).to match_array([[:del, 'cache:gitlab:key2']])
      expect(recorder.by_command('del')).to match_array([['del', 'cache:gitlab:key2']])
    end
  end