Add latest changes from gitlab-org/gitlab@master

GitLab Bot 2024-02-14 09:08:15 +00:00
parent ce8437d779
commit 336b65a1c7
83 changed files with 722 additions and 1094 deletions

View File

@ -3703,7 +3703,6 @@ Layout/LineLength:
- 'spec/lib/gitlab/import_export/uploads_manager_spec.rb'
- 'spec/lib/gitlab/import_export/version_checker_spec.rb'
- 'spec/lib/gitlab/import_sources_spec.rb'
- 'spec/lib/gitlab/instrumentation/redis_interceptor_spec.rb'
- 'spec/lib/gitlab/issuable_metadata_spec.rb'
- 'spec/lib/gitlab/issues/rebalancing/state_spec.rb'
- 'spec/lib/gitlab/jira/dvcs_spec.rb'

View File

@ -102,7 +102,6 @@ Lint/AmbiguousOperatorPrecedence:
- 'spec/lib/gitlab/database/batch_count_spec.rb'
- 'spec/lib/gitlab/database/consistency_checker_spec.rb'
- 'spec/lib/gitlab/graphql/tracers/metrics_tracer_spec.rb'
- 'spec/lib/gitlab/instrumentation/redis_interceptor_spec.rb'
- 'spec/lib/gitlab/issues/rebalancing/state_spec.rb'
- 'spec/lib/gitlab/kroki_spec.rb'
- 'spec/lib/gitlab/memory/instrumentation_spec.rb'

View File

@ -3586,7 +3586,6 @@ RSpec/FeatureCategory:
- 'spec/lib/gitlab/instrumentation/rate_limiting_gates_spec.rb'
- 'spec/lib/gitlab/instrumentation/redis_base_spec.rb'
- 'spec/lib/gitlab/instrumentation/redis_cluster_validator_spec.rb'
- 'spec/lib/gitlab/instrumentation/redis_interceptor_spec.rb'
- 'spec/lib/gitlab/instrumentation/redis_spec.rb'
- 'spec/lib/gitlab/internal_post_receive/response_spec.rb'
- 'spec/lib/gitlab/issuable/clone/attributes_rewriter_spec.rb'

View File

@ -2331,7 +2331,6 @@ RSpec/NamedSubject:
- 'spec/lib/gitlab/rack_attack_spec.rb'
- 'spec/lib/gitlab/reactive_cache_set_cache_spec.rb'
- 'spec/lib/gitlab/redis/boolean_spec.rb'
- 'spec/lib/gitlab/redis/cross_slot_spec.rb'
- 'spec/lib/gitlab/redis/db_load_balancing_spec.rb'
- 'spec/lib/gitlab/redis/multi_store_spec.rb'
- 'spec/lib/gitlab/redis/queues_spec.rb'

View File

@ -2517,7 +2517,6 @@ Style/InlineDisableAnnotation:
- 'lib/gitlab/import_export/project/relation_factory.rb'
- 'lib/gitlab/import_sources.rb'
- 'lib/gitlab/instrumentation/redis_cluster_validator.rb'
- 'lib/gitlab/instrumentation/redis_interceptor.rb'
- 'lib/gitlab/internal_events.rb'
- 'lib/gitlab/issuable/clone/copy_resource_events_service.rb'
- 'lib/gitlab/issues/rebalancing/state.rb'
@ -2556,7 +2555,6 @@ Style/InlineDisableAnnotation:
- 'lib/gitlab/pagination/offset_pagination.rb'
- 'lib/gitlab/pagination_delegate.rb'
- 'lib/gitlab/patch/action_cable_subscription_adapter_identifier.rb'
- 'lib/gitlab/patch/node_loader.rb'
- 'lib/gitlab/patch/prependable.rb'
- 'lib/gitlab/patch/redis_cache_store.rb'
- 'lib/gitlab/patch/sidekiq_cron_poller.rb'
@ -2573,7 +2571,6 @@ Style/InlineDisableAnnotation:
- 'lib/gitlab/rack_attack.rb'
- 'lib/gitlab/rack_attack/request.rb'
- 'lib/gitlab/rack_attack/store.rb'
- 'lib/gitlab/redis/cross_slot.rb'
- 'lib/gitlab/redis/hll.rb'
- 'lib/gitlab/redis/multi_store.rb'
- 'lib/gitlab/reference_extractor.rb'
@ -2942,9 +2939,7 @@ Style/InlineDisableAnnotation:
- 'spec/lib/gitlab/pagination/keyset/order_spec.rb'
- 'spec/lib/gitlab/pagination/keyset/simple_order_builder_spec.rb'
- 'spec/lib/gitlab/patch/database_config_spec.rb'
- 'spec/lib/gitlab/patch/node_loader_spec.rb'
- 'spec/lib/gitlab/quick_actions/dsl_spec.rb'
- 'spec/lib/gitlab/redis/cross_slot_spec.rb'
- 'spec/lib/gitlab/redis/multi_store_spec.rb'
- 'spec/lib/gitlab/search/abuse_detection_spec.rb'
- 'spec/lib/gitlab/shard_health_cache_spec.rb'

View File

@ -1 +1 @@
v16.9.2
v16.10.0-rc1

View File

@ -288,8 +288,9 @@ gem 'js_regex', '~> 3.8' # rubocop:todo Gemfile/MissingFeatureCategory
gem 'device_detector' # rubocop:todo Gemfile/MissingFeatureCategory
# Redis
gem 'redis', '~> 4.8.0' # rubocop:todo Gemfile/MissingFeatureCategory
gem 'redis-namespace', '~> 1.10.0' # rubocop:todo Gemfile/MissingFeatureCategory
gem 'redis-namespace', '~> 1.10.0', feature_category: :redis
gem 'redis', '~> 5.0.0', feature_category: :redis
gem 'redis-clustering', '~> 5.0.0', feature_category: :redis
gem 'connection_pool', '~> 2.4' # rubocop:todo Gemfile/MissingFeatureCategory
# Redis session store

View File

@ -523,9 +523,11 @@
{"name":"recaptcha","version":"5.12.3","platform":"ruby","checksum":"37d1894add9e70a54d0c6c7f0ecbeedffbfa7d075acfbd4c509818dfdebdb7ee"},
{"name":"recursive-open-struct","version":"1.1.3","platform":"ruby","checksum":"a3538a72552fcebcd0ada657bdff313641a4a5fbc482c08cfb9a65acb1c9de5a"},
{"name":"redcarpet","version":"3.6.0","platform":"ruby","checksum":"8ad1889c0355ff4c47174af14edd06d62f45a326da1da6e8a121d59bdcd2e9e9"},
{"name":"redis","version":"4.8.0","platform":"ruby","checksum":"2000cf5014669c9dc821704b6d322a35a9a33852a95208911d9175d63b448a44"},
{"name":"redis","version":"5.0.8","platform":"ruby","checksum":"3b770ea597850b26d6a9718fa184241e53e6c8a7ae0486ee8bfaefd29f26f3d8"},
{"name":"redis-actionpack","version":"5.4.0","platform":"ruby","checksum":"f10cf649ab05914716d63334d7f709221ecc883b87cf348f90ecfe0c35ea3540"},
{"name":"redis-client","version":"0.20.0","platform":"ruby","checksum":"239ac38fe4f0b62c8d2d8641989319b736b7dd40eebec868fd874a14686bdc8c"},
{"name":"redis-cluster-client","version":"0.7.5","platform":"ruby","checksum":"12fd1c9eda17157a5cd2ce46afba13a024c28d24922092299a8daa9f46e4e78a"},
{"name":"redis-clustering","version":"5.0.8","platform":"ruby","checksum":"8e2f3de3b1a700668eeac59125636e01be6ecd985e635a4d5649c47d71f6e166"},
{"name":"redis-namespace","version":"1.10.0","platform":"ruby","checksum":"2c1c6ea7c6c5e343e75b9bee3aa4c265e364a5b9966507397467af2bb3758d94"},
{"name":"redis-rack","version":"3.0.0","platform":"ruby","checksum":"abb50b82ae10ad4d11ca2e4901bfc2b98256cdafbbd95f80c86fc9e001478380"},
{"name":"redis-store","version":"1.10.0","platform":"ruby","checksum":"f258894f9f7e82834308a3d86242294f0cff2c9db9ae66e5cb4c553a5ec8b09e"},

View File

@ -1385,13 +1385,19 @@ GEM
json
recursive-open-struct (1.1.3)
redcarpet (3.6.0)
redis (4.8.0)
redis (5.0.8)
redis-client (>= 0.17.0)
redis-actionpack (5.4.0)
actionpack (>= 5, < 8)
redis-rack (>= 2.1.0, < 4)
redis-store (>= 1.1.0, < 2)
redis-client (0.20.0)
connection_pool
redis-cluster-client (0.7.5)
redis-client (~> 0.12)
redis-clustering (5.0.8)
redis (= 5.0.8)
redis-cluster-client (>= 0.7.0)
redis-namespace (1.10.0)
redis (>= 4)
redis-rack (3.0.0)
@ -2069,8 +2075,9 @@ DEPENDENCIES
rbtrace (~> 0.4)
re2 (= 2.7.0)
recaptcha (~> 5.12)
redis (~> 4.8.0)
redis (~> 5.0.0)
redis-actionpack (~> 5.4.0)
redis-clustering (~> 5.0.0)
redis-namespace (~> 1.10.0)
request_store (~> 1.5.1)
responders (~> 3.0)

View File

@ -84,7 +84,7 @@ class ActiveSession
)
Gitlab::Instrumentation::RedisClusterValidator.allow_cross_slot_commands do
Gitlab::Redis::CrossSlot::Pipeline.new(redis).pipelined do |pipeline|
redis.pipelined do |pipeline|
pipeline.setex(
key_name(user.id, session_private_id),
expiry,

View File

@ -26,8 +26,9 @@
.merge-requests-holder
= render 'merge_requests', new_merge_request_path: new_merge_request_path
- if new_merge_request_email
.gl-text-center.gl-pt-5.gl-pb-7
.js-issuable-by-email{ data: { initial_email: new_merge_request_email, issuable_type: issuable_type, emails_help_page_path: help_page_path('development/emails', anchor: 'email-namespace'), quick_actions_help_path: help_page_path('user/project/quick_actions'), markdown_help_path: help_page_path('user/markdown'), reset_path: new_issuable_address_project_path(@project, issuable_type: issuable_type) } }
- else
= render 'shared/empty_states/merge_requests', button_path: new_merge_request_path
- if new_merge_request_email
.gl-text-center.gl-pt-5.gl-pb-7
.js-issuable-by-email{ data: { initial_email: new_merge_request_email, issuable_type: issuable_type, emails_help_page_path: help_page_path('development/emails', anchor: 'email-namespace'), quick_actions_help_path: help_page_path('user/project/quick_actions'), markdown_help_path: help_page_path('user/markdown'), reset_path: new_issuable_address_project_path(@project, issuable_type: issuable_type) } }

View File

@ -21,7 +21,7 @@ module LimitedCapacity
def register(jid, max_jids)
with_redis do |redis|
redis.eval(LUA_REGISTER_SCRIPT, keys: [counter_key], argv: [jid, max_jids])
redis.eval(LUA_REGISTER_SCRIPT, keys: [counter_key], argv: [jid.to_s, max_jids.to_i])
end.present?
end
@ -59,7 +59,7 @@ module LimitedCapacity
end
def remove_job_keys(redis, keys)
redis.srem?(counter_key, keys)
redis.srem?(counter_key, keys) if keys.present?
end
def with_redis(&block)

View File

@ -21,12 +21,6 @@ end
# :nocov:
# rubocop:enable Gitlab/NoCodeCoverageComment
Redis::Client.prepend(Gitlab::Instrumentation::RedisInterceptor)
Redis::Cluster::NodeLoader.prepend(Gitlab::Patch::NodeLoader)
Redis::Cluster::SlotLoader.prepend(Gitlab::Patch::SlotLoader)
Redis::Cluster::CommandLoader.prepend(Gitlab::Patch::CommandLoader)
Redis::Cluster.prepend(Gitlab::Patch::RedisCluster)
# this only instruments `RedisClient` used in `Sidekiq.redis`
RedisClient.register(Gitlab::Instrumentation::RedisClientMiddleware)
RedisClient.prepend(Gitlab::Patch::RedisClient)

View File

@ -25,7 +25,7 @@ raise "Do not configure cable.yml with a Redis Cluster as ActionCable only works
# https://github.com/rails/rails/blob/bb5ac1623e8de08c1b7b62b1368758f0d3bb6379/actioncable/lib/action_cable/subscription_adapter/redis.rb#L18
ActionCable::SubscriptionAdapter::Redis.redis_connector = lambda do |config|
args = config.except(:adapter, :channel_prefix)
.merge(instrumentation_class: ::Gitlab::Instrumentation::Redis::ActionCable)
.merge(custom: { instrumentation_class: "ActionCable" })
::Redis.new(args)
end

View File

@ -6,7 +6,7 @@ Peek::Adapters::Redis.prepend ::Gitlab::PerformanceBar::RedisAdapterWhenPeekEnab
Peek.singleton_class.prepend ::Gitlab::PerformanceBar::WithTopLevelWarnings
Rails.application.config.peek.adapter = :redis, {
client: ::Redis.new(Gitlab::Redis::Cache.params),
client: Gitlab::Redis::Cache.redis,
expires_in: 5.minutes
}

View File

@ -29,12 +29,12 @@ cookie_key = if Rails.env.development?
"_gitlab_session"
end
store = Gitlab::Redis::Sessions.store(namespace: Gitlab::Redis::Sessions::SESSION_NAMESPACE)
::Redis::Store::Factory.prepend(Gitlab::Patch::RedisStoreFactory)
Rails.application.configure do
config.session_store(
:redis_store, # Using the cookie_store would enable session replay attacks.
redis_store: store,
redis_server: Gitlab::Redis::Sessions.params.merge(namespace: Gitlab::Redis::Sessions::SESSION_NAMESPACE),
key: cookie_key,
secure: Gitlab.config.gitlab.https,
httponly: true,

View File

@ -16,7 +16,13 @@ def load_cron_jobs!
end
# Custom Queues configuration
queues_config_hash = Gitlab::Redis::Queues.redis_client_params
#
# We omit :command_builder since Sidekiq::RedisConnection performs a deep clone using
# Marshal.load(Marshal.dump(options.slice(*keys))) on the Redis config and Gitlab::Redis::CommandBuilder
# can't be referred to.
#
# We do not need the custom command builder since Sidekiq will handle the typing of Redis arguments.
queues_config_hash = Gitlab::Redis::Queues.params.except(:command_builder)
enable_json_logs = Gitlab.config.sidekiq.log_format != 'text'
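
A minimal sketch of the deep clone the comment above refers to (illustrative only; assumes the remaining options are plainly serializable values):

```ruby
# Sketch: Sidekiq round-trips the Redis options through Marshal, so anything
# left in the hash must survive Marshal.dump / Marshal.load. Dropping
# :command_builder keeps the hash free of module references.
options = Gitlab::Redis::Queues.params.except(:command_builder)
deep_copy = Marshal.load(Marshal.dump(options))
deep_copy.equal?(options) # => false, a fresh copy
```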

View File

@ -0,0 +1,12 @@
- title: "Deprecating Windows Server 2019 in favor of 2022"
  removal_milestone: "17.0"
  announcement_milestone: "16.9"
  breaking_change: true
  reporter: gabrielengel_gl
  stage: Verify
  issue_url: https://gitlab.com/gitlab-org/gitlab/-/issues/438554
  manual_task: true
  body: | # (required) Don't change this line.
    We have recently announced the release of Windows Server 2022 for our SaaS runners on Windows (Beta). With it, we are deprecating Windows 2019 in GitLab 17.0.
    For more information about how to migrate to using Windows 2022, see [Windows 2022 support for GitLab SaaS runners now available](https://about.gitlab.com/blog/2024/01/22/windows-2022-support-for-gitlab-saas-runners/).

View File

@ -124,7 +124,7 @@ To make sure your configuration is correct:
1. Run in the console:
```ruby
redis = Redis.new(Gitlab::Redis::SharedState.params)
redis = Gitlab::Redis::SharedState.redis
redis.info
```

View File

@ -56,29 +56,77 @@ To resolve this issue, disable the **Jira Connect Proxy URL** setting.
1. Clear the **Jira Connect Proxy URL** text box.
1. Select **Save changes**.
If the issue persists, verify that your self-managed GitLab instance can connect to
`connect-install-keys.atlassian.com` to get the public key from Atlassian.
To test connectivity, run the following command:
```shell
# A `404` status code is expected because you're not passing a token
curl --head "https://connect-install-keys.atlassian.com"
```
## Data sync fails with `Invalid JWT`
If the GitLab for Jira Cloud app continuously fails to sync data, it may be due to an outdated secret token. Atlassian can send new secret tokens that must be processed and stored by GitLab.
If GitLab fails to store the token or misses the new token request, an `Invalid JWT` error occurs.
If the GitLab for Jira Cloud app continuously fails to sync data from a self-managed GitLab instance,
a secret token might be outdated. Atlassian can send new secret tokens to GitLab.
If GitLab fails to process or store these tokens, an `Invalid JWT` error occurs.
To resolve this issue on GitLab self-managed, follow one of the solutions below, depending on your app installation method.
To resolve this issue on your self-managed GitLab instance:
- If you installed the app from the official marketplace listing:
- Confirm your self-managed GitLab instance is publicly available to:
- GitLab.com (if you [installed the app from the official Atlassian Marketplace listing](jira_cloud_app.md#connect-the-gitlab-for-jira-cloud-app)).
- Jira Cloud (if you [installed the app manually](jira_cloud_app.md#install-the-gitlab-for-jira-cloud-app-manually)).
- Ensure the token request sent to the `/-/jira_connect/events/installed` endpoint when you install the app is accessible from Jira.
The following `curl` command must return a `401` status code:
1. Open the GitLab for Jira Cloud app on Jira.
```shell
curl --include --request POST "https://gitlab.example.com/-/jira_connect/events/installed"
```
- If your self-managed GitLab instance has [SSL configured](https://docs.gitlab.com/omnibus/settings/ssl/), check your
[certificates are valid and publicly trusted](https://docs.gitlab.com/omnibus/settings/ssl/ssl_troubleshooting.html#useful-openssl-debugging-commands).
Depending on how you installed the app, you might want to check the following:
- If you [installed the app from the official Atlassian Marketplace listing](jira_cloud_app.md#connect-the-gitlab-for-jira-cloud-app),
switch between GitLab versions in the GitLab for Jira Cloud app:
1. In Jira, on the top bar, select **Apps > Manage your apps**.
1. Expand **GitLab for Jira (GitLab.com)**.
1. Select **Get started**.
1. Select **Change GitLab version**.
1. Select **GitLab.com (SaaS)**.
1. Select **GitLab.com (SaaS)**, then select **Save**.
1. Select **Change GitLab version** again.
1. Select **GitLab (self-managed)**.
1. Enter your **GitLab instance URL**.
1. Select **Save**.
1. Select **GitLab (self-managed)**, then select **Next**.
1. Select all checkboxes, then select **Next**.
1. Enter your **GitLab instance URL**, then select **Save**.
- If you [installed the GitLab for Jira Cloud app manually](jira_cloud_app.md#install-the-gitlab-for-jira-cloud-app-manually):
If this method does not work, [submit a support ticket](https://support.gitlab.com/hc/en-us/requests/new) if you're a Premium or Ultimate customer
and provide your GitLab instance URL and Jira URL. GitLab Support can try to run the following scripts to resolve the issue:
- In GitLab 14.9 and later:
- Contact the [Jira Software Cloud support](https://support.atlassian.com/jira-software-cloud/) and ask to trigger a new installed lifecycle event for the GitLab for Jira Cloud app in your group.
- In all GitLab versions:
- Re-install the GitLab for Jira Cloud app. This method might remove all synced data from the [Jira development panel](../../integration/jira/development_panel.md).
```ruby
# Check if GitLab.com can connect to the self-managed instance
checker = Gitlab::TcpChecker.new("gitlab.example.com", 443)
# Returns `true` if successful
checker.check
# Returns an error if the check fails
checker.error
```
```ruby
# Locate the installation record for the self-managed instance
installation = JiraConnectInstallation.find_by_instance_url("https://gitlab.example.com")
# Try to send the token again from GitLab.com to the self-managed instance
ProxyLifecycleEventService.execute(installation, :installed, installation.instance_url)
```
- If you [installed the app manually](jira_cloud_app.md#install-the-gitlab-for-jira-cloud-app-manually):
- Ask [Jira Cloud Support](https://support.atlassian.com/jira-software-cloud/) to verify that Jira can connect to your
self-managed GitLab instance.
- [Reinstall the app](jira_cloud_app.md#install-the-gitlab-for-jira-cloud-app-manually). This method might remove all [synced data](../../integration/jira/connect-app.md#gitlab-data-synced-to-jira) from the [Jira development panel](../../integration/jira/development_panel.md).
## `Failed to update the GitLab instance`

View File

@ -2568,8 +2568,10 @@ The name of the Docker image that the job runs in. Similar to [`image`](#image)
**Example of `image:name`**:
```yaml
image:
  name: "registry.example.com/my/image:latest"
test-job:
  image:
    name: "registry.example.com/my/image:latest"
  script: echo "Hello world"
```
**Related topics**:
@ -2594,9 +2596,11 @@ where each shell token is a separate string in the array.
**Example of `image:entrypoint`**:
```yaml
image:
  name: super/sql:experimental
  entrypoint: [""]
test-job:
  image:
    name: super/sql:experimental
    entrypoint: [""]
  script: echo "Hello world"
```
**Related topics**:

View File

@ -81,10 +81,10 @@ Developers are highly encouraged to use [hash-tags](https://redis.io/docs/refere
where appropriate to facilitate future adoption of Redis Cluster in more Redis types. For example, the Namespace model uses hash-tags
for its [config cache keys](https://gitlab.com/gitlab-org/gitlab/-/blob/1a12337058f260d38405886d82da5e8bb5d8da0b/app/models/namespace.rb#L786).
To perform multi-key commands, developers may use the [`Gitlab::Redis::CrossSlot::Pipeline`](https://gitlab.com/gitlab-org/gitlab/-/blob/master/lib/gitlab/redis/cross_slot.rb) wrapper.
To perform multi-key commands, developers may use the [`.pipelined`](https://github.com/redis-rb/redis-cluster-client#interfaces) method which splits and sends commands to each node and aggregates replies.
However, this does not work for [transactions](https://redis.io/docs/interact/transactions/) as Redis Cluster does not support cross-slot transactions.
For `Rails.cache`, we handle the `MGET` command found in `read_multi_get` by [patching it](https://gitlab.com/gitlab-org/gitlab/-/blob/c2bad2aac25e2f2778897bd4759506a72b118b15/lib/gitlab/patch/redis_cache_store.rb#L10) to use the `Gitlab::Redis::CrossSlot::Pipeline` wrapper.
For `Rails.cache`, we handle the `MGET` command found in `read_multi_get` by [patching it](https://gitlab.com/gitlab-org/gitlab/-/blob/c2bad2aac25e2f2778897bd4759506a72b118b15/lib/gitlab/patch/redis_cache_store.rb#L10) to use the `.pipelined` method.
The minimum size of the pipeline is set to 1000 commands and it can be adjusted by using the `GITLAB_REDIS_CLUSTER_PIPELINE_BATCH_LIMIT` environment variable.
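As a rough sketch (assuming a cluster-backed `Gitlab::Redis::Cache` connection; keys are illustrative), the call pattern looks like:

```ruby
# With redis-rb v5 / redis-clustering, `.pipelined` fans the queued commands
# out to their owning nodes and aggregates the replies in order.
Gitlab::Instrumentation::RedisClusterValidator.allow_cross_slot_commands do
  Gitlab::Redis::Cache.with do |redis|
    redis.pipelined do |pipeline|
      pipeline.set("{project:1}:status", "ok") # slot comes from the {project:1} hash-tag
      pipeline.set("{project:2}:status", "ok") # may be served by a different node
    end
  end
end
```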
## Redis in structured logging

View File

@ -147,8 +147,8 @@ module Gitlab
# Don't use multistore if redis.foo configuration is not provided
return super if config_fallback?
primary_store = ::Redis.new(params)
secondary_store = ::Redis.new(config_fallback.params)
primary_store = init_redis(params)
secondary_store = init_redis(config_fallback.params)
MultiStore.new(primary_store, secondary_store, store_name)
end

View File

@ -844,6 +844,22 @@ The parameters, `sign_in_text` and `help_text`, are deprecated in the [Settings
<div class="deprecation breaking-change" data-milestone="17.0">
### Deprecating Windows Server 2019 in favor of 2022
<div class="deprecation-notes">
- Announced in GitLab <span class="milestone">16.9</span>
- Removal in GitLab <span class="milestone">17.0</span> ([breaking change](https://docs.gitlab.com/ee/update/terminology.html#breaking-change))
- To discuss this change or learn more, see the [deprecation issue](https://gitlab.com/gitlab-org/gitlab/-/issues/438554).
</div>
We have recently announced the release of Windows Server 2022 for our SaaS runners on Windows (Beta). With it, we are deprecating Windows 2019 in GitLab 17.0.
For more information about how to migrate to using Windows 2022, see [Windows 2022 support for GitLab SaaS runners now available](https://about.gitlab.com/blog/2024/01/22/windows-2022-support-for-gitlab-saas-runners/).
</div>
<div class="deprecation breaking-change" data-milestone="17.0">
### DingTalk OmniAuth provider
<div class="deprecation-notes">

View File

@ -14,7 +14,7 @@ module Gitlab
ttl_jitter = 2.hours.to_i
Gitlab::Instrumentation::RedisClusterValidator.allow_cross_slot_commands do
Gitlab::Redis::CrossSlot::Pipeline.new(redis).pipelined do |pipeline|
redis.pipelined do |pipeline|
keys.each { |key| pipeline.expire(key, ttl_duration + rand(-ttl_jitter..ttl_jitter)) }
end
end
@ -25,7 +25,7 @@ module Gitlab
end
def redis
@redis ||= ::Redis.new(Gitlab::Redis::Cache.params)
@redis ||= Gitlab::Redis::Cache.redis
end
end
end

View File

@ -139,7 +139,7 @@ module Gitlab
key = cache_key_for(raw_key)
with_redis do |redis|
redis.sismember(key, value)
redis.sismember(key, value || value.to_s)
end
end
@ -162,7 +162,7 @@ module Gitlab
def self.write_multiple(mapping, key_prefix: nil, timeout: TIMEOUT)
with_redis do |redis|
Gitlab::Instrumentation::RedisClusterValidator.allow_cross_slot_commands do
Gitlab::Redis::CrossSlot::Pipeline.new(redis).pipelined do |pipeline|
redis.pipelined do |pipeline|
mapping.each do |raw_key, value|
key = cache_key_for("#{key_prefix}#{raw_key}")

View File

@ -197,7 +197,9 @@ module Gitlab
record_hit_ratio(results)
results.map! do |result|
Gitlab::Json.parse(gzip_decompress(result), symbolize_names: true) unless result.nil?
unless result.nil?
Gitlab::Json.parse(gzip_decompress(result.force_encoding(Encoding::UTF_8)), symbolize_names: true)
end
end
file_paths.zip(results).to_h

View File

@ -16,7 +16,7 @@ module Gitlab
def write_multiple(mapping)
with_redis do |redis|
Gitlab::Instrumentation::RedisClusterValidator.allow_cross_slot_commands do
Gitlab::Redis::CrossSlot::Pipeline.new(redis).pipelined do |pipelined|
redis.pipelined do |pipelined|
mapping.each do |raw_key, value|
key = cache_key_for(raw_key)
@ -42,7 +42,7 @@ module Gitlab
with_redis do |redis|
Gitlab::Instrumentation::RedisClusterValidator.allow_cross_slot_commands do
if Gitlab::Redis::ClusterUtil.cluster?(redis)
Gitlab::Redis::CrossSlot::Pipeline.new(redis).pipelined do |pipeline|
redis.pipelined do |pipeline|
keys.each { |key| pipeline.get(key) }
end
else
@ -54,7 +54,7 @@ module Gitlab
content.map! do |lines|
next unless lines
Gitlab::Json.parse(gzip_decompress(lines)).map! do |line|
Gitlab::Json.parse(gzip_decompress(lines.force_encoding(Encoding::UTF_8))).map! do |line|
Gitlab::Diff::Line.safe_init_from_hash(line)
end
end

View File

@ -17,7 +17,7 @@ module Gitlab
Gitlab::Instrumentation::RedisClusterValidator.allow_cross_slot_commands do
with_redis do |redis|
Gitlab::Redis::CrossSlot::Pipeline.new(redis).pipelined do |pipeline|
redis.pipelined do |pipeline|
keys.each_with_index do |key, i|
pipeline.set(redis_shared_state_key(key), etags[i], ex: EXPIRY_TIME, nx: only_if_missing)
end

View File

@ -62,6 +62,7 @@ module Gitlab
def self.cancel(key, uuid)
return unless key.present?
return unless uuid.present?
Gitlab::Redis::SharedState.with do |redis|
redis.eval(LUA_CANCEL_SCRIPT, keys: [ensure_prefixed_key(key)], argv: [uuid])
@ -142,7 +143,7 @@ module Gitlab
# false if the lease is taken by a different UUID or inexistent.
def renew
Gitlab::Redis::SharedState.with do |redis|
result = redis.eval(LUA_RENEW_SCRIPT, keys: [@redis_shared_state_key], argv: [@uuid, @timeout])
result = redis.eval(LUA_RENEW_SCRIPT, keys: [@redis_shared_state_key], argv: [@uuid, @timeout.to_i])
result == @uuid
end
end

View File

@ -1,7 +1,7 @@
# frozen_string_literal: true
require 'rails'
require 'redis'
require 'redis-clustering'
module Gitlab
module Instrumentation
@ -230,7 +230,7 @@ module Gitlab
end
def key_slot(key)
::Redis::Cluster::KeySlotConverter.convert(extract_hash_tag(key))
::RedisClient::Cluster::KeySlotConverter.convert(extract_hash_tag(key))
end
# This is almost identical to Redis::Cluster::Command#extract_hash_tag,

View File

@ -1,55 +0,0 @@
# frozen_string_literal: true
module Gitlab
module Instrumentation
module RedisInterceptor
include RedisHelper
def call(command)
instrument_call([command], instrumentation_class) do
super
end
end
def call_pipeline(pipeline)
instrument_call(pipeline.commands, instrumentation_class, true) do
super
end
end
def write(command)
measure_write_size(command, instrumentation_class) if ::RequestStore.active?
super
end
def read
result = super
measure_read_size(result, instrumentation_class) if ::RequestStore.active?
result
end
def ensure_connected
super do
instrument_reconnection_errors do
yield
end
end
end
def instrument_reconnection_errors
yield
rescue ::Redis::BaseConnectionError => ex
instrumentation_class.instance_count_connection_exception(ex)
raise ex
end
# That's required so it knows which GitLab Redis instance
# it's interacting with in order to categorize accordingly.
#
def instrumentation_class
@options[:instrumentation_class] # rubocop:disable Gitlab/ModuleWithInstanceVariables
end
end
end
end

View File

@ -100,7 +100,7 @@ module Gitlab
def refresh_keys_expiration
with_redis do |redis|
Gitlab::Instrumentation::RedisClusterValidator.allow_cross_slot_commands do
Gitlab::Redis::CrossSlot::Pipeline.new(redis).pipelined do |pipeline|
redis.pipelined do |pipeline|
pipeline.expire(issue_ids_key, REDIS_EXPIRY_TIME)
pipeline.expire(current_index_key, REDIS_EXPIRY_TIME)
pipeline.expire(current_project_key, REDIS_EXPIRY_TIME)

View File

@ -11,7 +11,7 @@ module Gitlab
data = Gitlab::Redis::Cache.with do |r|
Gitlab::Instrumentation::RedisClusterValidator.allow_cross_slot_commands do
Gitlab::Redis::CrossSlot::Pipeline.new(r).pipelined do |pipeline|
r.pipelined do |pipeline|
subjects.each do |subject|
new(subject).read(pipeline)
end

View File

@ -1,19 +0,0 @@
# frozen_string_literal: true
module Gitlab
module Patch
module CommandLoader
extend ActiveSupport::Concern
class_methods do
# Shuffle the node list to spread out initial connection creation amongst all nodes
#
# The input is a Redis::Cluster::Node instance which is an Enumerable.
# `super` receives an Array of Redis::Client instead of a Redis::Cluster::Node
def load(nodes)
super(nodes.to_a.shuffle)
end
end
end
end
end

View File

@ -1,52 +0,0 @@
# frozen_string_literal: true
# Patch to address https://gitlab.com/gitlab-com/gl-infra/scalability/-/issues/2212#note_1287996694
# It uses hostname instead of IP address if the former is present in `CLUSTER NODES` output.
if Gem::Version.new(Redis::VERSION) > Gem::Version.new('4.8.1')
raise 'New version of redis detected, please remove or update this patch'
end
module Gitlab
module Patch
module NodeLoader
extend ActiveSupport::Concern
class_methods do
# Shuffle the node list to spread out initial connection creation amongst all nodes
#
# The input is a Redis::Cluster::Node instance which is an Enumerable.
# `super` receives an Array of Redis::Client instead of a Redis::Cluster::Node
def load_flags(nodes)
super(nodes.to_a.shuffle)
end
end
def self.prepended(base)
base.class_eval do
# monkey-patches https://github.com/redis/redis-rb/blob/v4.8.0/lib/redis/cluster/node_loader.rb#L23
def self.fetch_node_info(node)
node.call(%i[cluster nodes]).split("\n").map(&:split).to_h do |arr|
[
extract_host_identifier(arr[1]),
(arr[2].split(',') & %w[master slave]).first # rubocop:disable Naming/InclusiveLanguage
]
end
end
# Since `CLUSTER SLOT` uses the preferred endpoint determined by
# the `cluster-preferred-endpoint-type` config value, we will prefer hostname over IP address.
# See https://redis.io/commands/cluster-nodes/ for details on the output format.
#
# @param [String] Address info matching the format: <ip:port@cport[,hostname[,auxiliary_field=value]*]>
def self.extract_host_identifier(node_address)
ip_chunk, hostname, _auxiliaries = node_address.split(',')
return ip_chunk.split('@').first if hostname.blank?
port = ip_chunk.split('@').first.split(':')[1]
"#{hostname}:#{port}"
end
end
end
end
end
end

View File

@ -20,7 +20,7 @@ module Gitlab
delete_count = 0
redis.with do |conn|
entries.each_slice(pipeline_batch_size) do |subset|
delete_count += Gitlab::Redis::CrossSlot::Pipeline.new(conn).pipelined do |pipeline|
delete_count += conn.pipelined do |pipeline|
subset.each { |entry| pipeline.del(entry) }
end.sum
end
@ -58,7 +58,7 @@ module Gitlab
def pipeline_mget(conn, keys)
keys.each_slice(pipeline_batch_size).flat_map do |subset|
Gitlab::Redis::CrossSlot::Pipeline.new(conn).pipelined do |p|
conn.pipelined do |p|
subset.each { |key| p.get(key) }
end
end

View File

@ -1,21 +0,0 @@
# frozen_string_literal: true
# Patch to expose `find_node_key` method for cross-slot pipelining
# In redis v5.0.x, cross-slot pipelining is implemented via redis-cluster-client.
# This patch should be removed since there is no need for it.
# Gitlab::Redis::CrossSlot and its usage should be removed as well.
if Gem::Version.new(Redis::VERSION) != Gem::Version.new('4.8.0')
raise 'New version of redis detected, please remove or update this patch'
end
module Gitlab
module Patch
module RedisCluster
# _find_node_key exposes a private function of the same name in Redis::Cluster.
# See https://github.com/redis/redis-rb/blob/v4.8.0/lib/redis/cluster.rb#L282
def _find_node_key(command)
find_node_key(command)
end
end
end
end

View File

@ -0,0 +1,16 @@
# frozen_string_literal: true
module Gitlab
module Patch
module RedisStoreFactory
def create
# rubocop:disable Gitlab/ModuleWithInstanceVariables -- patched code references @options in redis-store
opt = @options
# rubocop:enable Gitlab/ModuleWithInstanceVariables
return Gitlab::Redis::ClusterStore.new(opt) if opt[:nodes]
super
end
end
end
end

View File

@ -1,19 +0,0 @@
# frozen_string_literal: true
module Gitlab
module Patch
module SlotLoader
extend ActiveSupport::Concern
class_methods do
# Shuffle the node list to spread out initial connection creation amongst all nodes
#
# The input is a Redis::Cluster::Node instance which is an Enumerable.
# `super` receives an Array of Redis::Client instead of a Redis::Cluster::Node
def load(nodes)
super(nodes.to_a.shuffle)
end
end
end
end
end

View File

@ -0,0 +1,81 @@
# frozen_string_literal: true
require 'redis-clustering'
require 'redis/store/ttl'
require 'redis/store/interface'
require 'redis/store/namespace'
require 'redis/store/serialization'
module Gitlab
module Redis
class ClusterStore < ::Redis::Cluster
include ::Redis::Store::Interface
def initialize(options = {})
orig_options = options.dup
@serializer = orig_options.key?(:serializer) ? orig_options.delete(:serializer) : Marshal
unless orig_options[:marshalling].nil?
# `marshalling` only used here, might not be supported in `super`
@serializer = orig_options.delete(:marshalling) ? Marshal : nil
end
_remove_unsupported_options(options)
super(options)
_extend_marshalling
_extend_namespace orig_options
end
# copies ::Redis::Store::Ttl implementation in a redis-v5 compatible manner
def set(key, value, options = nil)
ttl = get_ttl(options)
if ttl
setex(key, ttl.to_i, value, raw: true)
else
super(key, value)
end
end
# copies ::Redis::Store::Ttl implementation in a redis-v5 compatible manner
def setnx(key, value, options = nil)
ttl = get_ttl(options)
if ttl
multi do |m|
m.setnx(key, value)
m.expire(key, ttl)
end
else
super(key, value)
end
end
private
def get_ttl(options)
# https://github.com/redis-store/redis-store/blob/v1.10.0/lib/redis/store/ttl.rb#L37
options[:expire_after] || options[:expires_in] || options[:expire_in] if options
end
def _remove_unsupported_options(options)
# Unsupported keywords should be removed to avoid errors
# https://github.com/redis-rb/redis-client/blob/v0.13.0/lib/redis_client/config.rb#L21
options.delete(:raw)
options.delete(:serializer)
options.delete(:marshalling)
options.delete(:namespace)
options.delete(:scheme)
end
def _extend_marshalling
extend ::Redis::Store::Serialization unless @serializer.nil?
end
def _extend_namespace(options)
@namespace = options[:namespace]
extend ::Redis::Store::Namespace
end
end
end
end
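
A hedged usage note, where `store` and `payload` stand in for a `Gitlab::Redis::ClusterStore` instance and a serialized session value:

```ruby
# Hypothetical calls only: without a TTL option this is a plain SET; with
# :expires_in (or :expire_after / :expire_in) the shim above issues SETEX,
# and #setnx pairs SETNX with EXPIRE inside a MULTI.
store.set("session:gitlab:abc", payload)
store.set("session:gitlab:abc", payload, expires_in: 3600)
store.setnx("session:gitlab:abc", payload, expires_in: 3600)
```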

View File

@ -13,14 +13,14 @@ module Gitlab
if obj.is_a?(MultiStore)
cluster?(obj.primary_store) || cluster?(obj.secondary_store)
else
obj.respond_to?(:_client) && obj._client.is_a?(::Redis::Cluster)
obj.is_a?(::Redis::Cluster)
end
end
def batch_unlink(keys, redis)
expired_count = 0
keys.each_slice(1000) do |subset|
expired_count += Gitlab::Redis::CrossSlot::Pipeline.new(redis).pipelined do |pipeline|
expired_count += redis.pipelined do |pipeline|
subset.each { |key| pipeline.unlink(key) }
end.sum
end
@ -30,7 +30,7 @@ module Gitlab
# Redis cluster alternative to mget
def batch_get(keys, redis)
keys.each_slice(1000).flat_map do |subset|
Gitlab::Redis::CrossSlot::Pipeline.new(redis).pipelined do |pipeline|
redis.pipelined do |pipeline|
subset.map { |key| pipeline.get(key) }
end
end
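
For context, a hypothetical caller mirrors the pattern used elsewhere in this commit (`keys` is assumed to be an array of cache keys):

```ruby
# Use the cluster-safe helper when the connection is a Redis Cluster,
# otherwise fall back to a single MGET.
Gitlab::Redis::Cache.with do |redis|
  if Gitlab::Redis::ClusterUtil.cluster?(redis)
    Gitlab::Redis::ClusterUtil.batch_get(keys, redis)
  else
    redis.mget(keys)
  end
end
```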

View File

@ -0,0 +1,53 @@
# frozen_string_literal: true
module Gitlab
module Redis
module CommandBuilder
extend self
# Ref: https://github.com/redis-rb/redis-client/blob/v0.19.1/lib/redis_client/command_builder.rb
# we modify the command builder to convert nil to strings as this behaviour was present in
# https://github.com/redis/redis-rb/blob/v4.8.0/lib/redis/connection/command_helper.rb#L20
#
# Note that we only adopt the Ruby3.x-compatible logic in .generate.
# Symbol.method_defined?(:name) is true in Ruby 3
def generate(args, kwargs = nil)
command = args.flat_map do |element|
case element
when Hash
element.flatten
else
element
end
end
kwargs&.each do |key, value|
if value
if value == true
command << key.name
else
command << key.name << value
end
end
end
command.map! do |element|
case element
when String
element
when Symbol
element.name
when Integer, Float, NilClass
element.to_s
else
raise TypeError, "Unsupported command argument type: #{element.class}"
end
end
raise ArgumentError, "can't issue an empty redis command" if command.empty?
command
end
end
end
end
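
A hypothetical call showing the coercion the comments above describe:

```ruby
# nil arguments become "" instead of raising, matching the redis-rb 4.x
# command helper behaviour; keyword options are appended and stringified.
Gitlab::Redis::CommandBuilder.generate([:set, "foo", nil])
# => ["set", "foo", ""]
Gitlab::Redis::CommandBuilder.generate([:set, "foo", "bar"], { ex: 60 })
# => ["set", "foo", "bar", "ex", "60"]
```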

View File

@ -1,141 +0,0 @@
# frozen_string_literal: true
module Gitlab
module Redis
module CrossSlot
class Router
attr_reader :node_mapping, :futures, :node_sequence, :cmd_queue
delegate :respond_to_missing?, to: :@redis
# This map contains redis-rb methods which do not map directly
# to a standard Redis command. It is used to transform unsupported commands to standard commands
# to find the node key for unsupported commands.
#
# Redis::Cluster::Command only contains details of commands which the Redis Server
# returns. Hence, commands like mapped_hmget and hscan_each internally will call the
# base command, hmget and hscan respectively.
#
# See https://github.com/redis/redis-rb/blob/v4.8.0/lib/redis/cluster/command.rb
UNSUPPORTED_CMD_MAPPING = {
# Internally, redis-rb calls the supported Redis command and transforms the output.
# See https://github.com/redis/redis-rb/blob/v4.8.0/lib/redis/commands/hashes.rb#L104
mapped_hmget: :hmget
}.freeze
# Initializes the CrossSlot::Router
# @param {::Redis}
def initialize(redis)
@redis = redis
@node_mapping = {}
@futures = {}
@node_sequence = []
@cmd_queue = []
end
# For now we intercept every redis.call and return a Gitlab-Future object.
# This method groups every commands to a node for fan-out. Commands are grouped using the first key.
#
# rubocop:disable Style/MissingRespondToMissing
def method_missing(cmd, *args, **kwargs, &blk)
# Note that we can re-map the command without affecting execution as it is
# solely for finding the node key. The original cmd will be executed.
node = @redis._client._find_node_key([UNSUPPORTED_CMD_MAPPING.fetch(cmd, cmd)] + args)
@node_mapping[node] ||= []
@futures[node] ||= []
@node_sequence << node
@node_mapping[node] << [cmd, args, kwargs || {}, blk]
f = Future.new
@futures[node] << f
@cmd_queue << [f, cmd, args, kwargs || {}, blk]
f
end
# rubocop:enable Style/MissingRespondToMissing
end
# Wraps over redis-rb's Future in
# https://github.com/redis/redis-rb/blob/v4.8.0/lib/redis/pipeline.rb#L244
class Future
def set(future, is_val = false)
@redis_future = future
@is_val = is_val
end
def value
return @redis_val if @is_val
@redis_future.value
end
end
# Pipeline allows cross-slot pipelined to be called. The fan-out logic is implemented in
# https://github.com/redis-rb/redis-cluster-client/blob/master/lib/redis_client/cluster/pipeline.rb
# which is available in redis-rb v5.0.
#
# This file can be deprecated after redis-rb v4.8.0 is upgraded to v5.0
class Pipeline
# Initializes the CrossSlot::Pipeline
# @param {::Redis}
def initialize(redis)
@redis = redis
end
# pipelined is used in place of ::Redis `.pipelined` when running in a cluster context
# where cross-slot operations may happen.
def pipelined(&block)
# Directly call .pipelined and defer the pipeline execution to MultiStore.
# MultiStore could wrap over 0, 1, or 2 Redis Cluster clients, handling it here
# will not work for 2 clients since the key-slot topology can differ.
if use_cross_slot_pipelining?
router = Router.new(@redis)
yield router
execute_commands(router)
else
# use redis-rb's pipelined method
@redis.pipelined(&block)
end
end
private
def use_cross_slot_pipelining?
!@redis.instance_of?(::Gitlab::Redis::MultiStore) && @redis._client.instance_of?(::Redis::Cluster)
end
def execute_commands(router)
router.node_mapping.each do |node_key, commands|
# TODO possibly use Threads to speed up but for now `n` is 3-5 which is small.
@redis.pipelined do |p|
commands.each_with_index do |command, idx|
future = router.futures[node_key][idx]
cmd, args, kwargs, blk = command
future.set(p.public_send(cmd, *args, **kwargs, &blk)) # rubocop:disable GitlabSecurity/PublicSend
end
end
end
router.node_sequence.map do |node_key|
router.futures[node_key].shift.value
end
rescue ::Redis::CommandError => err
if err.message.start_with?('MOVED', 'ASK')
Gitlab::ErrorTracking.log_exception(err)
return execute_commands_sequentially(router)
end
raise
end
def execute_commands_sequentially(router)
router.cmd_queue.map do |command|
future, cmd, args, kwargs, blk = command
future.set(@redis.public_send(cmd, *args, **kwargs, &blk), true) # rubocop:disable GitlabSecurity/PublicSend
future.value
end
end
end
end
end
end

View File

@ -432,16 +432,6 @@ module Gitlab
# rubocop:disable GitlabSecurity/PublicSend
def send_command(redis_instance, command_name, *args, **kwargs, &block)
# Run wrapped pipeline for each instance individually so that the fan-out is distinct.
# If both primary and secondary are Redis Clusters, the slot-node distribution could
# be different.
#
# We ignore args and kwargs since `pipelined` does not accept arguments
# See https://github.com/redis/redis-rb/blob/v4.8.0/lib/redis.rb#L164
if command_name.to_s == 'pipelined' && redis_instance._client.instance_of?(::Redis::Cluster)
return Gitlab::Redis::CrossSlot::Pipeline.new(redis_instance).pipelined(&block)
end
if block
# Make sure that block is wrapped and executed only on the redis instance that is executing the block
redis_instance.send(command_name, *args, **kwargs) do |*params|
@ -462,7 +452,7 @@ module Gitlab
end
def redis_store?(pool)
pool.with { |c| c.instance_of?(Gitlab::Redis::MultiStore) || c.is_a?(::Redis) }
pool.with { |c| c.instance_of?(Gitlab::Redis::MultiStore) || c.is_a?(::Redis) || c.is_a?(::Redis::Cluster) }
end
def validate_stores!

View File

@ -20,7 +20,7 @@ module Gitlab
CommandExecutionError = Class.new(StandardError)
class << self
delegate :params, :url, :store, :encrypted_secrets, :redis_client_params, to: :new
delegate :params, :url, :store, :encrypted_secrets, to: :new
def with
pool.with { |redis| yield redis }
@ -90,7 +90,17 @@ module Gitlab
end
def redis
::Redis.new(params)
init_redis(params)
end
private
def init_redis(config)
if config[:nodes].present?
::Redis::Cluster.new(config.merge({ concurrency: { model: :none } }))
else
::Redis.new(config)
end
end
end
@ -99,13 +109,8 @@ module Gitlab
end
def params
redis_store_options
end
# redis_client_params modifies redis_store_options to be compatible with redis-client
# TODO: when redis-rb is updated to v5, there is no need to support 2 types of config format
def redis_client_params
options = redis_store_options
options[:command_builder] = CommandBuilder
# avoid passing classes into options as Sidekiq scrubs the options with Marshal.dump + Marshal.load
# ref https://github.com/sidekiq/sidekiq/blob/v7.1.6/lib/sidekiq/redis_connection.rb#L37
@ -114,14 +119,14 @@ module Gitlab
# we use strings to look up Gitlab::Instrumentation::Redis.storage_hash as a bypass
options[:custom] = { instrumentation_class: self.class.store_name }
# TODO: add support for cluster when upgrading to redis-rb v5.y.z we do not need cluster support
# as Sidekiq workload should not and does not run in a Redis Cluster
# support to be added in https://gitlab.com/gitlab-org/gitlab/-/merge_requests/134862
if options[:sentinels]
# name is required in RedisClient::SentinelConfig
# https://github.com/redis-rb/redis-client/blob/1ab081c1d0e47df5d55e011c9390c70b2eef6731/lib/redis_client/sentinel_config.rb#L17
options[:name] = options[:host]
options.except(:scheme, :instrumentation_class, :host, :port)
elsif options[:cluster]
options[:nodes] = options[:cluster].map { |c| c.except(:scheme) }
options.except(:scheme, :instrumentation_class, :cluster)
else
# remove disallowed keys as seen in
# https://github.com/redis-rb/redis-client/blob/1ab081c1d0e47df5d55e011c9390c70b2eef6731/lib/redis_client/config.rb#L21
@ -134,7 +139,7 @@ module Gitlab
end
def db
redis_store_options[:db]
redis_store_options[:db] || 0
end
def sentinels
@ -156,7 +161,7 @@ module Gitlab
end
def store(extras = {})
::Redis::Store::Factory.create(redis_store_options.merge(extras))
::Redis::Store::Factory.create(params.merge(extras))
end
def encrypted_secrets
@ -182,7 +187,11 @@ module Gitlab
final_config = parse_extra_config(decrypted_config)
result = if final_config[:cluster].present?
final_config[:db] = 0 # Redis Cluster only supports db 0
final_config[:cluster] = final_config[:cluster].map do |node|
next node unless node.is_a?(String)
::Redis::Store::Factory.extract_host_options_from_uri(node)
end
final_config
else
parse_redis_url(final_config)

View File

@ -86,6 +86,8 @@ module Gitlab
full_key = cache_key(key)
hash = standardize_hash(hash)
with do |redis|
results = redis.pipelined do |pipeline|
# Set each hash key to the provided value

View File

@ -62,7 +62,7 @@ module Gitlab
with do |redis|
redis.multi do |multi|
multi.sismember(full_key, value)
multi.sismember(full_key, value.to_s)
multi.exists?(full_key) # rubocop:disable CodeReuse/ActiveRecord
end
end

View File

@ -75,7 +75,7 @@ module Gitlab
argv = []
job_wal_locations.each do |connection_name, location|
diff = pg_wal_lsn_diff(connection_name)
argv += [connection_name, diff || '', location]
argv += [connection_name, diff ? diff.to_f : '', location]
end
with_redis { |r| r.eval(UPDATE_WAL_COOKIE_SCRIPT, keys: [cookie_key], argv: argv) }

View File

@ -1,4 +1,4 @@
ARG GDK_SHA=5dd3fde91e00da3e35c8358fd66e4cba8b97d46d
ARG GDK_SHA=fb96a94cf54d4b0c5db2a3bdf905874dbbc1cf67
FROM registry.gitlab.com/gitlab-org/gitlab-development-kit/asdf-bootstrapped-verify/main:${GDK_SHA} as base

View File

@ -109,7 +109,8 @@ RSpec.describe ProductAnalyticsTracking, :snowplow, feature_category: :product_a
end
it 'tracks total Redis counters' do
expect(Gitlab::Usage::Metrics::Instrumentations::TotalCountMetric).to receive(:redis_key).twice # total and 7d
expect(Gitlab::Usage::Metrics::Instrumentations::TotalCountMetric).to receive(:redis_key)
.twice.and_call_original # total and 7d
get :index
end

View File

@ -0,0 +1,8 @@
# redis://[:password@]host[:port][/db-number][?option=value]
# more details: http://www.iana.org/assignments/uri-schemes/prov/redis
development:
  url: redis://:mynewpassword@development-host:6379/99
test:
  url: redis://:mynewpassword@test-host:6379/99
production:
  url: redis://:mynewpassword@production-host:6379/99

View File

@ -27,7 +27,7 @@ RSpec.describe 'ActionCableSubscriptionAdapterIdentifier override' do
sub = ActionCable.server.pubsub.send(:redis_connection)
expect(sub.connection[:id]).to eq('unix:///home/localuser/redis/redis.socket/0')
expect(sub.connection[:id]).to eq('/home/localuser/redis/redis.socket/0')
expect(ActionCable.server.config.cable[:id]).to be_nil
end
end

View File

@ -15,7 +15,7 @@ RSpec.describe 'Session initializer for GitLab' do
describe 'config#session_store' do
it 'initialized as a redis_store with a proper servers configuration' do
expect(subject).to receive(:session_store).with(:redis_store, a_hash_including(redis_store: kind_of(::Redis::Store)))
expect(subject).to receive(:session_store).with(:redis_store, a_hash_including(redis_server: Gitlab::Redis::Sessions.params.merge(namespace: Gitlab::Redis::Sessions::SESSION_NAMESPACE)))
load_session_store
end

View File

@ -5,6 +5,8 @@ require 'spec_helper'
RSpec.describe ClickHouse::MigrationSupport::ExclusiveLock, feature_category: :database do
include ExclusiveLeaseHelpers
let(:worker_id) { 1 }
let(:worker_class) do
# This worker will be active longer than the ClickHouse worker TTL
Class.new do
@ -81,7 +83,7 @@ RSpec.describe ClickHouse::MigrationSupport::ExclusiveLock, feature_category: :d
end
around do |example|
described_class.register_running_worker(worker_class, anything) do
described_class.register_running_worker(worker_class, worker_id) do
example.run
end
end

View File

@ -4,7 +4,7 @@ require 'spec_helper'
RSpec.describe Gitlab::BackgroundMigration::Redis::BackfillProjectPipelineStatusTtl,
:clean_gitlab_redis_cache, feature_category: :redis do
let(:redis) { ::Redis.new(::Gitlab::Redis::Cache.params) }
let(:redis) { ::Gitlab::Redis::Cache.redis }
let(:keys) { ["cache:gitlab:project:1:pipeline_status", "cache:gitlab:project:2:pipeline_status"] }
let(:invalid_keys) { ["cache:gitlab:project:pipeline_status:1", "cache:gitlab:project:pipeline_status:2"] }

View File

@ -28,7 +28,7 @@ RSpec.describe Gitlab::Database::Migrations::RunnerBackoff::Communicator, :clean
it 'reads from Redis' do
recorder = RedisCommands::Recorder.new { subject }
expect(recorder.log).to include([:exists, 'gitlab:exclusive_lease:gitlab/database/migration/runner/backoff'])
expect(recorder.log).to include(['exists', 'gitlab:exclusive_lease:gitlab/database/migration/runner/backoff'])
end
context 'with runner_migrations_backoff disabled' do

View File

@ -33,7 +33,7 @@ RSpec.describe Gitlab::DiscussionsDiff::HighlightCache, :clean_gitlab_redis_cach
mapping.each do |key, value|
full_key = described_class.cache_key_for(key)
found_key = Gitlab::Redis::Cache.with { |r| r.get(full_key) }
found_key = Gitlab::Redis::Cache.with { |r| r.get(full_key).force_encoding("UTF-8") }
expect(described_class.gzip_decompress(found_key)).to eq(value.to_json)
end

View File

@ -23,9 +23,9 @@ RSpec.describe Gitlab::ExternalAuthorization::Cache, :clean_gitlab_redis_cache d
describe '#load' do
it 'reads stored info from redis' do
freeze_time do
set_in_redis(:access, false)
set_in_redis(:access, false.to_s)
set_in_redis(:reason, 'Access denied for now')
set_in_redis(:refreshed_at, Time.now)
set_in_redis(:refreshed_at, Time.now.to_s)
access, reason, refreshed_at = cache.load

View File

@ -9,10 +9,9 @@ RSpec.describe Gitlab::Instrumentation::RedisClientMiddleware, :request_store, f
include RedisHelpers
let_it_be(:redis_store_class) { define_helper_redis_store_class }
let_it_be(:redis_client) { RedisClient.new(redis_store_class.redis_client_params) }
before do
redis_client.call("flushdb")
redis_store_class.with(&:flushdb)
end
describe 'read and write' do
@ -24,27 +23,30 @@ RSpec.describe Gitlab::Instrumentation::RedisClientMiddleware, :request_store, f
# The response is 1001, so 4 bytes. Exercise counting an integer reply.
[[:set, 'foobar', 1000]] | [:incr, 'foobar'] | (4 + 6) | 4
# Exercise counting empty multi bulk reply. Returns an empty hash `{}`
[] | [:hgetall, 'foobar'] | (7 + 6) | 2
# Exercise counting empty multi bulk reply.
[] | [:hgetall, 'foobar'] | (7 + 6) | 0
# Hgetall response length is combined length of keys and values in the
# hash. Exercises counting of a multi bulk reply
# Returns `{"field"=>"hello world"}`, 5 for field, 11 for hello world, 8 for {, }, 4 "s, =, >
[[:hset, 'myhash', 'field', 'hello world']] | [:hgetall, 'myhash'] | (7 + 6) | (5 + 11 + 8)
[[:hset, 'myhash', 'field', 'hello world']] | [:hgetall, 'myhash'] | (7 + 6) | (5 + 11)
# Exercise counting of a bulk reply
[[:set, 'foo', 'bar' * 100]] | [:get, 'foo'] | (3 + 3) | (3 * 100)
# Nested array response: [['foo', 0.0], ['bar', 1.0]]. Returns scores as float.
# Nested array response: [['foo', 0], ['bar', 1.1000000000000001]] due to Redis precision
# See https://github.com/redis/redis/issues/1499
[[:zadd, 'myset', 0, 'foo'],
[:zadd, 'myset', 1, 'bar']] | [:zrange, 'myset', 0, -1, 'withscores'] | (6 + 5 + 1 + 2 + 10) | (3 + 3 + 3 + 3)
[:zadd, 'myset', 1.1,
'bar']] | [:zrange, 'myset', 0, -1, 'withscores'] | (6 + 5 + 1 + 2 + 10) | (3 + 1 + 3 + 18)
end
with_them do
it 'counts bytes read and written' do
setup.each { |cmd| redis_client.call(*cmd) }
RequestStore.clear!
redis_client.call(*command)
redis_store_class.with do |redis|
setup.each { |cmd| redis.call(cmd) }
RequestStore.clear!
redis.call(command)
end
expect(Gitlab::Instrumentation::Redis.read_bytes).to eq(expect_read)
expect(Gitlab::Instrumentation::Redis.write_bytes).to eq(expect_write)
@ -58,35 +60,48 @@ RSpec.describe Gitlab::Instrumentation::RedisClientMiddleware, :request_store, f
it 'counts successful requests' do
expect(instrumentation_class).to receive(:instance_count_request).with(1).and_call_original
redis_client.call(:get, 'foobar')
redis_store_class.with { |redis| redis.call(:get, 'foobar') }
end
it 'counts successful pipelined requests' do
expect(instrumentation_class).to receive(:instance_count_request).with(2).and_call_original
expect(instrumentation_class).to receive(:instance_count_pipelined_request).with(2).and_call_original
redis_client.pipelined do |pipeline|
pipeline.call(:get, '{foobar}buz')
pipeline.call(:get, '{foobar}baz')
redis_store_class.with do |redis|
redis.pipelined do |pipeline|
pipeline.call(:get, '{foobar}buz')
pipeline.call(:get, '{foobar}baz')
end
end
end
context 'when encountering exceptions' do
before do
allow(redis_client.instance_variable_get(:@raw_connection)).to receive(:call).and_raise(
RedisClient::Error)
where(:case_name, :exception, :exception_counter) do
'generic exception' | Redis::CommandError.new | :instance_count_exception
'moved redirection' | Redis::CommandError.new("MOVED 123 127.0.0.1:6380") | :instance_count_cluster_redirection
'ask redirection' | Redis::CommandError.new("ASK 123 127.0.0.1:6380") | :instance_count_cluster_redirection
end
it 'counts exception' do
expect(instrumentation_class).to receive(:instance_count_exception)
.with(instance_of(RedisClient::Error)).and_call_original
expect(instrumentation_class).to receive(:log_exception)
.with(instance_of(RedisClient::Error)).and_call_original
expect(instrumentation_class).to receive(:instance_count_request).and_call_original
with_them do
before do
redis_store_class.with do |redis|
# We need to go 1 layer deeper to stub _client as we monkey-patch Redis::Client
# with the interceptor. Stubbing `redis` will skip the instrumentation_class.
allow(redis._client.instance_variable_get(:@raw_connection)).to receive(:call).and_raise(exception)
end
end
expect do
redis_client.call(:auth, 'foo', 'bar')
end.to raise_error(RedisClient::Error)
it 'counts exception' do
expect(instrumentation_class).to receive(exception_counter)
.with(instance_of(Redis::CommandError)).and_call_original
expect(instrumentation_class).to receive(:log_exception)
.with(instance_of(Redis::CommandError)).and_call_original
expect(instrumentation_class).to receive(:instance_count_request).and_call_original
expect do
redis_store_class.with { |redis| redis.call(:auth, 'foo', 'bar') }
end.to raise_exception(Redis::CommandError)
end
end
end
@ -99,7 +114,7 @@ RSpec.describe Gitlab::Instrumentation::RedisClientMiddleware, :request_store, f
expect(instrumentation_class).to receive(:increment_cross_slot_request_count).and_call_original
expect(instrumentation_class).not_to receive(:increment_allowed_cross_slot_request_count).and_call_original
redis_client.call(:mget, 'foo', 'bar')
redis_store_class.with { |redis| redis.call(:mget, 'foo', 'bar') }
end
it 'does not count allowed cross-slot requests' do
@ -107,7 +122,7 @@ RSpec.describe Gitlab::Instrumentation::RedisClientMiddleware, :request_store, f
expect(instrumentation_class).to receive(:increment_allowed_cross_slot_request_count).and_call_original
Gitlab::Instrumentation::RedisClusterValidator.allow_cross_slot_commands do
redis_client.call(:mget, 'foo', 'bar')
redis_store_class.with { |redis| redis.call(:mget, 'foo', 'bar') }
end
end
@ -116,7 +131,7 @@ RSpec.describe Gitlab::Instrumentation::RedisClientMiddleware, :request_store, f
expect(instrumentation_class).not_to receive(:increment_allowed_cross_slot_request_count).and_call_original
Gitlab::Instrumentation::RedisClusterValidator.allow_cross_slot_commands do
redis_client.call(:mget, 'bar')
redis_store_class.with { |redis| redis.call(:get, 'bar') }
end
end
@ -124,7 +139,7 @@ RSpec.describe Gitlab::Instrumentation::RedisClientMiddleware, :request_store, f
expect(instrumentation_class).not_to receive(:increment_cross_slot_request_count).and_call_original
expect(instrumentation_class).not_to receive(:increment_allowed_cross_slot_request_count).and_call_original
redis_client.call(:mget, '{foo}bar', '{foo}baz')
redis_store_class.with { |redis| redis.call(:mget, '{foo}bar', '{foo}baz') }
end
end
@ -135,7 +150,7 @@ RSpec.describe Gitlab::Instrumentation::RedisClientMiddleware, :request_store, f
it 'still runs cross-slot validation' do
expect do
redis_client.call('mget', 'foo', 'bar')
redis_store_class.with { |redis| redis.mget('foo', 'bar') }
end.to raise_error(instance_of(Gitlab::Instrumentation::RedisClusterValidator::CrossSlotError))
end
end
@ -157,7 +172,7 @@ RSpec.describe Gitlab::Instrumentation::RedisClientMiddleware, :request_store, f
expect(instrumentation_class).to receive(:instance_observe_duration).with(a_value > 0)
.and_call_original
redis_client.call(*command)
redis_store_class.with { |redis| redis.call(*command) }
end
end
@ -166,17 +181,21 @@ RSpec.describe Gitlab::Instrumentation::RedisClientMiddleware, :request_store, f
expect(instrumentation_class).to receive(:instance_observe_duration).twice.with(a_value > 0)
.and_call_original
redis_client.pipelined do |pipeline|
pipeline.call(:get, '{foobar}buz')
pipeline.call(:get, '{foobar}baz')
redis_store_class.with do |redis|
redis.pipelined do |pipeline|
pipeline.call(:get, '{foobar}buz')
pipeline.call(:get, '{foobar}baz')
end
end
end
it 'raises error when keys are not from the same slot' do
expect do
redis_client.pipelined do |pipeline|
pipeline.call(:get, 'foo')
pipeline.call(:get, 'bar')
redis_store_class.with do |redis|
redis.pipelined do |pipeline|
pipeline.call(:get, 'foo')
pipeline.call(:get, 'bar')
end
end
end.to raise_error(instance_of(Gitlab::Instrumentation::RedisClusterValidator::CrossSlotError))
end
@ -200,11 +219,11 @@ RSpec.describe Gitlab::Instrumentation::RedisClientMiddleware, :request_store, f
with_them do
it 'skips requests we do not want in the apdex' do
setup.each { |cmd| redis_client.call(*cmd) }
setup.each { |cmd| redis_store_class.with { |redis| redis.call(*cmd) } }
expect(instrumentation_class).not_to receive(:instance_observe_duration)
redis_client.call(*command)
redis_store_class.with { |redis| redis.call(*command) }
end
end
@ -212,10 +231,12 @@ RSpec.describe Gitlab::Instrumentation::RedisClientMiddleware, :request_store, f
it 'skips requests that have blocking commands' do
expect(instrumentation_class).not_to receive(:instance_observe_duration)
redis_client.pipelined do |pipeline|
pipeline.call(:get, '{foobar}buz')
pipeline.call(:rpush, '{foobar}baz', 1)
pipeline.call(:brpop, '{foobar}baz', 0)
redis_store_class.with do |redis|
redis.pipelined do |pipeline|
pipeline.call(:get, '{foobar}buz')
pipeline.call(:rpush, '{foobar}baz', 1)
pipeline.call(:brpop, '{foobar}baz', 0)
end
end
end
end

View File

@ -1,261 +0,0 @@
# frozen_string_literal: true
require 'spec_helper'
require 'rspec-parameterized'
require 'support/helpers/rails_helpers'
RSpec.describe Gitlab::Instrumentation::RedisInterceptor, :request_store, feature_category: :scalability do
using RSpec::Parameterized::TableSyntax
include RedisHelpers
let_it_be(:redis_store_class) { define_helper_redis_store_class }
before do
redis_store_class.with(&:flushdb)
end
describe 'read and write' do
where(:setup, :command, :expect_write, :expect_read) do
# The response is 'OK'; the request size is the combined size of the array
# elements. Exercise counting of a status reply.
[] | [:set, 'foo', 'bar'] | 3 + 3 + 3 | 2
# The response is 1001, so 4 bytes. Exercise counting an integer reply.
[[:set, 'foobar', 1000]] | [:incr, 'foobar'] | 4 + 6 | 4
# Exercise counting empty multi bulk reply
[] | [:hgetall, 'foobar'] | 7 + 6 | 0
# Hgetall response length is combined length of keys and values in the
# hash. Exercises counting of a multi bulk reply
[[:hset, 'myhash', 'field', 'hello world']] | [:hgetall, 'myhash'] | 7 + 6 | 5 + 11
# Exercise counting of a bulk reply
[[:set, 'foo', 'bar' * 100]] | [:get, 'foo'] | 3 + 3 | 3 * 100
# Nested array response: [['foo', 0], ['bar', 1]]
[[:zadd, 'myset', 0, 'foo'], [:zadd, 'myset', 1, 'bar']] | [:zrange, 'myset', 0, -1, 'withscores'] | 6 + 5 + 1 + 2 + 10 | 3 + 1 + 3 + 1
end
with_them do
it 'counts bytes read and written' do
redis_store_class.with do |redis|
setup.each { |cmd| redis.call(cmd) }
RequestStore.clear!
redis.call(command)
end
expect(Gitlab::Instrumentation::Redis.read_bytes).to eq(expect_read)
expect(Gitlab::Instrumentation::Redis.write_bytes).to eq(expect_write)
end
end
end
describe 'counting' do
let(:instrumentation_class) { redis_store_class.instrumentation_class }
it 'counts successful requests' do
expect(instrumentation_class).to receive(:instance_count_request).with(1).and_call_original
redis_store_class.with { |redis| redis.call(:get, 'foobar') }
end
it 'counts successful pipelined requests' do
expect(instrumentation_class).to receive(:instance_count_request).with(2).and_call_original
expect(instrumentation_class).to receive(:instance_count_pipelined_request).with(2).and_call_original
redis_store_class.with do |redis|
redis.pipelined do |pipeline|
pipeline.call(:get, '{foobar}buz')
pipeline.call(:get, '{foobar}baz')
end
end
end
context 'when encountering connection exceptions within process' do
before do
redis_store_class.with do |redis|
allow(redis._client).to receive(:write).and_call_original
end
end
it 'counts connection exceptions' do
redis_store_class.with do |redis|
expect(redis._client).to receive(:write).with([:get, 'foobar']).and_raise(::Redis::ConnectionError)
end
expect(instrumentation_class).to receive(:instance_count_connection_exception)
.with(instance_of(Redis::ConnectionError)).and_call_original
redis_store_class.with { |redis| redis.call(:get, 'foobar') }
end
end
context 'when encountering exceptions' do
where(:case_name, :exception, :exception_counter) do
'generic exception' | Redis::CommandError | :instance_count_exception
'moved redirection' | Redis::CommandError.new("MOVED 123 127.0.0.1:6380") | :instance_count_cluster_redirection
'ask redirection' | Redis::CommandError.new("ASK 123 127.0.0.1:6380") | :instance_count_cluster_redirection
end
with_them do
before do
redis_store_class.with do |redis|
# We need to go 1 layer deeper to stub _client as we monkey-patch Redis::Client
# with the interceptor. Stubbing `redis` will skip the instrumentation_class.
allow(redis._client).to receive(:process).and_raise(exception)
end
end
it 'counts exception' do
expect(instrumentation_class).to receive(exception_counter)
.with(instance_of(Redis::CommandError)).and_call_original
expect(instrumentation_class).to receive(:log_exception)
.with(instance_of(Redis::CommandError)).and_call_original
expect(instrumentation_class).to receive(:instance_count_request).and_call_original
expect do
redis_store_class.with { |redis| redis.call(:auth, 'foo', 'bar') }
end.to raise_exception(Redis::CommandError)
expect(Thread.current[:redis_client_error_count]).to eq(0)
end
end
end
context 'in production environment' do
before do
stub_rails_env('production') # to avoid raising CrossSlotError
end
it 'counts disallowed cross-slot requests' do
expect(instrumentation_class).to receive(:increment_cross_slot_request_count).and_call_original
expect(instrumentation_class).not_to receive(:increment_allowed_cross_slot_request_count).and_call_original
redis_store_class.with { |redis| redis.call(:mget, 'foo', 'bar') }
end
it 'does not count allowed cross-slot requests' do
expect(instrumentation_class).not_to receive(:increment_cross_slot_request_count).and_call_original
expect(instrumentation_class).to receive(:increment_allowed_cross_slot_request_count).and_call_original
Gitlab::Instrumentation::RedisClusterValidator.allow_cross_slot_commands do
redis_store_class.with { |redis| redis.call(:mget, 'foo', 'bar') }
end
end
it 'does not count allowed non-cross-slot requests' do
expect(instrumentation_class).not_to receive(:increment_cross_slot_request_count).and_call_original
expect(instrumentation_class).not_to receive(:increment_allowed_cross_slot_request_count).and_call_original
Gitlab::Instrumentation::RedisClusterValidator.allow_cross_slot_commands do
redis_store_class.with { |redis| redis.call(:get, 'bar') }
end
end
it 'skips count for non-cross-slot requests' do
expect(instrumentation_class).not_to receive(:increment_cross_slot_request_count).and_call_original
expect(instrumentation_class).not_to receive(:increment_allowed_cross_slot_request_count).and_call_original
redis_store_class.with { |redis| redis.call(:mget, '{foo}bar', '{foo}baz') }
end
end
context 'without active RequestStore' do
before do
::RequestStore.end!
end
it 'still runs cross-slot validation' do
expect do
redis_store_class.with { |redis| redis.mget('foo', 'bar') }
end.to raise_error(instance_of(Gitlab::Instrumentation::RedisClusterValidator::CrossSlotError))
end
end
end
describe 'latency' do
let(:instrumentation_class) { redis_store_class.instrumentation_class }
describe 'commands in the apdex' do
where(:command) do
[
[[:get, 'foobar']],
[%w[GET foobar]]
]
end
with_them do
it 'measures requests we want in the apdex' do
expect(instrumentation_class).to receive(:instance_observe_duration).with(a_value > 0)
.and_call_original
redis_store_class.with { |redis| redis.call(*command) }
end
end
context 'with pipelined commands' do
it 'measures requests that do not have blocking commands' do
expect(instrumentation_class).to receive(:instance_observe_duration).twice.with(a_value > 0)
.and_call_original
redis_store_class.with do |redis|
redis.pipelined do |pipeline|
pipeline.call(:get, '{foobar}buz')
pipeline.call(:get, '{foobar}baz')
end
end
end
it 'raises error when keys are not from the same slot' do
expect do
redis_store_class.with do |redis|
redis.pipelined do |pipeline|
pipeline.call(:get, 'foo')
pipeline.call(:get, 'bar')
end
end
end.to raise_error(instance_of(Gitlab::Instrumentation::RedisClusterValidator::CrossSlotError))
end
end
end
describe 'commands not in the apdex' do
where(:setup, :command) do
[['rpush', 'foobar', 1]] | ['brpop', 'foobar', 0]
[['rpush', 'foobar', 1]] | ['blpop', 'foobar', 0]
[['rpush', '{abc}foobar', 1]] | ['brpoplpush', '{abc}foobar', '{abc}bazqux', 0]
[['rpush', '{abc}foobar', 1]] | ['brpoplpush', '{abc}foobar', '{abc}bazqux', 0]
[['zadd', 'foobar', 1, 'a']] | ['bzpopmin', 'foobar', 0]
[['zadd', 'foobar', 1, 'a']] | ['bzpopmax', 'foobar', 0]
[['xadd', 'mystream', 1, 'myfield', 'mydata']] | ['xread', 'block', 1, 'streams', 'mystream', '0-0']
[['xadd', 'foobar', 1, 'myfield', 'mydata'], ['xgroup', 'create', 'foobar', 'mygroup', 0]] | ['xreadgroup', 'group', 'mygroup', 'myconsumer', 'block', 1, 'streams', 'foobar', '0-0']
[] | ['command']
end
with_them do
it 'skips requests we do not want in the apdex' do
redis_store_class.with { |redis| setup.each { |cmd| redis.call(*cmd) } }
expect(instrumentation_class).not_to receive(:instance_observe_duration)
redis_store_class.with { |redis| redis.call(*command) }
end
end
context 'with pipelined commands' do
it 'skips requests that have blocking commands' do
expect(instrumentation_class).not_to receive(:instance_observe_duration)
redis_store_class.with do |redis|
redis.pipelined do |pipeline|
pipeline.call(:get, '{foobar}buz')
pipeline.call(:rpush, '{foobar}baz', 1)
pipeline.call(:brpop, '{foobar}baz', 0)
end
end
end
end
end
end
end

View File

@ -1,80 +0,0 @@
# frozen_string_literal: true
require 'spec_helper'
RSpec.describe Gitlab::Patch::NodeLoader, feature_category: :redis do
using RSpec::Parameterized::TableSyntax
describe '#fetch_node_info' do
let(:redis) { double(:redis) } # rubocop:disable RSpec/VerifiedDoubles
# rubocop:disable Naming/InclusiveLanguage
where(:case_name, :args, :value) do
[
[
'when only ip address is present',
"07c37df 127.0.0.1:30004@31004 slave e7d1eec 0 1426238317239 4 connected
67ed2db 127.0.0.1:30002@31002 master - 0 1426238316232 2 connected 5461-10922
292f8b3 127.0.0.1:30003@31003 master - 0 1426238318243 3 connected 10923-16383
6ec2392 127.0.0.1:30005@31005 slave 67ed2db 0 1426238316232 5 connected
824fe11 127.0.0.1:30006@31006 slave 292f8b3 0 1426238317741 6 connected
e7d1eec 127.0.0.1:30001@31001 myself,master - 0 0 1 connected 0-5460",
{
'127.0.0.1:30004' => 'slave', '127.0.0.1:30002' => 'master', '127.0.0.1:30003' => 'master',
'127.0.0.1:30005' => 'slave', '127.0.0.1:30006' => 'slave', '127.0.0.1:30001' => 'master'
}
],
[
'when hostname is present',
"07c37df 127.0.0.1:30004@31004,host1 slave e7d1eec 0 1426238317239 4 connected
67ed2db 127.0.0.1:30002@31002,host2 master - 0 1426238316232 2 connected 5461-10922
292f8b3 127.0.0.1:30003@31003,host3 master - 0 1426238318243 3 connected 10923-16383
6ec2392 127.0.0.1:30005@31005,host4 slave 67ed2db 0 1426238316232 5 connected
824fe11 127.0.0.1:30006@31006,host5 slave 292f8b3 0 1426238317741 6 connected
e7d1eec 127.0.0.1:30001@31001,host6 myself,master - 0 0 1 connected 0-5460",
{
'host1:30004' => 'slave', 'host2:30002' => 'master', 'host3:30003' => 'master',
'host4:30005' => 'slave', 'host5:30006' => 'slave', 'host6:30001' => 'master'
}
],
[
'when auxiliary fields are present',
"07c37df 127.0.0.1:30004@31004,,shard-id=69bc slave e7d1eec 0 1426238317239 4 connected
67ed2db 127.0.0.1:30002@31002,,shard-id=114f master - 0 1426238316232 2 connected 5461-10922
292f8b3 127.0.0.1:30003@31003,,shard-id=fdb3 master - 0 1426238318243 3 connected 10923-16383
6ec2392 127.0.0.1:30005@31005,,shard-id=114f slave 67ed2db 0 1426238316232 5 connected
824fe11 127.0.0.1:30006@31006,,shard-id=fdb3 slave 292f8b3 0 1426238317741 6 connected
e7d1eec 127.0.0.1:30001@31001,,shard-id=69bc myself,master - 0 0 1 connected 0-5460",
{
'127.0.0.1:30004' => 'slave', '127.0.0.1:30002' => 'master', '127.0.0.1:30003' => 'master',
'127.0.0.1:30005' => 'slave', '127.0.0.1:30006' => 'slave', '127.0.0.1:30001' => 'master'
}
],
[
'when hostname and auxiliary fields are present',
"07c37df 127.0.0.1:30004@31004,host1,shard-id=69bc slave e7d1eec 0 1426238317239 4 connected
67ed2db 127.0.0.1:30002@31002,host2,shard-id=114f master - 0 1426238316232 2 connected 5461-10922
292f8b3 127.0.0.1:30003@31003,host3,shard-id=fdb3 master - 0 1426238318243 3 connected 10923-16383
6ec2392 127.0.0.1:30005@31005,host4,shard-id=114f slave 67ed2db 0 1426238316232 5 connected
824fe11 127.0.0.1:30006@31006,host5,shard-id=fdb3 slave 292f8b3 0 1426238317741 6 connected
e7d1eec 127.0.0.1:30001@31001,host6,shard-id=69bc myself,master - 0 0 1 connected 0-5460",
{
'host1:30004' => 'slave', 'host2:30002' => 'master', 'host3:30003' => 'master',
'host4:30005' => 'slave', 'host5:30006' => 'slave', 'host6:30001' => 'master'
}
]
]
end
# rubocop:enable Naming/InclusiveLanguage
with_them do
before do
allow(redis).to receive(:call).with([:cluster, :nodes]).and_return(args)
end
it do
expect(Redis::Cluster::NodeLoader.load_flags([redis])).to eq(value)
end
end
end
end

View File

@ -13,6 +13,8 @@ RSpec.describe Gitlab::Patch::RedisCacheStore, :use_clean_rails_redis_caching, f
cache.write('{user1}:x', 1)
cache.write('{user1}:y', 2)
cache.write('{user1}:z', 3)
cache.instance_variable_set(:@pipeline_batch_size, nil)
end
describe '#read_multi_mget' do
@ -34,7 +36,7 @@ RSpec.describe Gitlab::Patch::RedisCacheStore, :use_clean_rails_redis_caching, f
end
context 'when reading large amount of keys' do
let(:input_size) { 2000 }
let(:input_size) { 2100 }
let(:chunk_size) { 1000 }
shared_examples 'read large amount of keys' do
@ -45,10 +47,11 @@ RSpec.describe Gitlab::Patch::RedisCacheStore, :use_clean_rails_redis_caching, f
::Gitlab::Redis::ClusterUtil.cluster?(redis.default_store)
if normal_cluster || multistore_cluster
expect_next_instances_of(Gitlab::Redis::CrossSlot::Pipeline, 2) do |pipeline|
obj = instance_double(::Redis)
expect(pipeline).to receive(:pipelined).and_yield(obj)
expect(obj).to receive(:get).exactly(chunk_size).times
times = (input_size.to_f / chunk_size).ceil
expect(redis).to receive(:pipelined).exactly(times).times.and_call_original
expect_next_instances_of(::Redis::PipelinedConnection, times) do |p|
expect(p).to receive(:get).at_most(chunk_size).times
end
else
expect(redis).to receive(:mget).and_call_original

View File

@ -6,7 +6,7 @@ RSpec.describe Gitlab::Patch::RedisClient, feature_category: :redis do
include RedisHelpers
let_it_be(:redis_store_class) { define_helper_redis_store_class }
let_it_be(:redis_client) { RedisClient.new(redis_store_class.redis_client_params) }
let_it_be(:redis_client) { RedisClient.new(redis_store_class.params) }
before do
Thread.current[:redis_client_error_count] = 1

View File

@ -0,0 +1,29 @@
# frozen_string_literal: true
require 'spec_helper'
RSpec.describe Gitlab::Patch::RedisStoreFactory, feature_category: :redis do
describe '#create' do
let(:params) { { host: 'localhost' } }
subject(:factory_create) { ::Redis::Store::Factory.create(params) } # rubocop:disable Rails/SaveBang -- redis-store does not implement create!
context 'when using standalone Redis' do
it 'does not create ClusterStore' do
expect(Gitlab::Redis::ClusterStore).not_to receive(:new)
factory_create
end
end
context 'when using a Redis Cluster' do
let(:params) { { nodes: ["redis://localhost:6001", "redis://localhost:6002"] } }
it 'creates a ClusterStore' do
expect(Gitlab::Redis::ClusterStore).to receive(:new).with(params.merge({ raw: false }))
factory_create
end
end
end
end

View File

@ -102,7 +102,7 @@ RSpec.describe Gitlab::RackAttack::Store, :clean_gitlab_redis_rate_limiting, fea
before do
broken_redis = Redis.new(
url: 'redis://127.0.0.0:0',
instrumentation_class: Gitlab::Redis::RateLimiting.instrumentation_class
custom: { instrumentation_class: Gitlab::Redis::RateLimiting.instrumentation_class }
)
allow(Gitlab::Redis::RateLimiting).to receive(:with).and_yield(broken_redis)
end

View File

@ -0,0 +1,116 @@
# frozen_string_literal: true
require 'spec_helper'
# This spec only runs if a Redis Cluster is configured for Gitlab::Redis::Cache.
# ::Redis::Cluster fetches the cluster details from the server on `initialize` and will raise
# an error if the cluster is not found.
#
# An example would be the following in config/redis.yml, assuming GDK is set up with redis-cluster:
#   test:
#     cache:
#       cluster:
#         - "redis://127.0.0.1:6003"
#         - "redis://127.0.0.1:6004"
#         - "redis://127.0.0.1:6005"
RSpec.describe Gitlab::Redis::ClusterStore, :clean_gitlab_redis_cache,
feature_category: :redis, if: ::Gitlab::Redis::Cache.params[:nodes] do
let(:params) { ::Gitlab::Redis::Cache.params }
subject(:store) { ::Redis::Store::Factory.create(params) } # rubocop:disable Rails/SaveBang -- not a rails method
describe '.new' do
it 'initialises a cluster store' do
expect(store).to be_instance_of(::Gitlab::Redis::ClusterStore)
end
it 'extends Serialization by default' do
expect(store.is_a?(::Redis::Store::Serialization)).to eq(true)
end
it 'sets a default serializer when left empty' do
expect(store.instance_variable_get(:@serializer)).to eq(Marshal)
end
context 'when serializer field is defined' do
let(:params) { ::Gitlab::Redis::Cache.params.merge(serializer: Class) }
it 'sets serializer according to the options' do
expect(store.instance_variable_get(:@serializer)).to eq(Class)
end
end
context 'when marshalling field is defined' do
let(:params) { ::Gitlab::Redis::Cache.params.merge(marshalling: true, serializer: Class) }
it 'overrides serializer with Marshal' do
expect(store.instance_variable_get(:@serializer)).to eq(Marshal)
end
end
context 'when marshalling field is false' do
let(:params) { ::Gitlab::Redis::Cache.params.merge(marshalling: false, serializer: Class) }
it 'overrides serializer with nil' do
expect(store.instance_variable_get(:@serializer)).to eq(nil)
end
end
context 'when namespace is defined' do
let(:params) { ::Gitlab::Redis::Cache.params.merge(namespace: 'testing') }
it 'extends namespace' do
expect(store.is_a?(::Redis::Store::Namespace)).to eq(true)
end
it 'writes keys with namespace' do
store.set('testkey', 1)
::Gitlab::Redis::Cache.with do |conn|
expect(conn.exists('testing:testkey')).to eq(1)
end
end
end
end
describe '#set' do
context 'when ttl is added' do
it 'writes the key and sets a ttl' do
expect(store.set('test', 1, expire_after: 100)).to eq('OK')
expect(store.ttl('test')).to be > 95
expect(store.get('test')).to eq(1)
end
end
context 'when there is no ttl' do
it 'sets the key' do
expect(store.set('test', 1)).to eq('OK')
expect(store.get('test')).to eq(1)
expect(store.ttl('test')).to eq(-1)
end
end
end
describe '#setnx' do
context 'when ttl is added' do
it 'writes the key if it does not exist and sets a ttl' do
expect(store.setnx('test', 1, expire_after: 100)).to eq([true, true])
expect(store.ttl('test')).to be > 95
expect(store.get('test')).to eq(1)
expect(store.setnx('test', 1, expire_after: 100)).to eq([false, true])
end
end
context 'when there is no ttl' do
it 'writes the key if it does not exist' do
expect(store.setnx('test', 1)).to eq(true)
expect(store.setnx('test', 1)).to eq(false)
expect(store.get('test')).to eq(1)
expect(store.ttl('test')).to eq(-1)
end
end
end
end

View File

@ -5,10 +5,14 @@ require 'spec_helper'
RSpec.describe Gitlab::Redis::ClusterUtil, feature_category: :scalability do
using RSpec::Parameterized::TableSyntax
let(:router_stub) { instance_double(::RedisClient::Cluster::Router) }
before do
allow(::RedisClient::Cluster::Router).to receive(:new).and_return(router_stub)
end
describe '.cluster?' do
context 'when MultiStore' do
let(:redis_cluster) { instance_double(::Redis::Cluster) }
where(:pri_store, :sec_store, :expected_val) do
:cluster | :cluster | true
:cluster | :single | true
@ -17,10 +21,7 @@ RSpec.describe Gitlab::Redis::ClusterUtil, feature_category: :scalability do
end
before do
# stub all initialiser steps in Redis::Cluster.new to avoid connecting to a Redis Cluster node
allow(::Redis::Cluster).to receive(:new).and_return(redis_cluster)
allow(redis_cluster).to receive(:is_a?).with(::Redis::Cluster).and_return(true)
allow(redis_cluster).to receive(:id).and_return(1)
allow(router_stub).to receive(:node_keys).and_return([])
allow(Gitlab::Redis::MultiStore).to receive(:same_redis_store?).and_return(false)
skip_default_enabled_yaml_check
@ -28,8 +29,8 @@ RSpec.describe Gitlab::Redis::ClusterUtil, feature_category: :scalability do
with_them do
it 'returns expected value' do
primary_redis = pri_store == :cluster ? ::Redis.new(cluster: ['redis://localhost:6000']) : ::Redis.new
secondary_redis = sec_store == :cluster ? ::Redis.new(cluster: ['redis://localhost:6000']) : ::Redis.new
primary_redis = pri_store == :cluster ? Redis::Cluster.new(nodes: ['redis://localhost:6000']) : Redis.new
secondary_redis = sec_store == :cluster ? Redis::Cluster.new(nodes: ['redis://localhost:6000']) : Redis.new
primary_pool = ConnectionPool.new { primary_redis }
secondary_pool = ConnectionPool.new { secondary_redis }
multistore = Gitlab::Redis::MultiStore.new(primary_pool, secondary_pool, 'teststore')
@ -48,16 +49,8 @@ RSpec.describe Gitlab::Redis::ClusterUtil, feature_category: :scalability do
end
context 'when is Redis::Cluster' do
let(:redis_cluster) { instance_double(::Redis::Cluster) }
before do
# stub all initialiser steps in Redis::Cluster.new to avoid connecting to a Redis Cluster node
allow(::Redis::Cluster).to receive(:new).and_return(redis_cluster)
allow(redis_cluster).to receive(:is_a?).with(::Redis::Cluster).and_return(true)
end
it 'returns true' do
expect(described_class.cluster?(::Redis.new(cluster: ['redis://localhost:6000']))).to be_truthy
expect(described_class.cluster?(Redis::Cluster.new(nodes: ['redis://localhost:6000']))).to be_truthy
end
end
end

View File

@ -0,0 +1,57 @@
# frozen_string_literal: true
require 'spec_helper'
# references specs in https://github.com/redis-rb/redis-client/blob/master/test/redis_client/command_builder_test.rb
# we add `handles nil arguments` to test our own added logic
RSpec.describe Gitlab::Redis::CommandBuilder, feature_category: :redis do
describe '.generate' do
def call(*args, **kwargs)
described_class.generate(args, kwargs)
end
it 'handles nil arguments' do
expect(call("a", nil)).to eq(["a", ""])
end
it 'handles positional arguments' do
expect(call("a", "b", "c")).to eq(%w[a b c])
end
it 'handles arrays' do
expect(call("a", %w[b c])).to eq(%w[a b c])
end
it 'handles hashes' do
expect(call("a", { "b" => "c" })).to eq(%w[a b c])
end
it 'handles symbols' do
expect(call(:a, { b: :c }, :d)).to eq(%w[a b c d])
end
it 'handles numerics' do
expect(call(1, 2.3)).to eq(["1", "2.3"])
end
it 'handles kwargs booleans' do
expect(call(ttl: nil, ex: false, withscores: true)).to eq(["withscores"])
end
it 'handles kwargs values' do
expect(call(ttl: 42)).to eq(%w[ttl 42])
end
it 'handles nil kwargs' do
expect(call(%i[a b c])).to eq(%w[a b c])
end
it 'raises error on unsupported types' do
expect { call(hash: {}) }.to raise_error(TypeError)
end
it 'raises error on empty commands' do
expect { call }.to raise_error(ArgumentError)
end
end
end
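The spec above pins down `Gitlab::Redis::CommandBuilder.generate` in isolation. As a hedged sketch only (the real wiring is not part of this diff), redis-client accepts a `command_builder:` config option, so a builder with this interface could be hooked up roughly as follows; the host is a placeholder:
# Illustrative sketch, not part of this change: redis-client exposes a
# `command_builder:` option, which is how a `.generate(args, kwargs)`
# implementation like the one specced above would typically be exercised.
require 'redis_client'
client = RedisClient.config(
  host: 'localhost', # placeholder host
  command_builder: Gitlab::Redis::CommandBuilder
).new_client
client.call('SET', 'key', nil)             # nil is coerced to "" by the builder
client.call('HSET', 'h', { 'field' => 1 }) # hashes and numerics are flattened to strings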

View File

@ -1,134 +0,0 @@
# frozen_string_literal: true
require 'spec_helper'
RSpec.describe Gitlab::Redis::CrossSlot, feature_category: :redis do
include RedisHelpers
let_it_be(:redis_store_class) { define_helper_redis_store_class }
before do
redis_store_class.with(&:flushdb)
end
describe '.pipelined' do
context 'when using redis client' do
before do
redis_store_class.with { |redis| redis.set('a', 1) }
end
it 'performs redis-rb pipelined' do
expect(Gitlab::Redis::CrossSlot::Router).not_to receive(:new)
expect(
Gitlab::Instrumentation::RedisClusterValidator.allow_cross_slot_commands do
redis_store_class.with do |redis|
described_class::Pipeline.new(redis).pipelined do |p|
p.get('a')
p.set('b', 1)
end
end
end
).to eq(%w[1 OK])
end
end
context 'when used with MultiStore' do
let_it_be(:primary_db) { 1 }
let_it_be(:secondary_db) { 2 }
let_it_be(:primary_store) { create_redis_store(redis_store_class.params, db: primary_db, serializer: nil) }
let_it_be(:secondary_store) { create_redis_store(redis_store_class.params, db: secondary_db, serializer: nil) }
let_it_be(:primary_pool) { ConnectionPool.new { primary_store } }
let_it_be(:secondary_pool) { ConnectionPool.new { secondary_store } }
let_it_be(:multistore) { Gitlab::Redis::MultiStore.new(primary_pool, secondary_pool, 'testing') }
before do
primary_store.set('a', 1)
secondary_store.set('a', 1)
skip_default_enabled_yaml_check
end
it 'performs multistore pipelined' do
expect(Gitlab::Redis::CrossSlot::Router).not_to receive(:new)
expect(
Gitlab::Instrumentation::RedisClusterValidator.allow_cross_slot_commands do
multistore.with_borrowed_connection do
described_class::Pipeline.new(multistore).pipelined do |p|
p.get('a')
p.set('b', 1)
end
end
end
).to eq(%w[1 OK])
end
end
context 'when using Redis::Cluster' do
# Only stub redis client internals since the CI pipeline does not run a Redis Cluster
let(:redis) { double(:redis) } # rubocop:disable RSpec/VerifiedDoubles
let(:client) { double(:client) } # rubocop:disable RSpec/VerifiedDoubles
let(:pipeline) { double(:pipeline) } # rubocop:disable RSpec/VerifiedDoubles
let(:arguments) { %w[a b c d] }
subject do
described_class::Pipeline.new(redis).pipelined do |p|
arguments.each { |key| p.get(key) }
end
end
before do
allow(redis).to receive(:_client).and_return(client)
allow(redis).to receive(:pipelined).and_yield(pipeline)
allow(client).to receive(:instance_of?).with(::Redis::Cluster).and_return(true)
end
it 'fans out and fans in commands to separate shards' do
# simulate fan-out to 3 shards with random order
expect(client).to receive(:_find_node_key).exactly(4).times.and_return(3, 2, 1, 3)
arguments.each do |key|
f = double('future') # rubocop:disable RSpec/VerifiedDoubles
expect(pipeline).to receive(:get).with(key).and_return(f)
expect(f).to receive(:value).and_return(key)
end
expect(subject).to eq(arguments)
end
shared_examples 'fallback on cross-slot' do |redirection|
context 'when redis cluster is undergoing slot migration' do
before do
allow(pipeline).to receive(:get).and_raise(::Redis::CommandError.new("#{redirection} 1 127.0.0.1:7001"))
end
it 'logs error and executes sequentially' do
expect(client).to receive(:_find_node_key).exactly(4).times.and_return(3, 2, 1, 3)
expect(Gitlab::ErrorTracking).to receive(:log_exception).with(an_instance_of(::Redis::CommandError))
arguments.each do |key|
expect(redis).to receive(:get).with(key).and_return(key)
end
subject
end
end
end
it_behaves_like 'fallback on cross-slot', 'MOVED'
it_behaves_like 'fallback on cross-slot', 'ASK'
context 'when receiving non-MOVED/ASK command errors' do
before do
allow(pipeline).to receive(:get).and_raise(::Redis::CommandError.new)
allow(client).to receive(:_find_node_key).exactly(4).times.and_return(3, 2, 1, 3)
end
it 'raises error' do
expect { subject }.to raise_error(::Redis::CommandError)
end
end
end
end
end

View File

@ -58,6 +58,7 @@ RSpec.describe Gitlab::Redis::MultiStore, feature_category: :redis do
context 'when primary_store is not a ::Redis instance' do
before do
allow(primary_store).to receive(:is_a?).with(::Redis).and_return(false)
allow(primary_store).to receive(:is_a?).with(::Redis::Cluster).and_return(false)
end
it 'fails with exception' do
@ -69,6 +70,7 @@ RSpec.describe Gitlab::Redis::MultiStore, feature_category: :redis do
context 'when secondary_store is not a ::Redis instance' do
before do
allow(secondary_store).to receive(:is_a?).with(::Redis).and_return(false)
allow(secondary_store).to receive(:is_a?).with(::Redis::Cluster).and_return(false)
end
it 'fails with exception' do
@ -618,35 +620,6 @@ RSpec.describe Gitlab::Redis::MultiStore, feature_category: :redis do
end
end
context 'when either store is an instance of ::Redis::Cluster' do
let(:pipeline) { double }
let(:client) { double }
before do
allow(client).to receive(:instance_of?).with(::Redis::Cluster).and_return(true)
allow(pipeline).to receive(:pipelined)
multi_store.with_borrowed_connection do
allow(multi_store.default_store).to receive(:_client).and_return(client)
end
end
it 'calls cross-slot pipeline within multistore' do
if name == :pipelined
# we intentionally exclude `.and_call_original` since primary_store/secondary_store
# may not be running on a proper Redis Cluster.
multi_store.with_borrowed_connection do
expect(Gitlab::Redis::CrossSlot::Pipeline).to receive(:new)
.with(multi_store.default_store)
.exactly(:once)
.and_return(pipeline)
expect(Gitlab::Redis::CrossSlot::Pipeline).not_to receive(:new).with(multi_store.non_default_store)
end
end
subject
end
end
context 'when with_readonly_pipeline is used' do
it 'calls the default store only' do
expect(primary_store).to receive(:send).and_call_original

View File

@ -8,9 +8,9 @@ RSpec.describe Gitlab::Redis::Sessions do
describe '#store' do
subject(:store) { described_class.store(namespace: described_class::SESSION_NAMESPACE) }
# Check that Gitlab::Redis::Sessions is configured as RedisStore.
# Check that Gitlab::Redis::Sessions is configured as RedisStore or ClusterStore.
it 'instantiates an instance of Redis::Store' do
expect(store).to be_instance_of(::Redis::Store)
expect([::Redis::Store, ::Gitlab::Redis::ClusterStore].include?(store.class)).to eq(true)
end
end
end

View File

@ -544,18 +544,18 @@ end
Rack::Test::UploadedFile.prepend(TouchRackUploadedFile)
# Monkey-patch to enable ActiveSupport::Notifications for Redis commands
# Inject middleware to enable ActiveSupport::Notifications for Redis commands
module RedisCommands
module Instrumentation
def process(commands, &block)
ActiveSupport::Notifications.instrument('redis.process_commands', commands: commands) do
super(commands, &block)
def call(command, redis_config)
ActiveSupport::Notifications.instrument('redis.process_commands', commands: command) do
super(command, redis_config)
end
end
end
end
Redis::Client.prepend(RedisCommands::Instrumentation)
RedisClient.register(RedisCommands::Instrumentation)
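As a side note, here is a minimal sketch of how the 'redis.process_commands' event published by this middleware could be consumed; only the event name and the :commands payload key come from the code above, the subscriber itself is illustrative:
# Illustrative subscriber for the instrumentation registered above.
ActiveSupport::Notifications.subscribe('redis.process_commands') do |_name, start, finish, _id, payload|
  # payload[:commands] is the command array handed to RedisClient#call
  duration_ms = (finish - start) * 1000
  Rails.logger.debug("redis #{payload[:commands].inspect} took #{duration_ms.round(2)}ms")
end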
module UsersInternalAllowExclusiveLease
extend ActiveSupport::Concern

View File

@ -60,8 +60,8 @@ module DnsHelpers
def permit_redis!
# https://github.com/redis-rb/redis-client/blob/v0.11.2/lib/redis_client/ruby_connection.rb#L51 uses Socket.tcp that
# calls Addrinfo.getaddrinfo internally.
hosts = Gitlab::Redis::ALL_CLASSES.map do |redis_instance|
redis_instance.redis_client_params[:host]
hosts = Gitlab::Redis::ALL_CLASSES.flat_map do |redis_instance|
redis_instance.params[:host] || redis_instance.params[:nodes]&.map { |n| n[:host] }
end.uniq.compact
hosts.each do |host|
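For illustration only (the host names are made up), this is what the `flat_map` above yields for a standalone versus a cluster configuration:
# Illustrative only; hosts are placeholders.
standalone = { host: 'localhost' }
cluster    = { nodes: [{ host: 'redis-1', port: 6001 }, { host: 'redis-2', port: 6002 }] }
[standalone, cluster].flat_map { |p| p[:host] || p[:nodes]&.map { |n| n[:host] } }.uniq.compact
# => ["localhost", "redis-1", "redis-2"]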

View File

@ -14,7 +14,7 @@ module ExceedRedisCallLimitHelpers
end
def verify_commands_count(command, expected, block)
@actual = build_recorder(block).by_command(command).count
@actual = build_recorder(block).by_command(command.to_s).count
@actual > expected
end

View File

@ -6292,7 +6292,6 @@
- './spec/lib/gitlab/instrumentation/rate_limiting_gates_spec.rb'
- './spec/lib/gitlab/instrumentation/redis_base_spec.rb'
- './spec/lib/gitlab/instrumentation/redis_cluster_validator_spec.rb'
- './spec/lib/gitlab/instrumentation/redis_interceptor_spec.rb'
- './spec/lib/gitlab/instrumentation/redis_spec.rb'
- './spec/lib/gitlab/internal_post_receive/response_spec.rb'
- './spec/lib/gitlab/issuable/clone/attributes_rewriter_spec.rb'

View File

@ -25,7 +25,7 @@ RSpec.shared_examples Gitlab::BitbucketImport::ObjectImporter do
expect(Gitlab::JobWaiter).to receive(:notify).with(waiter_key, anything)
worker.perform(project_id, {}, waiter_key)
worker.class.perform_inline(project_id, {}, waiter_key)
end
end
@ -49,7 +49,7 @@ RSpec.shared_examples Gitlab::BitbucketImport::ObjectImporter do
expect(Gitlab::BitbucketImport::Logger).to receive(:info).twice
expect_next(worker.importer_class, project, kind_of(Hash)).to receive(:execute)
worker.perform(project_id, {}, waiter_key)
worker.class.perform_inline(project_id, {}, waiter_key)
end
it_behaves_like 'notifies the waiter'
@ -62,7 +62,7 @@ RSpec.shared_examples Gitlab::BitbucketImport::ObjectImporter do
it 'tracks the error' do
expect(Gitlab::Import::ImportFailureService).to receive(:track).once
worker.perform(project_id, {}, waiter_key)
worker.class.perform_inline(project_id, {}, waiter_key)
end
end
@ -74,7 +74,7 @@ RSpec.shared_examples Gitlab::BitbucketImport::ObjectImporter do
it 'tracks the error and raises the error' do
expect(Gitlab::Import::ImportFailureService).to receive(:track).once
expect { worker.perform(project_id, {}, waiter_key) }.to raise_error(StandardError)
expect { worker.class.perform_inline(project_id, {}, waiter_key) }.to raise_error(StandardError)
end
end
end
@ -85,7 +85,7 @@ RSpec.shared_examples Gitlab::BitbucketImport::ObjectImporter do
it 'does not call the importer' do
expect_next(worker.importer_class).not_to receive(:execute)
worker.perform(project_id, {}, waiter_key)
worker.class.perform_inline(project_id, {}, waiter_key)
end
it_behaves_like 'notifies the waiter'

View File

@ -19,6 +19,10 @@ RSpec.shared_examples Gitlab::BitbucketServerImport::ObjectImporter do
let(:project_id) { project_id }
let(:waiter_key) { 'key' }
before do
allow(Gitlab::JobWaiter).to receive(:notify).with(waiter_key, anything, ttl: Gitlab::Import::JOB_WAITER_TTL)
end
shared_examples 'notifies the waiter' do
specify do
allow_next(worker.importer_class).to receive(:execute)

View File

@ -80,18 +80,17 @@ RSpec.shared_examples "redis_shared_examples" do
context 'with new format' do
it_behaves_like 'redis store' do
let(:config_file_name) { config_new_format_host }
# use new format host without sentinel details as `.to_s` checks `config` which
# tries to resolve master/replica details with an actual sentinel instance.
# https://github.com/redis-rb/redis-client/blob/v0.18.0/lib/redis_client/sentinel_config.rb#L128
let(:config_file_name) { "spec/fixtures/config/redis_new_format_host_standalone.yml" }
let(:host) { "development-host:#{redis_port}" }
end
end
end
describe '.redis_client_params' do
# .redis_client_params wraps over `.redis_store_options` by modifying its outputs
# to be compatible with `RedisClient`. We test for compatibility in this block while
# the contents of redis_store_options are tested in the `.params` block.
subject { described_class.new(rails_env).redis_client_params }
describe '.params' do
subject { described_class.new(rails_env).params }
let(:rails_env) { 'development' }
let(:config_file_name) { config_old_format_socket }
@ -103,56 +102,6 @@ RSpec.shared_examples "redis_shared_examples" do
end
end
context 'when url is host based' do
context 'with old format' do
let(:config_file_name) { config_old_format_host }
it 'does not raise ArgumentError for invalid keywords' do
expect { RedisClient.config(**subject) }.not_to raise_error
end
it_behaves_like 'instrumentation_class in custom key'
end
context 'with new format' do
let(:config_file_name) { config_new_format_host }
where(:rails_env, :host) do
[
%w[development development-host],
%w[test test-host],
%w[production production-host]
]
end
with_them do
it 'does not raise ArgumentError for invalid keywords in SentinelConfig' do
expect(subject[:name]).to eq(host)
expect { RedisClient.sentinel(**subject) }.not_to raise_error
end
it_behaves_like 'instrumentation_class in custom key'
end
end
end
context 'when url contains unix socket reference' do
let(:config_file_name) { config_old_format_socket }
it 'does not raise ArgumentError for invalid keywords' do
expect { RedisClient.config(**subject) }.not_to raise_error
end
it_behaves_like 'instrumentation_class in custom key'
end
end
describe '.params' do
subject { described_class.new(rails_env).params }
let(:rails_env) { 'development' }
let(:config_file_name) { config_old_format_socket }
it 'withstands mutation' do
params1 = described_class.params
params2 = described_class.params
@ -251,10 +200,16 @@ RSpec.shared_examples "redis_shared_examples" do
with_them do
it 'returns hash with host, port, db, and password' do
is_expected.to include(host: host, password: 'mynewpassword', port: redis_port, db: redis_database)
is_expected.to include(name: host, password: 'mynewpassword', db: redis_database)
is_expected.not_to have_key(:url)
end
it 'does not raise ArgumentError for invalid keywords in SentinelConfig' do
expect { RedisClient.sentinel(**subject) }.not_to raise_error
end
end
it_behaves_like 'instrumentation_class in custom key'
end
context 'with redis cluster format' do
@ -272,13 +227,19 @@ RSpec.shared_examples "redis_shared_examples" do
it 'returns hash with cluster and password' do
is_expected.to include(
password: 'myclusterpassword',
cluster: [
nodes: [
{ host: "#{host}1", port: redis_port },
{ host: "#{host}2", port: redis_port }
]
)
is_expected.not_to have_key(:url)
end
it 'does not raise ArgumentError for invalid keywords in ClusterConfig' do
expect { RedisClient::ClusterConfig.new(**subject) }.not_to raise_error
end
it_behaves_like 'instrumentation_class in custom key'
end
end
end

View File

@ -13,7 +13,7 @@ RSpec.describe RedisCommands::Recorder, :use_clean_rails_redis_caching do
it 'records Redis commands' do
recorder = described_class.new { cache.read('key1') }
expect(recorder.log).to include([:get, 'cache:gitlab:key1'])
expect(recorder.log).to include(['get', 'cache:gitlab:key1'])
end
end
@ -35,10 +35,10 @@ RSpec.describe RedisCommands::Recorder, :use_clean_rails_redis_caching do
cache.delete('key1')
end
expect(recorder.log).to include([:set, 'cache:gitlab:key1', anything, anything, anything])
expect(recorder.log).to include([:get, 'cache:gitlab:key1'])
expect(recorder.log).to include([:get, 'cache:gitlab:key2'])
expect(recorder.log).to include([:del, 'cache:gitlab:key1'])
expect(recorder.log).to include(['set', 'cache:gitlab:key1', anything, anything, anything])
expect(recorder.log).to include(['get', 'cache:gitlab:key1'])
expect(recorder.log).to include(['get', 'cache:gitlab:key2'])
expect(recorder.log).to include(['del', 'cache:gitlab:key1'])
end
it 'does not record commands before the call' do
@ -48,8 +48,8 @@ RSpec.describe RedisCommands::Recorder, :use_clean_rails_redis_caching do
cache.read('key1')
end
expect(recorder.log).not_to include([:set, anything, anything])
expect(recorder.log).to include([:get, 'cache:gitlab:key1'])
expect(recorder.log).not_to include(['set', anything, anything])
expect(recorder.log).to include(['get', 'cache:gitlab:key1'])
end
it 'refreshes recording after reinitialization' do
@ -68,15 +68,15 @@ RSpec.describe RedisCommands::Recorder, :use_clean_rails_redis_caching do
cache.read('key4')
end
expect(recorder1.log).to include([:get, 'cache:gitlab:key2'])
expect(recorder1.log).not_to include([:get, 'cache:gitlab:key1'])
expect(recorder1.log).not_to include([:get, 'cache:gitlab:key3'])
expect(recorder1.log).not_to include([:get, 'cache:gitlab:key4'])
expect(recorder1.log).to include(['get', 'cache:gitlab:key2'])
expect(recorder1.log).not_to include(['get', 'cache:gitlab:key1'])
expect(recorder1.log).not_to include(['get', 'cache:gitlab:key3'])
expect(recorder1.log).not_to include(['get', 'cache:gitlab:key4'])
expect(recorder2.log).to include([:get, 'cache:gitlab:key4'])
expect(recorder2.log).not_to include([:get, 'cache:gitlab:key1'])
expect(recorder2.log).not_to include([:get, 'cache:gitlab:key2'])
expect(recorder2.log).not_to include([:get, 'cache:gitlab:key3'])
expect(recorder2.log).to include(['get', 'cache:gitlab:key4'])
expect(recorder2.log).not_to include(['get', 'cache:gitlab:key1'])
expect(recorder2.log).not_to include(['get', 'cache:gitlab:key2'])
expect(recorder2.log).not_to include(['get', 'cache:gitlab:key3'])
end
end
@ -91,10 +91,10 @@ RSpec.describe RedisCommands::Recorder, :use_clean_rails_redis_caching do
cache.delete('key2')
end
expect(recorder.log).to include([:set, 'cache:gitlab:key1', anything, anything, anything])
expect(recorder.log).to include([:get, 'cache:gitlab:key1'])
expect(recorder.log).not_to include([:get, 'cache:gitlab:key2'])
expect(recorder.log).not_to include([:del, 'cache:gitlab:key2'])
expect(recorder.log).to include(['set', 'cache:gitlab:key1', anything, anything, anything])
expect(recorder.log).to include(['get', 'cache:gitlab:key1'])
expect(recorder.log).not_to include(['get', 'cache:gitlab:key2'])
expect(recorder.log).not_to include(['del', 'cache:gitlab:key2'])
end
end
@ -107,7 +107,7 @@ RSpec.describe RedisCommands::Recorder, :use_clean_rails_redis_caching do
cache.delete('key2')
end
expect(recorder.by_command(:del)).to match_array([[:del, 'cache:gitlab:key2']])
expect(recorder.by_command('del')).to match_array([['del', 'cache:gitlab:key2']])
end
end

View File

@ -108,17 +108,24 @@ RSpec.describe Gitlab::GithubGistsImport::ImportGistWorker, feature_category: :i
before do
allow(importer).to receive(:execute).and_return(importer_result)
allow(Gitlab::GithubGistsImport::Representation::Gist)
.to receive(:from_json_hash)
.with(anything)
.and_return(gist_object)
end
it 'tracks and logs error' do
# use `anything` since jid is created in Sidekiq's middleware. `jid` does not exist until
# perform_inline is called.
expect(Gitlab::GithubImport::Logger)
.to receive(:error)
.with(log_attributes.merge('message' => 'importer failed', 'exception.message' => 'error_message'))
.with(log_attributes.merge('message' => 'importer failed', 'exception.message' => 'error_message',
'jid' => anything))
expect(Gitlab::JobWaiter)
.to receive(:notify)
.with('some_key', subject.jid, ttl: Gitlab::Import::JOB_WAITER_TTL)
.with('some_key', anything, ttl: Gitlab::Import::JOB_WAITER_TTL)
subject.perform(user.id, gist_hash, 'some_key')
subject.class.perform_inline(user.id, gist_hash, 'some_key') # perform_inline calls .perform
expect_snowplow_event(
category: 'Gitlab::GithubGistsImport::ImportGistWorker',
@ -130,7 +137,7 @@ RSpec.describe Gitlab::GithubGistsImport::ImportGistWorker, feature_category: :i
end
it 'persists failure' do
expect { subject.perform(user.id, gist_hash, 'some_key') }
expect { subject.class.perform_inline(user.id, gist_hash, 'some_key') }
.to change { ImportFailure.where(user: user).count }.from(0).to(1)
expect(ImportFailure.where(user_id: user.id).first).to have_attributes(

View File

@ -46,7 +46,7 @@ RSpec.describe Gitlab::JiraImport::ImportIssueWorker, feature_category: :importe
context 'when import label does not exist' do
it 'does not record import failure' do
subject.perform(project.id, 123, issue_attrs, some_key)
subject.class.perform_inline(project.id, 123, issue_attrs, some_key)
expect(label.issues.count).to eq(0)
expect(Gitlab::Cache::Import::Caching.read(Gitlab::JiraImport.failed_issues_counter_cache_key(project.id)).to_i).to eq(0)
@ -57,7 +57,7 @@ RSpec.describe Gitlab::JiraImport::ImportIssueWorker, feature_category: :importe
before do
Gitlab::JiraImport.cache_import_label_id(project.id, label.id)
subject.perform(project.id, 123, issue_attrs, some_key)
subject.class.perform_inline(project.id, 123, issue_attrs, some_key)
end
it 'does not record import failure' do

View File

@ -30,7 +30,7 @@ RSpec.describe RedisMigrationWorker, :clean_gitlab_redis_shared_state, feature_c
end
def redis
::Redis.new(::Gitlab::Redis::Cache.params)
::Gitlab::Redis::Cache.redis
end
end
end