Merge branch 'master' into delegate_opt
commit 8a0ad56182
@@ -13,6 +13,7 @@ on:
       - '*.bzl'
       - '*.bazel'
       - .github/workflows/test.yaml
+  pull_request:
 jobs:
   test:
     name: Test
@@ -34,13 +34,13 @@ buildbuddy(
 
 git_repository(
     name = "rbe_23",
-    commit = "d2b454dc5138a2a92de45a0a672241a4fbb5a1e5",
+    commit = "b21c066e426de48e526cc0f8c5158b7024d04e85",
     remote = "https://github.com/rabbitmq/rbe-erlang-platform.git",
 )
 
 git_repository(
     name = "rbe_24",
-    commit = "a087892ef4202dc3245b64d36d5921491848315f",
+    commit = "c8cbf65e2facbe464ebbcee7b6cf6f7a2d422ded",
     remote = "https://github.com/rabbitmq/rbe-erlang-platform.git",
 )
 
@@ -703,9 +703,6 @@ suites = [
         additional_hdrs = [
             "src/rabbit_fifo.hrl",
         ],
-        erlc_opts = [
-            "-I deps/rabbit",  # allow rabbit_fifo.hrl to be included at src/rabbit_fifo.hrl
-        ],
         runtime_deps = [
             "@meck//:bazel_erlang_lib",
             "@ra//:bazel_erlang_lib",
@@ -500,7 +500,7 @@ stop_and_halt() ->
         %% init:stop() will be called regardless of any errors.
         try
             AppsLeft = [ A || {A, _, _} <- application:which_applications() ],
-            ?LOG_ERROR(
+            ?LOG_INFO(
                 lists:flatten(
                   ["Halting Erlang VM with the following applications:~n",
                    ["    ~p~n" || _ <- AppsLeft]]),
@@ -38,7 +38,8 @@ check_user_pass_login(Username, Password) ->
 check_user_login(Username, AuthProps) ->
     %% extra auth properties like MQTT client id are in AuthProps
     {ok, Modules} = application:get_env(rabbit, auth_backends),
-    R = lists:foldl(
+    try
+        lists:foldl(
             fun (rabbit_auth_backend_cache=ModN, {refused, _, _, _}) ->
                     %% It is possible to specify authn/authz within the cache module settings,
                     %% so we have to do both auth steps here
@@ -65,8 +66,14 @@ check_user_login(Username, AuthProps) ->
                     %% We've successfully authenticated. Skip to the end...
                     {ok, User}
             end,
-          {refused, Username, "No modules checked '~s'", [Username]}, Modules),
-    R.
+            {refused, Username, "No modules checked '~s'", [Username]}, Modules)
+        catch
+            Type:Error:Stacktrace ->
+                rabbit_log:debug("User '~s' authentication failed with ~s:~p:~n~p", [Username, Type, Error, Stacktrace]),
+                {refused, Username, "User '~s' authentication failed with internal error. "
+                                    "Enable debug logs to see the real error.", [Username]}
+
+        end.
 
 try_authenticate_and_try_authorize(ModN, ModZs0, Username, AuthProps) ->
     ModZs = case ModZs0 of
@@ -14,12 +14,15 @@
 -export([user_login_authentication/2, user_login_authorization/2,
          check_vhost_access/3, check_resource_access/4, check_topic_access/4]).
 
--export([add_user/3, delete_user/2, lookup_user/1, exists/1,
+-export([add_user/3, add_user/4, add_user/5, delete_user/2, lookup_user/1, exists/1,
          change_password/3, clear_password/2,
          hash_password/2, change_password_hash/2, change_password_hash/3,
          set_tags/3, set_permissions/6, clear_permissions/3,
          set_topic_permissions/6, clear_topic_permissions/3, clear_topic_permissions/4,
-         add_user_sans_validation/3, put_user/2, put_user/3]).
+         add_user_sans_validation/3, put_user/2, put_user/3,
+         update_user/5,
+         update_user_with_hash/5,
+         add_user_sans_validation/6]).
 
 -export([set_user_limits/3, clear_user_limits/3, is_over_connection_limit/1,
          is_over_channel_limit/1, get_user_limits/0, get_user_limits/1]).
@@ -208,14 +211,56 @@ add_user(Username, Password, ActingUser) ->
     validate_and_alternate_credentials(Username, Password, ActingUser,
                                        fun add_user_sans_validation/3).
 
+-spec add_user(rabbit_types:username(), rabbit_types:password(),
+               rabbit_types:username(), [atom()]) -> 'ok' | {'error', string()}.
+
+add_user(Username, Password, ActingUser, Tags) ->
+    add_user(Username, Password, ActingUser, undefined, Tags).
+
+add_user(Username, Password, ActingUser, Limits, Tags) ->
+    validate_and_alternate_credentials(Username, Password, ActingUser,
+                                       add_user_sans_validation(Limits, Tags)).
+
 add_user_sans_validation(Username, Password, ActingUser) ->
+    add_user_sans_validation(Username, Password, ActingUser, undefined, []).
+
+add_user_sans_validation(Limits, Tags) ->
+    fun(Username, Password, ActingUser) ->
+            add_user_sans_validation(Username, Password, ActingUser, Limits, Tags)
+    end.
+
+add_user_sans_validation(Username, Password, ActingUser, Limits, Tags) ->
     rabbit_log:debug("Asked to create a new user '~s', password length in bytes: ~p", [Username, bit_size(Password)]),
     %% hash_password will pick the hashing function configured for us
     %% but we also need to store a hint as part of the record, so we
     %% retrieve it here one more time
     HashingMod = rabbit_password:hashing_mod(),
     PasswordHash = hash_password(HashingMod, Password),
-    User = internal_user:create_user(Username, PasswordHash, HashingMod),
+    User0 = internal_user:create_user(Username, PasswordHash, HashingMod),
+    ConvertedTags = [rabbit_data_coercion:to_atom(I) || I <- Tags],
+    User1 = internal_user:set_tags(User0, ConvertedTags),
+    User = case Limits of
+               undefined -> User1;
+               Term -> internal_user:update_limits(add, User1, Term)
+           end,
+    add_user_sans_validation_in(Username, User, ConvertedTags, Limits, ActingUser).
+
+add_user_sans_validation(Username, PasswordHash, HashingAlgorithm, Tags, Limits, ActingUser) ->
+    rabbit_log:debug("Asked to create a new user '~s' with password hash", [Username]),
+    ConvertedTags = [rabbit_data_coercion:to_atom(I) || I <- Tags],
+    HashingMod = rabbit_password:hashing_mod(),
+    User0 = internal_user:create_user(Username, PasswordHash, HashingMod),
+    User1 = internal_user:set_tags(
+              internal_user:set_password_hash(User0,
+                                              PasswordHash, HashingAlgorithm),
+              ConvertedTags),
+    User = case Limits of
+               undefined -> User1;
+               Term -> internal_user:update_limits(add, User1, Term)
+           end,
+    add_user_sans_validation_in(Username, User, ConvertedTags, Limits, ActingUser).
+
+add_user_sans_validation_in(Username, User, ConvertedTags, Limits, ActingUser) ->
     try
         R = rabbit_misc:execute_mnesia_transaction(
           fun () ->
@@ -229,6 +274,14 @@ add_user_sans_validation(Username, Password, ActingUser) ->
         rabbit_log:info("Created user '~s'", [Username]),
         rabbit_event:notify(user_created, [{name, Username},
                                            {user_who_performed_action, ActingUser}]),
+        case ConvertedTags of
+            [] -> ok;
+            _ -> notify_user_tags_set(Username, ConvertedTags, ActingUser)
+        end,
+        case Limits of
+            undefined -> ok;
+            _ -> notify_limit_set(Username, ActingUser, Limits)
+        end,
         R
     catch
         throw:{error, {user_already_exists, _}} = Error ->
@@ -322,6 +375,42 @@ change_password_sans_validation(Username, Password, ActingUser) ->
             erlang:raise(Class, Error, Stacktrace)
     end.
 
+update_user(Username, Password, Tags, Limits, ActingUser) ->
+    validate_and_alternate_credentials(Username, Password, ActingUser,
+                                       update_user_sans_validation(Tags, Limits)).
+
+update_user_sans_validation(Tags, Limits) ->
+    fun(Username, Password, ActingUser) ->
+            try
+                rabbit_log:debug("Asked to change password of user '~s', new password length in bytes: ~p", [Username, bit_size(Password)]),
+                HashingAlgorithm = rabbit_password:hashing_mod(),
+
+                rabbit_log:debug("Asked to set user tags for user '~s' to ~p", [Username, Tags]),
+
+                ConvertedTags = [rabbit_data_coercion:to_atom(I) || I <- Tags],
+                R = update_user_with_hash(Username,
+                                          hash_password(rabbit_password:hashing_mod(),
+                                                        Password),
+                                          HashingAlgorithm,
+                                          ConvertedTags,
+                                          Limits),
+                rabbit_log:info("Successfully changed password for user '~s'", [Username]),
+                rabbit_event:notify(user_password_changed,
+                                    [{name, Username},
+                                     {user_who_performed_action, ActingUser}]),
+
+                notify_user_tags_set(Username, ConvertedTags, ActingUser),
+                R
+            catch
+                throw:{error, {no_such_user, _}} = Error ->
+                    rabbit_log:warning("Failed to change password for user '~s': the user does not exist", [Username]),
+                    throw(Error);
+                Class:Error:Stacktrace ->
+                    rabbit_log:warning("Failed to change password for user '~s': ~p", [Username, Error]),
+                    erlang:raise(Class, Error, Stacktrace)
+            end
+    end.
+
 -spec clear_password(rabbit_types:username(), rabbit_types:username()) -> 'ok'.
 
 clear_password(Username, ActingUser) ->
@@ -346,9 +435,21 @@ change_password_hash(Username, PasswordHash) ->
 
 
 change_password_hash(Username, PasswordHash, HashingAlgorithm) ->
-    update_user(Username, fun(User) ->
-                              internal_user:set_password_hash(User,
-                                  PasswordHash, HashingAlgorithm)
+    update_user_with_hash(Username, PasswordHash, HashingAlgorithm, [], undefined).
+
+update_user_with_hash(Username, PasswordHash, HashingAlgorithm, ConvertedTags, Limits) ->
+    update_user(Username,
+                fun(User0) ->
+                        User1 = internal_user:set_password_hash(User0,
+                                                                PasswordHash, HashingAlgorithm),
+                        User2 = case Limits of
+                                    undefined -> User1;
+                                    _         -> internal_user:update_limits(add, User1, Limits)
+                                end,
+                        case ConvertedTags of
+                            [] -> User2;
+                            _  -> internal_user:set_tags(User2, ConvertedTags)
+                        end
                 end).
 
 -spec set_tags(rabbit_types:username(), [atom()], rabbit_types:username()) -> 'ok'.
@@ -360,9 +461,7 @@ set_tags(Username, Tags, ActingUser) ->
         R = update_user(Username, fun(User) ->
                                      internal_user:set_tags(User, ConvertedTags)
                                   end),
-        rabbit_log:info("Successfully set user tags for user '~s' to ~p", [Username, ConvertedTags]),
-        rabbit_event:notify(user_tags_set, [{name, Username}, {tags, ConvertedTags},
-                                            {user_who_performed_action, ActingUser}]),
+        notify_user_tags_set(Username, ConvertedTags, ActingUser),
         R
     catch
         throw:{error, {no_such_user, _}} = Error ->
@@ -373,6 +472,11 @@ set_tags(Username, Tags, ActingUser) ->
             erlang:raise(Class, Error, Stacktrace)
     end .
 
+notify_user_tags_set(Username, ConvertedTags, ActingUser) ->
+    rabbit_log:info("Successfully set user tags for user '~s' to ~p", [Username, ConvertedTags]),
+    rabbit_event:notify(user_tags_set, [{name, Username}, {tags, ConvertedTags},
+                                        {user_who_performed_action, ActingUser}]).
+
 -spec set_permissions
         (rabbit_types:username(), rabbit_types:vhost(), regexp(), regexp(),
          regexp(), rabbit_types:username()) ->
@@ -648,13 +752,27 @@ put_user(User, Version, ActingUser) ->
                 rabbit_credential_validation:validate(Username, Password) =:= ok
         end,
 
+    Limits = case rabbit_feature_flags:is_enabled(user_limits) of
+                 false ->
+                     undefined;
+                 true ->
+                     case maps:get(limits, User, undefined) of
+                         undefined ->
+                             undefined;
+                         Term ->
+                             case validate_user_limits(Term) of
+                                 ok -> Term;
+                                 Error -> throw(Error)
+                             end
+                     end
+             end,
     case exists(Username) of
         true  ->
             case {HasPassword, HasPasswordHash} of
                 {true, false} ->
-                    update_user_password(PassedCredentialValidation, Username, Password, Tags, ActingUser);
+                    update_user_password(PassedCredentialValidation, Username, Password, Tags, Limits, ActingUser);
                 {false, true} ->
-                    update_user_password_hash(Username, PasswordHash, Tags, User, Version, ActingUser);
+                    update_user_password_hash(Username, PasswordHash, Tags, Limits, User, Version);
                 {true, true} ->
                     throw({error, both_password_and_password_hash_are_provided});
                 %% clear password, update tags if needed
@@ -665,63 +783,54 @@ put_user(User, Version, ActingUser) ->
         false ->
             case {HasPassword, HasPasswordHash} of
                 {true, false}  ->
-                    create_user_with_password(PassedCredentialValidation, Username, Password, Tags, Permissions, ActingUser);
+                    create_user_with_password(PassedCredentialValidation, Username, Password, Tags, Permissions, Limits, ActingUser);
                 {false, true}  ->
-                    create_user_with_password_hash(Username, PasswordHash, Tags, User, Version, Permissions, ActingUser);
+                    create_user_with_password_hash(Username, PasswordHash, Tags, User, Version, Permissions, Limits, ActingUser);
                 {true, true}   ->
                     throw({error, both_password_and_password_hash_are_provided});
                 {false, false} ->
                     %% this user won't be able to sign in using
                     %% a username/password pair but can be used for x509 certificate authentication,
                     %% with authn backends such as HTTP or LDAP and so on.
-                    create_user_with_password(PassedCredentialValidation, Username, <<"">>, Tags, Permissions, ActingUser)
+                    create_user_with_password(PassedCredentialValidation, Username, <<"">>, Tags, Permissions, Limits, ActingUser)
             end
     end.
 
-update_user_password(_PassedCredentialValidation = true,  Username, Password, Tags, ActingUser) ->
-    rabbit_auth_backend_internal:change_password(Username, Password, ActingUser),
-    rabbit_auth_backend_internal:set_tags(Username, Tags, ActingUser);
-update_user_password(_PassedCredentialValidation = false, _Username, _Password, _Tags, _ActingUser) ->
+update_user_password(_PassedCredentialValidation = true,  Username, Password, Tags, Limits, ActingUser) ->
+    %% change_password, set_tags and limits
+    rabbit_auth_backend_internal:update_user(Username, Password, Tags, Limits, ActingUser);
+update_user_password(_PassedCredentialValidation = false, _Username, _Password, _Tags, _Limits, _ActingUser) ->
     %% we don't log here because
     %% rabbit_auth_backend_internal will do it
    throw({error, credential_validation_failed}).
 
-update_user_password_hash(Username, PasswordHash, Tags, User, Version, ActingUser) ->
+update_user_password_hash(Username, PasswordHash, Tags, Limits, User, Version) ->
     %% when a hash this provided, credential validation
     %% is not applied
     HashingAlgorithm = hashing_algorithm(User, Version),
 
     Hash = rabbit_misc:b64decode_or_throw(PasswordHash),
-    rabbit_auth_backend_internal:change_password_hash(
-      Username, Hash, HashingAlgorithm),
-    rabbit_auth_backend_internal:set_tags(Username, Tags, ActingUser).
+    ConvertedTags = [rabbit_data_coercion:to_atom(I) || I <- Tags],
+    rabbit_auth_backend_internal:update_user_with_hash(
+      Username, Hash, HashingAlgorithm, ConvertedTags, Limits).
 
-create_user_with_password(_PassedCredentialValidation = true,  Username, Password, Tags, undefined, ActingUser) ->
-    rabbit_auth_backend_internal:add_user(Username, Password, ActingUser),
-    rabbit_auth_backend_internal:set_tags(Username, Tags, ActingUser);
-create_user_with_password(_PassedCredentialValidation = true,  Username, Password, Tags, PreconfiguredPermissions, ActingUser) ->
-    rabbit_auth_backend_internal:add_user(Username, Password, ActingUser),
-    rabbit_auth_backend_internal:set_tags(Username, Tags, ActingUser),
+create_user_with_password(_PassedCredentialValidation = true,  Username, Password, Tags, undefined, Limits, ActingUser) ->
+    rabbit_auth_backend_internal:add_user(Username, Password, ActingUser, Limits, Tags);
+create_user_with_password(_PassedCredentialValidation = true,  Username, Password, Tags, PreconfiguredPermissions, Limits, ActingUser) ->
+    rabbit_auth_backend_internal:add_user(Username, Password, ActingUser, Limits, Tags),
     preconfigure_permissions(Username, PreconfiguredPermissions, ActingUser);
-create_user_with_password(_PassedCredentialValidation = false, _Username, _Password, _Tags, _, _) ->
+create_user_with_password(_PassedCredentialValidation = false, _Username, _Password, _Tags, _, _, _) ->
     %% we don't log here because
     %% rabbit_auth_backend_internal will do it
     throw({error, credential_validation_failed}).
 
-create_user_with_password_hash(Username, PasswordHash, Tags, User, Version, PreconfiguredPermissions, ActingUser) ->
+create_user_with_password_hash(Username, PasswordHash, Tags, User, Version, PreconfiguredPermissions, Limits, ActingUser) ->
     %% when a hash this provided, credential validation
     %% is not applied
     HashingAlgorithm = hashing_algorithm(User, Version),
     Hash             = rabbit_misc:b64decode_or_throw(PasswordHash),
 
-    %% first we create a user with dummy credentials and no
-    %% validation applied, then we update password hash
-    TmpPassword = rabbit_guid:binary(rabbit_guid:gen_secure(), "tmp"),
-    rabbit_auth_backend_internal:add_user_sans_validation(Username, TmpPassword, ActingUser),
-
-    rabbit_auth_backend_internal:change_password_hash(
-      Username, Hash, HashingAlgorithm),
-    rabbit_auth_backend_internal:set_tags(Username, Tags, ActingUser),
+    rabbit_auth_backend_internal:add_user_sans_validation(Username, Hash, HashingAlgorithm, Tags, Limits, ActingUser),
     preconfigure_permissions(Username, PreconfiguredPermissions, ActingUser).
 
 preconfigure_permissions(_Username, undefined, _ActingUser) ->
@@ -756,8 +865,7 @@ set_user_limits(Username, Definition, ActingUser) when is_map(Definition) ->
     end.
 
 validate_parameters_and_update_limit(Username, Term, ActingUser) ->
-    case flatten_errors(rabbit_parameter_validation:proplist(
-                        <<"user-limits">>, user_limit_validation(), Term)) of
+    case validate_user_limits(Term) of
         ok ->
             update_user(Username, fun(User) ->
                                       internal_user:update_limits(add, User, Term)
@@ -767,6 +875,10 @@ validate_parameters_and_update_limit(Username, Term, ActingUser) ->
             {error_string, rabbit_misc:format(Reason, Arguments)}
     end.
 
+validate_user_limits(Term) ->
+    flatten_errors(rabbit_parameter_validation:proplist(
+                     <<"user-limits">>, user_limit_validation(), Term)).
+
 user_limit_validation() ->
     [{<<"max-connections">>, fun rabbit_parameter_validation:integer/2, optional},
      {<<"max-channels">>, fun rabbit_parameter_validation:integer/2, optional}].
@@ -1306,7 +1306,7 @@ handle_method(#'basic.publish'{exchange    = ExchangeNameBin,
     check_expiration_header(Props),
     DoConfirm = Tx =/= none orelse ConfirmEnabled,
     {MsgSeqNo, State1} =
-        case DoConfirm orelse Mandatory of
+        case DoConfirm of
             false -> {undefined, State0};
             true  -> rabbit_global_counters:messages_received_confirm(amqp091, 1),
                      SeqNo = State0#ch.publish_seqno,
@@ -464,6 +464,10 @@ add_policy(Param, Username) ->
 
 add_policy(VHost, Param, Username) ->
     Key   = maps:get(name,  Param, undefined),
+    case Key of
+      undefined -> exit(rabbit_misc:format("policy in virtual host '~s' has undefined name", [VHost]));
+      _ -> ok
+    end,
     case rabbit_policy:set(
            VHost, Key, maps:get(pattern, Param, undefined),
           case maps:get(definition, Param, undefined) of
@@ -33,6 +33,7 @@
          get_disk_free/0, set_enabled/1]).
 
 -define(SERVER, ?MODULE).
+-define(ETS_NAME, ?MODULE).
 -define(DEFAULT_MIN_DISK_CHECK_INTERVAL, 100).
 -define(DEFAULT_MAX_DISK_CHECK_INTERVAL, 10000).
 -define(DEFAULT_DISK_FREE_LIMIT, 50000000).
@@ -73,51 +74,42 @@
 %%----------------------------------------------------------------------------
 
 -spec get_disk_free_limit() -> integer().
-
 get_disk_free_limit() ->
-    gen_server:call(?MODULE, get_disk_free_limit, infinity).
+    safe_ets_lookup(disk_free_limit, ?DEFAULT_DISK_FREE_LIMIT).
 
 -spec set_disk_free_limit(disk_free_limit()) -> 'ok'.
-
 set_disk_free_limit(Limit) ->
-    gen_server:call(?MODULE, {set_disk_free_limit, Limit}, infinity).
+    gen_server:call(?MODULE, {set_disk_free_limit, Limit}).
 
 -spec get_min_check_interval() -> integer().
-
 get_min_check_interval() ->
-    gen_server:call(?MODULE, get_min_check_interval, infinity).
+    safe_ets_lookup(min_check_interval, ?DEFAULT_MIN_DISK_CHECK_INTERVAL).
 
 -spec set_min_check_interval(integer()) -> 'ok'.
-
 set_min_check_interval(Interval) ->
-    gen_server:call(?MODULE, {set_min_check_interval, Interval}, infinity).
+    gen_server:call(?MODULE, {set_min_check_interval, Interval}).
 
 -spec get_max_check_interval() -> integer().
-
 get_max_check_interval() ->
-    gen_server:call(?MODULE, get_max_check_interval, infinity).
+    safe_ets_lookup(max_check_interval, ?DEFAULT_MAX_DISK_CHECK_INTERVAL).
 
 -spec set_max_check_interval(integer()) -> 'ok'.
-
 set_max_check_interval(Interval) ->
-    gen_server:call(?MODULE, {set_max_check_interval, Interval}, infinity).
+    gen_server:call(?MODULE, {set_max_check_interval, Interval}).
 
 -spec get_disk_free() -> (integer() | 'unknown').
-
 get_disk_free() ->
-    gen_server:call(?MODULE, get_disk_free, infinity).
+    safe_ets_lookup(disk_free, unknown).
 
 -spec set_enabled(string()) -> 'ok'.
-
 set_enabled(Enabled) ->
-    gen_server:call(?MODULE, {set_enabled, Enabled}, infinity).
+    gen_server:call(?MODULE, {set_enabled, Enabled}).
 
 %%----------------------------------------------------------------------------
 %% gen_server callbacks
 %%----------------------------------------------------------------------------
 
 -spec start_link(disk_free_limit()) -> rabbit_types:ok_pid_or_error().
-
 start_link(Args) ->
     gen_server:start_link({local, ?SERVER}, ?MODULE, [Args], []).
 
@@ -125,18 +117,16 @@ init([Limit]) ->
     Dir = dir(),
     {ok, Retries} = application:get_env(rabbit, disk_monitor_failure_retries),
     {ok, Interval} = application:get_env(rabbit, disk_monitor_failure_retry_interval),
-    State = #state{dir          = Dir,
-                   min_interval = ?DEFAULT_MIN_DISK_CHECK_INTERVAL,
-                   max_interval = ?DEFAULT_MAX_DISK_CHECK_INTERVAL,
+    ?ETS_NAME = ets:new(?ETS_NAME, [protected, set, named_table]),
+    State0 = #state{dir          = Dir,
                     alarmed      = false,
                     enabled      = true,
                     limit        = Limit,
                     retries      = Retries,
                     interval     = Interval},
-    {ok, enable(State)}.
-
-handle_call(get_disk_free_limit, _From, State = #state{limit = Limit}) ->
-    {reply, Limit, State};
+    State1 = set_min_check_interval(?DEFAULT_MIN_DISK_CHECK_INTERVAL, State0),
+    State2 = set_max_check_interval(?DEFAULT_MAX_DISK_CHECK_INTERVAL, State1),
+    {ok, enable(State2)}.
 
 handle_call({set_disk_free_limit, _}, _From, #state{enabled = false} = State) ->
     rabbit_log:info("Cannot set disk free limit: "
@@ -146,20 +136,14 @@ handle_call({set_disk_free_limit, _}, _From, #state{enabled = false} = State) ->
 handle_call({set_disk_free_limit, Limit}, _From, State) ->
     {reply, ok, set_disk_limits(State, Limit)};
 
-handle_call(get_min_check_interval, _From, State) ->
-    {reply, State#state.min_interval, State};
-
 handle_call(get_max_check_interval, _From, State) ->
     {reply, State#state.max_interval, State};
 
 handle_call({set_min_check_interval, MinInterval}, _From, State) ->
-    {reply, ok, State#state{min_interval = MinInterval}};
+    {reply, ok, set_min_check_interval(MinInterval, State)};
 
 handle_call({set_max_check_interval, MaxInterval}, _From, State) ->
-    {reply, ok, State#state{max_interval = MaxInterval}};
-
-handle_call(get_disk_free, _From, State = #state { actual = Actual }) ->
-    {reply, Actual, State};
+    {reply, ok, set_max_check_interval(MaxInterval, State)};
 
 handle_call({set_enabled, _Enabled = true}, _From, State) ->
     start_timer(set_disk_limits(State, State#state.limit)),
@@ -194,14 +178,36 @@ code_change(_OldVsn, State, _Extra) ->
 %% Server Internals
 %%----------------------------------------------------------------------------
 
+safe_ets_lookup(Key, Default) ->
+    try
+        case ets:lookup(?ETS_NAME, Key) of
+            [{Key, Value}] ->
+                Value;
+            [] ->
+                Default
+        end
+    catch
+        error:badarg ->
+            Default
+    end.
+
 % the partition / drive containing this directory will be monitored
 dir() -> rabbit_mnesia:dir().
 
+set_min_check_interval(MinInterval, State) ->
+    ets:insert(?ETS_NAME, {min_check_interval, MinInterval}),
+    State#state{min_interval = MinInterval}.
+
+set_max_check_interval(MaxInterval, State) ->
+    ets:insert(?ETS_NAME, {max_check_interval, MaxInterval}),
+    State#state{max_interval = MaxInterval}.
+
 set_disk_limits(State, Limit0) ->
     Limit = interpret_limit(Limit0),
     State1 = State#state { limit = Limit },
     rabbit_log:info("Disk free limit set to ~pMB",
                     [trunc(Limit / 1000000)]),
+    ets:insert(?ETS_NAME, {disk_free_limit, Limit}),
     internal_update(State1).
 
 internal_update(State = #state { limit   = Limit,
@@ -219,6 +225,7 @@ internal_update(State = #state { limit   = Limit,
         _ ->
             ok
     end,
+    ets:insert(?ETS_NAME, {disk_free, CurrentFree}),
     State#state{alarmed = NewAlarmed, actual = CurrentFree}.
 
 get_disk_free(Dir) ->
@@ -227,11 +234,89 @@ get_disk_free(Dir) ->
 get_disk_free(Dir, {unix, Sun})
   when Sun =:= sunos; Sun =:= sunos4; Sun =:= solaris ->
     Df = os:find_executable("df"),
-    parse_free_unix(rabbit_misc:os_cmd(Df ++ " -k " ++ Dir));
+    parse_free_unix(run_cmd(Df ++ " -k " ++ Dir));
 get_disk_free(Dir, {unix, _}) ->
     Df = os:find_executable("df"),
-    parse_free_unix(rabbit_misc:os_cmd(Df ++ " -kP " ++ Dir));
+    parse_free_unix(run_cmd(Df ++ " -kP " ++ Dir));
 get_disk_free(Dir, {win32, _}) ->
+    % Dir:
+    % "c:/Users/username/AppData/Roaming/RabbitMQ/db/rabbit2@username-z01-mnesia"
+    case win32_get_drive_letter(Dir) of
+        error ->
+            rabbit_log:warning("Expected the mnesia directory absolute "
+                               "path to start with a drive letter like "
+                               "'C:'. The path is: '~p'", [Dir]),
+            case win32_get_disk_free_dir(Dir) of
+                {ok, Free} ->
+                    Free;
+                _ -> exit(could_not_determine_disk_free)
+            end;
+        DriveLetter ->
+            case win32_get_disk_free_fsutil(DriveLetter) of
+                {ok, Free0} -> Free0;
+                error ->
+                    case win32_get_disk_free_pwsh(DriveLetter) of
+                        {ok, Free1} -> Free1;
+                        _ -> exit(could_not_determine_disk_free)
+                    end
+            end
+    end.
+
+parse_free_unix(Str) ->
+    case string:tokens(Str, "\n") of
+        [_, S | _] -> case string:tokens(S, " \t") of
+                          [_, _, _, Free | _] -> list_to_integer(Free) * 1024;
+                          _                   -> exit({unparseable, Str})
+                      end;
+        _          -> exit({unparseable, Str})
+    end.
+
+win32_get_drive_letter([DriveLetter, $:, $/ | _]) when
+      (DriveLetter >= $a andalso DriveLetter =< $z) orelse
+      (DriveLetter >= $A andalso DriveLetter =< $Z) ->
+    DriveLetter;
+win32_get_drive_letter(_) ->
+    error.
+
+win32_get_disk_free_fsutil(DriveLetter) when
+      (DriveLetter >= $a andalso DriveLetter =< $z) orelse
+      (DriveLetter >= $A andalso DriveLetter =< $Z) ->
+    % DriveLetter $c
+    FsutilCmd = "fsutil.exe volume diskfree " ++ [DriveLetter] ++ ":",
+
+    % C:\windows\system32>fsutil volume diskfree c:
+    % Total free bytes        :   812,733,878,272 (756.9 GB)
+    % Total bytes             : 1,013,310,287,872 (943.7 GB)
+    % Total quota free bytes  :   812,733,878,272 (756.9 GB)
+    case run_cmd(FsutilCmd) of
+        {error, timeout} ->
+            error;
+        FsutilResult ->
+            case string:slice(FsutilResult, 0, 5) of
+                "Error" ->
+                    error;
+                "Total" ->
+                    FirstLine = hd(string:tokens(FsutilResult, "\r\n")),
+                    {match, [FreeStr]} = re:run(FirstLine, "(\\d+,?)+", [{capture, first, list}]),
+                    {ok, list_to_integer(lists:flatten(string:tokens(FreeStr, ",")))}
+            end
+    end.
+
+win32_get_disk_free_pwsh(DriveLetter) when
+      (DriveLetter >= $a andalso DriveLetter =< $z) orelse
+      (DriveLetter >= $A andalso DriveLetter =< $Z) ->
+    % DriveLetter $c
+    PoshCmd = "powershell.exe -NoLogo -NoProfile -NonInteractive -Command (Get-PSDrive " ++ [DriveLetter] ++ ").Free",
+    case run_cmd(PoshCmd) of
+        {error, timeout} ->
+            error;
+        PoshResultStr ->
+            % Note: remove \r\n
+            PoshResult = string:slice(PoshResultStr, 0, length(PoshResultStr) - 2),
+            {ok, list_to_integer(PoshResult)}
+    end.
+
+win32_get_disk_free_dir(Dir) ->
     %% On Windows, the Win32 API enforces a limit of 260 characters
     %% (MAX_PATH). If we call `dir` with a path longer than that, it
     %% fails with "File not found". Starting with Windows 10 version
@@ -253,22 +338,11 @@ get_disk_free(Dir, {win32, _}) ->
     %% See the following page to learn more about this:
     %% https://ss64.com/nt/syntax-filenames.html
     RawDir = "\\\\?\\" ++ string:replace(Dir, "/", "\\", all),
-    parse_free_win32(rabbit_misc:os_cmd("dir /-C /W \"" ++ RawDir ++ "\"")).
-
-parse_free_unix(Str) ->
-    case string:tokens(Str, "\n") of
-        [_, S | _] -> case string:tokens(S, " \t") of
-                          [_, _, _, Free | _] -> list_to_integer(Free) * 1024;
-                          _                   -> exit({unparseable, Str})
-                      end;
-        _          -> exit({unparseable, Str})
-    end.
-
-parse_free_win32(CommandResult) ->
+    CommandResult = run_cmd("dir /-C /W \"" ++ RawDir ++ "\""),
     LastLine = lists:last(string:tokens(CommandResult, "\r\n")),
     {match, [Free]} = re:run(lists:reverse(LastLine), "(\\d+)",
                              [{capture, all_but_first, list}]),
-    list_to_integer(lists:reverse(Free)).
+    {ok, list_to_integer(lists:reverse(Free))}.
 
 interpret_limit({mem_relative, Relative})
     when is_number(Relative) ->
@@ -318,3 +392,20 @@ enable(#state{dir = Dir, interval = Interval, limit = Limit, retries = Retries}
             erlang:send_after(Interval, self(), try_enable),
             State#state{enabled = false}
     end.
+
+run_cmd(Cmd) ->
+    Pid = self(),
+    Ref = make_ref(),
+    CmdFun = fun() ->
+        CmdResult = rabbit_misc:os_cmd(Cmd),
+        Pid ! {Pid, Ref, CmdResult}
+    end,
+    CmdPid = spawn(CmdFun),
+    receive
+        {Pid, Ref, CmdResult} ->
+            CmdResult
+    after 5000 ->
+        exit(CmdPid, kill),
+        rabbit_log:error("Command timed out: '~s'", [Cmd]),
+        {error, timeout}
+    end.
@@ -40,7 +40,7 @@ is_file(File) ->
 
 is_dir(Dir) -> is_dir_internal(read_file_info(Dir)).
 
-is_dir_no_handle(Dir) -> is_dir_internal(prim_file:read_file_info(Dir)).
+is_dir_no_handle(Dir) -> is_dir_internal(file:read_file_info(Dir, [raw])).
 
 is_dir_internal({ok, #file_info{type=directory}}) -> true;
 is_dir_internal(_)                                -> false.
@@ -83,14 +83,23 @@ wildcard(Pattern, Dir) ->
 list_dir(Dir) -> with_handle(fun () -> prim_file:list_dir(Dir) end).
 
 read_file_info(File) ->
-    with_handle(fun () -> prim_file:read_file_info(File) end).
+    with_handle(fun () -> file:read_file_info(File, [raw]) end).
 
 -spec read_term_file
         (file:filename()) -> {'ok', [any()]} | rabbit_types:error(any()).
 
 read_term_file(File) ->
     try
-        {ok, Data} = with_handle(fun () -> prim_file:read_file(File) end),
+        F = fun() ->
+                    {ok, FInfo} = file:read_file_info(File, [raw]),
+                    {ok, Fd} = file:open(File, [read, raw, binary]),
+                    try
+                        file:read(Fd, FInfo#file_info.size)
+                    after
+                        file:close(Fd)
+                    end
+            end,
+        {ok, Data} = with_handle(F),
         {ok, Tokens, _} = erl_scan:string(binary_to_list(Data)),
         TokenGroups = group_tokens(Tokens),
         {ok, [begin
@@ -64,7 +64,11 @@ node_health_check(rabbit_node_monitor) ->
     end;
 
 node_health_check(alarms) ->
-    case proplists:get_value(alarms, rabbit:status()) of
+    % Note:
+    % Removed call to rabbit:status/0 here due to a memory leak on win32,
+    % plus it uses an excessive amount of resources
+    % Alternative to https://github.com/rabbitmq/rabbitmq-server/pull/3893
+    case rabbit:alarms() of
         [] ->
             ok;
         Alarms ->
@@ -78,6 +78,8 @@ handle_info(tick, #state{timeout = Timeout} = State) ->
                               %% down `rabbit_sup` and the whole `rabbit` app.
                               []
                       end,
+
+
               rabbit_core_metrics:queue_stats(QName, Infos),
               rabbit_event:notify(queue_stats, Infos ++ [{name, QName},
                                                          {messages, COffs},
@@ -176,4 +176,6 @@ merge_policy_value(<<"max-length-bytes">>, Val, OpVal) -> min(Val, OpVal);
 merge_policy_value(<<"max-in-memory-length">>, Val, OpVal) -> min(Val, OpVal);
 merge_policy_value(<<"max-in-memory-bytes">>, Val, OpVal) -> min(Val, OpVal);
 merge_policy_value(<<"expires">>, Val, OpVal)          -> min(Val, OpVal);
-merge_policy_value(<<"delivery-limit">>, Val, OpVal)   -> min(Val, OpVal).
+merge_policy_value(<<"delivery-limit">>, Val, OpVal)   -> min(Val, OpVal);
+%% use operator policy value for booleans
+merge_policy_value(_Key, Val, OpVal) when is_boolean(Val) andalso is_boolean(OpVal) -> OpVal.
@@ -846,7 +846,11 @@ phase_update_mnesia(StreamId, Args, #{reference := QName,
                     %% This can happen during recovery
                     %% we need to re-initialise the queue record
                     %% if the stream id is a match
-                    [Q] = mnesia:dirty_read(rabbit_durable_queue, QName),
+                    case mnesia:dirty_read(rabbit_durable_queue, QName) of
+                        [] ->
+                            %% queue not found at all, it must have been deleted
+                            ok;
+                        [Q] ->
                             case amqqueue:get_type_state(Q) of
                                 #{name := S} when S == StreamId ->
                                     rabbit_log:debug("~s: initializing queue record for stream id  ~s",
@@ -856,8 +860,8 @@ phase_update_mnesia(StreamId, Args, #{reference := QName,
                                 _ ->
                                     ok
                             end,
-
-                    send_self_command({mnesia_updated, StreamId, Args});
+                            send_self_command({mnesia_updated, StreamId, Args})
+                    end;
                 _ ->
                     send_self_command({mnesia_updated, StreamId, Args})
             catch _:E ->
|  | @ -46,7 +46,10 @@ groups() -> | ||||||
|                                import_case13, |                                import_case13, | ||||||
|                                import_case14, |                                import_case14, | ||||||
|                                import_case15, |                                import_case15, | ||||||
|                                import_case16 |                                import_case16, | ||||||
|  |                                import_case17, | ||||||
|  |                                import_case18, | ||||||
|  |                                import_case19 | ||||||
|                               ]}, |                               ]}, | ||||||
|          |          | ||||||
|         {boot_time_import_using_classic_source, [], [ |         {boot_time_import_using_classic_source, [], [ | ||||||
|  | @ -236,6 +239,36 @@ import_case16(Config) -> | ||||||
|         {skip, "Should not run in mixed version environments"} |         {skip, "Should not run in mixed version environments"} | ||||||
|     end. |     end. | ||||||
| 
 | 
 | ||||||
|  | import_case17(Config) -> import_invalid_file_case(Config, "failing_case17"). | ||||||
|  | 
 | ||||||
|  | import_case18(Config) -> | ||||||
|  |     case rabbit_ct_helpers:is_mixed_versions() of | ||||||
|  |       false -> | ||||||
|  |         case rabbit_ct_broker_helpers:enable_feature_flag(Config, user_limits) of | ||||||
|  |             ok -> | ||||||
|  |                 import_file_case(Config, "case18"), | ||||||
|  |                 User = <<"limited_guest">>, | ||||||
|  |                 UserIsImported = | ||||||
|  |                     fun () -> | ||||||
|  |                             case user_lookup(Config, User) of | ||||||
|  |                                 {error, not_found} -> false; | ||||||
|  |                                 _       -> true | ||||||
|  |                             end | ||||||
|  |                     end, | ||||||
|  |                 rabbit_ct_helpers:await_condition(UserIsImported, 20000), | ||||||
|  |                 {ok, UserRec} = user_lookup(Config, User), | ||||||
|  |                 ?assertEqual(#{<<"max-connections">> => 2}, internal_user:get_limits(UserRec)), | ||||||
|  |                 ok; | ||||||
|  |             Skip -> | ||||||
|  |                 Skip | ||||||
|  |         end; | ||||||
|  |       _ -> | ||||||
|  |         %% skip the test in mixed version mode | ||||||
|  |         {skip, "Should not run in mixed version environments"} | ||||||
|  |     end. | ||||||
|  | 
 | ||||||
|  | import_case19(Config) -> import_invalid_file_case(Config, "failing_case19"). | ||||||
|  | 
 | ||||||
| export_import_round_trip_case1(Config) -> | export_import_round_trip_case1(Config) -> | ||||||
|     case rabbit_ct_helpers:is_mixed_versions() of |     case rabbit_ct_helpers:is_mixed_versions() of | ||||||
|       false -> |       false -> | ||||||
|  | @ -382,3 +415,6 @@ queue_lookup(Config, VHost, Name) -> | ||||||
| 
 | 
 | ||||||
| vhost_lookup(Config, VHost) -> | vhost_lookup(Config, VHost) -> | ||||||
|     rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_vhost, lookup, [VHost]). |     rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_vhost, lookup, [VHost]). | ||||||
|  | 
 | ||||||
|  | user_lookup(Config, User) -> | ||||||
|  |     rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_auth_backend_internal, lookup_user, [User]). | ||||||
|  |  | ||||||
|  | @ -0,0 +1,46 @@ | ||||||
|  | { | ||||||
|  |   "bindings": [], | ||||||
|  |   "exchanges": [], | ||||||
|  |   "global_parameters": [ | ||||||
|  |     { | ||||||
|  |       "name": "cluster_name", | ||||||
|  |       "value": "rabbitmq@localhost" | ||||||
|  |     } | ||||||
|  |   ], | ||||||
|  |   "parameters": [], | ||||||
|  |   "permissions": [ | ||||||
|  |     { | ||||||
|  |       "configure": ".*", | ||||||
|  |       "read": ".*", | ||||||
|  |       "user": "guest", | ||||||
|  |       "vhost": "/", | ||||||
|  |       "write": ".*" | ||||||
|  |     } | ||||||
|  |   ], | ||||||
|  |   "policies": [], | ||||||
|  |   "queues": [], | ||||||
|  |   "rabbit_version": "3.9.1", | ||||||
|  |   "rabbitmq_version": "3.9.1", | ||||||
|  |   "topic_permissions": [], | ||||||
|  |   "users": [ | ||||||
|  |     { | ||||||
|  |       "hashing_algorithm": "rabbit_password_hashing_sha256", | ||||||
|  |       "limits": {"max-connections" : 2}, | ||||||
|  |       "name": "limited_guest", | ||||||
|  |       "password_hash": "wS4AT3B4Z5RpWlFn1FA30osf2C75D7WA3gem591ACDZ6saO6", | ||||||
|  |       "tags": [ | ||||||
|  |         "administrator" | ||||||
|  |       ] | ||||||
|  |     } | ||||||
|  |   ], | ||||||
|  |   "vhosts": [ | ||||||
|  |     { | ||||||
|  |       "limits": [], | ||||||
|  |       "name": "/" | ||||||
|  |     }, | ||||||
|  |     { | ||||||
|  |       "limits": [], | ||||||
|  |       "name": "tagged" | ||||||
|  |     } | ||||||
|  |   ] | ||||||
|  | } | ||||||
|  | @ -0,0 +1,19 @@ | ||||||
|  | { | ||||||
|  |   "vhosts": [ | ||||||
|  |     { | ||||||
|  |       "name": "\/" | ||||||
|  |     } | ||||||
|  |   ], | ||||||
|  |   "policies": [ | ||||||
|  |     { | ||||||
|  |       "vhost": "\/", | ||||||
|  |       "pattern": "^project-nd-ns-", | ||||||
|  |       "apply-to": "queues", | ||||||
|  |       "definition": { | ||||||
|  |         "expires": 120000, | ||||||
|  |         "max-length": 10000 | ||||||
|  |       }, | ||||||
|  |       "priority": 1 | ||||||
|  |     } | ||||||
|  |   ] | ||||||
|  | } | ||||||
|  | @ -0,0 +1,46 @@ | ||||||
|  | { | ||||||
|  |   "bindings": [], | ||||||
|  |   "exchanges": [], | ||||||
|  |   "global_parameters": [ | ||||||
|  |     { | ||||||
|  |       "name": "cluster_name", | ||||||
|  |       "value": "rabbitmq@localhost" | ||||||
|  |     } | ||||||
|  |   ], | ||||||
|  |   "parameters": [], | ||||||
|  |   "permissions": [ | ||||||
|  |     { | ||||||
|  |       "configure": ".*", | ||||||
|  |       "read": ".*", | ||||||
|  |       "user": "guest", | ||||||
|  |       "vhost": "/", | ||||||
|  |       "write": ".*" | ||||||
|  |     } | ||||||
|  |   ], | ||||||
|  |   "policies": [], | ||||||
|  |   "queues": [], | ||||||
|  |   "rabbit_version": "3.9.1", | ||||||
|  |   "rabbitmq_version": "3.9.1", | ||||||
|  |   "topic_permissions": [], | ||||||
|  |   "users": [ | ||||||
|  |     { | ||||||
|  |       "hashing_algorithm": "rabbit_password_hashing_sha256", | ||||||
|  |       "limits": {"max-connections" : "twomincepies"}, | ||||||
|  |       "name": "limited_guest", | ||||||
|  |       "password_hash": "wS4AT3B4Z5RpWlFn1FA30osf2C75D7WA3gem591ACDZ6saO6", | ||||||
|  |       "tags": [ | ||||||
|  |         "administrator" | ||||||
|  |       ] | ||||||
|  |     } | ||||||
|  |   ], | ||||||
|  |   "vhosts": [ | ||||||
|  |     { | ||||||
|  |       "limits": [], | ||||||
|  |       "name": "/" | ||||||
|  |     }, | ||||||
|  |     { | ||||||
|  |       "limits": [], | ||||||
|  |       "name": "tagged" | ||||||
|  |     } | ||||||
|  |   ] | ||||||
|  | } | ||||||
|  | @ -29,6 +29,7 @@ groups() -> | ||||||
|                              confirm_nowait, |                              confirm_nowait, | ||||||
|                              confirm_ack, |                              confirm_ack, | ||||||
|                              confirm_acks, |                              confirm_acks, | ||||||
|  |                              confirm_after_mandatory_bug, | ||||||
|                              confirm_mandatory_unroutable, |                              confirm_mandatory_unroutable, | ||||||
|                              confirm_unroutable_message], |                              confirm_unroutable_message], | ||||||
|     [ |     [ | ||||||
|  | @ -187,6 +188,17 @@ confirm_acks(Config) -> | ||||||
|     publish(Ch, QName, [<<"msg1">>, <<"msg2">>, <<"msg3">>, <<"msg4">>]), |     publish(Ch, QName, [<<"msg1">>, <<"msg2">>, <<"msg3">>, <<"msg4">>]), | ||||||
|     receive_many(lists:seq(1, 4)). |     receive_many(lists:seq(1, 4)). | ||||||
| 
 | 
 | ||||||
|  | confirm_after_mandatory_bug(Config) -> | ||||||
|  |     {_Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0), | ||||||
|  |     QName = ?config(queue_name, Config), | ||||||
|  |     declare_queue(Ch, Config, QName), | ||||||
|  |     ok = amqp_channel:call(Ch, #'basic.publish'{routing_key = QName, | ||||||
|  |                                                 mandatory = true}, #amqp_msg{payload = <<"msg1">>}), | ||||||
|  |     #'confirm.select_ok'{} = amqp_channel:call(Ch, #'confirm.select'{}), | ||||||
|  |     publish(Ch, QName, [<<"msg2">>]), | ||||||
|  |     true = amqp_channel:wait_for_confirms(Ch, 1), | ||||||
|  |     ok. | ||||||
|  | 
 | ||||||
| %% For unroutable messages, the broker will issue a confirm once the exchange verifies a message | %% For unroutable messages, the broker will issue a confirm once the exchange verifies a message | ||||||
| %% won't route to any queue (returns an empty list of queues). | %% won't route to any queue (returns an empty list of queues). | ||||||
| %% If the message is also published as mandatory, the basic.return is sent to the client before | %% If the message is also published as mandatory, the basic.return is sent to the client before | ||||||
|  |  | ||||||
|  | @ -11,7 +11,7 @@ | ||||||
| -include_lib("common_test/include/ct.hrl"). | -include_lib("common_test/include/ct.hrl"). | ||||||
| -include_lib("eunit/include/eunit.hrl"). | -include_lib("eunit/include/eunit.hrl"). | ||||||
| -include_lib("rabbit_common/include/rabbit.hrl"). | -include_lib("rabbit_common/include/rabbit.hrl"). | ||||||
| -include("src/rabbit_fifo.hrl"). | -include_lib("rabbit/src/rabbit_fifo.hrl"). | ||||||
| 
 | 
 | ||||||
| %%%=================================================================== | %%%=================================================================== | ||||||
| %%% Common Test callbacks | %%% Common Test callbacks | ||||||
|  |  | ||||||
|  | @ -67,6 +67,12 @@ set_disk_free_limit_command(Config) -> | ||||||
|       ?MODULE, set_disk_free_limit_command1, [Config]). |       ?MODULE, set_disk_free_limit_command1, [Config]). | ||||||
| 
 | 
 | ||||||
| set_disk_free_limit_command1(_Config) -> | set_disk_free_limit_command1(_Config) -> | ||||||
|  |     F = fun () -> | ||||||
|  |         DiskFree = rabbit_disk_monitor:get_disk_free(), | ||||||
|  |         DiskFree =/= unknown | ||||||
|  |     end, | ||||||
|  |     rabbit_ct_helpers:await_condition(F), | ||||||
|  | 
 | ||||||
|     %% Use an integer |     %% Use an integer | ||||||
|     rabbit_disk_monitor:set_disk_free_limit({mem_relative, 1}), |     rabbit_disk_monitor:set_disk_free_limit({mem_relative, 1}), | ||||||
|     disk_free_limit_to_total_memory_ratio_is(1), |     disk_free_limit_to_total_memory_ratio_is(1), | ||||||
|  | @ -84,7 +90,8 @@ set_disk_free_limit_command1(_Config) -> | ||||||
|     passed. |     passed. | ||||||
| 
 | 
 | ||||||
| disk_free_limit_to_total_memory_ratio_is(MemRatio) -> | disk_free_limit_to_total_memory_ratio_is(MemRatio) -> | ||||||
|  |     DiskFreeLimit = rabbit_disk_monitor:get_disk_free_limit(), | ||||||
|     ExpectedLimit = MemRatio * vm_memory_monitor:get_total_memory(), |     ExpectedLimit = MemRatio * vm_memory_monitor:get_total_memory(), | ||||||
|     % Total memory is unstable, so checking order |     % Total memory is unstable, so checking order | ||||||
|     true = ExpectedLimit/rabbit_disk_monitor:get_disk_free_limit() < 1.2, |     true = ExpectedLimit/DiskFreeLimit < 1.2, | ||||||
|     true = ExpectedLimit/rabbit_disk_monitor:get_disk_free_limit() > 0.98. |     true = ExpectedLimit/DiskFreeLimit > 0.98. | ||||||
|  |  | ||||||
|  | @ -88,7 +88,7 @@ disk_monitor_enable1(_Config) -> | ||||||
|     application:set_env(rabbit, disk_monitor_failure_retry_interval, 100), |     application:set_env(rabbit, disk_monitor_failure_retry_interval, 100), | ||||||
|     ok = rabbit_sup:stop_child(rabbit_disk_monitor_sup), |     ok = rabbit_sup:stop_child(rabbit_disk_monitor_sup), | ||||||
|     ok = rabbit_sup:start_delayed_restartable_child(rabbit_disk_monitor, [1000]), |     ok = rabbit_sup:start_delayed_restartable_child(rabbit_disk_monitor, [1000]), | ||||||
|     undefined = rabbit_disk_monitor:get_disk_free(), |     unknown = rabbit_disk_monitor:get_disk_free(), | ||||||
|     Cmd = case os:type() of |     Cmd = case os:type() of | ||||||
|               {win32, _} -> " Le volume dans le lecteur C n’a pas de nom.\n" |               {win32, _} -> " Le volume dans le lecteur C n’a pas de nom.\n" | ||||||
|                             " Le numéro de série du volume est 707D-5BDC\n" |                             " Le numéro de série du volume est 707D-5BDC\n" | ||||||
|  |  | ||||||
|  | @ -21,7 +21,8 @@ all() -> | ||||||
| groups() -> | groups() -> | ||||||
|     [ |     [ | ||||||
|       {parallel_tests, [parallel], [ |       {parallel_tests, [parallel], [ | ||||||
|           merge_operator_policy_definitions |           merge_operator_policy_definitions, | ||||||
|  |           conflict_resolution_for_booleans | ||||||
|         ]} |         ]} | ||||||
|     ]. |     ]. | ||||||
| 
 | 
 | ||||||
|  | @ -102,6 +103,54 @@ merge_operator_policy_definitions(_Config) -> | ||||||
|                     [{definition, [ |                     [{definition, [ | ||||||
|                       {<<"message-ttl">>, 3000} |                       {<<"message-ttl">>, 3000} | ||||||
|                     ]}]) |                     ]}]) | ||||||
|     ), |     ). | ||||||
| 
 | 
 | ||||||
|     passed. | 
 | ||||||
|  |   conflict_resolution_for_booleans(_Config) -> | ||||||
|  |     ?assertEqual( | ||||||
|  |       [ | ||||||
|  |         {<<"remote-dc-replicate">>, true} | ||||||
|  |       ], | ||||||
|  |       rabbit_policy:merge_operator_definitions( | ||||||
|  |          #{definition => #{ | ||||||
|  |            <<"remote-dc-replicate">> => true | ||||||
|  |          }}, | ||||||
|  |          [{definition, [ | ||||||
|  |            {<<"remote-dc-replicate">>, true} | ||||||
|  |          ]}])), | ||||||
|  | 
 | ||||||
|  |     ?assertEqual( | ||||||
|  |       [ | ||||||
|  |         {<<"remote-dc-replicate">>, false} | ||||||
|  |       ], | ||||||
|  |       rabbit_policy:merge_operator_definitions( | ||||||
|  |         #{definition => #{ | ||||||
|  |           <<"remote-dc-replicate">> => false | ||||||
|  |         }}, | ||||||
|  |         [{definition, [ | ||||||
|  |           {<<"remote-dc-replicate">>, false} | ||||||
|  |         ]}])), | ||||||
|  | 
 | ||||||
|  |     ?assertEqual( | ||||||
|  |       [ | ||||||
|  |         {<<"remote-dc-replicate">>, true} | ||||||
|  |       ], | ||||||
|  |       rabbit_policy:merge_operator_definitions( | ||||||
|  |         #{definition => #{ | ||||||
|  |           <<"remote-dc-replicate">> => false | ||||||
|  |         }}, | ||||||
|  |         [{definition, [ | ||||||
|  |           {<<"remote-dc-replicate">>, true} | ||||||
|  |         ]}])), | ||||||
|  | 
 | ||||||
|  |     ?assertEqual( | ||||||
|  |       [ | ||||||
|  |         {<<"remote-dc-replicate">>, false} | ||||||
|  |       ], | ||||||
|  |       rabbit_policy:merge_operator_definitions( | ||||||
|  |         #{definition => #{ | ||||||
|  |           <<"remote-dc-replicate">> => true | ||||||
|  |         }}, | ||||||
|  |         [{definition, [ | ||||||
|  |           {<<"remote-dc-replicate">>, false} | ||||||
|  |         ]}])). | ||||||
|  | @ -139,6 +139,46 @@ In that case, the configuration will look like this: | ||||||
| 
 | 
 | ||||||
| NOTE: `jwks_url` takes precedence over `signing_keys` if both are provided. | NOTE: `jwks_url` takes precedence over `signing_keys` if both are provided. | ||||||
| 
 | 
 | ||||||
|  | ### Variables Configurable in rabbitmq.conf | ||||||
|  | 
 | ||||||
|  | | Key                                      | Documentation | | ||||||
|  | |------------------------------------------|---------------| | ||||||
|  | | `auth_oauth2.resource_server_id`         | [The Resource Server ID](#resource-server-id-and-scope-prefixes) | ||||||
|  | | `auth_oauth2.additional_scopes_key`      | Configure the plugin to also look for scopes in other token fields (maps to `additional_rabbitmq_scopes` in the old format). | ||||||
|  | | `auth_oauth2.default_key`                | ID of the default signing key. | ||||||
|  | | `auth_oauth2.signing_keys`               | Paths to signing key files. | ||||||
|  | | `auth_oauth2.jwks_url`                   | The URL of the key server. According to the [JWT Specification](https://datatracker.ietf.org/doc/html/rfc7515#section-4.1.2), the key server URL must be HTTPS. | ||||||
|  | | `auth_oauth2.https.cacertfile`           | Path to a file containing PEM-encoded CA certificates. The CA certificates are used during key server [peer verification](https://rabbitmq.com/ssl.html#peer-verification). | ||||||
|  | | `auth_oauth2.https.depth`                | The maximum number of non-self-issued intermediate certificates that may follow the peer certificate in a valid [certification path](https://rabbitmq.com/ssl.html#peer-verification-depth). Default is 10. | ||||||
|  | | `auth_oauth2.https.peer_verification`    | Whether [peer verification](https://rabbitmq.com/ssl.html#peer-verification) should be enabled. Available values: `verify_none`, `verify_peer`. Default is `verify_none`. Configuring `verify_peer` is recommended: it requires some additional setup but is more secure. | ||||||
|  | | `auth_oauth2.https.fail_if_no_peer_cert` | Used together with `auth_oauth2.https.peer_verification = verify_peer`. When set to `true`, the TLS connection is rejected if the client fails to provide a certificate. Default is `false`. | ||||||
|  | | `auth_oauth2.https.hostname_verification`| Enable wildcard-aware hostname verification for the key server. Available values: `wildcard`, `none`. Default is `none`. | ||||||
|  | | `auth_oauth2.algorithms`                 | Restrict [the usable algorithms](https://github.com/potatosalad/erlang-jose#algorithm-support). | ||||||
|  | 
 | ||||||
|  | For example: | ||||||
|  | 
 | ||||||
|  | Configure with signing key files: | ||||||
|  | ``` | ||||||
|  | auth_oauth2.resource_server_id = new_resource_server_id | ||||||
|  | auth_oauth2.additional_scopes_key = my_custom_scope_key | ||||||
|  | auth_oauth2.default_key = id1 | ||||||
|  | auth_oauth2.signing_keys.id1 = test/config_schema_SUITE_data/certs/key.pem | ||||||
|  | auth_oauth2.signing_keys.id2 = test/config_schema_SUITE_data/certs/cert.pem | ||||||
|  | auth_oauth2.algorithms.1 = HS256 | ||||||
|  | auth_oauth2.algorithms.2 = RS256 | ||||||
|  | ``` | ||||||
|  | Configure with a key server: | ||||||
|  | ``` | ||||||
|  | auth_oauth2.resource_server_id = new_resource_server_id | ||||||
|  | auth_oauth2.jwks_url = https://my-jwt-issuer/jwks.json | ||||||
|  | auth_oauth2.https.cacertfile = test/config_schema_SUITE_data/certs/cacert.pem | ||||||
|  | auth_oauth2.https.peer_verification = verify_peer | ||||||
|  | auth_oauth2.https.depth = 5 | ||||||
|  | auth_oauth2.https.fail_if_no_peer_cert = true | ||||||
|  | auth_oauth2.https.hostname_verification = wildcard | ||||||
|  | auth_oauth2.algorithms.1 = HS256 | ||||||
|  | auth_oauth2.algorithms.2 = RS256 | ||||||
|  | ``` | ||||||
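For reference, a sketch of what the key-server example above corresponds to in the classic (advanced.config) format, assuming the `key_config` proplist layout targeted by the plugin's cuttlefish mappings shown further down in this change; this is an illustration, not a verbatim excerpt:

```
[
  {rabbitmq_auth_backend_oauth2, [
    {resource_server_id, <<"new_resource_server_id">>},
    {key_config, [
      {jwks_url, "https://my-jwt-issuer/jwks.json"},
      {cacertfile, "test/config_schema_SUITE_data/certs/cacert.pem"},
      {peer_verification, verify_peer},
      {depth, 5},
      {fail_if_no_peer_cert, true},
      {hostname_verification, wildcard},
      {algorithms, [<<"HS256">>, <<"RS256">>]}
    ]}
  ]}
].
```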
| ### Resource Server ID and Scope Prefixes | ### Resource Server ID and Scope Prefixes | ||||||
| 
 | 
 | ||||||
| OAuth 2.0 (and thus UAA-provided) tokens use scopes to communicate what set of permissions particular | OAuth 2.0 (and thus UAA-provided) tokens use scopes to communicate what set of permissions particular | ||||||
|  |  | ||||||
|  | @ -77,3 +77,52 @@ | ||||||
|                   end, Settings), |                   end, Settings), | ||||||
|     maps:from_list(SigningKeys) |     maps:from_list(SigningKeys) | ||||||
|  end}. |  end}. | ||||||
|  | 
 | ||||||
|  | {mapping, | ||||||
|  |  "auth_oauth2.jwks_url", | ||||||
|  |  "rabbitmq_auth_backend_oauth2.key_config.jwks_url", | ||||||
|  |  [{datatype, string}, {validators, ["uri", "https_uri"]}]}. | ||||||
|  | 
 | ||||||
|  | {mapping, | ||||||
|  |  "auth_oauth2.https.peer_verification", | ||||||
|  |  "rabbitmq_auth_backend_oauth2.key_config.peer_verification", | ||||||
|  |  [{datatype, {enum, [verify_peer, verify_none]}}]}. | ||||||
|  | 
 | ||||||
|  | {mapping, | ||||||
|  |  "auth_oauth2.https.cacertfile", | ||||||
|  |  "rabbitmq_auth_backend_oauth2.key_config.cacertfile", | ||||||
|  |  [{datatype, file}, {validators, ["file_accessible"]}]}. | ||||||
|  | 
 | ||||||
|  | {mapping, | ||||||
|  |  "auth_oauth2.https.depth", | ||||||
|  |  "rabbitmq_auth_backend_oauth2.key_config.depth", | ||||||
|  |  [{datatype, integer}]}. | ||||||
|  | 
 | ||||||
|  | {mapping, | ||||||
|  |  "auth_oauth2.https.hostname_verification", | ||||||
|  |  "rabbitmq_auth_backend_oauth2.key_config.hostname_verification", | ||||||
|  |  [{datatype, {enum, [wildcard, none]}}]}. | ||||||
|  | 
 | ||||||
|  | {mapping, | ||||||
|  |  "auth_oauth2.https.crl_check", | ||||||
|  |  "rabbitmq_auth_backend_oauth2.key_config.crl_check", | ||||||
|  |  [{datatype, {enum, [true, false, peer, best_effort]}}]}. | ||||||
|  | 
 | ||||||
|  | {mapping, | ||||||
|  |  "auth_oauth2.https.fail_if_no_peer_cert", | ||||||
|  |  "rabbitmq_auth_backend_oauth2.key_config.fail_if_no_peer_cert", | ||||||
|  |  [{datatype, {enum, [true, false]}}]}. | ||||||
|  | 
 | ||||||
|  | {validator, "https_uri", "According to the JWT Specification, Key Server URL must be https.", | ||||||
|  |  fun(Uri) -> string:nth_lexeme(Uri, 1, "://") == "https" end}. | ||||||
|  | 
 | ||||||
|  | {mapping, | ||||||
|  |  "auth_oauth2.algorithms.$algorithm", | ||||||
|  |  "rabbitmq_auth_backend_oauth2.key_config.algorithms", | ||||||
|  |  [{datatype, string}]}. | ||||||
|  | 
 | ||||||
|  | {translation, "rabbitmq_auth_backend_oauth2.key_config.algorithms", | ||||||
|  |  fun(Conf) -> | ||||||
|  |      Settings = cuttlefish_variable:filter_by_prefix("auth_oauth2.algorithms", Conf), | ||||||
|  |      [list_to_binary(V) || {_, V} <- Settings] | ||||||
|  |  end}. | ||||||
|  |  | ||||||
|  | @ -0,0 +1,27 @@ | ||||||
|  | -module(uaa_jwks). | ||||||
|  | -export([get/1]). | ||||||
|  | 
 | ||||||
|  | -spec get(string() | binary()) -> {ok, term()} | {error, term()}. | ||||||
|  | get(JwksUrl) -> | ||||||
|  |     httpc:request(get, {JwksUrl, []}, [{ssl, ssl_options()}, {timeout, 60000}], []). | ||||||
|  | 
 | ||||||
|  | -spec ssl_options() -> list(). | ||||||
|  | ssl_options() -> | ||||||
|  |     UaaEnv = application:get_env(rabbitmq_auth_backend_oauth2, key_config, []), | ||||||
|  |     PeerVerification = proplists:get_value(peer_verification, UaaEnv, verify_none), | ||||||
|  |     CaCertFile = proplists:get_value(cacertfile, UaaEnv), | ||||||
|  |     Depth = proplists:get_value(depth, UaaEnv, 10), | ||||||
|  |     FailIfNoPeerCert = proplists:get_value(fail_if_no_peer_cert, UaaEnv, false), | ||||||
|  |     CrlCheck = proplists:get_value(crl_check, UaaEnv, false), | ||||||
|  |     SslOpts0 = [{verify, PeerVerification}, | ||||||
|  |                 {cacertfile, CaCertFile}, | ||||||
|  |                 {depth, Depth}, | ||||||
|  |                 {fail_if_no_peer_cert, FailIfNoPeerCert}, | ||||||
|  |                 {crl_check, CrlCheck}, | ||||||
|  |                 {crl_cache, {ssl_crl_cache, {internal, [{http, 10000}]}}}], | ||||||
|  |     case proplists:get_value(hostname_verification, UaaEnv, none) of | ||||||
|  |         wildcard -> | ||||||
|  |             [{customize_hostname_check, [{match_fun, public_key:pkix_verify_hostname_match_fun(https)}]} | SslOpts0]; | ||||||
|  |         none -> | ||||||
|  |             SslOpts0 | ||||||
|  |     end. | ||||||
|  | @ -58,7 +58,7 @@ update_jwks_signing_keys() -> | ||||||
|         undefined -> |         undefined -> | ||||||
|             {error, no_jwks_url}; |             {error, no_jwks_url}; | ||||||
|         JwksUrl -> |         JwksUrl -> | ||||||
|             case httpc:request(JwksUrl) of |             case uaa_jwks:get(JwksUrl) of | ||||||
|                 {ok, {_, _, JwksBody}} -> |                 {ok, {_, _, JwksBody}} -> | ||||||
|                     KeyList = maps:get(<<"keys">>, jose:decode(erlang:iolist_to_binary(JwksBody)), []), |                     KeyList = maps:get(<<"keys">>, jose:decode(erlang:iolist_to_binary(JwksBody)), []), | ||||||
|                     Keys = maps:from_list(lists:map(fun(Key) -> {maps:get(<<"kid">>, Key, undefined), {json, Key}} end, KeyList)), |                     Keys = maps:from_list(lists:map(fun(Key) -> {maps:get(<<"kid">>, Key, undefined), {json, Key}} end, KeyList)), | ||||||
|  |  | ||||||
|  | @ -24,7 +24,15 @@ decode(Token) -> | ||||||
|     end. |     end. | ||||||
| 
 | 
 | ||||||
| decode_and_verify(Jwk, Token) -> | decode_and_verify(Jwk, Token) -> | ||||||
|     case jose_jwt:verify(Jwk, Token) of |     UaaEnv = application:get_env(rabbitmq_auth_backend_oauth2, key_config, []), | ||||||
|  |     Verify = | ||||||
|  |         case proplists:get_value(algorithms, UaaEnv) of | ||||||
|  |             undefined -> | ||||||
|  |                 jose_jwt:verify(Jwk, Token); | ||||||
|  |             Algs -> | ||||||
|  |                 jose_jwt:verify_strict(Jwk, Algs, Token) | ||||||
|  |         end, | ||||||
|  |     case Verify of | ||||||
|         {true, #jose_jwt{fields = Fields}, _}  -> {true, Fields}; |         {true, #jose_jwt{fields = Fields}, _}  -> {true, Fields}; | ||||||
|         {false, #jose_jwt{fields = Fields}, _} -> {false, Fields} |         {false, #jose_jwt{fields = Fields}, _} -> {false, Fields} | ||||||
|     end. |     end. | ||||||
|  |  | ||||||
|  | @ -4,7 +4,16 @@ | ||||||
|         auth_oauth2.additional_scopes_key = my_custom_scope_key |         auth_oauth2.additional_scopes_key = my_custom_scope_key | ||||||
|         auth_oauth2.default_key = id1 |         auth_oauth2.default_key = id1 | ||||||
|         auth_oauth2.signing_keys.id1 = test/config_schema_SUITE_data/certs/key.pem |         auth_oauth2.signing_keys.id1 = test/config_schema_SUITE_data/certs/key.pem | ||||||
|         auth_oauth2.signing_keys.id2 = test/config_schema_SUITE_data/certs/cert.pem", |         auth_oauth2.signing_keys.id2 = test/config_schema_SUITE_data/certs/cert.pem | ||||||
|  |         auth_oauth2.jwks_url = https://my-jwt-issuer/jwks.json | ||||||
|  |         auth_oauth2.https.cacertfile = test/config_schema_SUITE_data/certs/cacert.pem | ||||||
|  |         auth_oauth2.https.peer_verification = verify_none | ||||||
|  |         auth_oauth2.https.depth = 5 | ||||||
|  |         auth_oauth2.https.fail_if_no_peer_cert = false | ||||||
|  |         auth_oauth2.https.hostname_verification = wildcard | ||||||
|  |         auth_oauth2.https.crl_check = true | ||||||
|  |         auth_oauth2.algorithms.1 = HS256 | ||||||
|  |         auth_oauth2.algorithms.2 = RS256", | ||||||
|         [ |         [ | ||||||
|         {rabbitmq_auth_backend_oauth2, [ |         {rabbitmq_auth_backend_oauth2, [ | ||||||
|             {resource_server_id,<<"new_resource_server_id">>}, |             {resource_server_id,<<"new_resource_server_id">>}, | ||||||
|  | @ -16,7 +25,15 @@ | ||||||
|                     <<"id1">> => {pem, <<"I'm not a certificate">>}, |                     <<"id1">> => {pem, <<"I'm not a certificate">>}, | ||||||
|                     <<"id2">> => {pem, <<"I'm not a certificate">>} |                     <<"id2">> => {pem, <<"I'm not a certificate">>} | ||||||
|                 } |                 } | ||||||
|             } |             }, | ||||||
|  |             {jwks_url, "https://my-jwt-issuer/jwks.json"}, | ||||||
|  |             {cacertfile, "test/config_schema_SUITE_data/certs/cacert.pem"}, | ||||||
|  |             {peer_verification, verify_none}, | ||||||
|  |             {depth, 5}, | ||||||
|  |             {fail_if_no_peer_cert, false}, | ||||||
|  |             {hostname_verification, wildcard}, | ||||||
|  |             {crl_check, true}, | ||||||
|  |             {algorithms, [<<"HS256">>, <<"RS256">>]} | ||||||
|             ] |             ] | ||||||
|             } |             } | ||||||
|         ]} |         ]} | ||||||
|  |  | ||||||
|  | @ -21,7 +21,9 @@ | ||||||
| all() -> | all() -> | ||||||
|     [ |     [ | ||||||
|      {group, happy_path}, |      {group, happy_path}, | ||||||
|      {group, unhappy_path} |      {group, unhappy_path}, | ||||||
|  |      {group, unvalidated_jwks_server}, | ||||||
|  |      {group, no_peer_verification} | ||||||
|     ]. |     ]. | ||||||
| 
 | 
 | ||||||
| groups() -> | groups() -> | ||||||
|  | @ -34,6 +36,7 @@ groups() -> | ||||||
|                        test_successful_connection_with_complex_claim_as_a_list, |                        test_successful_connection_with_complex_claim_as_a_list, | ||||||
|                        test_successful_connection_with_complex_claim_as_a_binary, |                        test_successful_connection_with_complex_claim_as_a_binary, | ||||||
|                        test_successful_connection_with_keycloak_token, |                        test_successful_connection_with_keycloak_token, | ||||||
|  |                        test_successful_connection_with_algorithm_restriction, | ||||||
|                        test_successful_token_refresh |                        test_successful_token_refresh | ||||||
|                       ]}, |                       ]}, | ||||||
|      {unhappy_path, [], [ |      {unhappy_path, [], [ | ||||||
|  | @ -41,9 +44,12 @@ groups() -> | ||||||
|                          test_failed_connection_with_a_non_token, |                          test_failed_connection_with_a_non_token, | ||||||
|                          test_failed_connection_with_a_token_with_insufficient_vhost_permission, |                          test_failed_connection_with_a_token_with_insufficient_vhost_permission, | ||||||
|                          test_failed_connection_with_a_token_with_insufficient_resource_permission, |                          test_failed_connection_with_a_token_with_insufficient_resource_permission, | ||||||
|  |                          test_failed_connection_with_algorithm_restriction, | ||||||
|                          test_failed_token_refresh_case1, |                          test_failed_token_refresh_case1, | ||||||
|                          test_failed_token_refresh_case2 |                          test_failed_token_refresh_case2 | ||||||
|                         ]} |                         ]}, | ||||||
|  |      {unvalidated_jwks_server, [], [test_failed_connection_with_unvalidated_jwks_server]}, | ||||||
|  |      {no_peer_verification, [], [{group, happy_path}, {group, unhappy_path}]} | ||||||
|     ]. |     ]. | ||||||
| 
 | 
 | ||||||
| %% | %% | ||||||
|  | @ -69,23 +75,35 @@ end_per_suite(Config) -> | ||||||
|         fun stop_jwks_server/1 |         fun stop_jwks_server/1 | ||||||
|       ] ++ rabbit_ct_broker_helpers:teardown_steps()). |       ] ++ rabbit_ct_broker_helpers:teardown_steps()). | ||||||
| 
 | 
 | ||||||
|  | init_per_group(no_peer_verification, Config) -> | ||||||
|  |     add_vhosts(Config), | ||||||
|  |     KeyConfig = rabbit_ct_helpers:set_config(?config(key_config, Config), [{jwks_url, ?config(non_strict_jwks_url, Config)}, {peer_verification, verify_none}]), | ||||||
|  |     ok = rabbit_ct_broker_helpers:rpc(Config, 0, application, set_env, [rabbitmq_auth_backend_oauth2, key_config, KeyConfig]), | ||||||
|  |     rabbit_ct_helpers:set_config(Config, {key_config, KeyConfig}); | ||||||
| 
 | 
 | ||||||
| init_per_group(_Group, Config) -> | init_per_group(_Group, Config) -> | ||||||
|     %% The broker is managed by {init,end}_per_testcase(). |     add_vhosts(Config), | ||||||
|     lists:foreach(fun(Value) -> |  | ||||||
|                           rabbit_ct_broker_helpers:add_vhost(Config, Value) |  | ||||||
|                   end, |  | ||||||
|                   [<<"vhost1">>, <<"vhost2">>, <<"vhost3">>, <<"vhost4">>]), |  | ||||||
|     Config. |     Config. | ||||||
| 
 | 
 | ||||||
|  | end_per_group(no_peer_verification, Config) -> | ||||||
|  |     delete_vhosts(Config), | ||||||
|  |     KeyConfig = rabbit_ct_helpers:set_config(?config(key_config, Config), [{jwks_url, ?config(strict_jwks_url, Config)}, {peer_verification, verify_peer}]), | ||||||
|  |     ok = rabbit_ct_broker_helpers:rpc(Config, 0, application, set_env, [rabbitmq_auth_backend_oauth2, key_config, KeyConfig]), | ||||||
|  |     rabbit_ct_helpers:set_config(Config, {key_config, KeyConfig}); | ||||||
|  | 
 | ||||||
| end_per_group(_Group, Config) -> | end_per_group(_Group, Config) -> | ||||||
|     %% The broker is managed by {init,end}_per_testcase(). |     delete_vhosts(Config), | ||||||
|     lists:foreach(fun(Value) -> |  | ||||||
|                           rabbit_ct_broker_helpers:delete_vhost(Config, Value) |  | ||||||
|                   end, |  | ||||||
|                   [<<"vhost1">>, <<"vhost2">>, <<"vhost3">>, <<"vhost4">>]), |  | ||||||
|     Config. |     Config. | ||||||
| 
 | 
 | ||||||
|  | add_vhosts(Config) -> | ||||||
|  |     %% The broker is managed by {init,end}_per_testcase(). | ||||||
|  |     lists:foreach(fun(Value) -> rabbit_ct_broker_helpers:add_vhost(Config, Value) end, | ||||||
|  |                   [<<"vhost1">>, <<"vhost2">>, <<"vhost3">>, <<"vhost4">>]). | ||||||
|  | 
 | ||||||
|  | delete_vhosts(Config) -> | ||||||
|  |     %% The broker is managed by {init,end}_per_testcase(). | ||||||
|  |     lists:foreach(fun(Value) -> rabbit_ct_broker_helpers:delete_vhost(Config, Value) end, | ||||||
|  |                   [<<"vhost1">>, <<"vhost2">>, <<"vhost3">>, <<"vhost4">>]). | ||||||
| 
 | 
 | ||||||
| init_per_testcase(Testcase, Config) when Testcase =:= test_successful_connection_with_a_full_permission_token_and_explicitly_configured_vhost orelse | init_per_testcase(Testcase, Config) when Testcase =:= test_successful_connection_with_a_full_permission_token_and_explicitly_configured_vhost orelse | ||||||
|                                          Testcase =:= test_successful_token_refresh -> |                                          Testcase =:= test_successful_token_refresh -> | ||||||
|  | @ -107,6 +125,24 @@ init_per_testcase(Testcase, Config) when Testcase =:= test_successful_connection | ||||||
|   rabbit_ct_helpers:testcase_started(Config, Testcase), |   rabbit_ct_helpers:testcase_started(Config, Testcase), | ||||||
|   Config; |   Config; | ||||||
| 
 | 
 | ||||||
|  | init_per_testcase(Testcase, Config) when Testcase =:= test_successful_connection_with_algorithm_restriction -> | ||||||
|  |     KeyConfig = ?config(key_config, Config), | ||||||
|  |     ok = rabbit_ct_broker_helpers:rpc(Config, 0, application, set_env, [rabbitmq_auth_backend_oauth2, key_config, [{algorithms, [<<"HS256">>]} | KeyConfig]]), | ||||||
|  |     rabbit_ct_helpers:testcase_started(Config, Testcase), | ||||||
|  |     Config; | ||||||
|  | 
 | ||||||
|  | init_per_testcase(Testcase, Config) when Testcase =:= test_failed_connection_with_algorithm_restriction -> | ||||||
|  |     KeyConfig = ?config(key_config, Config), | ||||||
|  |     ok = rabbit_ct_broker_helpers:rpc(Config, 0, application, set_env, [rabbitmq_auth_backend_oauth2, key_config, [{algorithms, [<<"RS256">>]} | KeyConfig]]), | ||||||
|  |     rabbit_ct_helpers:testcase_started(Config, Testcase), | ||||||
|  |     Config; | ||||||
|  | 
 | ||||||
|  | init_per_testcase(Testcase, Config) when Testcase =:= test_failed_connection_with_unvalidated_jwks_server -> | ||||||
|  |     KeyConfig = rabbit_ct_helpers:set_config(?config(key_config, Config), {jwks_url, ?config(non_strict_jwks_url, Config)}), | ||||||
|  |     ok = rabbit_ct_broker_helpers:rpc(Config, 0, application, set_env, [rabbitmq_auth_backend_oauth2, key_config, KeyConfig]), | ||||||
|  |     rabbit_ct_helpers:testcase_started(Config, Testcase), | ||||||
|  |     Config; | ||||||
|  | 
 | ||||||
| init_per_testcase(Testcase, Config) -> | init_per_testcase(Testcase, Config) -> | ||||||
|     rabbit_ct_helpers:testcase_started(Config, Testcase), |     rabbit_ct_helpers:testcase_started(Config, Testcase), | ||||||
|     Config. |     Config. | ||||||
|  | @ -126,6 +162,14 @@ end_per_testcase(Testcase, Config) when Testcase =:= test_successful_connection_ | ||||||
|   rabbit_ct_helpers:testcase_started(Config, Testcase), |   rabbit_ct_helpers:testcase_started(Config, Testcase), | ||||||
|   Config; |   Config; | ||||||
| 
 | 
 | ||||||
|  | end_per_testcase(Testcase, Config) when Testcase =:= test_successful_connection_with_algorithm_restriction orelse | ||||||
|  |                                         Testcase =:= test_failed_connection_with_algorithm_restriction orelse | ||||||
|  |                                         Testcase =:= test_failed_connection_with_unvalidated_jwks_server -> | ||||||
|  |     rabbit_ct_broker_helpers:delete_vhost(Config, <<"vhost1">>), | ||||||
|  |     ok = rabbit_ct_broker_helpers:rpc(Config, 0, application, set_env, [rabbitmq_auth_backend_oauth2, key_config, ?config(key_config, Config)]), | ||||||
|  |     rabbit_ct_helpers:testcase_finished(Config, Testcase), | ||||||
|  |     Config; | ||||||
|  | 
 | ||||||
| end_per_testcase(Testcase, Config) -> | end_per_testcase(Testcase, Config) -> | ||||||
|     rabbit_ct_broker_helpers:delete_vhost(Config, <<"vhost1">>), |     rabbit_ct_broker_helpers:delete_vhost(Config, <<"vhost1">>), | ||||||
|     rabbit_ct_helpers:testcase_finished(Config, Testcase), |     rabbit_ct_helpers:testcase_finished(Config, Testcase), | ||||||
|  | @ -143,13 +187,27 @@ start_jwks_server(Config) -> | ||||||
|     %% Assume we don't have more than 100 ports allocated for tests |     %% Assume we don't have more than 100 ports allocated for tests | ||||||
|     PortBase = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_ports_base), |     PortBase = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_ports_base), | ||||||
|     JwksServerPort = PortBase + 100, |     JwksServerPort = PortBase + 100, | ||||||
|  | 
 | ||||||
|  |     %% Both URLs point to the same JWKS server | ||||||
|  |     %% The certificate identity of NonStrictJwksUrl cannot be validated, while that of StrictJwksUrl can | ||||||
|  |     NonStrictJwksUrl = "https://127.0.0.1:" ++ integer_to_list(JwksServerPort) ++ "/jwks", | ||||||
|  |     StrictJwksUrl = "https://localhost:" ++ integer_to_list(JwksServerPort) ++ "/jwks", | ||||||
|  | 
 | ||||||
|     ok = application:set_env(jwks_http, keys, [Jwk]), |     ok = application:set_env(jwks_http, keys, [Jwk]), | ||||||
|  |     {ok, _} = application:ensure_all_started(ssl), | ||||||
|     {ok, _} = application:ensure_all_started(cowboy), |     {ok, _} = application:ensure_all_started(cowboy), | ||||||
|     ok = jwks_http_app:start(JwksServerPort), |     CertsDir = ?config(rmq_certsdir, Config), | ||||||
|     KeyConfig = [{jwks_url, "http://127.0.0.1:" ++ integer_to_list(JwksServerPort) ++ "/jwks"}], |     ok = jwks_http_app:start(JwksServerPort, CertsDir), | ||||||
|  |     KeyConfig = [{jwks_url, StrictJwksUrl}, | ||||||
|  |                  {peer_verification, verify_peer}, | ||||||
|  |                  {cacertfile, filename:join([CertsDir, "testca", "cacert.pem"])}], | ||||||
|     ok = rabbit_ct_broker_helpers:rpc(Config, 0, application, set_env, |     ok = rabbit_ct_broker_helpers:rpc(Config, 0, application, set_env, | ||||||
|                                       [rabbitmq_auth_backend_oauth2, key_config, KeyConfig]), |                                       [rabbitmq_auth_backend_oauth2, key_config, KeyConfig]), | ||||||
|     rabbit_ct_helpers:set_config(Config, {fixture_jwk, Jwk}). |     rabbit_ct_helpers:set_config(Config, | ||||||
|  |                                  [{non_strict_jwks_url, NonStrictJwksUrl}, | ||||||
|  |                                   {strict_jwks_url, StrictJwksUrl}, | ||||||
|  |                                   {key_config, KeyConfig}, | ||||||
|  |                                   {fixture_jwk, Jwk}]). | ||||||
| 
 | 
 | ||||||
| stop_jwks_server(Config) -> | stop_jwks_server(Config) -> | ||||||
|     ok = jwks_http_app:stop(), |     ok = jwks_http_app:stop(), | ||||||
|  | @ -305,7 +363,7 @@ test_successful_token_refresh(Config) -> | ||||||
|     Conn     = open_unmanaged_connection(Config, 0, <<"vhost1">>, <<"username">>, Token), |     Conn     = open_unmanaged_connection(Config, 0, <<"vhost1">>, <<"username">>, Token), | ||||||
|     {ok, Ch} = amqp_connection:open_channel(Conn), |     {ok, Ch} = amqp_connection:open_channel(Conn), | ||||||
| 
 | 
 | ||||||
|     {_Algo, Token2} = generate_valid_token(Config, [<<"rabbitmq.configure:vhost1/*">>, |     {_Algo2, Token2} = generate_valid_token(Config, [<<"rabbitmq.configure:vhost1/*">>, | ||||||
|                                                     <<"rabbitmq.write:vhost1/*">>, |                                                     <<"rabbitmq.write:vhost1/*">>, | ||||||
|                                                     <<"rabbitmq.read:vhost1/*">>]), |                                                     <<"rabbitmq.read:vhost1/*">>]), | ||||||
|     ?UTIL_MOD:wait_for_token_to_expire(timer:seconds(Duration)), |     ?UTIL_MOD:wait_for_token_to_expire(timer:seconds(Duration)), | ||||||
|  | @ -321,6 +379,13 @@ test_successful_token_refresh(Config) -> | ||||||
|     amqp_channel:close(Ch2), |     amqp_channel:close(Ch2), | ||||||
|     close_connection_and_channel(Conn, Ch). |     close_connection_and_channel(Conn, Ch). | ||||||
| 
 | 
 | ||||||
|  | test_successful_connection_with_algorithm_restriction(Config) -> | ||||||
|  |     {_Algo, Token} = rabbit_ct_helpers:get_config(Config, fixture_jwt), | ||||||
|  |     Conn = open_unmanaged_connection(Config, 0, <<"username">>, Token), | ||||||
|  |     {ok, Ch} = amqp_connection:open_channel(Conn), | ||||||
|  |     #'queue.declare_ok'{queue = _} = | ||||||
|  |         amqp_channel:call(Ch, #'queue.declare'{exclusive = true}), | ||||||
|  |     close_connection_and_channel(Conn, Ch). | ||||||
| 
 | 
 | ||||||
| test_failed_connection_with_expired_token(Config) -> | test_failed_connection_with_expired_token(Config) -> | ||||||
|     {_Algo, Token} = generate_expired_token(Config, [<<"rabbitmq.configure:vhost1/*">>, |     {_Algo, Token} = generate_expired_token(Config, [<<"rabbitmq.configure:vhost1/*">>, | ||||||
|  | @ -359,7 +424,7 @@ test_failed_token_refresh_case1(Config) -> | ||||||
|     #'queue.declare_ok'{queue = _} = |     #'queue.declare_ok'{queue = _} = | ||||||
|         amqp_channel:call(Ch, #'queue.declare'{exclusive = true}), |         amqp_channel:call(Ch, #'queue.declare'{exclusive = true}), | ||||||
| 
 | 
 | ||||||
|     {_Algo, Token2} = generate_expired_token(Config, [<<"rabbitmq.configure:vhost4/*">>, |     {_Algo2, Token2} = generate_expired_token(Config, [<<"rabbitmq.configure:vhost4/*">>, | ||||||
|                                                       <<"rabbitmq.write:vhost4/*">>, |                                                       <<"rabbitmq.write:vhost4/*">>, | ||||||
|                                                       <<"rabbitmq.read:vhost4/*">>]), |                                                       <<"rabbitmq.read:vhost4/*">>]), | ||||||
|     %% the error is communicated asynchronously via a connection-level error |     %% the error is communicated asynchronously via a connection-level error | ||||||
|  | @ -387,3 +452,13 @@ test_failed_token_refresh_case2(Config) -> | ||||||
|        amqp_connection:open_channel(Conn)), |        amqp_connection:open_channel(Conn)), | ||||||
| 
 | 
 | ||||||
|     close_connection(Conn). |     close_connection(Conn). | ||||||
|  | 
 | ||||||
|  | test_failed_connection_with_algorithm_restriction(Config) -> | ||||||
|  |     {_Algo, Token} = rabbit_ct_helpers:get_config(Config, fixture_jwt), | ||||||
|  |     ?assertMatch({error, {auth_failure, _}}, | ||||||
|  |                  open_unmanaged_connection(Config, 0, <<"username">>, Token)). | ||||||
|  | 
 | ||||||
|  | test_failed_connection_with_unvalidated_jwks_server(Config) -> | ||||||
|  |     {_Algo, Token} = rabbit_ct_helpers:get_config(Config, fixture_jwt), | ||||||
|  |     ?assertMatch({error, {auth_failure, _}}, | ||||||
|  |                  open_unmanaged_connection(Config, 0, <<"username">>, Token)). | ||||||
|  |  | ||||||
|  | @ -1,8 +1,8 @@ | ||||||
| -module(jwks_http_app). | -module(jwks_http_app). | ||||||
| 
 | 
 | ||||||
| -export([start/1, stop/0]). | -export([start/2, stop/0]). | ||||||
| 
 | 
 | ||||||
| start(Port) -> | start(Port, CertsDir) -> | ||||||
|     Dispatch = |     Dispatch = | ||||||
|         cowboy_router:compile( |         cowboy_router:compile( | ||||||
|           [ |           [ | ||||||
|  | @ -11,8 +11,10 @@ start(Port) -> | ||||||
|                  ]} |                  ]} | ||||||
|           ] |           ] | ||||||
|          ), |          ), | ||||||
|     {ok, _} = cowboy:start_clear(jwks_http_listener, |     {ok, _} = cowboy:start_tls(jwks_http_listener, | ||||||
|                       [{port, Port}], |                       [{port, Port}, | ||||||
|  |                        {certfile, filename:join([CertsDir, "server", "cert.pem"])}, | ||||||
|  |                        {keyfile, filename:join([CertsDir, "server", "key.pem"])}], | ||||||
|                       #{env => #{dispatch => Dispatch}}), |                       #{env => #{dispatch => Dispatch}}), | ||||||
|     ok. |     ok. | ||||||
| 
 | 
 | ||||||
|  |  | ||||||
|  | @ -73,7 +73,7 @@ expired_token_with_scopes(Scopes) -> | ||||||
|     token_with_scopes_and_expiration(Scopes, os:system_time(seconds) - 10). |     token_with_scopes_and_expiration(Scopes, os:system_time(seconds) - 10). | ||||||
| 
 | 
 | ||||||
| fixture_token_with_scopes(Scopes) -> | fixture_token_with_scopes(Scopes) -> | ||||||
|     token_with_scopes_and_expiration(Scopes, os:system_time(seconds) + 10). |     token_with_scopes_and_expiration(Scopes, os:system_time(seconds) + 30). | ||||||
| 
 | 
 | ||||||
| token_with_scopes_and_expiration(Scopes, Expiration) -> | token_with_scopes_and_expiration(Scopes, Expiration) -> | ||||||
|     %% expiration is a timestamp with precision in seconds |     %% expiration is a timestamp with precision in seconds | ||||||
|  |  | ||||||
|  | @ -87,7 +87,9 @@ delete_super_stream(VirtualHost, Name, Username) -> | ||||||
|     gen_server:call(?MODULE, |     gen_server:call(?MODULE, | ||||||
|                     {delete_super_stream, VirtualHost, Name, Username}). |                     {delete_super_stream, VirtualHost, Name, Username}). | ||||||
| 
 | 
 | ||||||
| -spec lookup_leader(binary(), binary()) -> pid() | cluster_not_found. | -spec lookup_leader(binary(), binary()) -> | ||||||
|  |                        {ok, pid()} | {error, not_available} | | ||||||
|  |                        {error, not_found}. | ||||||
| lookup_leader(VirtualHost, Stream) -> | lookup_leader(VirtualHost, Stream) -> | ||||||
|     gen_server:call(?MODULE, {lookup_leader, VirtualHost, Stream}). |     gen_server:call(?MODULE, {lookup_leader, VirtualHost, Stream}). | ||||||
| 
 | 
 | ||||||
|  | @ -294,20 +296,25 @@ handle_call({lookup_leader, VirtualHost, Stream}, _From, State) -> | ||||||
|                           LeaderPid = amqqueue:get_pid(Q), |                           LeaderPid = amqqueue:get_pid(Q), | ||||||
|                           case process_alive(LeaderPid) of |                           case process_alive(LeaderPid) of | ||||||
|                               true -> |                               true -> | ||||||
|                                   LeaderPid; |                                   {ok, LeaderPid}; | ||||||
|                               false -> |                               false -> | ||||||
|                                   case leader_from_members(Q) of |                                   case leader_from_members(Q) of | ||||||
|                                       {ok, Pid} -> |                                       {ok, Pid} -> | ||||||
|                                           Pid; |                                           {ok, Pid}; | ||||||
|                                       _ -> |                                       _ -> | ||||||
|                                           cluster_not_found |                                           {error, not_available} | ||||||
|                                   end |                                   end | ||||||
|                           end; |                           end; | ||||||
|                       _ -> |                       _ -> | ||||||
|                           cluster_not_found |                           {error, not_found} | ||||||
|                   end; |                   end; | ||||||
|  |               {error, not_found} -> | ||||||
|  |                   case rabbit_amqqueue:not_found_or_absent_dirty(Name) of | ||||||
|  |                       not_found -> | ||||||
|  |                           {error, not_found}; | ||||||
|                       _ -> |                       _ -> | ||||||
|                   cluster_not_found |                           {error, not_available} | ||||||
|  |                   end | ||||||
|           end, |           end, | ||||||
|     {reply, Res, State}; |     {reply, Res, State}; | ||||||
| handle_call({lookup_local_member, VirtualHost, Stream}, _From, | handle_call({lookup_local_member, VirtualHost, Stream}, _From, | ||||||
|  |  | ||||||
|  | @ -1494,7 +1494,7 @@ handle_frame_post_auth(Transport, | ||||||
|             of |             of | ||||||
|                 {false, false} -> |                 {false, false} -> | ||||||
|                     case lookup_leader(Stream, Connection0) of |                     case lookup_leader(Stream, Connection0) of | ||||||
|                         cluster_not_found -> |                         {error, not_found} -> | ||||||
|                             response(Transport, |                             response(Transport, | ||||||
|                                      Connection0, |                                      Connection0, | ||||||
|                                      declare_publisher, |                                      declare_publisher, | ||||||
|  | @ -1504,6 +1504,16 @@ handle_frame_post_auth(Transport, | ||||||
|                                                                              ?STREAM_DOES_NOT_EXIST, |                                                                              ?STREAM_DOES_NOT_EXIST, | ||||||
|                                                                              1), |                                                                              1), | ||||||
|                             {Connection0, State}; |                             {Connection0, State}; | ||||||
|  |                         {error, not_available} -> | ||||||
|  |                             response(Transport, | ||||||
|  |                                      Connection0, | ||||||
|  |                                      declare_publisher, | ||||||
|  |                                      CorrelationId, | ||||||
|  |                                      ?RESPONSE_CODE_STREAM_NOT_AVAILABLE), | ||||||
|  |                             rabbit_global_counters:increase_protocol_counter(stream, | ||||||
|  |                                                                              ?STREAM_NOT_AVAILABLE, | ||||||
|  |                                                                              1), | ||||||
|  |                             {Connection0, State}; | ||||||
|                         {ClusterLeader, |                         {ClusterLeader, | ||||||
|                          #stream_connection{publishers = Publishers0, |                          #stream_connection{publishers = Publishers0, | ||||||
|                                             publisher_to_ids = RefIds0} = |                                             publisher_to_ids = RefIds0} = | ||||||
|  | @ -1960,9 +1970,9 @@ handle_frame_post_auth(_Transport, | ||||||
|     of |     of | ||||||
|         ok -> |         ok -> | ||||||
|             case lookup_leader(Stream, Connection) of |             case lookup_leader(Stream, Connection) of | ||||||
|                 cluster_not_found -> |                 {error, Error} -> | ||||||
|                     rabbit_log:warning("Could not find leader to store offset on ~p", |                     rabbit_log:warning("Could not find leader to store offset on ~p: ~p", | ||||||
|                                        [Stream]), |                                        [Stream, Error]), | ||||||
|                     %% FIXME store offset is fire-and-forget, so no response even if error, change this? |                     %% FIXME store offset is fire-and-forget, so no response even if error, change this? | ||||||
|                     {Connection, State}; |                     {Connection, State}; | ||||||
|                 {ClusterLeader, Connection1} -> |                 {ClusterLeader, Connection1} -> | ||||||
|  | @ -1992,11 +2002,16 @@ handle_frame_post_auth(Transport, | ||||||
|         of |         of | ||||||
|             ok -> |             ok -> | ||||||
|                 case lookup_leader(Stream, Connection0) of |                 case lookup_leader(Stream, Connection0) of | ||||||
|                     cluster_not_found -> |                     {error, not_found} -> | ||||||
|                         rabbit_global_counters:increase_protocol_counter(stream, |                         rabbit_global_counters:increase_protocol_counter(stream, | ||||||
|                                                                          ?STREAM_DOES_NOT_EXIST, |                                                                          ?STREAM_DOES_NOT_EXIST, | ||||||
|                                                                          1), |                                                                          1), | ||||||
|                         {?RESPONSE_CODE_STREAM_DOES_NOT_EXIST, 0, Connection0}; |                         {?RESPONSE_CODE_STREAM_DOES_NOT_EXIST, 0, Connection0}; | ||||||
|  |                     {error, not_available} -> | ||||||
|  |                         rabbit_global_counters:increase_protocol_counter(stream, | ||||||
|  |                                                                          ?STREAM_NOT_AVAILABLE, | ||||||
|  |                                                                          1), | ||||||
|  |                         {?RESPONSE_CODE_STREAM_NOT_AVAILABLE, 0, Connection0}; | ||||||
|                     {LeaderPid, C} -> |                     {LeaderPid, C} -> | ||||||
|                         {RC, O} = |                         {RC, O} = | ||||||
|                             case osiris:read_tracking(LeaderPid, Reference) of |                             case osiris:read_tracking(LeaderPid, Reference) of | ||||||
|  | @ -2532,9 +2547,9 @@ lookup_leader(Stream, | ||||||
|     case maps:get(Stream, StreamLeaders, undefined) of |     case maps:get(Stream, StreamLeaders, undefined) of | ||||||
|         undefined -> |         undefined -> | ||||||
|             case lookup_leader_from_manager(VirtualHost, Stream) of |             case lookup_leader_from_manager(VirtualHost, Stream) of | ||||||
|                 cluster_not_found -> |                 {error, Error} -> | ||||||
|                     cluster_not_found; |                     {error, Error}; | ||||||
|                 LeaderPid -> |                 {ok, LeaderPid} -> | ||||||
|                     Connection1 = |                     Connection1 = | ||||||
|                         maybe_monitor_stream(LeaderPid, Stream, Connection), |                         maybe_monitor_stream(LeaderPid, Stream, Connection), | ||||||
|                     {LeaderPid, |                     {LeaderPid, | ||||||
|  |  | ||||||
|  | @ -27,9 +27,9 @@ | ||||||
| 
 | 
 | ||||||
|     <properties> |     <properties> | ||||||
| 	<stream-client.version>[0.5.0-SNAPSHOT,1.0-SNAPSHOT)</stream-client.version> | 	<stream-client.version>[0.5.0-SNAPSHOT,1.0-SNAPSHOT)</stream-client.version> | ||||||
|         <junit.jupiter.version>5.8.1</junit.jupiter.version> |         <junit.jupiter.version>5.8.2</junit.jupiter.version> | ||||||
|         <assertj.version>3.21.0</assertj.version> |         <assertj.version>3.21.0</assertj.version> | ||||||
|         <logback.version>1.2.6</logback.version> |         <logback.version>1.2.7</logback.version> | ||||||
|         <maven.compiler.plugin.version>3.8.1</maven.compiler.plugin.version> |         <maven.compiler.plugin.version>3.8.1</maven.compiler.plugin.version> | ||||||
|         <maven-surefire-plugin.version>2.22.2</maven-surefire-plugin.version> |         <maven-surefire-plugin.version>2.22.2</maven-surefire-plugin.version> | ||||||
|         <spotless.version>2.2.0</spotless.version> |         <spotless.version>2.2.0</spotless.version> | ||||||
|  |  | ||||||
|  | @ -16,6 +16,9 @@ | ||||||
| 
 | 
 | ||||||
| package com.rabbitmq.stream; | package com.rabbitmq.stream; | ||||||
| 
 | 
 | ||||||
|  | import static com.rabbitmq.stream.TestUtils.ResponseConditions.ko; | ||||||
|  | import static com.rabbitmq.stream.TestUtils.ResponseConditions.ok; | ||||||
|  | import static com.rabbitmq.stream.TestUtils.ResponseConditions.responseCode; | ||||||
| import static org.assertj.core.api.Assertions.assertThat; | import static org.assertj.core.api.Assertions.assertThat; | ||||||
| 
 | 
 | ||||||
| import com.rabbitmq.stream.impl.Client; | import com.rabbitmq.stream.impl.Client; | ||||||
|  | @ -40,8 +43,7 @@ public class ClusterSizeTest { | ||||||
|     String s = UUID.randomUUID().toString(); |     String s = UUID.randomUUID().toString(); | ||||||
|     Response response = |     Response response = | ||||||
|         client.create(s, Collections.singletonMap("initial-cluster-size", clusterSize)); |         client.create(s, Collections.singletonMap("initial-cluster-size", clusterSize)); | ||||||
|     assertThat(response.isOk()).isFalse(); |     assertThat(response).is(ko()).has(responseCode(Constants.RESPONSE_CODE_PRECONDITION_FAILED)); | ||||||
|     assertThat(response.getResponseCode()).isEqualTo(Constants.RESPONSE_CODE_PRECONDITION_FAILED); |  | ||||||
|   } |   } | ||||||
| 
 | 
 | ||||||
|   @ParameterizedTest |   @ParameterizedTest | ||||||
|  | @ -53,7 +55,7 @@ public class ClusterSizeTest { | ||||||
|     try { |     try { | ||||||
|       Response response = |       Response response = | ||||||
|           client.create(s, Collections.singletonMap("initial-cluster-size", requestedClusterSize)); |           client.create(s, Collections.singletonMap("initial-cluster-size", requestedClusterSize)); | ||||||
|       assertThat(response.isOk()).isTrue(); |       assertThat(response).is(ok()); | ||||||
|       StreamMetadata metadata = client.metadata(s).get(s); |       StreamMetadata metadata = client.metadata(s).get(s); | ||||||
|       assertThat(metadata).isNotNull(); |       assertThat(metadata).isNotNull(); | ||||||
|       assertThat(metadata.getResponseCode()).isEqualTo(Constants.RESPONSE_CODE_OK); |       assertThat(metadata.getResponseCode()).isEqualTo(Constants.RESPONSE_CODE_OK); | ||||||
|  |  | ||||||
|  | @ -16,11 +16,16 @@ | ||||||
| 
 | 
 | ||||||
| package com.rabbitmq.stream; | package com.rabbitmq.stream; | ||||||
| 
 | 
 | ||||||
|  | import static com.rabbitmq.stream.TestUtils.ResponseConditions.ok; | ||||||
|  | import static com.rabbitmq.stream.TestUtils.waitAtMost; | ||||||
|  | import static com.rabbitmq.stream.TestUtils.waitUntil; | ||||||
| import static org.assertj.core.api.Assertions.assertThat; | import static org.assertj.core.api.Assertions.assertThat; | ||||||
| import static org.assertj.core.api.Assertions.fail; | import static org.assertj.core.api.Assertions.fail; | ||||||
| 
 | 
 | ||||||
| import com.rabbitmq.stream.codec.WrapperMessageBuilder; | import com.rabbitmq.stream.codec.WrapperMessageBuilder; | ||||||
| import com.rabbitmq.stream.impl.Client; | import com.rabbitmq.stream.impl.Client; | ||||||
|  | import com.rabbitmq.stream.impl.Client.ClientParameters; | ||||||
|  | import com.rabbitmq.stream.impl.Client.Response; | ||||||
| import java.nio.charset.StandardCharsets; | import java.nio.charset.StandardCharsets; | ||||||
| import java.time.Duration; | import java.time.Duration; | ||||||
| import java.util.*; | import java.util.*; | ||||||
|  | @ -66,7 +71,7 @@ public class FailureTest { | ||||||
|     Client.StreamMetadata streamMetadata = metadata.get(stream); |     Client.StreamMetadata streamMetadata = metadata.get(stream); | ||||||
|     assertThat(streamMetadata).isNotNull(); |     assertThat(streamMetadata).isNotNull(); | ||||||
| 
 | 
 | ||||||
|     TestUtils.waitUntil(() -> client.metadata(stream).get(stream).getReplicas().size() == 2); |     waitUntil(() -> client.metadata(stream).get(stream).getReplicas().size() == 2); | ||||||
| 
 | 
 | ||||||
|     streamMetadata = client.metadata(stream).get(stream); |     streamMetadata = client.metadata(stream).get(stream); | ||||||
|     assertThat(streamMetadata.getLeader().getPort()).isEqualTo(TestUtils.streamPortNode1()); |     assertThat(streamMetadata.getLeader().getPort()).isEqualTo(TestUtils.streamPortNode1()); | ||||||
|  | @ -107,7 +112,7 @@ public class FailureTest { | ||||||
|       assertThat(metadataLatch.await(10, TimeUnit.SECONDS)).isTrue(); |       assertThat(metadataLatch.await(10, TimeUnit.SECONDS)).isTrue(); | ||||||
| 
 | 
 | ||||||
|       // wait until there's a new leader |       // wait until there's a new leader | ||||||
|       TestUtils.waitAtMost( |       waitAtMost( | ||||||
|           Duration.ofSeconds(10), |           Duration.ofSeconds(10), | ||||||
|           () -> { |           () -> { | ||||||
|             Client.StreamMetadata m = publisher.metadata(stream).get(stream); |             Client.StreamMetadata m = publisher.metadata(stream).get(stream); | ||||||
|  | @ -133,7 +138,7 @@ public class FailureTest { | ||||||
|     } |     } | ||||||
| 
 | 
 | ||||||
|     // wait until all the replicas are there |     // wait until all the replicas are there | ||||||
|     TestUtils.waitAtMost( |     waitAtMost( | ||||||
|         Duration.ofSeconds(10), |         Duration.ofSeconds(10), | ||||||
|         () -> { |         () -> { | ||||||
|           LOGGER.info("Getting metadata for {}", stream); |           LOGGER.info("Getting metadata for {}", stream); | ||||||
|  | @ -164,7 +169,7 @@ public class FailureTest { | ||||||
|                       consumeLatch.countDown(); |                       consumeLatch.countDown(); | ||||||
|                     })); |                     })); | ||||||
| 
 | 
 | ||||||
|     TestUtils.waitAtMost( |     waitAtMost( | ||||||
|         Duration.ofSeconds(5), |         Duration.ofSeconds(5), | ||||||
|         () -> { |         () -> { | ||||||
|           Client.Response response = |           Client.Response response = | ||||||
|  | @ -219,7 +224,7 @@ public class FailureTest { | ||||||
|                       cf.get(new Client.ClientParameters().port(TestUtils.streamPortNode2())); |                       cf.get(new Client.ClientParameters().port(TestUtils.streamPortNode2())); | ||||||
|                   // wait until there's a new leader |                   // wait until there's a new leader | ||||||
|                   try { |                   try { | ||||||
|                     TestUtils.waitAtMost( |                     waitAtMost( | ||||||
|                         Duration.ofSeconds(5), |                         Duration.ofSeconds(5), | ||||||
|                         () -> { |                         () -> { | ||||||
|                           Client.StreamMetadata m = locator.metadata(stream).get(stream); |                           Client.StreamMetadata m = locator.metadata(stream).get(stream); | ||||||
|  | @ -314,7 +319,7 @@ public class FailureTest { | ||||||
| 
 | 
 | ||||||
|     Client metadataClient = cf.get(new Client.ClientParameters().port(TestUtils.streamPortNode2())); |     Client metadataClient = cf.get(new Client.ClientParameters().port(TestUtils.streamPortNode2())); | ||||||
|     // wait until all the replicas are there |     // wait until all the replicas are there | ||||||
|     TestUtils.waitAtMost( |     waitAtMost( | ||||||
|         Duration.ofSeconds(5), |         Duration.ofSeconds(5), | ||||||
|         () -> { |         () -> { | ||||||
|           Client.StreamMetadata m = metadataClient.metadata(stream).get(stream); |           Client.StreamMetadata m = metadataClient.metadata(stream).get(stream); | ||||||
|  | @ -350,7 +355,7 @@ public class FailureTest { | ||||||
| 
 | 
 | ||||||
|     Client.Response response = |     Client.Response response = | ||||||
|         consumer.subscribe((byte) 1, stream, OffsetSpecification.first(), 10); |         consumer.subscribe((byte) 1, stream, OffsetSpecification.first(), 10); | ||||||
|     assertThat(response.isOk()).isTrue(); |     assertThat(response).is(ok()); | ||||||
| 
 | 
 | ||||||
|     assertThat(consumedLatch.await(5, TimeUnit.SECONDS)).isTrue(); |     assertThat(consumedLatch.await(5, TimeUnit.SECONDS)).isTrue(); | ||||||
|     assertThat(generations).hasSize(2).contains(0L, 1L); |     assertThat(generations).hasSize(2).contains(0L, 1L); | ||||||
|  | @ -372,8 +377,7 @@ public class FailureTest { | ||||||
|     Client.StreamMetadata streamMetadata = metadata.get(stream); |     Client.StreamMetadata streamMetadata = metadata.get(stream); | ||||||
|     assertThat(streamMetadata).isNotNull(); |     assertThat(streamMetadata).isNotNull(); | ||||||
| 
 | 
 | ||||||
|     TestUtils.waitUntil( |     waitUntil(() -> metadataClient.metadata(stream).get(stream).getReplicas().size() == 2); | ||||||
|         () -> metadataClient.metadata(stream).get(stream).getReplicas().size() == 2); |  | ||||||
| 
 | 
 | ||||||
|     metadata = metadataClient.metadata(stream); |     metadata = metadataClient.metadata(stream); | ||||||
|     streamMetadata = metadata.get(stream); |     streamMetadata = metadata.get(stream); | ||||||
|  | @ -497,7 +501,7 @@ public class FailureTest { | ||||||
| 
 | 
 | ||||||
|     Client.Response response = |     Client.Response response = | ||||||
|         consumer.subscribe((byte) 1, stream, OffsetSpecification.first(), 10); |         consumer.subscribe((byte) 1, stream, OffsetSpecification.first(), 10); | ||||||
|     assertThat(response.isOk()).isTrue(); |     assertThat(response).is(ok()); | ||||||
| 
 | 
 | ||||||
|     // let's publish for a bit of time |     // let's publish for a bit of time | ||||||
|     Thread.sleep(2000); |     Thread.sleep(2000); | ||||||
|  | @ -521,7 +525,7 @@ public class FailureTest { | ||||||
|     confirmedCount = confirmed.size(); |     confirmedCount = confirmed.size(); | ||||||
| 
 | 
 | ||||||
|     // wait until all the replicas are there |     // wait until all the replicas are there | ||||||
|     TestUtils.waitAtMost( |     waitAtMost( | ||||||
|         Duration.ofSeconds(10), |         Duration.ofSeconds(10), | ||||||
|         () -> { |         () -> { | ||||||
|           Client.StreamMetadata m = metadataClient.metadata(stream).get(stream); |           Client.StreamMetadata m = metadataClient.metadata(stream).get(stream); | ||||||
|  | @ -535,9 +539,9 @@ public class FailureTest { | ||||||
| 
 | 
 | ||||||
|     keepPublishing.set(false); |     keepPublishing.set(false); | ||||||
| 
 | 
 | ||||||
|     assertThat(publishingLatch.await(5, TimeUnit.SECONDS)).isTrue(); |     assertThat(publishingLatch.await(10, TimeUnit.SECONDS)).isTrue(); | ||||||
| 
 | 
 | ||||||
|     TestUtils.waitAtMost(Duration.ofSeconds(5), () -> consumed.size() >= confirmed.size()); |     waitAtMost(Duration.ofSeconds(10), () -> consumed.size() >= confirmed.size()); | ||||||
| 
 | 
 | ||||||
|     assertThat(generations).hasSize(2).contains(0L, 1L); |     assertThat(generations).hasSize(2).contains(0L, 1L); | ||||||
|     assertThat(consumed).hasSizeGreaterThanOrEqualTo(confirmed.size()); |     assertThat(consumed).hasSizeGreaterThanOrEqualTo(confirmed.size()); | ||||||
|  | @ -551,4 +555,33 @@ public class FailureTest { | ||||||
| 
 | 
 | ||||||
|     confirmedIds.forEach(confirmedId -> assertThat(consumedIds).contains(confirmedId)); |     confirmedIds.forEach(confirmedId -> assertThat(consumedIds).contains(confirmedId)); | ||||||
|   } |   } | ||||||
|  | 
 | ||||||
|  |   @Test | ||||||
|  |   void declarePublisherShouldNotReturnStreamDoesNotExistOnRestart() throws Exception { | ||||||
|  |     try { | ||||||
|  |       Host.rabbitmqctl("stop_app"); | ||||||
|  |     } finally { | ||||||
|  |       Host.rabbitmqctl("start_app"); | ||||||
|  |     } | ||||||
|  |     AtomicReference<Client> client = new AtomicReference<>(); | ||||||
|  |     waitUntil( | ||||||
|  |         () -> { | ||||||
|  |           try { | ||||||
|  |             client.set(cf.get(new ClientParameters().port(TestUtils.streamPortNode1()))); | ||||||
|  |           } catch (Exception e) { | ||||||
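|  |             // broker may still be restarting; swallow the failure and retry until a connection succeeds | ||||||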
|  | 
 | ||||||
|  |           } | ||||||
|  |           return client.get() != null; | ||||||
|  |         }); | ||||||
|  |     Set<Short> responseCodes = ConcurrentHashMap.newKeySet(); | ||||||
|  | 
 | ||||||
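|  |     // declarePublisher may fail transiently while the stream recovers, but it must never report STREAM_DOES_NOT_EXIST | ||||||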
|  |     waitUntil( | ||||||
|  |         () -> { | ||||||
|  |           Response response = client.get().declarePublisher((byte) 0, null, stream); | ||||||
|  |           responseCodes.add(response.getResponseCode()); | ||||||
|  |           return response.isOk(); | ||||||
|  |         }); | ||||||
|  | 
 | ||||||
|  |     assertThat(responseCodes).doesNotContain(Constants.RESPONSE_CODE_STREAM_DOES_NOT_EXIST); | ||||||
|  |   } | ||||||
| } | } | ||||||
|  |  | ||||||
|  | @ -16,6 +16,9 @@ | ||||||
| 
 | 
 | ||||||
| package com.rabbitmq.stream; | package com.rabbitmq.stream; | ||||||
| 
 | 
 | ||||||
|  | import static com.rabbitmq.stream.TestUtils.ResponseConditions.ko; | ||||||
|  | import static com.rabbitmq.stream.TestUtils.ResponseConditions.ok; | ||||||
|  | import static com.rabbitmq.stream.TestUtils.ResponseConditions.responseCode; | ||||||
| import static java.util.concurrent.TimeUnit.SECONDS; | import static java.util.concurrent.TimeUnit.SECONDS; | ||||||
| import static org.assertj.core.api.Assertions.assertThat; | import static org.assertj.core.api.Assertions.assertThat; | ||||||
| 
 | 
 | ||||||
|  | @ -47,8 +50,7 @@ public class LeaderLocatorTest { | ||||||
|     Client client = cf.get(new Client.ClientParameters().port(TestUtils.streamPortNode1())); |     Client client = cf.get(new Client.ClientParameters().port(TestUtils.streamPortNode1())); | ||||||
|     String s = UUID.randomUUID().toString(); |     String s = UUID.randomUUID().toString(); | ||||||
|     Response response = client.create(s, Collections.singletonMap("queue-leader-locator", "foo")); |     Response response = client.create(s, Collections.singletonMap("queue-leader-locator", "foo")); | ||||||
|     assertThat(response.isOk()).isFalse(); |     assertThat(response).is(ko()).has(responseCode(Constants.RESPONSE_CODE_PRECONDITION_FAILED)); | ||||||
|     assertThat(response.getResponseCode()).isEqualTo(Constants.RESPONSE_CODE_PRECONDITION_FAILED); |  | ||||||
|   } |   } | ||||||
| 
 | 
 | ||||||
|   @Test |   @Test | ||||||
|  | @ -60,7 +62,7 @@ public class LeaderLocatorTest { | ||||||
|       try { |       try { | ||||||
|         Response response = |         Response response = | ||||||
|             client.create(s, Collections.singletonMap("queue-leader-locator", "client-local")); |             client.create(s, Collections.singletonMap("queue-leader-locator", "client-local")); | ||||||
|         assertThat(response.isOk()).isTrue(); |         assertThat(response).is(ok()); | ||||||
|         StreamMetadata metadata = client.metadata(s).get(s); |         StreamMetadata metadata = client.metadata(s).get(s); | ||||||
|         assertThat(metadata).isNotNull(); |         assertThat(metadata).isNotNull(); | ||||||
|         assertThat(metadata.getResponseCode()).isEqualTo(Constants.RESPONSE_CODE_OK); |         assertThat(metadata.getResponseCode()).isEqualTo(Constants.RESPONSE_CODE_OK); | ||||||
|  | @ -136,7 +138,7 @@ public class LeaderLocatorTest { | ||||||
|                 Response response = |                 Response response = | ||||||
|                     client.create( |                     client.create( | ||||||
|                         s, Collections.singletonMap("queue-leader-locator", "least-leaders")); |                         s, Collections.singletonMap("queue-leader-locator", "least-leaders")); | ||||||
|                 assertThat(response.isOk()).isTrue(); |                 assertThat(response).is(ok()); | ||||||
|                 createdStreams.add(s); |                 createdStreams.add(s); | ||||||
|               }); |               }); | ||||||
| 
 | 
 | ||||||
|  |  | ||||||
|  | @ -16,11 +16,13 @@ | ||||||
| 
 | 
 | ||||||
| package com.rabbitmq.stream; | package com.rabbitmq.stream; | ||||||
| 
 | 
 | ||||||
|  | import static com.rabbitmq.stream.TestUtils.ResponseConditions.ok; | ||||||
| import static java.util.concurrent.TimeUnit.SECONDS; | import static java.util.concurrent.TimeUnit.SECONDS; | ||||||
| import static org.assertj.core.api.Assertions.assertThat; | import static org.assertj.core.api.Assertions.assertThat; | ||||||
| import static org.junit.jupiter.api.Assertions.fail; | import static org.junit.jupiter.api.Assertions.fail; | ||||||
| 
 | 
 | ||||||
| import com.rabbitmq.stream.impl.Client; | import com.rabbitmq.stream.impl.Client; | ||||||
|  | import com.rabbitmq.stream.impl.Client.Response; | ||||||
| import io.netty.channel.EventLoopGroup; | import io.netty.channel.EventLoopGroup; | ||||||
| import io.netty.channel.nio.NioEventLoopGroup; | import io.netty.channel.nio.NioEventLoopGroup; | ||||||
| import java.lang.reflect.Field; | import java.lang.reflect.Field; | ||||||
|  | @ -30,6 +32,7 @@ import java.util.Set; | ||||||
| import java.util.UUID; | import java.util.UUID; | ||||||
| import java.util.concurrent.ConcurrentHashMap; | import java.util.concurrent.ConcurrentHashMap; | ||||||
| import java.util.function.BooleanSupplier; | import java.util.function.BooleanSupplier; | ||||||
|  | import org.assertj.core.api.Condition; | ||||||
| import org.junit.jupiter.api.TestInfo; | import org.junit.jupiter.api.TestInfo; | ||||||
| import org.junit.jupiter.api.extension.*; | import org.junit.jupiter.api.extension.*; | ||||||
| 
 | 
 | ||||||
|  | @ -106,7 +109,7 @@ public class TestUtils { | ||||||
|                     .eventLoopGroup(eventLoopGroup(context)) |                     .eventLoopGroup(eventLoopGroup(context)) | ||||||
|                     .port(streamPortNode1())); |                     .port(streamPortNode1())); | ||||||
|         Client.Response response = client.create(stream); |         Client.Response response = client.create(stream); | ||||||
|         assertThat(response.isOk()).isTrue(); |         assertThat(response).is(ok()); | ||||||
|         client.close(); |         client.close(); | ||||||
|         store(context).put("testMethodStream", stream); |         store(context).put("testMethodStream", stream); | ||||||
|       } catch (NoSuchFieldException e) { |       } catch (NoSuchFieldException e) { | ||||||
|  | @ -136,7 +139,7 @@ public class TestUtils { | ||||||
|                     .eventLoopGroup(eventLoopGroup(context)) |                     .eventLoopGroup(eventLoopGroup(context)) | ||||||
|                     .port(streamPortNode1())); |                     .port(streamPortNode1())); | ||||||
|         Client.Response response = client.delete(stream); |         Client.Response response = client.delete(stream); | ||||||
|         assertThat(response.isOk()).isTrue(); |         assertThat(response).is(ok()); | ||||||
|         client.close(); |         client.close(); | ||||||
|         store(context).remove("testMethodStream"); |         store(context).remove("testMethodStream"); | ||||||
|       } catch (NoSuchFieldException e) { |       } catch (NoSuchFieldException e) { | ||||||
|  | @ -197,4 +200,22 @@ public class TestUtils { | ||||||
|       } |       } | ||||||
|     } |     } | ||||||
|   } |   } | ||||||
|  | 
 | ||||||
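|  |   /** AssertJ {@link Condition} helpers for asserting on {@link Response} results. */ | ||||||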
|  |   static class ResponseConditions { | ||||||
|  | 
 | ||||||
|  |     static Condition<Response> ok() { | ||||||
|  |       return new Condition<>(Response::isOk, "Response should be OK"); | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     static Condition<Response> ko() { | ||||||
|  |       return new Condition<>(response -> !response.isOk(), "Response should be KO"); | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     static Condition<Response> responseCode(short expectedResponse) { | ||||||
|  |       return new Condition<>( | ||||||
|  |           response -> response.getResponseCode() == expectedResponse, | ||||||
|  |           "response code %d", | ||||||
|  |           expectedResponse); | ||||||
|  |     } | ||||||
|  |   } | ||||||
| } | } | ||||||
|  |  | ||||||
|  | @ -16,7 +16,7 @@ all() -> | ||||||
|     [{group, non_parallel_tests}]. |     [{group, non_parallel_tests}]. | ||||||
| 
 | 
 | ||||||
| groups() -> | groups() -> | ||||||
|     [{non_parallel_tests, [], [manage_super_stream]}]. |     [{non_parallel_tests, [], [manage_super_stream, lookup_leader]}]. | ||||||
| 
 | 
 | ||||||
| %% ------------------------------------------------------------------- | %% ------------------------------------------------------------------- | ||||||
| %% Testsuite setup/teardown. | %% Testsuite setup/teardown. | ||||||
|  | @ -71,6 +71,17 @@ end_per_testcase(Testcase, Config) -> | ||||||
| %% Testcases. | %% Testcases. | ||||||
| %% ------------------------------------------------------------------- | %% ------------------------------------------------------------------- | ||||||
| 
 | 
 | ||||||
|  | lookup_leader(Config) -> | ||||||
|  |     Stream = <<"stream_manager_lookup_leader_stream">>, | ||||||
|  |     ?assertMatch({ok, _}, create_stream(Config, Stream)), | ||||||
|  | 
 | ||||||
|  |     {ok, Pid} = lookup_leader(Config, Stream), | ||||||
|  |     ?assert(is_pid(Pid)), | ||||||
|  | 
 | ||||||
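|  |     %% looking up a stream that was never created returns {error, not_found} | ||||||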
|  |     ?assertEqual({error, not_found}, lookup_leader(Config, <<"foo">>)), | ||||||
|  | 
 | ||||||
|  |     ?assertEqual({ok, deleted}, delete_stream(Config, Stream)). | ||||||
|  | 
 | ||||||
| manage_super_stream(Config) -> | manage_super_stream(Config) -> | ||||||
|     % create super stream |     % create super stream | ||||||
|     ?assertEqual(ok, |     ?assertEqual(ok, | ||||||
|  | @ -140,6 +151,20 @@ create_stream(Config, Name) -> | ||||||
|                                  create, |                                  create, | ||||||
|                                  [<<"/">>, Name, [], <<"guest">>]). |                                  [<<"/">>, Name, [], <<"guest">>]). | ||||||
| 
 | 
 | ||||||
|  | delete_stream(Config, Name) -> | ||||||
|  |     rabbit_ct_broker_helpers:rpc(Config, | ||||||
|  |                                  0, | ||||||
|  |                                  rabbit_stream_manager, | ||||||
|  |                                  delete, | ||||||
|  |                                  [<<"/">>, Name, <<"guest">>]). | ||||||
|  | 
 | ||||||
|  | lookup_leader(Config, Name) -> | ||||||
|  |     rabbit_ct_broker_helpers:rpc(Config, | ||||||
|  |                                  0, | ||||||
|  |                                  rabbit_stream_manager, | ||||||
|  |                                  lookup_leader, | ||||||
|  |                                  [<<"/">>, Name]). | ||||||
|  | 
 | ||||||
| partitions(Config, Name) -> | partitions(Config, Name) -> | ||||||
|     rabbit_ct_broker_helpers:rpc(Config, |     rabbit_ct_broker_helpers:rpc(Config, | ||||||
|                                  0, |                                  0, | ||||||
|  |  | ||||||
|  | @ -27,11 +27,11 @@ | ||||||
| 
 | 
 | ||||||
|     <properties> |     <properties> | ||||||
| 	<stream-client.version>[0.5.0-SNAPSHOT,1.0-SNAPSHOT)</stream-client.version> | 	<stream-client.version>[0.5.0-SNAPSHOT,1.0-SNAPSHOT)</stream-client.version> | ||||||
|         <junit.jupiter.version>5.8.1</junit.jupiter.version> |         <junit.jupiter.version>5.8.2</junit.jupiter.version> | ||||||
|         <assertj.version>3.21.0</assertj.version> |         <assertj.version>3.21.0</assertj.version> | ||||||
|         <okhttp.version>4.9.2</okhttp.version> |         <okhttp.version>4.9.3</okhttp.version> | ||||||
|         <gson.version>2.8.9</gson.version> |         <gson.version>2.8.9</gson.version> | ||||||
|         <logback.version>1.2.6</logback.version> |         <logback.version>1.2.7</logback.version> | ||||||
|         <maven.compiler.plugin.version>3.8.1</maven.compiler.plugin.version> |         <maven.compiler.plugin.version>3.8.1</maven.compiler.plugin.version> | ||||||
|         <maven-surefire-plugin.version>2.22.2</maven-surefire-plugin.version> |         <maven-surefire-plugin.version>2.22.2</maven-surefire-plugin.version> | ||||||
|         <spotless.version>2.2.0</spotless.version> |         <spotless.version>2.2.0</spotless.version> | ||||||
|  |  | ||||||
|  | @ -15,7 +15,9 @@ deps_dirs: | ||||||
|   - bazel-bin/external/* |   - bazel-bin/external/* | ||||||
| include_dirs: | include_dirs: | ||||||
|   - deps |   - deps | ||||||
|  |   - deps/* | ||||||
|   - deps/*/include |   - deps/*/include | ||||||
|  |   - deps/*/src | ||||||
|   - bazel-bin/external |   - bazel-bin/external | ||||||
|   - bazel-bin/external/*/include |   - bazel-bin/external/*/include | ||||||
| plt_path: bazel-bin/deps/rabbit/.base_plt.plt | plt_path: bazel-bin/deps/rabbit/.base_plt.plt | ||||||
|  |  | ||||||