diff --git a/.gitlab/CODEOWNERS b/.gitlab/CODEOWNERS index 93061d39f41..8772b5c787e 100644 --- a/.gitlab/CODEOWNERS +++ b/.gitlab/CODEOWNERS @@ -7,10 +7,10 @@ .gitlab/CODEOWNERS @gitlab-org/development-leaders @gitlab-org/tw-leadership ## Allows release tooling and Gitaly team members to update the Gitaly Version -/GITALY_SERVER_VERSION @project_278964_bot_77e28085fcec07f14dfd31c689824b5b @gitlab-org/maintainers/rails-backend @gitlab-org/delivery @gl-gitaly +/GITALY_SERVER_VERSION @project_278964_bot_e2e6cca5e3b0076fdecec369cccb9e18 @gitlab-org/maintainers/rails-backend @gitlab-org/delivery @gl-gitaly ## Allows release tooling, KAS version maintainers and the delivery team to update the KAS version -/GITLAB_KAS_VERSION @project_278964_bot_77e28085fcec07f14dfd31c689824b5b @gitlab-org/maintainers/kas-version-maintainers @gitlab-org/maintainers/rails-backend @gitlab-org/delivery +/GITLAB_KAS_VERSION @project_278964_bot_e2e6cca5e3b0076fdecec369cccb9e18 @gitlab-org/maintainers/kas-version-maintainers @gitlab-org/maintainers/rails-backend @gitlab-org/delivery ## Allows automated updates to E2E test knapsack reports /qa/knapsack/**/*.json @project_278964_bot_bd38289efeb650826d995b5f830ca9cb @gl-dx diff --git a/app/assets/javascripts/glql/components/presenters/table.vue b/app/assets/javascripts/glql/components/presenters/table.vue index 97670fab10b..6cc578fd5bc 100644 --- a/app/assets/javascripts/glql/components/presenters/table.vue +++ b/app/assets/javascripts/glql/components/presenters/table.vue @@ -76,7 +76,6 @@ export default { persist-collapsed-state class="!gl-mt-5 gl-overflow-hidden" :body-class="{ '!gl-m-[-1px] !gl-p-0': items.length || isPreview }" - footer-class="!gl-border-t-0" @collapsed="isCollapsed = true" @expanded="isCollapsed = false" > diff --git a/app/finders/groups/user_groups_finder.rb b/app/finders/groups/user_groups_finder.rb index 0a5342922fa..7ac0aef5606 100644 --- a/app/finders/groups/user_groups_finder.rb +++ b/app/finders/groups/user_groups_finder.rb @@ -11,6 +11,7 @@ # permissions: string (see Types::Groups::UserPermissionsEnum) # search: string used for search on path and group name # sort: string (see Types::Namespaces::GroupSortEnum) +# exact_matches_first: boolean used to enable prioritization of exact matches # # Initially created to filter user groups and descendants where the user can create projects module Groups @@ -26,8 +27,11 @@ module Groups return Group.none if target_user.blank? items = by_permission_scope - items = by_search(items) + # Search will perform an ORDER BY to ensure exact matches are returned first. + return by_search(items, exact_matches_first: true) if exact_matches_first_enabled? + + items = by_search(items) sort(items) end @@ -68,5 +72,10 @@ module Groups items.sort_by_attribute(params[:sort]) end + + def exact_matches_first_enabled? + params[:exact_matches_first] && params[:search].present? && + Feature.enabled?(:exact_matches_first_project_transfer, current_user) + end end end diff --git a/app/finders/notes_finder.rb b/app/finders/notes_finder.rb index 42a92410953..85693642ceb 100644 --- a/app/finders/notes_finder.rb +++ b/app/finders/notes_finder.rb @@ -193,7 +193,6 @@ class NotesFinder end def without_hidden_notes? - return false unless Feature.enabled?(:hidden_notes) return false if @current_user&.can_admin_all_resources?
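+    # The hidden_notes feature flag was removed: notes from banned users
+    # are now always hidden unless the user can admin all resources.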
true diff --git a/app/models/note.rb b/app/models/note.rb index 08e9e320b62..2d54542eead 100644 --- a/app/models/note.rb +++ b/app/models/note.rb @@ -171,11 +171,7 @@ class Note < ApplicationRecord scope :with_metadata, -> { includes(:system_note_metadata) } scope :without_hidden, -> { - if Feature.enabled?(:hidden_notes) - where_not_exists(Users::BannedUser.where('notes.author_id = banned_users.user_id')) - else - all - end + where_not_exists(Users::BannedUser.where('notes.author_id = banned_users.user_id')) } scope :for_note_or_capitalized_note, ->(text) { where(note: [text, text.capitalize]) } diff --git a/app/policies/base_policy.rb b/app/policies/base_policy.rb index 30dfbf3edf2..6ec7c1d5991 100644 --- a/app/policies/base_policy.rb +++ b/app/policies/base_policy.rb @@ -77,6 +77,12 @@ class BasePolicy < DeclarativePolicy::Base with_options scope: :global, score: 0 condition(:can_create_organization) { Gitlab::CurrentSettings.can_create_organization } + desc "Only admins can destroy projects" + condition(:owner_cannot_destroy_project, scope: :global) do + ::Gitlab::CurrentSettings.current_application_settings + .default_project_deletion_protection + end + desc "The application is restricted from public visibility" condition(:restricted_public_level, scope: :global) do Gitlab::CurrentSettings.current_application_settings.restricted_visibility_levels.include?(Gitlab::VisibilityLevel::PUBLIC) diff --git a/app/services/click_house/sync_strategies/base_sync_strategy.rb b/app/services/click_house/sync_strategies/base_sync_strategy.rb index 2a0b4cf7062..8aaad8f8dc8 100644 --- a/app/services/click_house/sync_strategies/base_sync_strategy.rb +++ b/app/services/click_house/sync_strategies/base_sync_strategy.rb @@ -74,12 +74,12 @@ module ClickHouse def process_batch(context) Enumerator.new do |yielder| has_more_data = false - batching_scope.each_batch(of: BATCH_SIZE) do |relation| - records = relation.select(projections).to_a + batching_scope.each_batch(of: BATCH_SIZE, column: primary_key) do |relation| + records = relation.select(*projections, "#{primary_key} AS id_for_cursor").to_a has_more_data = records.size == BATCH_SIZE records.each do |row| yielder << transform_row(row) - context.last_processed_id = row.id + context.last_processed_id = row.id_for_cursor break if context.record_limit_reached? end @@ -112,6 +112,12 @@ module ClickHouse raise NotImplementedError, "Subclasses must implement `projections`" end + # UInt type primary key used for cursor management, + # override if necessary. + def primary_key + :id + end + def csv_mapping raise NotImplementedError, "Subclasses must implement `csv_mapping`" end diff --git a/app/views/groups/settings/_remove.html.haml b/app/views/groups/settings/_remove.html.haml index 98fa621f8f7..e61fa787ffe 100644 --- a/app/views/groups/settings/_remove.html.haml +++ b/app/views/groups/settings/_remove.html.haml @@ -1,3 +1,4 @@ +- return unless can?(current_user, :remove_group, group) - remove_form_id = local_assigns.fetch(:remove_form_id, nil) - if group.adjourned_deletion? 
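The `ClickHouse::SyncStrategies::BaseSyncStrategy` change above makes the batching cursor column configurable. A minimal sketch of a subclass using the new hook, for a hypothetical table whose integer primary key is not `id` (the class, table, column, and mapping names are illustrative, not from this diff):

```ruby
# frozen_string_literal: true

module ClickHouse
  module SyncStrategies
    # Hypothetical strategy syncing a table keyed by `audit_event_id`.
    class AuditEventSyncStrategy < BaseSyncStrategy
      private

      # Columns copied into ClickHouse for each row.
      def projections
        [:audit_event_id, :author_id, :created_at]
      end

      # Maps CSV columns to row attributes for the ClickHouse insert
      # (illustrative mapping only).
      def csv_mapping
        { id: :id_for_cursor, author_id: :author_id, created_at: :created_at }
      end

      # Cursor column for each_batch; the base class selects it as
      # `id_for_cursor` and stores it in context.last_processed_id.
      def primary_key
        :audit_event_id
      end
    end
  end
end
```

Because the base class defaults `primary_key` to `:id`, existing strategies keep working unchanged.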
diff --git a/config/feature_flags/development/hidden_notes.yml b/config/feature_flags/development/hidden_notes.yml deleted file mode 100644 index 1510ebac17b..00000000000 --- a/config/feature_flags/development/hidden_notes.yml +++ /dev/null @@ -1,8 +0,0 @@ ---- -name: hidden_notes -introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/112973 -rollout_issue_url: https://gitlab.com/gitlab-org/gitlab/-/issues/405148 -milestone: '15.11' -type: development -group: group::authorization -default_enabled: false diff --git a/config/feature_flags/gitlab_com_derisk/exact_matches_first_project_transfer.yml b/config/feature_flags/gitlab_com_derisk/exact_matches_first_project_transfer.yml new file mode 100644 index 00000000000..eb6c95a63b1 --- /dev/null +++ b/config/feature_flags/gitlab_com_derisk/exact_matches_first_project_transfer.yml @@ -0,0 +1,10 @@ +--- +name: exact_matches_first_project_transfer +description: Prioritize exact matches when searching for groups in project transfer +feature_issue_url: https://gitlab.com/gitlab-org/gitlab/-/issues/536745 +introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/188711 +rollout_issue_url: https://gitlab.com/gitlab-org/gitlab/-/issues/536751 +milestone: '18.0' +group: group::project management +type: gitlab_com_derisk +default_enabled: false diff --git a/config/feature_flags/ops/database_reindexing.yml b/config/feature_flags/ops/database_reindexing.yml index fb0c29393f5..8af47123ad0 100644 --- a/config/feature_flags/ops/database_reindexing.yml +++ b/config/feature_flags/ops/database_reindexing.yml @@ -5,4 +5,4 @@ rollout_issue_url: milestone: '13.5' type: ops group: group::database -default_enabled: false +default_enabled: true diff --git a/db/post_migrate/20250411093351_sync_drop_artifacts_partition_id_job_id_index.rb b/db/post_migrate/20250411093351_sync_drop_artifacts_partition_id_job_id_index.rb new file mode 100644 index 00000000000..22a04a7da04 --- /dev/null +++ b/db/post_migrate/20250411093351_sync_drop_artifacts_partition_id_job_id_index.rb @@ -0,0 +1,18 @@ +# frozen_string_literal: true + +class SyncDropArtifactsPartitionIdJobIdIndex < Gitlab::Database::Migration[2.2] + include Gitlab::Database::PartitioningMigrationHelpers + + milestone '18.0' + disable_ddl_transaction! 
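+  # Concurrent partitioned index operations cannot run inside a
+  # transaction, hence disable_ddl_transaction! above.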
+ + INDEX_NAME = :p_ci_job_artifacts_partition_id_job_id_idx + + def up + remove_concurrent_partitioned_index_by_name :p_ci_job_artifacts, INDEX_NAME + end + + def down + add_concurrent_partitioned_index :p_ci_job_artifacts, [:partition_id, :job_id], name: INDEX_NAME + end +end diff --git a/db/schema_migrations/20250411093351 b/db/schema_migrations/20250411093351 new file mode 100644 index 00000000000..e7be90ad3f7 --- /dev/null +++ b/db/schema_migrations/20250411093351 @@ -0,0 +1 @@ +3edf57d181e9c073e472b33eac4b599afd3bddca2d99130e08fb58457f187cb2 \ No newline at end of file diff --git a/db/structure.sql b/db/structure.sql index ed20e986098..8d01017ac44 100644 --- a/db/structure.sql +++ b/db/structure.sql @@ -34490,10 +34490,6 @@ CREATE INDEX p_ci_job_artifacts_project_id_file_type_id_idx ON ONLY p_ci_job_art CREATE INDEX index_ci_job_artifacts_on_id_project_id_and_file_type ON ci_job_artifacts USING btree (project_id, file_type, id); -CREATE INDEX p_ci_job_artifacts_partition_id_job_id_idx ON ONLY p_ci_job_artifacts USING btree (partition_id, job_id); - -CREATE INDEX index_ci_job_artifacts_on_partition_id_job_id ON ci_job_artifacts USING btree (partition_id, job_id); - CREATE INDEX p_ci_job_artifacts_project_id_id_idx1 ON ONLY p_ci_job_artifacts USING btree (project_id, id); CREATE INDEX index_ci_job_artifacts_on_project_id_and_id ON ci_job_artifacts USING btree (project_id, id); @@ -40986,8 +40982,6 @@ ALTER INDEX p_ci_job_artifacts_project_id_created_at_id_idx ATTACH PARTITION ind ALTER INDEX p_ci_job_artifacts_project_id_file_type_id_idx ATTACH PARTITION index_ci_job_artifacts_on_id_project_id_and_file_type; -ALTER INDEX p_ci_job_artifacts_partition_id_job_id_idx ATTACH PARTITION index_ci_job_artifacts_on_partition_id_job_id; - ALTER INDEX p_ci_job_artifacts_project_id_id_idx1 ATTACH PARTITION index_ci_job_artifacts_on_project_id_and_id; ALTER INDEX p_ci_job_artifacts_project_id_idx1 ATTACH PARTITION index_ci_job_artifacts_on_project_id_for_security_reports; diff --git a/doc/administration/gitlab_duo_self_hosted/troubleshooting.md b/doc/administration/gitlab_duo_self_hosted/troubleshooting.md index 4d777849b43..0d15075c064 100644 --- a/doc/administration/gitlab_duo_self_hosted/troubleshooting.md +++ b/doc/administration/gitlab_duo_self_hosted/troubleshooting.md @@ -257,7 +257,7 @@ echo $AIGW_CUSTOM_MODELS__ENABLED # must be true ``` If the environment variables are not set up correctly, set them by -[creating a container](../../install/install_ai_gateway.md#find-the-ai-gateway-release). +[creating a container](../../install/install_ai_gateway.md#find-the-ai-gateway-image). ## Check if the model is reachable from AI gateway @@ -295,7 +295,7 @@ If not successful, verify your network configurations. ## The image's platform does not match the host -When [finding the AI gateway release](../../install/install_ai_gateway.md#find-the-ai-gateway-release), +When [finding the AI gateway release](../../install/install_ai_gateway.md#find-the-ai-gateway-image), you might get an error that states `The requested image's platform (linux/amd64) does not match the detected host`. 
To work around this error, add `--platform linux/amd64` to the `docker run` command: diff --git a/doc/administration/raketasks/maintenance.md b/doc/administration/raketasks/maintenance.md index 8b1df3d6187..ff860658ba1 100644 --- a/doc/administration/raketasks/maintenance.md +++ b/doc/administration/raketasks/maintenance.md @@ -387,16 +387,17 @@ Starting with GitLab 17.1, migrations are executed in an ## Rebuild database indexes -{{< details >}} +{{< history >}} -- Status: Experiment +- [Introduced](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/42705) in GitLab 13.5 [with a flag](../../administration/feature_flags.md) named `database_reindexing`. Disabled by default. +- [Enabled on GitLab.com](https://gitlab.com/groups/gitlab-org/-/epics/3989) in GitLab 13.9. +- [Enabled on GitLab Self-Managed and GitLab Dedicated](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/188548) in GitLab 18.0. -{{< /details >}} +{{< /history >}} {{< alert type="warning" >}} -This feature is experimental, and isn't enabled by default. Use caution when -running in a production environment, and run during off-peak times. +Use with caution when running in a production environment, and run during off-peak times. {{< /alert >}} @@ -410,7 +411,6 @@ Prerequisites: - This feature requires PostgreSQL 12 or later. - These index types are **not supported**: expression indexes and indexes used for constraint exclusion. -- Not enabled by default. A feature flag must be set for this task to work: `Feature.enable("database_reindexing")` ### Run reindexing diff --git a/doc/api/graphql/getting_started.md b/doc/api/graphql/getting_started.md index 84bf929cf00..fa8cbd61de6 100644 --- a/doc/api/graphql/getting_started.md +++ b/doc/api/graphql/getting_started.md @@ -60,7 +60,7 @@ curl "https://gitlab.com/api/graphql" --header "Authorization: Bearer $GRAPHQL_T ``` To nest strings in the query string, -wrap the data in single quotes or escape the strings with \\: +wrap the data in single quotes or escape the strings with ` \\ `: ```shell curl "https://gitlab.com/api/graphql" --header "Authorization: Bearer $GRAPHQL_TOKEN" \ diff --git a/doc/api/graphql/reference/_index.md b/doc/api/graphql/reference/_index.md index 64e854ecb21..4717a29ce69 100644 --- a/doc/api/graphql/reference/_index.md +++ b/doc/api/graphql/reference/_index.md @@ -26431,6 +26431,7 @@ Relationship between an epic and an issue. | `severity` | [`IssuableSeverity`](#issuableseverity) | Severity level of the incident. | | `slaDueAt` | [`Time`](#time) | Timestamp of when the issue SLA expires. | | `state` | [`IssueState!`](#issuestate) | State of the issue. | +| `status` {{< icon name="warning-solid" >}} | [`WorkItemStatus`](#workitemstatus) | **Introduced** in GitLab 18.0. **Status**: Experiment. Status of the issue. | | `statusPagePublishedIncident` | [`Boolean`](#boolean) | Indicates whether an issue is published to the status page. | | `subscribed` | [`Boolean!`](#boolean) | Indicates the currently logged in user is subscribed to the issue. | | `taskCompletionStatus` | [`TaskCompletionStatus!`](#taskcompletionstatus) | Task completion status of the issue. | @@ -29829,6 +29830,7 @@ Describes an issuable resource link for incident issues. | `severity` | [`IssuableSeverity`](#issuableseverity) | Severity level of the incident. | | `slaDueAt` | [`Time`](#time) | Timestamp of when the issue SLA expires. | | `state` | [`IssueState!`](#issuestate) | State of the issue. 
| +| `status` {{< icon name="warning-solid" >}} | [`WorkItemStatus`](#workitemstatus) | **Introduced** in GitLab 18.0. **Status**: Experiment. Status of the issue. | | `statusPagePublishedIncident` | [`Boolean`](#boolean) | Indicates whether an issue is published to the status page. | | `subscribed` | [`Boolean!`](#boolean) | Indicates the currently logged in user is subscribed to the issue. | | `taskCompletionStatus` | [`TaskCompletionStatus!`](#taskcompletionstatus) | Task completion status of the issue. | diff --git a/doc/ci/docker/using_docker_images.md b/doc/ci/docker/using_docker_images.md index 652071bd14e..9e29137275d 100644 --- a/doc/ci/docker/using_docker_images.md +++ b/doc/ci/docker/using_docker_images.md @@ -314,7 +314,7 @@ Use one of the following methods to determine the value for `DOCKER_AUTH_CONFIG` {{< alert type="note" >}} - If your username includes special characters like `@`, you must escape them with a backslash (\) to prevent authentication problems. + If your username includes special characters like `@`, you must escape them with a backslash (` \ `) to prevent authentication problems. {{< /alert >}} diff --git a/doc/ci/pipelines/downstream_pipelines.md b/doc/ci/pipelines/downstream_pipelines.md index 06eee81db46..fe6dcbd08c6 100644 --- a/doc/ci/pipelines/downstream_pipelines.md +++ b/doc/ci/pipelines/downstream_pipelines.md @@ -233,7 +233,7 @@ with the CI/CD configuration in that file. The artifact path is parsed by GitLab, not the runner, so the path must match the syntax for the OS running GitLab. If GitLab is running on Linux but using a Windows runner for testing, the path separator for the trigger job is `/`. Other CI/CD -configuration for jobs that use the Windows runner, like scripts, use \. +configuration for jobs that use the Windows runner, like scripts, use ` \ `. You cannot use CI/CD variables in an `include` section in a dynamic child pipeline's configuration. [Issue 378717](https://gitlab.com/gitlab-org/gitlab/-/issues/378717) proposes fixing diff --git a/doc/ci/yaml/_index.md b/doc/ci/yaml/_index.md index 9cead8c5810..b7a8fff83ae 100644 --- a/doc/ci/yaml/_index.md +++ b/doc/ci/yaml/_index.md @@ -4153,7 +4153,7 @@ job: You can use CI/CD variables to define the description, but some shells [use different syntax](../variables/_index.md#use-cicd-variables-in-job-scripts) to reference variables. Similarly, some shells might require special characters - to be escaped. For example, backticks (`` ` ``) might need to be escaped with a backslash (\). + to be escaped. For example, backticks (`` ` ``) might need to be escaped with a backslash (` \ `). #### `release:ref` diff --git a/doc/development/ai_features/duo_chat.md b/doc/development/ai_features/duo_chat.md index 66ca4cc3930..31a99c0010b 100644 --- a/doc/development/ai_features/duo_chat.md +++ b/doc/development/ai_features/duo_chat.md @@ -567,53 +567,50 @@ flow of how we construct a Chat prompt: from original GraphQL request and initializes a new instance of `Gitlab::Llm::Completions::Chat` and calls `execute` on it ([code](https://gitlab.com/gitlab-org/gitlab/-/blob/55b8eb6ff869e61500c839074f080979cc60f9de/ee/lib/gitlab/llm/completions_factory.rb#L89)) -1. `Gitlab::Llm::Completions::Chat#execute` calls `Gitlab::Llm::Chain::Agents::SingleActionExecutor`. - ([code](https://gitlab.com/gitlab-org/gitlab/-/blob/d539f64ce6c5bed72ab65294da3bcebdc43f68c6/ee/lib/gitlab/llm/completions/chat.rb#L128-134)) -1. 
`Gitlab::Llm::Chain::Agents::SingleActionExecutor#execute` calls - `execute_streamed_request`, which calls `request`, a method defined in the - `AiDependent` concern - ([code](https://gitlab.com/gitlab-org/gitlab/-/blob/7ac19f75bd0ba4db5cfe7030e56c3672e2ccdc88/ee/lib/gitlab/llm/chain/concerns/ai_dependent.rb#L14)) -1. The `SingleActionExecutor#prompt_options` method assembles all prompt parameters for the AI gateway request - ([code](https://gitlab.com/gitlab-org/gitlab/-/blob/971d07aa37d9f300b108ed66304505f2d7022841/ee/lib/gitlab/llm/chain/agents/single_action_executor.rb#L120-120)) -1. `ai_request` is defined in `Llm::Completions::Chat` and evaluates to - `AiGateway`([code](https://gitlab.com/gitlab-org/gitlab/-/blob/971d07aa37d9f300b108ed66304505f2d7022841/ee/lib/gitlab/llm/completions/chat.rb#L51-51)) -1. `ai_request.request` routes to `Llm::Chain::Requests::AiGateway#request`, - which calls `ai_client.stream` - ([code](https://gitlab.com/gitlab-org/gitlab/-/blob/e88256b1acc0d70ffc643efab99cad9190529312/ee/lib/gitlab/llm/chain/requests/ai_gateway.rb#L20-27)) -1. `ai_client.stream` routes to `Gitlab::Llm::AiGateway::Client#stream`, which - makes an API request to the AI gateway `/v2/chat/agent` endpoint - ([code](https://gitlab.com/gitlab-org/gitlab/-/blob/e88256b1acc0d70ffc643efab99cad9190529312/ee/lib/gitlab/llm/ai_gateway/client.rb#L64-82)) -1. AI gateway receives the request - ([code](https://gitlab.com/gitlab-org/modelops/applied-ml/code-suggestions/ai-assist/-/blob/e6f55d143ecb5409e8ca4fefc042e590e5a95158/ai_gateway/api/v2/chat/agent.py#L43-43)) -1. AI gateway gets the list of tools available for user - ([code](https://gitlab.com/gitlab-org/modelops/applied-ml/code-suggestions/ai-assist/-/blob/e6f55d143ecb5409e8ca4fefc042e590e5a95158/ai_gateway/chat/toolset.py#L43-43)) -1. AI GW gets definitions for each tool - ([code](https://gitlab.com/gitlab-org/modelops/applied-ml/code-suggestions/ai-assist/-/blob/e6f55d143ecb5409e8ca4fefc042e590e5a95158/ai_gateway/chat/tools/gitlab.py#L11-11)) -1. And they are inserted into prompt template alongside other prompt parameters that come from Rails - ([code](https://gitlab.com/gitlab-org/modelops/applied-ml/code-suggestions/ai-assist/-/blob/e6f55d143ecb5409e8ca4fefc042e590e5a95158/ai_gateway/agents/definitions/chat/react/base.yml#L14-14)) -1. AI gateway makes request to LLM and return response to Rails. - ([code](https://gitlab.com/gitlab-org/modelops/applied-ml/code-suggestions/ai-assist/-/blob/e6f55d143ecb5409e8ca4fefc042e590e5a95158/ai_gateway/api/v2/chat/agent.py#L103-103)) -1. We've now made our first request to the AI gateway. If the LLM says that the - answer to the first request is a final answer, we - [parse the answer](https://gitlab.com/gitlab-org/gitlab/-/blob/971d07aa37d9f300b108ed66304505f2d7022841/ee/lib/gitlab/llm/chain/parsers/single_action_parser.rb#L41-42) - and stream it ([code](https://gitlab.com/gitlab-org/gitlab/-/blob/971d07aa37d9f300b108ed66304505f2d7022841/ee/lib/gitlab/llm/chain/concerns/ai_dependent.rb#L25-25)) - and return it ([code](https://gitlab.com/gitlab-org/gitlab/-/blob/971d07aa37d9f300b108ed66304505f2d7022841/ee/lib/gitlab/llm/chain/agents/single_action_executor.rb#L46-46)) -1. If the first answer is not final, the "thoughts" and "picked tools" - from the first LLM request are parsed and then the relevant tool class is - called. - ([code](https://gitlab.com/gitlab-org/gitlab/-/blob/971d07aa37d9f300b108ed66304505f2d7022841/ee/lib/gitlab/llm/chain/agents/single_action_executor.rb#L54-54)) -1. 
The tool executor classes also include `Concerns::AiDependent` and use the - included `request` method similar to how the chat executor does - ([example](https://gitlab.com/gitlab-org/gitlab/-/blob/70fca6dbec522cb2218c5dcee66caa908c84271d/ee/lib/gitlab/llm/chain/tools/identifier.rb#L8)). - The `request` method uses the same `ai_request` instance - that was injected into the `context` in `Llm::Completions::Chat`. For Chat, - this is `Gitlab::Llm::Chain::Requests::AiGateway`. So, essentially the same - request to the AI gateway is put together but with a different - `prompt` / `PROMPT_TEMPLATE` than for the first request - ([Example tool prompt template](https://gitlab.com/gitlab-org/gitlab/-/blob/70fca6dbec522cb2218c5dcee66caa908c84271d/ee/lib/gitlab/llm/chain/tools/issue_identifier/executor.rb#L39-104)) -1. If the tool answer is not final, the response is added to `agent_scratchpad` - and the loop in `SingleActionExecutor` starts again, adding the additional - context to the request. It loops to up to 10 times until a final answer is reached. +1. `Gitlab::Llm::Completions::Chat#execute` calls `Gitlab::Duo::Chat::ReactExecutor`. + ([code](https://gitlab.com/gitlab-org/gitlab/-/blob/30817374f2feecdaedbd3a0efaad93feaed5e0a0/ee/lib/gitlab/llm/completions/chat.rb#L122-L130)) +1. `Gitlab::Duo::Chat::ReactExecutor#execute` calls `#step_forward` which calls `Gitlab::Duo::Chat::StepExecutor#step` + ([code](https://gitlab.com/gitlab-org/gitlab/-/blob/30817374f2feecdaedbd3a0efaad93feaed5e0a0/ee/lib/gitlab/duo/chat/react_executor.rb#L235)). +1. `Gitlab::Duo::Chat::StepExecutor#step` calls `Gitlab::Duo::Chat::StepExecutor#perform_agent_request`, which sends a request to the AI Gateway `/v2/chat/agent/` endpoint + ([code](https://gitlab.com/gitlab-org/gitlab/-/blob/30817374f2feecdaedbd3a0efaad93feaed5e0a0/ee/lib/gitlab/duo/chat/step_executor.rb#L69)). +1. The AI Gateway `/v2/chat/agent` endpoint receives the request on the `api.v2.agent.chat.agent.chat` function + ([code](https://gitlab.com/gitlab-org/modelops/applied-ml/code-suggestions/ai-assist/-/blob/989ead63fae493efab255180a51786b69a403b49/ai_gateway/api/v2/chat/agent.py#L133)) +1. `api.v2.agent.chat.agent.chat` creates the `GLAgentRemoteExecutor` through the `gl_agent_remote_executor_factory` ([code](https://gitlab.com/gitlab-org/modelops/applied-ml/code-suggestions/ai-assist/-/blob/989ead63fae493efab255180a51786b69a403b49/ai_gateway/api/v2/chat/agent.py#L166)). + + Upon creation of the `GLAgentRemoteExecutor`, the following parameters are passed: + - `tools_registry` - the registry of all available tools; this is passed through the factory ([code](https://gitlab.com/gitlab-org/modelops/applied-ml/code-suggestions/ai-assist/-/blob/989ead63fae493efab255180a51786b69a403b49/ai_gateway/chat/container.py#L35)) + - `agent` - `ReActAgent` object that wraps the prompt information, including the chosen LLM model, prompt template, etc + +1. `api.v2.agent.chat.agent.chat` calls the `GLAgentRemoteExecutor.on_behalf`, which gets the user tools early to raise an exception as soon as possible if an error occurs ([code](https://gitlab.com/gitlab-org/modelops/applied-ml/code-suggestions/ai-assist/-/blob/989ead63fae493efab255180a51786b69a403b49/ai_gateway/chat/executor.py#L56)). +1. `api.v2.agent.chat.agent.chat` calls the `GLAgentRemoteExecutor.stream` ([code](https://gitlab.com/gitlab-org/modelops/applied-ml/code-suggestions/ai-assist/-/blob/989ead63fae493efab255180a51786b69a403b49/ai_gateway/chat/executor.py#L81)). +1. 
`GLAgentRemoteExecutor.stream` calls `astream` on `agent` (an instance of `ReActAgent`) with inputs such as the messages and the list of available tools ([code](https://gitlab.com/gitlab-org/modelops/applied-ml/code-suggestions/ai-assist/-/blob/989ead63fae493efab255180a51786b69a403b49/ai_gateway/chat/executor.py#L92)). +1. The `ReActAgent` builds the prompts, with the available tools inserted into the system prompt template + ([code](https://gitlab.com/gitlab-org/modelops/applied-ml/code-suggestions/ai-assist/-/blob/989ead63fae493efab255180a51786b69a403b49/ai_gateway/prompts/definitions/chat/react/system/1.0.0.jinja)). +1. `ReActAgent.astream` sends a call to the LLM model ([code](https://gitlab.com/gitlab-org/modelops/applied-ml/code-suggestions/ai-assist/-/blob/989ead63fae493efab255180a51786b69a403b49/ai_gateway/chat/agents/react.py#L216)) +1. The LLM response is returned to Rails + (code path: [`ReActAgent.astream`](https://gitlab.com/gitlab-org/modelops/applied-ml/code-suggestions/ai-assist/-/blob/989ead63fae493efab255180a51786b69a403b49/ai_gateway/chat/agents/react.py#L209) + -> [`GLAgentRemoteExecutor.stream`](https://gitlab.com/gitlab-org/modelops/applied-ml/code-suggestions/ai-assist/-/blob/989ead63fae493efab255180a51786b69a403b49/ai_gateway/chat/executor.py#L81) + -> [`api.v2.agent.chat.agent.chat`](https://gitlab.com/gitlab-org/modelops/applied-ml/code-suggestions/ai-assist/-/blob/989ead63fae493efab255180a51786b69a403b49/ai_gateway/api/v2/chat/agent.py#L133) + -> Rails) +1. We've now made our first request to the AI gateway. If the LLM says that the answer to the first request is final, + Rails [parses the answer](https://gitlab.com/gitlab-org/gitlab/-/blob/30817374f2feecdaedbd3a0efaad93feaed5e0a0/ee/lib/gitlab/duo/chat/react_executor.rb#L56) and [returns it](https://gitlab.com/gitlab-org/gitlab/-/blob/30817374f2feecdaedbd3a0efaad93feaed5e0a0/ee/lib/gitlab/duo/chat/react_executor.rb#L63) for further response handling by [`Gitlab::Llm::Completions::Chat`](https://gitlab.com/gitlab-org/gitlab/-/blob/30817374f2feecdaedbd3a0efaad93feaed5e0a0/ee/lib/gitlab/llm/completions/chat.rb#L66). +1. If the answer is not final, the "thoughts" and "picked tools" from the first LLM request are parsed and then the relevant tool class is called. + ([code](https://gitlab.com/gitlab-org/gitlab/-/blob/30817374f2feecdaedbd3a0efaad93feaed5e0a0/ee/lib/gitlab/duo/chat/react_executor.rb#L207) + | [example tool class](https://gitlab.com/gitlab-org/gitlab/-/blob/971d07aa37d9f300b108ed66304505f2d7022841/ee/lib/gitlab/llm/chain/tools/identifier.rb)) + 1. The tool executor classes include `Concerns::AiDependent` and use its `request` method. + ([code](https://gitlab.com/gitlab-org/gitlab/-/blob/30817374f2feecdaedbd3a0efaad93feaed5e0a0/ee/lib/gitlab/llm/chain/concerns/ai_dependent.rb#L14)) + 1. The `request` method uses the `ai_request` instance + that was injected into the `context` in `Llm::Completions::Chat`. For Chat, + this is `Gitlab::Llm::Chain::Requests::AiGateway`. ([code](https://gitlab.com/gitlab-org/gitlab/-/blob/971d07aa37d9f300b108ed66304505f2d7022841/ee/lib/gitlab/llm/completions/chat.rb#L42)). + 1. The tool indicates that `use_ai_gateway_agent_prompt=true` ([code](https://gitlab.com/gitlab-org/gitlab/-/blob/30817374f2feecdaedbd3a0efaad93feaed5e0a0/ee/lib/gitlab/llm/chain/tools/issue_reader/executor.rb#L121)). 
+ + This tells the `ai_request` to send the prompt to the `/v1/prompts/chat` endpoint ([code](https://gitlab.com/gitlab-org/gitlab/-/blob/30817374f2feecdaedbd3a0efaad93feaed5e0a0/ee/lib/gitlab/llm/chain/requests/ai_gateway.rb#L87)). + + 1. AI Gateway `/v1/prompts/chat` endpoint receives the request on `api.v1.prompts.invoke` + ([code](https://gitlab.com/gitlab-org/modelops/applied-ml/code-suggestions/ai-assist/-/blob/989ead63fae493efab255180a51786b69a403b49/ai_gateway/api/v1/prompts/invoke.py#L41)). 1. `api.v1.prompts.invoke` gets the correct tool prompt from the tool prompt registry ([code](https://gitlab.com/gitlab-org/modelops/applied-ml/code-suggestions/ai-assist/-/blob/989ead63fae493efab255180a51786b69a403b49/ai_gateway/api/v1/prompts/invoke.py#L49)). 1. The prompt is called either as a [stream](https://gitlab.com/gitlab-org/modelops/applied-ml/code-suggestions/ai-assist/-/blob/989ead63fae493efab255180a51786b69a403b49/ai_gateway/api/v1/prompts/invoke.py#L86) or as a [non-streamed invocation](https://gitlab.com/gitlab-org/modelops/applied-ml/code-suggestions/ai-assist/-/blob/989ead63fae493efab255180a51786b69a403b49/ai_gateway/api/v1/prompts/invoke.py#L96). 1. If the tool answer is not final, the response is added to `agent_scratchpad` and the loop in `Gitlab::Duo::Chat::ReactExecutor` starts again, adding the additional context to the request. It loops up to 10 times until a final answer is reached. ([code](https://gitlab.com/gitlab-org/gitlab/-/blob/30817374f2feecdaedbd3a0efaad93feaed5e0a0/ee/lib/gitlab/duo/chat/react_executor.rb#L44)) ## Interpreting GitLab Duo Chat error codes diff --git a/doc/development/documentation/restful_api_styleguide.md b/doc/development/documentation/restful_api_styleguide.md index 5a3c9481862..51180191a92 100644 --- a/doc/development/documentation/restful_api_styleguide.md +++ b/doc/development/documentation/restful_api_styleguide.md @@ -241,7 +241,7 @@ For information about writing attribute descriptions, see the [GraphQL API descr - Declare URLs with the `--url` parameter, and wrap the URL in double quotes (`"`). - Prefer to use examples using the personal access token and don't pass data of username and password. -- For legibility, use the \ character and indentation to break long single-line +- For legibility, use the ` \ ` character and indentation to break long single-line commands apart into multiple lines. | Methods | Description | diff --git a/doc/development/secure_coding_guidelines.md b/doc/development/secure_coding_guidelines.md index b9f4b915201..7b09e55529d 100644 --- a/doc/development/secure_coding_guidelines.md +++ b/doc/development/secure_coding_guidelines.md @@ -161,7 +161,7 @@ In most cases the anchors `\A` for beginning of text and `\z` for end of text sh ### Escape sequences in Go -When a character in a string literal or regular expression literal is preceded by a backslash, it is interpreted as part of an escape sequence. For example, the escape sequence `\n` in a string literal corresponds to a single `newline` character, and not the \ and `n` characters. +When a character in a string literal or regular expression literal is preceded by a backslash, it is interpreted as part of an escape sequence. For example, the escape sequence `\n` in a string literal corresponds to a single `newline` character, and not the ` \ ` and `n` characters. There are two Go escape sequences that could produce surprising results.
First, `regexp.Compile("\a")` matches the bell character, whereas `regexp.Compile("\\A")` matches the start of text and `regexp.Compile("\\a")` is a Vim (but not Go) regular expression matching any alphabetic character. Second, `regexp.Compile("\b")` matches a backspace, whereas `regexp.Compile("\\b")` matches the start of a word. Confusing one for the other could lead to a regular expression passing or failing much more often than expected, with potential security consequences. diff --git a/doc/drawers/advanced_search_syntax.md b/doc/drawers/advanced_search_syntax.md index a95b1574ad8..0e8a185b9c6 100644 --- a/doc/drawers/advanced_search_syntax.md +++ b/doc/drawers/advanced_search_syntax.md @@ -15,7 +15,7 @@ title: Syntax options | `+` | And | [`display +banner`](https://gitlab.com/search?group_id=9970&project_id=278964&repository_ref=&scope=blobs&search=display+%2Bbanner&snippets=) | | `-` | Exclude | [`display -banner`](https://gitlab.com/search?group_id=9970&project_id=278964&scope=blobs&search=display+-banner) | | `*` | Partial | [`bug error 50*`](https://gitlab.com/search?group_id=9970&project_id=278964&repository_ref=&scope=blobs&search=bug+error+50%2A&snippets=) | -| \ | Escape | [`\*md`](https://gitlab.com/search?snippets=&scope=blobs&repository_ref=&search=%5C*md&group_id=9970&project_id=278964) | +| ` \ ` | Escape | [`\*md`](https://gitlab.com/search?snippets=&scope=blobs&repository_ref=&search=%5C*md&group_id=9970&project_id=278964) | | `#` | Issue ID | [`#23456`](https://gitlab.com/search?snippets=&scope=issues&repository_ref=&search=%2323456&group_id=9970&project_id=278964) | | `!` | Merge request ID | [`!23456`](https://gitlab.com/search?snippets=&scope=merge_requests&repository_ref=&search=%2123456&group_id=9970&project_id=278964) | diff --git a/doc/install/install_ai_gateway.md b/doc/install/install_ai_gateway.md index bba7603d9ee..5d7912c7153 100644 --- a/doc/install/install_ai_gateway.md +++ b/doc/install/install_ai_gateway.md @@ -6,41 +6,51 @@ description: Set up your self-hosted model GitLab AI gateway title: Install the GitLab AI gateway --- -The [AI gateway](https://handbook.gitlab.com/handbook/engineering/architecture/design-documents/ai_gateway/) is a standalone service that gives access to AI-native GitLab Duo features. +The [AI gateway](https://handbook.gitlab.com/handbook/engineering/architecture/design-documents/ai_gateway/) +is a standalone service that gives access to AI-native GitLab Duo features. -## Install using Docker - -Prerequisites: - -- Install a Docker container engine, such as [Docker](https://docs.docker.com/engine/install/#server). -- Use a valid hostname accessible within your network. Do not use `localhost`. +## Install by using Docker The GitLab AI gateway Docker image contains all necessary code and dependencies in a single container. -The Docker image for the AI gateway is around 340 MB (compressed) for the `linux/amd64` architecture and requires a minimum of 512 MB of RAM to operate. A GPU is not needed for the GitLab AI gateway. To ensure better performance, especially under heavy usage, consider allocating more disk space, memory, and resources than the minimum requirements. Higher RAM and disk capacity can enhance the AI gateway's efficiency during peak loads. +Prerequisites: -### Find the AI Gateway Release +- Install a Docker container engine, like [Docker](https://docs.docker.com/engine/install/#server). +- Use a valid hostname that is accessible in your network. Do not use `localhost`. 
+- Ensure you have approximately 340 MB of disk space for the image (compressed, + `linux/amd64` architecture) and a minimum of 512 MB of RAM. -Find the GitLab official Docker image at: +To ensure better performance, especially under heavy usage, consider allocating +more disk space, memory, and resources than the minimum requirements. +Higher RAM and disk capacity can enhance the AI gateway's efficiency during peak loads. -- AI Gateway Docker image on Container Registry: +A GPU is not needed for the GitLab AI gateway. + +### Find the AI gateway image + +The GitLab official Docker image is available: + +- In the container registry: - [Stable](https://gitlab.com/gitlab-org/modelops/applied-ml/code-suggestions/ai-assist/container_registry/3809284) - [Nightly](https://gitlab.com/gitlab-org/modelops/applied-ml/code-suggestions/ai-assist/container_registry/8086262) -- AI Gateway Docker image on DockerHub: +- On DockerHub: - [Stable](https://hub.docker.com/r/gitlab/model-gateway/tags) - [Nightly](https://hub.docker.com/r/gitlab/model-gateway-self-hosted/tags) -- [Release process for self-hosted AI Gateway](https://gitlab.com/gitlab-org/modelops/applied-ml/code-suggestions/ai-assist/-/blob/main/docs/release.md). + [View the release process for the self-hosted AI gateway](https://gitlab.com/gitlab-org/modelops/applied-ml/code-suggestions/ai-assist/-/blob/main/docs/release.md). -Use the image tag that corresponds to your GitLab version. For example, if your GitLab version is `v17.9.0`, use the `self-hosted-17.9.0-ee` tag. It is critical to ensure that the image version matches your GitLab version to avoid compatibility issues. Nightly builds are available to have access to newer features, but backwards compatibility is not guaranteed. +Use the image tag that corresponds to your GitLab version. +For example, if your GitLab version is `v17.9.0`, use the `self-hosted-17.9.0-ee` tag. +Ensure that the image version matches your GitLab version to avoid compatibility issues. +Newer features are available from nightly builds, but backwards compatibility is not guaranteed. {{< alert type="note" >}} -Using the `:latest` tag is **not recommended** as it can cause incompatibility if your GitLab version lags behind or jumps ahead of the AI Gateway release. Always use an explicit version tag. +Using the `:latest` tag is **not recommended** because it can cause incompatibility if your GitLab version is behind or ahead of the AI gateway release. Always use an explicit version tag. {{< /alert >}} -### Start a Container from the Image +### Start a container from the image 1. Run the following command, replacing `<your_gitlab_instance>` and `<your_gitlab_domain>` with your GitLab instance's URL and domain: @@ -64,14 +74,14 @@ Using the `:latest` tag is **not recommended** as it can cause incompatibility i If you encounter issues loading the PEM file, resulting in errors like `JWKError`, you may need to resolve an SSL certificate error. -To fix this, set the appropriate certificate bundle path in the Docker container by using the following environment variables: +To fix this issue, set the appropriate certificate bundle path in the Docker container by using the following environment variables: - `SSL_CERT_FILE=/path/to/ca-bundle.pem` - `REQUESTS_CA_BUNDLE=/path/to/ca-bundle.pem` Replace `/path/to/ca-bundle.pem` with the actual path to your certificate bundle.
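+For example, a minimal sketch of a `docker run` invocation that mounts a bundle
+into the container and points both variables at it (the host path, mount path,
+and image tag are illustrative):
+
+```shell
+docker run -p 5052:5052 \
+  -v /srv/certs/ca-bundle.pem:/etc/ssl/certs/ca-bundle.pem:ro \
+  -e SSL_CERT_FILE=/etc/ssl/certs/ca-bundle.pem \
+  -e REQUESTS_CA_BUNDLE=/etc/ssl/certs/ca-bundle.pem \
+  -e AIGW_GITLAB_URL=<your_gitlab_instance> \
+  registry.gitlab.com/gitlab-org/modelops/applied-ml/code-suggestions/ai-assist/model-gateway:<image_tag>
+```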
-## Docker-NGINX-SSL Setup +## Set up Docker with NGINX and SSL {{< alert type="note" >}} @@ -85,16 +95,16 @@ You can set up SSL for an AI gateway instance by using Docker, NGINX as a reverse proxy, and Let's Encrypt for SSL certificates. NGINX manages the secure connection with external clients, decrypting incoming HTTPS requests before -passing them to the AI Gateway. +passing them to the AI gateway. Prerequisites: - Docker and Docker Compose installed - Registered and configured domain name -### Step 1: Create Configuration Files +### Create configuration files -Create the following files in your working directory: +Start by creating the following files in your working directory. 1. `nginx.conf`: @@ -127,7 +137,7 @@ Create the following files in your working directory: listen 80; server_name _; - # Forward all requests to the AI Gateway + # Forward all requests to the AI gateway location / { proxy_pass http://gitlab-ai-gateway:5052; proxy_read_timeout 300s; @@ -163,7 +173,7 @@ Create the following files in your working directory: proxy_set_header Upgrade $http_upgrade; proxy_set_header Connection "upgrade"; - # Forward all requests to the AI Gateway + # Forward all requests to the AI gateway location / { proxy_pass http://gitlab-ai-gateway:5052; proxy_read_timeout 300s; @@ -173,55 +183,58 @@ Create the following files in your working directory: } ``` -### Step 2: SSL Certificate setup using Let's Encrypt +### Set up SSL certificate by using Let's Encrypt -- For Docker-based NGINX servers, Certbot provides an automated way to implement Let's Encrypt certificates - - see the [guide here](https://phoenixnap.com/kb/letsencrypt-docker). -- Alternatively, you can use [Certbot's manual installation](https://eff-certbot.readthedocs.io/en/stable/using.html#manual) - process if you prefer that approach. +Now set up an SSL certificate: -### Step 3: Create Docker-compose file +- For Docker-based NGINX servers, Certbot + [provides an automated way to implement Let's Encrypt certificates](https://phoenixnap.com/kb/letsencrypt-docker). +- Alternatively, you can use the [Certbot manual installation](https://eff-certbot.readthedocs.io/en/stable/using.html#manual). -1. `docker-compose.yaml`: +### Create Docker-compose file - ```yaml - version: '3.8' +Now create a `docker-compose.yaml` file. 
- services: - nginx-proxy: - image: nginx:alpine - ports: - - "80:80" - - "443:443" - volumes: - - /path/to/nginx.conf:/etc/nginx/nginx.conf:ro - - /path/to/default.conf:/etc/nginx/conf.d/default.conf:ro - - /path/to/fullchain.pem:/etc/nginx/ssl/server.crt:ro - - /path/to/privkey.pem:/etc/nginx/ssl/server.key:ro - networks: - - proxy-network - depends_on: - - gitlab-ai-gateway +```yaml +version: '3.8' - gitlab-ai-gateway: - image: registry.gitlab.com/gitlab-org/modelops/applied-ml/code-suggestions/ai-assist/model-gateway:<image_tag> - expose: - - "5052" - environment: - - AIGW_GITLAB_URL=<your_gitlab_instance> - - AIGW_GITLAB_API_URL=https://<your_gitlab_domain>/api/v4/ - networks: - - proxy-network - restart: always +services: + nginx-proxy: + image: nginx:alpine + ports: + - "80:80" + - "443:443" + volumes: + - /path/to/nginx.conf:/etc/nginx/nginx.conf:ro + - /path/to/default.conf:/etc/nginx/conf.d/default.conf:ro + - /path/to/fullchain.pem:/etc/nginx/ssl/server.crt:ro + - /path/to/privkey.pem:/etc/nginx/ssl/server.key:ro + networks: + - proxy-network + depends_on: + - gitlab-ai-gateway - networks: - proxy-network: - driver: bridge - ``` + gitlab-ai-gateway: + image: registry.gitlab.com/gitlab-org/modelops/applied-ml/code-suggestions/ai-assist/model-gateway:<image_tag> + expose: + - "5052" + environment: + - AIGW_GITLAB_URL=<your_gitlab_instance> + - AIGW_GITLAB_API_URL=https://<your_gitlab_domain>/api/v4/ + networks: + - proxy-network + restart: always -### Step 4: Deployment and validation +networks: + proxy-network: + driver: bridge +``` -1. Start the `nginx` and `AIGW` containers and verify if they're running: +### Deploy and validate + +Now deploy and validate the solution. + +1. Start the `nginx` and `AIGW` containers and verify that they're running: ```shell docker-compose up @@ -232,7 +245,7 @@ Create the following files in your working directory: 1. Perform the health check and confirm that the AI gateway is accessible. -## Install using the AI gateway Helm chart +## Install by using the Helm chart Prerequisites: @@ -338,7 +351,7 @@ To upgrade the AI gateway, download the newest Docker image tag. For information on alternative ways to install the AI gateway, see [issue 463773](https://gitlab.com/gitlab-org/gitlab/-/issues/463773). -## Health Check and Debugging +## Health check and debugging To debug issues with your self-hosted Duo installation, run the following command: @@ -365,67 +378,67 @@ These tests are performed for offline environments: | License | Tests whether your license has the ability to access Code Suggestions feature. | | System exchange | Tests whether Code Suggestions can be used in your instance. If the system exchange assessment fails, users might not be able to use GitLab Duo features. | -## Does the AIGW need to autoscale? +## Does the AI gateway need to autoscale? Autoscaling is not mandatory but is recommended for environments with variable workloads, high concurrency requirements, or unpredictable usage patterns. In GitLab’s production environment: -- Baseline Setup: A single AI Gateway instance with 2 CPU cores and 8 GB RAM can handle approximately 40 concurrent requests. -- Scaling Guidelines: For larger setups, such as an AWS t3.2xlarge instance (8 vCPUs, 32 GB RAM), the gateway can handle up to 160 concurrent requests, equivalent to 4x the baseline setup. -- Request Throughput: GitLab.com’s observed usage suggests that 7 RPS (requests per second) per 1000 active users is a reasonable metric for planning.
-- Autoscaling Options: Use Kubernetes Horizontal Pod Autoscalers (HPA) or similar mechanisms to dynamically adjust the number of instances based on metrics like CPU, memory utilization, or request latency thresholds. +- Baseline setup: A single AI gateway instance with 2 CPU cores and 8 GB RAM can handle approximately 40 concurrent requests. +- Scaling guidelines: For larger setups, such as an AWS t3.2xlarge instance (8 vCPUs, 32 GB RAM), the gateway can handle up to 160 concurrent requests, equivalent to 4x the baseline setup. +- Request throughput: GitLab.com’s observed usage suggests that 7 RPS (requests per second) per 1000 active users is a reasonable metric for planning. +- Autoscaling options: Use Kubernetes Horizontal Pod Autoscalers (HPA) or similar mechanisms to dynamically adjust the number of instances based on metrics like CPU, memory utilization, or request latency thresholds. -## Configuration Examples by Deployment Size +## Configuration examples by deployment size -- Small Deployment: +- Small deployment: - Single instance with 2 vCPUs and 8 GB RAM. - Handles up to 40 concurrent requests. - Teams or organizations with up to 50 users and predictable workloads. - Fixed instances may suffice; autoscaling can be disabled for cost efficiency. -- Medium Deployment: +- Medium deployment: - Single AWS t3.2xlarge instance with 8 vCPUs and 32 GB RAM. - Handles up to 160 concurrent requests. - Organizations with 50-200 users and moderate concurrency requirements. - Implement Kubernetes HPA with thresholds for 50% CPU utilization or request latency above 500ms. -- Large Deployment: +- Large deployment: - Cluster of multiple AWS t3.2xlarge instances or equivalent. - Each instance handles 160 concurrent requests, scaling to thousands of users with multiple instances. - Enterprises with over 200 users and variable, high-concurrency workloads. - Use HPA to scale pods based on real-time demand, combined with node autoscaling for cluster-wide resource adjustments. -## What specs does the AIGW container have access to, and how does resource allocation affect performance? +## What specs does the AI gateway container have access to, and how does resource allocation affect performance? -The AI Gateway operates effectively under the following resource allocations: +The AI gateway operates effectively under the following resource allocations: - 2 CPU cores and 8 GB of RAM per container. -- Containers typically utilize about 7.39% CPU and proportionate memory in GitLab’s production environment, leaving room for growth or handling burst activity. +- Containers typically utilize about 7.39% CPU and proportionate memory in the GitLab production environment, leaving room for growth or handling burst activity. -## Mitigation Strategies for Resource Contention +## Mitigation strategies for resource contention -- Use Kubernetes resource requests and limits to ensure AIGW containers receive guaranteed CPU and memory allocations. For example: +- Use Kubernetes resource requests and limits to ensure AI gateway containers receive guaranteed CPU and memory allocations. For example: -```yaml -resources: - requests: - memory: "16Gi" - cpu: "4" - limits: - memory: "32Gi" - cpu: "8" -``` + ```yaml + resources: + requests: + memory: "16Gi" + cpu: "4" + limits: + memory: "32Gi" + cpu: "8" + ``` - Implement tools like Prometheus and Grafana to track resource utilization (CPU, memory, latency) and detect bottlenecks early. 
-- Dedicate nodes or instances exclusively to the AI Gateway to prevent resource competition with other services. +- Dedicate nodes or instances exclusively to the AI gateway to prevent resource competition with other services. -## Scaling Strategies +## Scaling strategies - Use Kubernetes HPA to scale pods based on real-time metrics like: - Average CPU utilization exceeding 50%. - Request latency consistently above 500ms. - Enable node autoscaling to scale infrastructure resources dynamically as pods increase. -## Scaling Recommendations +## Scaling recommendations -| Deployment Size | Instance Type | Resources | Capacity (Concurrent Requests) | Scaling Recommendations | +| Deployment size | Instance type | Resources | Capacity (concurrent requests) | Scaling recommendations | |------------------|--------------------|------------------------|---------------------------------|---------------------------------------------| | Small | 2 vCPUs, 8 GB RAM | Single instance | 40 | Fixed deployment; no autoscaling. | | Medium | AWS t3.2xlarge | Single instance | 160 | HPA based on CPU or latency thresholds. | @@ -455,7 +468,7 @@ You should locate your AI gateway in the same geographic region as your GitLab i When deploying the AI gateway on OpenShift, you might encounter permission errors due to OpenShift's security model. -By default, the AI Gateway uses `/home/aigateway/.hf` for caching HuggingFace models, which may not be writable in OpenShift's +By default, the AI gateway uses `/home/aigateway/.hf` for caching HuggingFace models, which may not be writable in OpenShift's security-restricted environment. This can result in permission errors like: ```shell @@ -481,7 +494,7 @@ You can configure this in either of the following ways: --set "extraEnvironmentVariables[0].value=/var/tmp/huggingface" # Use any writable directory ``` -This configuration ensures the AI Gateway can properly cache HuggingFace models while respecting OpenShift's security constraints. The exact directory you choose may depend on your specific OpenShift configuration and security policies. +This configuration ensures the AI gateway can properly cache HuggingFace models while respecting the OpenShift security constraints. The exact directory you choose may depend on your specific OpenShift configuration and security policies. ### Self-signed certificate error diff --git a/doc/update/background_migrations.md b/doc/update/background_migrations.md index a56575991ca..a560bae5be2 100644 --- a/doc/update/background_migrations.md +++ b/doc/update/background_migrations.md @@ -314,7 +314,7 @@ use the information in the failure error logs or the database: ``` When dealing with multiple arguments, such as `[["id"],["id_convert_to_bigint"]]`, escape the - comma between each argument with a backslash \ to prevent an invalid character error. + comma between each argument with a backslash ` \ ` to prevent an invalid character error. For example, to finish the migration from the previous step: ```shell @@ -342,7 +342,7 @@ use the information in the failure error logs or the database: - `job_arguments`: `[["id"], ["id_convert_to_bigint"]]` When dealing with multiple arguments, such as `[["id"],["id_convert_to_bigint"]]`, escape the - comma between each argument with a backslash \ to prevent an invalid character error. + comma between each argument with a backslash ` \ ` to prevent an invalid character error. Every comma in the `job_arguments` parameter value must be escaped with a backslash. 
For example: diff --git a/doc/user/gitlab_duo/_index.md b/doc/user/gitlab_duo/_index.md index 268935747e0..89ca5dc7768 100644 --- a/doc/user/gitlab_duo/_index.md +++ b/doc/user/gitlab_duo/_index.md @@ -118,18 +118,18 @@ To improve your security, try these features: | [GitLab Duo Chat](../gitlab_duo_chat/_index.md) | Premium, Ultimate | GitLab Duo Pro or Enterprise, GitLab Duo with Amazon Q | Generally available | Generally available | Generally available | Generally available | | [GitLab Duo Workflow](../duo_workflow/_index.md) | Ultimate | None | Private beta | N/A | N/A | N/A | | [Issue Description Generation](../project/issues/managing_issues.md#populate-an-issue-with-issue-description-generation) | Ultimate | GitLab Duo Enterprise | Experiment | N/A | N/A | N/A | -| [Discussion Summary](../discussions/_index.md#summarize-issue-discussions-with-duo-chat) | Ultimate | GitLab Duo Enterprise, GitLab Duo with Amazon Q | Generally available | Generally available | Generally available | N/A | +| [Discussion Summary](../discussions/_index.md#summarize-issue-discussions-with-duo-chat) | Ultimate | GitLab Duo Enterprise, GitLab Duo with Amazon Q | Generally available | Generally available | Generally available | Beta| | [Code Suggestions](../project/repository/code_suggestions/_index.md) | Premium, Ultimate | GitLab Duo Pro or Enterprise, GitLab Duo with Amazon Q | Generally available | Generally available | Generally available | Generally available | | [Code Explanation](../project/repository/code_explain.md) | Premium, Ultimate | GitLab Duo Pro or Enterprise, GitLab Duo with Amazon Q | Generally available | Generally available | Generally available | Generally available | | [Test Generation](../gitlab_duo_chat/examples.md#write-tests-in-the-ide) | Premium, Ultimate | GitLab Duo Pro or Enterprise, GitLab Duo with Amazon Q | Generally available | Generally available | Generally available | Generally available | | [Refactor Code](../gitlab_duo_chat/examples.md#refactor-code-in-the-ide) | Premium, Ultimate | GitLab Duo Pro or Enterprise, GitLab Duo with Amazon Q | Generally available | Generally available | Generally available | Generally available | | [Fix Code](../gitlab_duo_chat/examples.md#fix-code-in-the-ide) | Premium, Ultimate | GitLab Duo Pro or Enterprise, GitLab Duo with Amazon Q | Generally available | Generally available | Generally available | N/A | | [GitLab Duo for the CLI](../../editor_extensions/gitlab_cli/_index.md#gitlab-duo-for-the-cli) | Ultimate | GitLab Duo Enterprise | Generally available | Generally available | Generally available | N/A | -| [Merge Request Summary](../project/merge_requests/duo_in_merge_requests.md#generate-a-description-by-summarizing-code-changes) | Ultimate | GitLab Duo Enterprise | Beta | Beta | N/A | N/A | +| [Merge Request Summary](../project/merge_requests/duo_in_merge_requests.md#generate-a-description-by-summarizing-code-changes) | Ultimate | GitLab Duo Enterprise | Beta | Beta | N/A | Beta| | [Code Review](../project/merge_requests/duo_in_merge_requests.md#have-gitlab-duo-review-your-code) | Ultimate | GitLab Duo Enterprise | Beta | Beta | Beta | N/A | -| [Code Review Summary](../project/merge_requests/duo_in_merge_requests.md#summarize-a-code-review) | Ultimate | GitLab Duo Enterprise | Experiment | Experiment | N/A | N/A | -| [Merge Commit Message Generation](../project/merge_requests/duo_in_merge_requests.md#generate-a-merge-commit-message) | Ultimate | GitLab Duo Enterprise | Generally available | Generally available | Generally available | 
N/A | +| [Code Review Summary](../project/merge_requests/duo_in_merge_requests.md#summarize-a-code-review) | Ultimate | GitLab Duo Enterprise | Experiment | Experiment | N/A | Experiment | +| [Merge Commit Message Generation](../project/merge_requests/duo_in_merge_requests.md#generate-a-merge-commit-message) | Ultimate | GitLab Duo Enterprise | Generally available | Generally available | Generally available | Beta | | [Root Cause Analysis](../gitlab_duo_chat/examples.md#troubleshoot-failed-cicd-jobs-with-root-cause-analysis) | Ultimate | GitLab Duo Enterprise, GitLab Duo with Amazon Q | Generally available | Generally available | Generally available | Beta | | [Vulnerability Explanation](../application_security/vulnerabilities/_index.md#explaining-a-vulnerability) | Ultimate | GitLab Duo Enterprise, GitLab Duo with Amazon Q | Generally available | Generally available | Generally available | N/A | -| [Vulnerability Resolution](../application_security/vulnerabilities/_index.md#vulnerability-resolution) | Ultimate | GitLab Duo Enterprise, GitLab Duo with Amazon Q | Generally available | Generally available | Generally available | N/A | +| [Vulnerability Resolution](../application_security/vulnerabilities/_index.md#vulnerability-resolution) | Ultimate | GitLab Duo Enterprise, GitLab Duo with Amazon Q | Generally available | Generally available | Generally available | Beta | | [AI Impact Dashboard](../analytics/ai_impact_analytics.md) | Ultimate | GitLab Duo Enterprise | Generally available | Generally available | N/A | Beta | diff --git a/doc/user/markdown.md b/doc/user/markdown.md index 7eba4150102..d1d0cd3c847 100644 --- a/doc/user/markdown.md +++ b/doc/user/markdown.md @@ -343,7 +343,7 @@ However, you cannot mix the wrapping tags: ``` Diff highlighting doesn't work with `` `inline code` ``. If your text includes backticks (`` ` ``), [escape](#escape-characters) -each backtick with a backslash \: +each backtick with a backslash ` \ `: ```markdown - {+ Just regular text +} @@ -2040,6 +2040,22 @@ A backslash doesn't always escape the character that follows it. The backslash a In these instances you might need to use the equivalent HTML entity, such as `]` for `]`. +### Use backslash with backticks + +When the backslash (` \ `) character is at the end of an inline code sample, the backslash +can escape the last backtick. In this case, add extra spaces around the inline code, +for example: + +```markdown +Use the backslash ` \ ` character to escape inline code that ends in a ` backslash\ `. +``` + +When rendered, the inline code looks like this: + +--- + +Use the backslash ` \ ` character to escape inline code that ends in a ` backslash\ `. + ## Footnotes [View this topic rendered in GitLab](https://gitlab.com/gitlab-org/gitlab/-/blob/master/doc/user/markdown.md#footnotes). diff --git a/doc/user/project/import/_index.md b/doc/user/project/import/_index.md index 15b0615befc..dd279221238 100644 --- a/doc/user/project/import/_index.md +++ b/doc/user/project/import/_index.md @@ -334,9 +334,18 @@ On the destination instance, users with the Owner role for a top-level group can [accepts the reassignment request](#accept-contribution-reassignment). - Choose not to reassign contributions and memberships and [keep them assigned to placeholder users](#keep-as-placeholder). +#### Reassigning contributions from multiple placeholder users + All the contributions initially assigned to a single placeholder user can only be reassigned to a single active regular user on the destination instance. 
The contributions assigned to a single placeholder user cannot be split among multiple active regular users. + +You can reassign contributions from multiple placeholder users to the same user +on the destination instance if the placeholder users are from: + +- Different source instances +- The same source instance and are imported to different top-level groups on the destination instance + If an assigned user becomes inactive before accepting the reassignment request, the pending reassignment remains linked to the user until they accept it. diff --git a/doc/user/search/advanced_search.md b/doc/user/search/advanced_search.md index e9defe99c6c..20810f28e9e 100644 --- a/doc/user/search/advanced_search.md +++ b/doc/user/search/advanced_search.md @@ -62,7 +62,7 @@ You can refine user search with `simple_query_string`. | `+` | And | [`display +banner`](https://gitlab.com/search?group_id=9970&project_id=278964&repository_ref=&scope=blobs&search=display+%2Bbanner&snippets=) | | `-` | Exclude | [`display -banner`](https://gitlab.com/search?group_id=9970&project_id=278964&scope=blobs&search=display+-banner) | | `*` | Partial | [`bug error 50*`](https://gitlab.com/search?group_id=9970&project_id=278964&repository_ref=&scope=blobs&search=bug+error+50%2A&snippets=) | -| \ | Escape | [`\*md`](https://gitlab.com/search?snippets=&scope=blobs&repository_ref=&search=%5C*md&group_id=9970&project_id=278964) | +| ` \ ` | Escape | [`\*md`](https://gitlab.com/search?snippets=&scope=blobs&repository_ref=&search=%5C*md&group_id=9970&project_id=278964) | | `#` | Issue ID | [`#23456`](https://gitlab.com/search?snippets=&scope=issues&repository_ref=&search=%2323456&group_id=9970&project_id=278964) | | `!` | Merge request ID | [`!23456`](https://gitlab.com/search?snippets=&scope=merge_requests&repository_ref=&search=%2123456&group_id=9970&project_id=278964) | diff --git a/gems/gitlab-active-context/doc/usage.md b/gems/gitlab-active-context/doc/usage.md index 0464ba23fc5..76c8c8da554 100644 --- a/gems/gitlab-active-context/doc/usage.md +++ b/gems/gitlab-active-context/doc/usage.md @@ -18,7 +18,7 @@ class CreateMergeRequests < ActiveContext::Migration[1.0] create_collection :merge_requests, number_of_partitions: 3 do |c| c.bigint :issue_id, index: true c.bigint :namespace_id, index: true - c.prefix :traversal_ids + c.keyword :traversal_ids c.vector :embeddings, dimensions: 768 end end diff --git a/gems/gitlab-active-context/lib/active_context/databases/collection_builder.rb b/gems/gitlab-active-context/lib/active_context/databases/collection_builder.rb index 00949de9124..2887b82e576 100644 --- a/gems/gitlab-active-context/lib/active_context/databases/collection_builder.rb +++ b/gems/gitlab-active-context/lib/active_context/databases/collection_builder.rb @@ -13,8 +13,8 @@ module ActiveContext fields << Field::Bigint.new(name, index: index) end - def prefix(name) - fields << Field::Prefix.new(name, index: true) + def keyword(name) + fields << Field::Keyword.new(name, index: true) end def vector(name, dimensions:, index: true) @@ -31,7 +31,7 @@ module ActiveContext end class Bigint < Field; end - class Prefix < Field; end + class Keyword < Field; end class Vector < Field; end end end diff --git a/gems/gitlab-active-context/lib/active_context/databases/concerns/elastic_executor.rb b/gems/gitlab-active-context/lib/active_context/databases/concerns/elastic_executor.rb index 9b1c84ae7e9..96bc50c7d22 100644 --- a/gems/gitlab-active-context/lib/active_context/databases/concerns/elastic_executor.rb +++ 
b/gems/gitlab-active-context/lib/active_context/databases/concerns/elastic_executor.rb @@ -64,7 +64,7 @@ module ActiveContext mappings[field.name] = case field when Field::Bigint { type: 'long' } - when Field::Prefix + when Field::Keyword { type: 'keyword' } when Field::Vector vector_field_mapping(field) diff --git a/gems/gitlab-active-context/lib/active_context/databases/postgresql/executor.rb b/gems/gitlab-active-context/lib/active_context/databases/postgresql/executor.rb index 83c4116e9b4..592282b7ddf 100644 --- a/gems/gitlab-active-context/lib/active_context/databases/postgresql/executor.rb +++ b/gems/gitlab-active-context/lib/active_context/databases/postgresql/executor.rb @@ -75,7 +75,7 @@ module ActiveContext when Field::Bigint # Bigint is 8 bytes fixed_columns << [field, 8] - when Field::Prefix + when Field::Keyword # Text fields are variable width variable_columns << field else @@ -93,7 +93,7 @@ module ActiveContext table.column(field.name, "vector(#{field.options[:dimensions]})") when Field::Bigint table.bigint(field.name, **field.options.except(:index)) - when Field::Prefix + when Field::Keyword table.text(field.name, **field.options.except(:index)) else raise ArgumentError, "Unknown field type: #{field.class}" diff --git a/lib/api/projects.rb b/lib/api/projects.rb index e8bbc916687..715027f9e53 100644 --- a/lib/api/projects.rb +++ b/lib/api/projects.rb @@ -1056,6 +1056,7 @@ module API authorize! :change_namespace, user_project args = declared_params(include_missing: false) args[:permission_scope] = :transfer_projects + args[:exact_matches_first] = true groups = ::Groups::UserGroupsFinder.new(current_user, current_user, args).execute groups = groups.excluding_groups(user_project.group).with_route diff --git a/lib/gitlab/ci/templates/Jobs/Secret-Detection.gitlab-ci.yml b/lib/gitlab/ci/templates/Jobs/Secret-Detection.gitlab-ci.yml index 053aa2c5546..0bc68250bd5 100644 --- a/lib/gitlab/ci/templates/Jobs/Secret-Detection.gitlab-ci.yml +++ b/lib/gitlab/ci/templates/Jobs/Secret-Detection.gitlab-ci.yml @@ -10,7 +10,7 @@ variables: SECURE_ANALYZERS_PREFIX: "$CI_TEMPLATE_REGISTRY_HOST/security-products" # SECRET_DETECTION_IMAGE_SUFFIX: "" - SECRETS_ANALYZER_VERSION: "6" + SECRETS_ANALYZER_VERSION: "7" SECRET_DETECTION_EXCLUDED_PATHS: "" .secret-analyzer: diff --git a/lib/gitlab/ci/templates/Jobs/Secret-Detection.latest.gitlab-ci.yml b/lib/gitlab/ci/templates/Jobs/Secret-Detection.latest.gitlab-ci.yml index 9ff5a70749f..23eb3f56dd7 100644 --- a/lib/gitlab/ci/templates/Jobs/Secret-Detection.latest.gitlab-ci.yml +++ b/lib/gitlab/ci/templates/Jobs/Secret-Detection.latest.gitlab-ci.yml @@ -15,7 +15,7 @@ variables: AST_ENABLE_MR_PIPELINES: "true" # SECRET_DETECTION_IMAGE_SUFFIX: "" - SECRETS_ANALYZER_VERSION: "6" + SECRETS_ANALYZER_VERSION: "7" SECRET_DETECTION_EXCLUDED_PATHS: "" .secret-analyzer: diff --git a/lib/gitlab/ci/templates/Security/Secure-Binaries.gitlab-ci.yml b/lib/gitlab/ci/templates/Security/Secure-Binaries.gitlab-ci.yml index 19b80574648..40c46d06031 100644 --- a/lib/gitlab/ci/templates/Security/Secure-Binaries.gitlab-ci.yml +++ b/lib/gitlab/ci/templates/Security/Secure-Binaries.gitlab-ci.yml @@ -130,7 +130,7 @@ eslint: secrets: extends: .download_images variables: - SECURE_BINARIES_ANALYZER_VERSION: "6" + SECURE_BINARIES_ANALYZER_VERSION: "7" rules: - if: '$SECURE_BINARIES_DOWNLOAD_IMAGES == "true" && $SECURE_BINARIES_ANALYZERS =~ /\bsecrets\b/' diff --git a/locale/gitlab.pot b/locale/gitlab.pot index 8317bd020df..669823c2722 100644 --- a/locale/gitlab.pot +++ 
b/locale/gitlab.pot @@ -53679,12 +53679,21 @@ msgstr "" msgid "Secrets|An error occurred while fetching the Secret manager status. Please try again." msgstr "" +msgid "Secrets|Are you sure you want to delete secret %{secretName}? This action cannot be undone, and the secret cannot be recovered." +msgstr "" + msgid "Secrets|Created" msgstr "" msgid "Secrets|Delete" msgstr "" +msgid "Secrets|Delete Secret" +msgstr "" + +msgid "Secrets|Delete secret" +msgstr "" + msgid "Secrets|Description must be 200 characters or less." msgstr "" @@ -53736,6 +53745,9 @@ msgstr "" msgid "Secrets|Rotation reminder" msgstr "" +msgid "Secrets|Secret %{secretName} has been deleted." +msgstr "" + msgid "Secrets|Secrets" msgstr "" @@ -53760,6 +53772,9 @@ msgstr "" msgid "Secrets|The name should be unique within this project." msgstr "" +msgid "Secrets|To confirm, enter %{secretName}:" +msgstr "" + msgid "Secrets|Use the Secrets Manager to store your sensitive credentials, and then safely use them in your processes." msgstr "" diff --git a/spec/finders/groups/user_groups_finder_spec.rb b/spec/finders/groups/user_groups_finder_spec.rb index 3ea5ad03066..9ff2db8488e 100644 --- a/spec/finders/groups/user_groups_finder_spec.rb +++ b/spec/finders/groups/user_groups_finder_spec.rb @@ -12,7 +12,7 @@ RSpec.describe Groups::UserGroupsFinder, feature_category: :groups_and_projects let_it_be(:public_maintainer_group) { create(:group, name: 'a public maintainer', path: 'a-public-maintainer', parent: root_group) } let_it_be(:public_owner_group) { create(:group, name: 'a public owner', path: 'a-public-owner') } - subject { described_class.new(current_user, target_user, arguments.merge(search_arguments)).execute } + subject(:result) { described_class.new(current_user, target_user, arguments.merge(search_arguments)).execute } let(:arguments) { {} } let(:current_user) { user } @@ -171,6 +171,36 @@ RSpec.describe Groups::UserGroupsFinder, feature_category: :groups_and_projects end end + context 'on searching with exact_matches_first' do + let(:search_arguments) { { exact_matches_first: true, search: private_maintainer_group.path } } + let(:other_groups) { [] } + + before do + 2.times do + new_group = create(:group, :private, path: "1-#{SecureRandom.hex}-#{private_maintainer_group.path}", parent: root_group) + new_group.add_owner(current_user) + other_groups << new_group + end + end + + it 'prioritizes exact matches first' do + expect(result.first).to eq(private_maintainer_group) + expect(result[1..]).to match_array(other_groups) + end + + context 'when exact_matches_first_project_transfer feature flag is disabled' do + let(:expected_groups) { other_groups + [private_maintainer_group] } + + before do + stub_feature_flags(exact_matches_first_project_transfer: false) + end + + it 'returns matching groups sorted by namespace path' do + expect(result).to match_array(expected_groups.sort_by(&:path)) + end + end + end + it 'returns all groups where the user is a direct member' do is_expected.to contain_exactly( public_maintainer_group, diff --git a/spec/finders/notes_finder_spec.rb b/spec/finders/notes_finder_spec.rb index 819187ec100..40b29cb6b2a 100644 --- a/spec/finders/notes_finder_spec.rb +++ b/spec/finders/notes_finder_spec.rb @@ -134,42 +134,20 @@ RSpec.describe NotesFinder do let_it_be(:banned_user) { create(:banned_user).user } let!(:banned_note) { create(:note_on_issue, project: project, author: banned_user) } - context 'when :hidden_notes feature is not enabled' do - before do - stub_feature_flags(hidden_notes: false) - end + 
context 'when user is an admin' do + let(:user) { create(:admin) } - context 'when user is not an admin' do - it { is_expected.to include(banned_note) } - end - - context 'when @current_user is nil' do - let(:user) { nil } - - it { is_expected.to be_empty } - end + it { is_expected.to include(banned_note) } end - context 'when :hidden_notes feature is enabled' do - before do - stub_feature_flags(hidden_notes: true) - end + context 'when user is not an admin' do + it { is_expected.not_to include(banned_note) } + end - context 'when user is an admin' do - let(:user) { create(:admin) } + context 'when @current_user is nil' do + let(:user) { nil } - it { is_expected.to include(banned_note) } - end - - context 'when user is not an admin' do - it { is_expected.not_to include(banned_note) } - end - - context 'when @current_user is nil' do - let(:user) { nil } - - it { is_expected.to be_empty } - end + it { is_expected.to be_empty } end end diff --git a/spec/lib/gitlab/import_export/all_models.yml b/spec/lib/gitlab/import_export/all_models.yml index bdc6ce48ded..8035200fa63 100644 --- a/spec/lib/gitlab/import_export/all_models.yml +++ b/spec/lib/gitlab/import_export/all_models.yml @@ -84,6 +84,7 @@ issues: - observability_logs - observability_traces - dates_source +- current_status work_item_type: - issues - namespace diff --git a/spec/models/note_spec.rb b/spec/models/note_spec.rb index d2d935695f9..6759b512dfa 100644 --- a/spec/models/note_spec.rb +++ b/spec/models/note_spec.rb @@ -1919,22 +1919,8 @@ RSpec.describe Note, feature_category: :team_planning do let_it_be(:banned_user) { create(:banned_user).user } let_it_be(:banned_note) { create(:note, author: banned_user) } - context 'when the :hidden_notes feature is disabled' do - before do - stub_feature_flags(hidden_notes: false) - end - - it { is_expected.to include(banned_note, note1) } - end - - context 'when the :hidden_notes feature is enabled' do - before do - stub_feature_flags(hidden_notes: true) - end - - it { is_expected.not_to include(banned_note) } - it { is_expected.to include(note1) } - end + it { is_expected.not_to include(banned_note) } + it { is_expected.to include(note1) } end end diff --git a/spec/requests/api/internal/base_spec.rb b/spec/requests/api/internal/base_spec.rb index deb12856347..c5c36f4c6bc 100644 --- a/spec/requests/api/internal/base_spec.rb +++ b/spec/requests/api/internal/base_spec.rb @@ -448,15 +448,9 @@ RSpec.describe API::Internal::Base, feature_category: :system_access do let(:actor) { key } let(:rate_limiter) { double(:rate_limiter, ip: "127.0.0.1", trusted_ip?: false) } - before do - allow(::Gitlab::Auth::IpRateLimiter).to receive(:new).with("127.0.0.1").and_return(rate_limiter) - end - it 'is throttled by rate limiter' do allow(::Gitlab::ApplicationRateLimiter).to receive(:threshold).and_return(1) - expect(::Gitlab::ApplicationRateLimiter).to receive(:throttled?).with(:gitlab_shell_operation, scope: [action, project.full_path, actor]).twice.and_call_original - expect(::Gitlab::ApplicationRateLimiter).to receive(:throttled?).with(:gitlab_shell_operation, scope: [action, project.full_path, "127.0.0.1"]).and_call_original request @@ -465,6 +459,7 @@ RSpec.describe API::Internal::Base, feature_category: :system_access do request expect(response).to have_gitlab_http_status(:too_many_requests) + expect(json_response['message']['error']).to eq('This endpoint has been requested too many times. 
Try again later.') end diff --git a/spec/services/click_house/sync_strategies/base_sync_strategy_spec.rb b/spec/services/click_house/sync_strategies/base_sync_strategy_spec.rb index 4153eed67cf..1885d8433e1 100644 --- a/spec/services/click_house/sync_strategies/base_sync_strategy_spec.rb +++ b/spec/services/click_house/sync_strategies/base_sync_strategy_spec.rb @@ -66,6 +66,16 @@ RSpec.describe ClickHouse::SyncStrategies::BaseSyncStrategy, feature_category: : expect(events.size).to eq(4) end + it 'uses the configured primary_key for the id_for_cursor alias' do + allow(strategy).to receive(:primary_key).and_return(:id) + # consider primary key :id out of projections + allow(strategy).to receive(:projections).and_return([:project_id]) + + expect(execute).to eq({ status: :processed, records_inserted: 4, reached_end_of_table: true }) + # cursor is still set to last primary key + expect(ClickHouse::SyncCursor.cursor_for(:events)).to eq(project_event1.id) + end + context 'when new records are inserted while processing' do it 'does not process new records created during the iteration' do # Simulating the case when there is an insert during the iteration diff --git a/spec/views/groups/settings/_remove.html.haml_spec.rb b/spec/views/groups/settings/_remove.html.haml_spec.rb index 9101b9f489c..035e851ed83 100644 --- a/spec/views/groups/settings/_remove.html.haml_spec.rb +++ b/spec/views/groups/settings/_remove.html.haml_spec.rb @@ -2,20 +2,39 @@ require 'spec_helper' -RSpec.describe 'groups/settings/_remove.html.haml' do +RSpec.describe 'groups/settings/_remove.html.haml', feature_category: :groups_and_projects do + let_it_be(:group) { build_stubbed(:group) } + before do stub_feature_flags(downtier_delayed_deletion: false) + allow(view).to receive(:current_user).and_return(double.as_null_object) end describe 'render' do - it 'enables the Remove group button for a group' do - group = build(:group) + context 'when user can :remove_group' do + before do + allow(view).to receive(:can?).with(anything, :remove_group, group).and_return(true) + end - render 'groups/settings/remove', group: group + it 'enables the Remove group button for a group' do + render 'groups/settings/remove', group: group - expect(rendered).to have_selector '[data-button-testid="remove-group-button"]' - expect(rendered).not_to have_selector '[data-button-testid="remove-group-button"].disabled' - expect(rendered).not_to have_selector '[data-testid="group-has-linked-subscription-alert"]' + expect(rendered).to have_selector '[data-button-testid="remove-group-button"]' + expect(rendered).not_to have_selector '[data-button-testid="remove-group-button"].disabled' + expect(rendered).not_to have_selector '[data-testid="group-has-linked-subscription-alert"]' + end + end + + context 'when user cannot :remove_group' do + before do + allow(view).to receive(:can?).with(anything, :remove_group, group).and_return(false) + end + + it 'disables the Remove group button for a group' do + output = view.render('groups/settings/remove', group: group) + + expect(output).to be_nil + end end end end diff --git a/vendor/gems/sidekiq/lib/sidekiq/api.rb.orig b/vendor/gems/sidekiq/lib/sidekiq/api.rb.orig deleted file mode 100644 index fa206d65709..00000000000 --- a/vendor/gems/sidekiq/lib/sidekiq/api.rb.orig +++ /dev/null @@ -1,1240 +0,0 @@ -# frozen_string_literal: true - -require "sidekiq" - -require "zlib" -require "set" - -require "sidekiq/metrics/query" - -# -# Sidekiq's Data API provides a Ruby object model on top -# of Sidekiq's runtime data in Redis. 
This API should never -# be used within application code for business logic. -# -# The Sidekiq server process never uses this API: all data -# manipulation is done directly for performance reasons to -# ensure we are using Redis as efficiently as possible at -# every callsite. -# - -module Sidekiq - # Retrieve runtime statistics from Redis regarding - # this Sidekiq cluster. - # - # stat = Sidekiq::Stats.new - # stat.processed - class Stats - def initialize - fetch_stats_fast! - end - - def processed - stat :processed - end - - def failed - stat :failed - end - - def scheduled_size - stat :scheduled_size - end - - def retry_size - stat :retry_size - end - - def dead_size - stat :dead_size - end - - def enqueued - stat :enqueued - end - - def processes_size - stat :processes_size - end - - def workers_size - stat :workers_size - end - - def default_queue_latency - stat :default_queue_latency - end - - def queues - Sidekiq.redis do |conn| - queues = conn.sscan("queues").to_a - - lengths = conn.pipelined { |pipeline| - queues.each do |queue| - pipeline.llen("queue:#{queue}") - end - } - - array_of_arrays = queues.zip(lengths).sort_by { |_, size| -size } - array_of_arrays.to_h - end - end - - # O(1) redis calls - # @api private - def fetch_stats_fast! - pipe1_res = Sidekiq.redis { |conn| - conn.pipelined do |pipeline| - pipeline.get("stat:processed") - pipeline.get("stat:failed") - pipeline.zcard("schedule") - pipeline.zcard("retry") - pipeline.zcard("dead") - pipeline.scard("processes") - pipeline.lindex("queue:default", -1) - end - } - - default_queue_latency = if (entry = pipe1_res[6]) - job = begin - Sidekiq.load_json(entry) - rescue - {} - end - now = Time.now.to_f - thence = job["enqueued_at"] || now - now - thence - else - 0 - end - - @stats = { - processed: pipe1_res[0].to_i, - failed: pipe1_res[1].to_i, - scheduled_size: pipe1_res[2], - retry_size: pipe1_res[3], - dead_size: pipe1_res[4], - processes_size: pipe1_res[5], - - default_queue_latency: default_queue_latency - } - end - - # O(number of processes + number of queues) redis calls - # @api private - def fetch_stats_slow! - processes = Sidekiq.redis { |conn| - conn.sscan("processes").to_a - } - - queues = Sidekiq.redis { |conn| - conn.sscan("queues").to_a - } - - pipe2_res = Sidekiq.redis { |conn| - conn.pipelined do |pipeline| - processes.each { |key| pipeline.hget(key, "busy") } - queues.each { |queue| pipeline.llen("queue:#{queue}") } - end - } - - s = processes.size - workers_size = pipe2_res[0...s].sum(&:to_i) - enqueued = pipe2_res[s..].sum(&:to_i) - - @stats[:workers_size] = workers_size - @stats[:enqueued] = enqueued - @stats - end - - # @api private - def fetch_stats! - fetch_stats_fast! - fetch_stats_slow! - end - - # @api private - def reset(*stats) - all = %w[failed processed] - stats = stats.empty? ? all : all & stats.flatten.compact.map(&:to_s) - - mset_args = [] - stats.each do |stat| - mset_args << "stat:#{stat}" - mset_args << 0 - end - Sidekiq.redis do |conn| - conn.mset(*mset_args) - end - end - - private - - def stat(s) - fetch_stats_slow! if @stats[s].nil? 
- @stats[s] || raise(ArgumentError, "Unknown stat #{s}") - end - - class History - def initialize(days_previous, start_date = nil, pool: nil) - # we only store five years of data in Redis - raise ArgumentError if days_previous < 1 || days_previous > (5 * 365) - @days_previous = days_previous - @start_date = start_date || Time.now.utc.to_date - end - - def processed - @processed ||= date_stat_hash("processed") - end - - def failed - @failed ||= date_stat_hash("failed") - end - - private - - def date_stat_hash(stat) - stat_hash = {} - dates = @start_date.downto(@start_date - @days_previous + 1).map { |date| - date.strftime("%Y-%m-%d") - } - - keys = dates.map { |datestr| "stat:#{stat}:#{datestr}" } - - Sidekiq.redis do |conn| - conn.mget(keys).each_with_index do |value, idx| - stat_hash[dates[idx]] = value ? value.to_i : 0 - end - end - - stat_hash - end - end - end - - ## - # Represents a queue within Sidekiq. - # Allows enumeration of all jobs within the queue - # and deletion of jobs. NB: this queue data is real-time - # and is changing within Redis moment by moment. - # - # queue = Sidekiq::Queue.new("mailer") - # queue.each do |job| - # job.klass # => 'MyWorker' - # job.args # => [1, 2, 3] - # job.delete if job.jid == 'abcdef1234567890' - # end - class Queue - include Enumerable - - ## - # Fetch all known queues within Redis. - # - # @return [Array] - def self.all - Sidekiq.redis { |c| c.sscan("queues").to_a }.sort.map { |q| Sidekiq::Queue.new(q) } - end - - attr_reader :name - - # @param name [String] the name of the queue - def initialize(name = "default") - @name = name.to_s - @rname = "queue:#{name}" - end - - # The current size of the queue within Redis. - # This value is real-time and can change between calls. - # - # @return [Integer] the size - def size - Sidekiq.redis { |con| con.llen(@rname) } - end - - # @return [Boolean] if the queue is currently paused - def paused? - false - end - - ## - # Calculates this queue's latency, the difference in seconds since the oldest - # job in the queue was enqueued. - # - # @return [Float] in seconds - def latency - entry = Sidekiq.redis { |conn| - conn.lindex(@rname, -1) - } - return 0 unless entry - job = Sidekiq.load_json(entry) - now = Time.now.to_f - thence = job["enqueued_at"] || now - now - thence - end - - def each - initial_size = size - deleted_size = 0 - page = 0 - page_size = 50 - - loop do - range_start = page * page_size - deleted_size - range_end = range_start + page_size - 1 - entries = Sidekiq.redis { |conn| - conn.lrange @rname, range_start, range_end - } - break if entries.empty? - page += 1 - entries.each do |entry| - yield JobRecord.new(entry, @name) - end - deleted_size = initial_size - size - end - end - - ## - # Find the job with the given JID within this queue. - # - # This is a *slow, inefficient* operation. Do not use under - # normal conditions. - # - # @param jid [String] the job_id to look for - # @return [Sidekiq::JobRecord] - # @return [nil] if not found - def find_job(jid) - detect { |j| j.jid == jid } - end - - # delete all jobs within this queue - # @return [Boolean] true - def clear - Sidekiq.redis do |conn| - conn.multi do |transaction| - transaction.unlink(@rname) - transaction.srem("queues", [name]) - end - end - true - end - alias_method :💣, :clear - - # :nodoc: - # @api private - def as_json(options = nil) - {name: name} # 5336 - end - end - - ## - # Represents a pending job within a Sidekiq queue. 
- # - # The job should be considered immutable but may be - # removed from the queue via JobRecord#delete. - class JobRecord - # the parsed Hash of job data - # @!attribute [r] Item - attr_reader :item - # the underlying String in Redis - # @!attribute [r] Value - attr_reader :value - # the queue associated with this job - # @!attribute [r] Queue - attr_reader :queue - - # :nodoc: - # @api private - def initialize(item, queue_name = nil) - @args = nil - @value = item - @item = item.is_a?(Hash) ? item : parse(item) - @queue = queue_name || @item["queue"] - end - - # :nodoc: - # @api private - def parse(item) - Sidekiq.load_json(item) - rescue JSON::ParserError - # If the job payload in Redis is invalid JSON, we'll load - # the item as an empty hash and store the invalid JSON as - # the job 'args' for display in the Web UI. - @invalid = true - @args = [item] - {} - end - - # This is the job class which Sidekiq will execute. If using ActiveJob, - # this class will be the ActiveJob adapter class rather than a specific job. - def klass - self["class"] - end - - def display_class - # Unwrap known wrappers so they show up in a human-friendly manner in the Web UI - @klass ||= self["display_class"] || begin - if klass == "ActiveJob::QueueAdapters::SidekiqAdapter::JobWrapper" || klass == "Sidekiq::ActiveJob::Wrapper" - job_class = @item["wrapped"] || args[0] - if job_class == "ActionMailer::DeliveryJob" || job_class == "ActionMailer::MailDeliveryJob" - # MailerClass#mailer_method - args[0]["arguments"][0..1].join("#") - else - job_class - end - else - klass - end - end - end - - def display_args - # Unwrap known wrappers so they show up in a human-friendly manner in the Web UI - @display_args ||= if klass == "ActiveJob::QueueAdapters::SidekiqAdapter::JobWrapper" || klass == "Sidekiq::ActiveJob::Wrapper" - job_args = self["wrapped"] ? deserialize_argument(args[0]["arguments"]) : [] - if (self["wrapped"] || args[0]) == "ActionMailer::DeliveryJob" - # remove MailerClass, mailer_method and 'deliver_now' - job_args.drop(3) - elsif (self["wrapped"] || args[0]) == "ActionMailer::MailDeliveryJob" - # remove MailerClass, mailer_method and 'deliver_now' - job_args.drop(3).first.values_at("params", "args") - else - job_args - end - else - if self["encrypt"] - # no point in showing 150+ bytes of random garbage - args[-1] = "[encrypted data]" - end - args - end - end - - def args - @args || @item["args"] - end - - def jid - self["jid"] - end - - def bid - self["bid"] - end - - def enqueued_at - self["enqueued_at"] ? Time.at(self["enqueued_at"]).utc : nil - end - - def created_at - Time.at(self["created_at"] || self["enqueued_at"] || 0).utc - end - - def tags - self["tags"] || [] - end - - def error_backtrace - # Cache nil values - if defined?(@error_backtrace) - @error_backtrace - else - value = self["error_backtrace"] - @error_backtrace = value && uncompress_backtrace(value) - end - end - - def latency - now = Time.now.to_f - now - (@item["enqueued_at"] || @item["created_at"] || now) - end - - # Remove this job from the queue - def delete - count = Sidekiq.redis { |conn| - conn.lrem("queue:#{@queue}", 1, @value) - } - count != 0 - end - - # Access arbitrary attributes within the job hash - def [](name) - # nil will happen if the JSON fails to parse. - # We don't guarantee Sidekiq will work with bad job JSON but we should - # make a best effort to minimize the damage. - @item ? 
@item[name] : nil - end - - private - - ACTIVE_JOB_PREFIX = "_aj_" - GLOBALID_KEY = "_aj_globalid" - - def deserialize_argument(argument) - case argument - when Array - argument.map { |arg| deserialize_argument(arg) } - when Hash - if serialized_global_id?(argument) - argument[GLOBALID_KEY] - else - argument.transform_values { |v| deserialize_argument(v) } - .reject { |k, _| k.start_with?(ACTIVE_JOB_PREFIX) } - end - else - argument - end - end - - def serialized_global_id?(hash) - hash.size == 1 && hash.include?(GLOBALID_KEY) - end - - def uncompress_backtrace(backtrace) - strict_base64_decoded = backtrace.unpack1("m") - uncompressed = Zlib::Inflate.inflate(strict_base64_decoded) - Sidekiq.load_json(uncompressed) - end - end - - # Represents a job within a Redis sorted set where the score - # represents a timestamp associated with the job. This timestamp - # could be the scheduled time for it to run (e.g. scheduled set), - # or the expiration date after which the entry should be deleted (e.g. dead set). - class SortedEntry < JobRecord - attr_reader :score - attr_reader :parent - - # :nodoc: - # @api private - def initialize(parent, score, item) - super(item) - @score = Float(score) - @parent = parent - end - - # The timestamp associated with this entry - def at - Time.at(score).utc - end - - # remove this entry from the sorted set - def delete - if @value - @parent.delete_by_value(@parent.name, @value) - else - @parent.delete_by_jid(score, jid) - end - end - - # Change the scheduled time for this job. - # - # @param at [Time] the new timestamp for this job - def reschedule(at) - Sidekiq.redis do |conn| - conn.zincrby(@parent.name, at.to_f - @score, Sidekiq.dump_json(@item)) - end - end - - # Enqueue this job from the scheduled or dead set so it will - # be executed at some point in the near future. - def add_to_queue - remove_job do |message| - msg = Sidekiq.load_json(message) - Sidekiq::Client.push(msg) - end - end - - # enqueue this job from the retry set so it will be executed - # at some point in the near future. - def retry - remove_job do |message| - msg = Sidekiq.load_json(message) - msg["retry_count"] -= 1 if msg["retry_count"] - Sidekiq::Client.push(msg) - end - end - - # Move this job from its current set into the Dead set. - def kill - remove_job do |message| - DeadSet.new.kill(message) - end - end - - def error? - !!item["error_class"] - end - - private - - def remove_job - Sidekiq.redis do |conn| - results = conn.multi { |transaction| - transaction.zrange(parent.name, score, score, "BYSCORE") - transaction.zremrangebyscore(parent.name, score, score) - }.first - - if results.size == 1 - yield results.first - else - # multiple jobs with the same score - # find the one with the right JID and push it - matched, nonmatched = results.partition { |message| - if message.index(jid) - msg = Sidekiq.load_json(message) - msg["jid"] == jid - else - false - end - } - - msg = matched.first - yield msg if msg - - # push the rest back onto the sorted set - conn.multi do |transaction| - nonmatched.each do |message| - transaction.zadd(parent.name, score.to_f.to_s, message) - end - end - end - end - end - end - - # Base class for all sorted sets within Sidekiq. 
- class SortedSet - include Enumerable - - # Redis key of the set - # @!attribute [r] Name - attr_reader :name - - # :nodoc: - # @api private - def initialize(name) - @name = name - @_size = size - end - - # real-time size of the set, will change - def size - Sidekiq.redis { |c| c.zcard(name) } - end - - # Scan through each element of the sorted set, yielding each to the supplied block. - # Please see Redis's SCAN documentation for implementation details. - # - # @param match [String] a snippet or regexp to filter matches. - # @param count [Integer] number of elements to retrieve at a time, default 100 - # @yieldparam [Sidekiq::SortedEntry] each entry - def scan(match, count = 100) - return to_enum(:scan, match, count) unless block_given? - - match = "*#{match}*" unless match.include?("*") - Sidekiq.redis do |conn| - conn.zscan(name, match: match, count: count) do |entry, score| - yield SortedEntry.new(self, score, entry) - end - end - end - - # @return [Boolean] always true - def clear - Sidekiq.redis do |conn| - conn.unlink(name) - end - true - end - alias_method :💣, :clear - - # :nodoc: - # @api private - def as_json(options = nil) - {name: name} # 5336 - end - end - - # Base class for all sorted sets which contain jobs, e.g. scheduled, retry and dead. - # Sidekiq Pro and Enterprise add additional sorted sets which do not contain job data, - # e.g. Batches. - class JobSet < SortedSet - # Add a job with the associated timestamp to this set. - # @param timestamp [Time] the score for the job - # @param job [Hash] the job data - def schedule(timestamp, job) - Sidekiq.redis do |conn| - conn.zadd(name, timestamp.to_f.to_s, Sidekiq.dump_json(job)) - end - end - - def pop_each - Sidekiq.redis do |c| - size.times do - data, score = c.zpopmin(name, 1)&.first - break unless data - yield data, score - end - end - end - - def retry_all - c = Sidekiq::Client.new - pop_each do |msg, _| - job = Sidekiq.load_json(msg) - # Manual retries should not count against the retry limit. - job["retry_count"] -= 1 if job["retry_count"] - c.push(job) - end - end - - # Move all jobs from this Set to the Dead Set. - # See DeadSet#kill - def kill_all(notify_failure: false, ex: nil) - ds = DeadSet.new - opts = {notify_failure: notify_failure, ex: ex, trim: false} - - begin - pop_each do |msg, _| - ds.kill(msg, opts) - end - ensure - ds.trim - end - end - - def each - initial_size = @_size - offset_size = 0 - page = -1 - page_size = 50 - - loop do - range_start = page * page_size + offset_size - range_end = range_start + page_size - 1 - elements = Sidekiq.redis { |conn| - conn.zrange name, range_start, range_end, "withscores" - } - break if elements.empty? - page -= 1 - elements.reverse_each do |element, score| - yield SortedEntry.new(self, score, element) - end - offset_size = initial_size - @_size - end - end - - ## - # Fetch jobs that match a given time or Range. Job ID is an - # optional second argument. - # - # @param score [Time,Range] a specific timestamp or range - # @param jid [String, optional] find a specific JID within the score - # @return [Array] any results found, can be empty - def fetch(score, jid = nil) - begin_score, end_score = - if score.is_a?(Range) - [score.first, score.last] - else - [score, score] - end - - elements = Sidekiq.redis { |conn| - conn.zrange(name, begin_score, end_score, "BYSCORE", "withscores") - } - - elements.each_with_object([]) do |element, result| - data, job_score = element - entry = SortedEntry.new(self, job_score, data) - result << entry if jid.nil? 
|| entry.jid == jid - end - end - - ## - # Find the job with the given JID within this sorted set. - # *This is a slow O(n) operation*. Do not use for app logic. - # - # @param jid [String] the job identifier - # @return [SortedEntry] the record or nil - def find_job(jid) - Sidekiq.redis do |conn| - conn.zscan(name, match: "*#{jid}*", count: 100) do |entry, score| - job = Sidekiq.load_json(entry) - matched = job["jid"] == jid - return SortedEntry.new(self, score, entry) if matched - end - end - nil - end - - # :nodoc: - # @api private - def delete_by_value(name, value) - Sidekiq.redis do |conn| - ret = conn.zrem(name, value) - @_size -= 1 if ret - ret - end - end - - # :nodoc: - # @api private - def delete_by_jid(score, jid) - Sidekiq.redis do |conn| - elements = conn.zrange(name, score, score, "BYSCORE") - elements.each do |element| - if element.index(jid) - message = Sidekiq.load_json(element) - if message["jid"] == jid - ret = conn.zrem(name, element) - @_size -= 1 if ret - break ret - end - end - end - end - end - - alias_method :delete, :delete_by_jid - end - - ## - # The set of scheduled jobs within Sidekiq. - # See the API wiki page for usage notes and examples. - # - class ScheduledSet < JobSet - def initialize - super("schedule") - end - end - - ## - # The set of retries within Sidekiq. - # See the API wiki page for usage notes and examples. - # - class RetrySet < JobSet - def initialize - super("retry") - end - end - - ## - # The set of dead jobs within Sidekiq. Dead jobs have failed all of - # their retries and are helding in this set pending some sort of manual - # fix. They will be removed after 6 months (dead_timeout) if not. - # - class DeadSet < JobSet - def initialize - super("dead") - end - - # Trim dead jobs which are over our storage limits - def trim - hash = Sidekiq.default_configuration - now = Time.now.to_f - Sidekiq.redis do |conn| - conn.multi do |transaction| - transaction.zremrangebyscore(name, "-inf", now - hash[:dead_timeout_in_seconds]) - transaction.zremrangebyrank(name, 0, - hash[:dead_max_jobs]) - end - end - end - - # Add the given job to the Dead set. - # @param message [String] the job data as JSON - # @option opts [Boolean] :notify_failure (true) Whether death handlers should be called - # @option opts [Boolean] :trim (true) Whether Sidekiq should trim the structure to keep it within configuration - # @option opts [Exception] :ex (RuntimeError) An exception to pass to the death handlers - def kill(message, opts = {}) - now = Time.now.to_f - Sidekiq.redis do |conn| - conn.zadd(name, now.to_s, message) - end - - trim if opts[:trim] != false - - if opts[:notify_failure] != false - job = Sidekiq.load_json(message) - if opts[:ex] - ex = opts[:ex] - else - ex = RuntimeError.new("Job killed by API") - ex.set_backtrace(caller) - end - Sidekiq.default_configuration.death_handlers.each do |handle| - handle.call(job, ex) - end - end - true - end - end - - ## - # Enumerates the set of Sidekiq processes which are actively working - # right now. Each process sends a heartbeat to Redis every 5 seconds - # so this set should be relatively accurate, barring network partitions. - # - # @yieldparam [Sidekiq::Process] - # - class ProcessSet - include Enumerable - - def self.[](identity) - exists, (info, busy, beat, quiet, rss, rtt_us) = Sidekiq.redis { |conn| - conn.multi { |transaction| - transaction.sismember("processes", identity) - transaction.hmget(identity, "info", "busy", "beat", "quiet", "rss", "rtt_us") - } - } - - return nil if exists == 0 || info.nil? 
- - hash = Sidekiq.load_json(info) - Process.new(hash.merge("busy" => busy.to_i, - "beat" => beat.to_f, - "quiet" => quiet, - "rss" => rss.to_i, - "rtt_us" => rtt_us.to_i)) - end - - # :nodoc: - # @api private - def initialize(clean_plz = true) - cleanup if clean_plz - end - - # Cleans up dead processes recorded in Redis. - # Returns the number of processes cleaned. - # :nodoc: - # @api private - def cleanup - # dont run cleanup more than once per minute - return 0 unless Sidekiq.redis { |conn| conn.set("process_cleanup", "1", "NX", "EX", "60") } - - count = 0 - Sidekiq.redis do |conn| - procs = conn.sscan("processes").to_a - heartbeats = conn.pipelined { |pipeline| - procs.each do |key| - pipeline.hget(key, "info") - end - } - - # the hash named key has an expiry of 60 seconds. - # if it's not found, that means the process has not reported - # in to Redis and probably died. - to_prune = procs.select.with_index { |proc, i| - heartbeats[i].nil? - } - count = conn.srem("processes", to_prune) unless to_prune.empty? - end - count - end - - def each - result = Sidekiq.redis { |conn| - procs = conn.sscan("processes").to_a.sort - - # We're making a tradeoff here between consuming more memory instead of - # making more roundtrips to Redis, but if you have hundreds or thousands of workers, - # you'll be happier this way - conn.pipelined do |pipeline| - procs.each do |key| - pipeline.hmget(key, "info", "busy", "beat", "quiet", "rss", "rtt_us") - end - end - } - - result.each do |info, busy, beat, quiet, rss, rtt_us| - # If a process is stopped between when we query Redis for `procs` and - # when we query for `result`, we will have an item in `result` that is - # composed of `nil` values. - next if info.nil? - - hash = Sidekiq.load_json(info) - yield Process.new(hash.merge("busy" => busy.to_i, - "beat" => beat.to_f, - "quiet" => quiet, - "rss" => rss.to_i, - "rtt_us" => rtt_us.to_i)) - end - end - - # This method is not guaranteed accurate since it does not prune the set - # based on current heartbeat. #each does that and ensures the set only - # contains Sidekiq processes which have sent a heartbeat within the last - # 60 seconds. - # @return [Integer] current number of registered Sidekiq processes - def size - Sidekiq.redis { |conn| conn.scard("processes") } - end - - # Total number of threads available to execute jobs. - # For Sidekiq Enterprise customers this number (in production) must be - # less than or equal to your licensed concurrency. - # @return [Integer] the sum of process concurrency - def total_concurrency - sum { |x| x["concurrency"].to_i } - end - - # @return [Integer] total amount of RSS memory consumed by Sidekiq processes - def total_rss_in_kb - sum { |x| x["rss"].to_i } - end - alias_method :total_rss, :total_rss_in_kb - - # Returns the identity of the current cluster leader or "" if no leader. - # This is a Sidekiq Enterprise feature, will always return "" in Sidekiq - # or Sidekiq Pro. - # @return [String] Identity of cluster leader - # @return [String] empty string if no leader - def leader - @leader ||= begin - x = Sidekiq.redis { |c| c.get("dear-leader") } - # need a non-falsy value so we can memoize - x ||= "" - x - end - end - end - - # - # Sidekiq::Process represents an active Sidekiq process talking with Redis. 
- # Each process has a set of attributes which look like this: - # - # { - # 'hostname' => 'app-1.example.com', - # 'started_at' => , - # 'pid' => 12345, - # 'tag' => 'myapp' - # 'concurrency' => 25, - # 'queues' => ['default', 'low'], - # 'busy' => 10, - # 'beat' => , - # 'identity' => , - # 'embedded' => true, - # } - class Process - # :nodoc: - # @api private - def initialize(hash) - @attribs = hash - end - - def tag - self["tag"] - end - - def labels - self["labels"].to_a - end - - def [](key) - @attribs[key] - end - - def identity - self["identity"] - end - - def queues - self["queues"] - end - - def weights - self["weights"] - end - - def version - self["version"] - end - - def embedded? - self["embedded"] - end - - # Signal this process to stop processing new jobs. - # It will continue to execute jobs it has already fetched. - # This method is *asynchronous* and it can take 5-10 - # seconds for the process to quiet. - def quiet! - raise "Can't quiet an embedded process" if embedded? - - signal("TSTP") - end - - # Signal this process to shutdown. - # It will shutdown within its configured :timeout value, default 25 seconds. - # This method is *asynchronous* and it can take 5-10 - # seconds for the process to start shutting down. - def stop! - raise "Can't stop an embedded process" if embedded? - - signal("TERM") - end - - # Signal this process to log backtraces for all threads. - # Useful if you have a frozen or deadlocked process which is - # still sending a heartbeat. - # This method is *asynchronous* and it can take 5-10 seconds. - def dump_threads - signal("TTIN") - end - - # @return [Boolean] true if this process is quiet or shutting down - def stopping? - self["quiet"] == "true" - end - - private - - def signal(sig) - key = "#{identity}-signals" - Sidekiq.redis do |c| - c.multi do |transaction| - transaction.lpush(key, sig) - transaction.expire(key, 60) - end - end - end - end - - ## - # The WorkSet stores the work being done by this Sidekiq cluster. - # It tracks the process and thread working on each job. - # - # WARNING WARNING WARNING - # - # This is live data that can change every millisecond. - # If you call #size => 5 and then expect #each to be - # called 5 times, you're going to have a bad time. - # - # works = Sidekiq::WorkSet.new - # works.size => 2 - # works.each do |process_id, thread_id, work| - # # process_id is a unique identifier per Sidekiq process - # # thread_id is a unique identifier per thread - # # work is a Hash which looks like: - # # { 'queue' => name, 'run_at' => timestamp, 'payload' => job_hash } - # # run_at is an epoch Integer. - # end - # - class WorkSet - include Enumerable - - def each(&block) - results = [] - procs = nil - all_works = nil - - Sidekiq.redis do |conn| - procs = conn.sscan("processes").to_a.sort - all_works = conn.pipelined do |pipeline| - procs.each do |key| - pipeline.hgetall("#{key}:work") - end - end - end - - procs.zip(all_works).each do |key, workers| - workers.each_pair do |tid, json| - results << [key, tid, Sidekiq::Work.new(key, tid, Sidekiq.load_json(json))] unless json.empty? - end - end - - results.sort_by { |(_, _, hsh)| hsh.raw("run_at") }.each(&block) - end - - # Note that #size is only as accurate as Sidekiq's heartbeat, - # which happens every 5 seconds. It is NOT real-time. - # - # Not very efficient if you have lots of Sidekiq - # processes but the alternative is a global counter - # which can easily get out of sync with crashy processes. 
- def size - Sidekiq.redis do |conn| - procs = conn.sscan("processes").to_a - if procs.empty? - 0 - else - conn.pipelined { |pipeline| - procs.each do |key| - pipeline.hget(key, "busy") - end - }.sum(&:to_i) - end - end - end - - ## - # Find the work which represents a job with the given JID. - # *This is a slow O(n) operation*. Do not use for app logic. - # - # @param jid [String] the job identifier - # @return [Sidekiq::Work] the work or nil - def find_work_by_jid(jid) - each do |_process_id, _thread_id, work| - job = work.job - return work if job.jid == jid - end - nil - end - end - - # Sidekiq::Work represents a job which is currently executing. - class Work - attr_reader :process_id - attr_reader :thread_id - - def initialize(pid, tid, hsh) - @process_id = pid - @thread_id = tid - @hsh = hsh - @job = nil - end - - def queue - @hsh["queue"] - end - - def run_at - Time.at(@hsh["run_at"]) - end - - def job - @job ||= Sidekiq::JobRecord.new(@hsh["payload"]) - end - - def payload - @hsh["payload"] - end - - # deprecated - def [](key) - kwargs = {uplevel: 1} - kwargs[:category] = :deprecated if RUBY_VERSION > "3.0" # TODO - warn("Direct access to `Sidekiq::Work` attributes is deprecated, please use `#payload`, `#queue`, `#run_at` or `#job` instead", **kwargs) - - @hsh[key] - end - - # :nodoc: - # @api private - def raw(name) - @hsh[name] - end - - def method_missing(*all) - @hsh.send(*all) - end - - def respond_to_missing?(name, *args) - @hsh.respond_to?(name) - end - end - - # Since "worker" is a nebulous term, we've deprecated the use of this class name. - # Is "worker" a process, a type of job, a thread? Undefined! - # WorkSet better describes the data. - Workers = WorkSet -end diff --git a/vendor/gems/sidekiq/lib/sidekiq/paginator.rb.orig b/vendor/gems/sidekiq/lib/sidekiq/paginator.rb.orig deleted file mode 100644 index 8bd1c1bae78..00000000000 --- a/vendor/gems/sidekiq/lib/sidekiq/paginator.rb.orig +++ /dev/null @@ -1,61 +0,0 @@ -# frozen_string_literal: true - -module Sidekiq - module Paginator - TYPE_CACHE = { - "dead" => "zset", - "retry" => "zset", - "schedule" => "zset" - } - - def page(key, pageidx = 1, page_size = 25, opts = nil) - current_page = (pageidx.to_i < 1) ? 1 : pageidx.to_i - pageidx = current_page - 1 - total_size = 0 - items = [] - starting = pageidx * page_size - ending = starting + page_size - 1 - - Sidekiq.redis do |conn| - type = conn.type(key) - rev = opts && opts[:reverse] - - case type - when "zset" - total_size, items = conn.multi { |transaction| - transaction.zcard(key) - if rev - transaction.zrange(key, starting, ending, "REV", "withscores") - else - transaction.zrange(key, starting, ending, "withscores") - end - } - [current_page, total_size, items] - when "list" - total_size, items = conn.multi { |transaction| - transaction.llen(key) - if rev - transaction.lrange(key, -ending - 1, -starting - 1) - else - transaction.lrange(key, starting, ending) - end - } - items.reverse! if rev - [current_page, total_size, items] - when "none" - [1, 0, []] - else - raise "can't page a #{type}" - end - end - end - - def page_items(items, pageidx = 1, page_size = 25) - current_page = (pageidx.to_i < 1) ? 1 : pageidx.to_i - pageidx = current_page - 1 - starting = pageidx * page_size - items = items.to_a - [current_page, items.size, items[starting, page_size]] - end - end -end
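For context on the `exact_matches_first` changes above: the REST endpoint only sets the flag and delegates, so the behavior is easiest to see at the finder boundary. A minimal sketch, assuming a `current_user` and a hypothetical search term (only `permission_scope`, `search`, and `exact_matches_first` come from the changeset):

```ruby
# Minimal sketch: call Groups::UserGroupsFinder the way the project
# transfer-locations endpoint now does. The search term is a placeholder.
args = {
  permission_scope: :transfer_projects, # scope to groups eligible for project transfer
  search: 'a-public-maintainer',        # hypothetical search term
  exact_matches_first: true             # newly added flag
}

groups = Groups::UserGroupsFinder.new(current_user, current_user, args).execute

# With the :exact_matches_first_project_transfer feature flag enabled and a
# search term present, the group whose path exactly matches the term sorts
# first; otherwise results fall back to the regular search-and-sort path.
groups.first
```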
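Similarly, the `primary_key` hook covered by the new ClickHouse sync-strategy spec is intended for strategies whose batching column is not `id`. A partial, hypothetical sketch (the class and column names are illustrative, and the remaining required hooks such as `batching_scope` are omitted):

```ruby
# Hypothetical strategy that batches on a UInt column other than `id`.
# The value returned by `primary_key` is also selected as `id_for_cursor`,
# so ClickHouse::SyncCursor keeps advancing even when the column is not
# part of `projections` (the case the spec above pins down).
module ClickHouse
  module SyncStrategies
    class PushEventsSyncStrategy < BaseSyncStrategy
      private

      def primary_key
        :event_id # illustrative column name
      end

      def projections
        [:project_id, :created_at]
      end

      def csv_mapping
        { project_id: :project_id, created_at: :created_at }
      end

      # ... batching_scope and the other base-class hooks go here.
    end
  end
end
```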