Add latest changes from gitlab-org/gitlab@master

GitLab Bot 2025-04-23 12:07:28 +00:00
parent 90d525e137
commit 6a7ae91bd0
49 changed files with 367 additions and 1565 deletions

View File

@ -7,10 +7,10 @@
.gitlab/CODEOWNERS @gitlab-org/development-leaders @gitlab-org/tw-leadership
## Allows release tooling and Gitaly team members to update the Gitaly Version
/GITALY_SERVER_VERSION @project_278964_bot_77e28085fcec07f14dfd31c689824b5b @gitlab-org/maintainers/rails-backend @gitlab-org/delivery @gl-gitaly
/GITALY_SERVER_VERSION @project_278964_bot_e2e6cca5e3b0076fdecec369cccb9e18 @gitlab-org/maintainers/rails-backend @gitlab-org/delivery @gl-gitaly
## Allows release tooling, KAS version maintainers and the delivery team to update the KAS version
/GITLAB_KAS_VERSION @project_278964_bot_77e28085fcec07f14dfd31c689824b5b @gitlab-org/maintainers/kas-version-maintainers @gitlab-org/maintainers/rails-backend @gitlab-org/delivery
/GITLAB_KAS_VERSION @project_278964_bot_e2e6cca5e3b0076fdecec369cccb9e18 @gitlab-org/maintainers/kas-version-maintainers @gitlab-org/maintainers/rails-backend @gitlab-org/delivery
## Allows automated updates to E2E test knapsack reports
/qa/knapsack/**/*.json @project_278964_bot_bd38289efeb650826d995b5f830ca9cb @gl-dx

View File

@ -76,7 +76,6 @@ export default {
persist-collapsed-state
class="!gl-mt-5 gl-overflow-hidden"
:body-class="{ '!gl-m-[-1px] !gl-p-0': items.length || isPreview }"
footer-class="!gl-border-t-0"
@collapsed="isCollapsed = true"
@expanded="isCollapsed = false"
>

View File

@ -11,6 +11,7 @@
# permissions: string (see Types::Groups::UserPermissionsEnum)
# search: string used for search on path and group name
# sort: string (see Types::Namespaces::GroupSortEnum)
# exact_matches_first: boolean used to enable prioritization of exact matches
#
# Initially created to filter user groups and descendants where the user can create projects
module Groups
@ -26,8 +27,11 @@ module Groups
return Group.none if target_user.blank?
items = by_permission_scope
items = by_search(items)
# Search will perform an ORDER BY to ensure exact matches are returned first.
return by_search(items, exact_matches_first: true) if exact_matches_first_enabled?
items = by_search(items)
sort(items)
end
@ -68,5 +72,10 @@ module Groups
items.sort_by_attribute(params[:sort])
end
def exact_matches_first_enabled?
params[:exact_matches_first] && params[:search].present? &&
Feature.enabled?(:exact_matches_first_project_transfer, current_user)
end
end
end
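For context, a minimal sketch of how a caller might invoke this finder with the new parameter; the argument names mirror the projects API change later in this commit, and the specific values are illustrative:

```ruby
# Hypothetical call site; values are placeholders, not code from this commit.
finder = Groups::UserGroupsFinder.new(
  current_user,
  current_user,
  { search: 'gitlab-org', permission_scope: :transfer_projects, exact_matches_first: true }
)
groups = finder.execute # exact path or name matches come first when the flag is enabled
```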

View File

@ -193,7 +193,6 @@ class NotesFinder
end
def without_hidden_notes?
return false unless Feature.enabled?(:hidden_notes)
return false if @current_user&.can_admin_all_resources?
true

View File

@ -171,11 +171,7 @@ class Note < ApplicationRecord
scope :with_metadata, -> { includes(:system_note_metadata) }
scope :without_hidden, -> {
if Feature.enabled?(:hidden_notes)
where_not_exists(Users::BannedUser.where('notes.author_id = banned_users.user_id'))
else
all
end
where_not_exists(Users::BannedUser.where('notes.author_id = banned_users.user_id'))
}
scope :for_note_or_capitalized_note, ->(text) { where(note: [text, text.capitalize]) }
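A small, hypothetical sketch of how the finder change above and this scope fit together (the helper below is illustrative, not code from this commit): the scope now unconditionally hides notes from banned authors, and callers skip it for administrators.

```ruby
# Hypothetical helper; assumes a user object that responds to
# can_admin_all_resources?, as in NotesFinder above.
def visible_notes(notes, current_user)
  return notes if current_user&.can_admin_all_resources?

  notes.without_hidden # always filters notes from banned authors now
end
```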

View File

@ -77,6 +77,12 @@ class BasePolicy < DeclarativePolicy::Base
with_options scope: :global, score: 0
condition(:can_create_organization) { Gitlab::CurrentSettings.can_create_organization }
desc "Only admins can destroy projects"
condition(:owner_cannot_destroy_project, scope: :global) do
::Gitlab::CurrentSettings.current_application_settings
.default_project_deletion_protection
end
desc "The application is restricted from public visibility"
condition(:restricted_public_level, scope: :global) do
Gitlab::CurrentSettings.current_application_settings.restricted_visibility_levels.include?(Gitlab::VisibilityLevel::PUBLIC)
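A hypothetical sketch of how a policy could consume the new global condition; the class and rule below are illustrative only and not part of this diff:

```ruby
# Illustrative only: prevent non-admins from destroying projects when the
# instance-level deletion protection setting is enabled.
class SomeProjectPolicy < BasePolicy
  rule { owner_cannot_destroy_project & ~admin }.prevent :remove_project
end
```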

View File

@ -74,12 +74,12 @@ module ClickHouse
def process_batch(context)
Enumerator.new do |yielder|
has_more_data = false
batching_scope.each_batch(of: BATCH_SIZE) do |relation|
records = relation.select(projections).to_a
batching_scope.each_batch(of: BATCH_SIZE, column: primary_key) do |relation|
records = relation.select(*projections, "#{primary_key} AS id_for_cursor").to_a
has_more_data = records.size == BATCH_SIZE
records.each do |row|
yielder << transform_row(row)
context.last_processed_id = row.id
context.last_processed_id = row.id_for_cursor
break if context.record_limit_reached?
end
@ -112,6 +112,12 @@ module ClickHouse
raise NotImplementedError, "Subclasses must implement `projections`"
end
# UInt type primary key used for cursor management,
# override if necessary.
def primary_key
:id
end
def csv_mapping
raise NotImplementedError, "Subclasses must implement `csv_mapping`"
end
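A hedged sketch of what a concrete sync class might look like with a non-default cursor column; the base class, model, and column names are assumptions made for illustration:

```ruby
# Hypothetical subclass; only the primary_key override reflects this change.
class SomeEventsSyncStrategy < ClickHouse::BaseSyncStrategy # base name assumed
  def batching_scope
    SomeEvent.where('created_at > ?', 1.day.ago)
  end

  def projections
    %w[name payload created_at]
  end

  def csv_mapping
    { name: :name, payload: :payload, created_at: :created_at }
  end

  # Cursor over a UInt column other than the default :id
  def primary_key
    :event_id
  end
end
```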

View File

@ -1,3 +1,4 @@
- return unless can?(current_user, :remove_group, group)
- remove_form_id = local_assigns.fetch(:remove_form_id, nil)
- if group.adjourned_deletion?

View File

@ -1,8 +0,0 @@
---
name: hidden_notes
introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/112973
rollout_issue_url: https://gitlab.com/gitlab-org/gitlab/-/issues/405148
milestone: '15.11'
type: development
group: group::authorization
default_enabled: false

View File

@ -0,0 +1,10 @@
---
name: exact_matches_first_project_transfer
description: Prioritize exact matches when searching for groups in project transfer
feature_issue_url: https://gitlab.com/gitlab-org/gitlab/-/issues/536745
introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/188711
rollout_issue_url: https://gitlab.com/gitlab-org/gitlab/-/issues/536751
milestone: '18.0'
group: group::project management
type: gitlab_com_derisk
default_enabled: false

View File

@ -5,4 +5,4 @@ rollout_issue_url:
milestone: '13.5'
type: ops
group: group::database
default_enabled: false
default_enabled: true

View File

@ -0,0 +1,18 @@
# frozen_string_literal: true
class SyncDropArtifactsPartitionIdJobIdIndex < Gitlab::Database::Migration[2.2]
include Gitlab::Database::PartitioningMigrationHelpers
milestone '18.0'
disable_ddl_transaction!
INDEX_NAME = :p_ci_job_artifacts_partition_id_job_id_idx
def up
remove_concurrent_partitioned_index_by_name :p_ci_job_artifacts, INDEX_NAME
end
def down
add_concurrent_partitioned_index :p_ci_job_artifacts, [:partition_id, :job_id], name: INDEX_NAME
end
end

View File

@ -0,0 +1 @@
3edf57d181e9c073e472b33eac4b599afd3bddca2d99130e08fb58457f187cb2

View File

@ -34490,10 +34490,6 @@ CREATE INDEX p_ci_job_artifacts_project_id_file_type_id_idx ON ONLY p_ci_job_art
CREATE INDEX index_ci_job_artifacts_on_id_project_id_and_file_type ON ci_job_artifacts USING btree (project_id, file_type, id);
CREATE INDEX p_ci_job_artifacts_partition_id_job_id_idx ON ONLY p_ci_job_artifacts USING btree (partition_id, job_id);
CREATE INDEX index_ci_job_artifacts_on_partition_id_job_id ON ci_job_artifacts USING btree (partition_id, job_id);
CREATE INDEX p_ci_job_artifacts_project_id_id_idx1 ON ONLY p_ci_job_artifacts USING btree (project_id, id);
CREATE INDEX index_ci_job_artifacts_on_project_id_and_id ON ci_job_artifacts USING btree (project_id, id);
@ -40986,8 +40982,6 @@ ALTER INDEX p_ci_job_artifacts_project_id_created_at_id_idx ATTACH PARTITION ind
ALTER INDEX p_ci_job_artifacts_project_id_file_type_id_idx ATTACH PARTITION index_ci_job_artifacts_on_id_project_id_and_file_type;
ALTER INDEX p_ci_job_artifacts_partition_id_job_id_idx ATTACH PARTITION index_ci_job_artifacts_on_partition_id_job_id;
ALTER INDEX p_ci_job_artifacts_project_id_id_idx1 ATTACH PARTITION index_ci_job_artifacts_on_project_id_and_id;
ALTER INDEX p_ci_job_artifacts_project_id_idx1 ATTACH PARTITION index_ci_job_artifacts_on_project_id_for_security_reports;

View File

@ -257,7 +257,7 @@ echo $AIGW_CUSTOM_MODELS__ENABLED # must be true
```
If the environment variables are not set up correctly, set them by
[creating a container](../../install/install_ai_gateway.md#find-the-ai-gateway-release).
[creating a container](../../install/install_ai_gateway.md#find-the-ai-gateway-image).
## Check if the model is reachable from AI gateway
@ -295,7 +295,7 @@ If not successful, verify your network configurations.
## The image's platform does not match the host
When [finding the AI gateway release](../../install/install_ai_gateway.md#find-the-ai-gateway-release),
When [finding the AI gateway release](../../install/install_ai_gateway.md#find-the-ai-gateway-image),
you might get an error that states `The requested image's platform (linux/amd64) does not match the detected host`.
To work around this error, add `--platform linux/amd64` to the `docker run` command:

View File

@ -387,16 +387,17 @@ Starting with GitLab 17.1, migrations are executed in an
## Rebuild database indexes
{{< details >}}
{{< history >}}
- Status: Experiment
- [Introduced](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/42705) in GitLab 13.5 [with a flag](../../administration/feature_flags.md) named `database_reindexing`. Disabled by default.
- [Enabled on GitLab.com](https://gitlab.com/groups/gitlab-org/-/epics/3989) in GitLab 13.9.
- [Enabled on GitLab Self-Managed and GitLab Dedicated](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/188548) in GitLab 18.0.
{{< /details >}}
{{< /history >}}
{{< alert type="warning" >}}
This feature is experimental, and isn't enabled by default. Use caution when
running in a production environment, and run during off-peak times.
Use caution when running in a production environment, and run during off-peak times.
{{< /alert >}}
@ -410,7 +411,6 @@ Prerequisites:
- This feature requires PostgreSQL 12 or later.
- These index types are **not supported**: expression indexes and indexes used for constraint exclusion.
- Not enabled by default. A feature flag must be set for this task to work: `Feature.enable("database_reindexing")`
### Run reindexing

View File

@ -60,7 +60,7 @@ curl "https://gitlab.com/api/graphql" --header "Authorization: Bearer $GRAPHQL_T
```
To nest strings in the query string,
wrap the data in single quotes or escape the strings with <code>&#92;&#92;</code>:
wrap the data in single quotes or escape the strings with ` \\ `:
```shell
curl "https://gitlab.com/api/graphql" --header "Authorization: Bearer $GRAPHQL_TOKEN" \

View File

@ -26431,6 +26431,7 @@ Relationship between an epic and an issue.
| <a id="epicissueseverity"></a>`severity` | [`IssuableSeverity`](#issuableseverity) | Severity level of the incident. |
| <a id="epicissuesladueat"></a>`slaDueAt` | [`Time`](#time) | Timestamp of when the issue SLA expires. |
| <a id="epicissuestate"></a>`state` | [`IssueState!`](#issuestate) | State of the issue. |
| <a id="epicissuestatus"></a>`status` {{< icon name="warning-solid" >}} | [`WorkItemStatus`](#workitemstatus) | **Introduced** in GitLab 18.0. **Status**: Experiment. Status of the issue. |
| <a id="epicissuestatuspagepublishedincident"></a>`statusPagePublishedIncident` | [`Boolean`](#boolean) | Indicates whether an issue is published to the status page. |
| <a id="epicissuesubscribed"></a>`subscribed` | [`Boolean!`](#boolean) | Indicates the currently logged in user is subscribed to the issue. |
| <a id="epicissuetaskcompletionstatus"></a>`taskCompletionStatus` | [`TaskCompletionStatus!`](#taskcompletionstatus) | Task completion status of the issue. |
@ -29829,6 +29830,7 @@ Describes an issuable resource link for incident issues.
| <a id="issueseverity"></a>`severity` | [`IssuableSeverity`](#issuableseverity) | Severity level of the incident. |
| <a id="issuesladueat"></a>`slaDueAt` | [`Time`](#time) | Timestamp of when the issue SLA expires. |
| <a id="issuestate"></a>`state` | [`IssueState!`](#issuestate) | State of the issue. |
| <a id="issuestatus"></a>`status` {{< icon name="warning-solid" >}} | [`WorkItemStatus`](#workitemstatus) | **Introduced** in GitLab 18.0. **Status**: Experiment. Status of the issue. |
| <a id="issuestatuspagepublishedincident"></a>`statusPagePublishedIncident` | [`Boolean`](#boolean) | Indicates whether an issue is published to the status page. |
| <a id="issuesubscribed"></a>`subscribed` | [`Boolean!`](#boolean) | Indicates the currently logged in user is subscribed to the issue. |
| <a id="issuetaskcompletionstatus"></a>`taskCompletionStatus` | [`TaskCompletionStatus!`](#taskcompletionstatus) | Task completion status of the issue. |

View File

@ -314,7 +314,7 @@ Use one of the following methods to determine the value for `DOCKER_AUTH_CONFIG`
{{< alert type="note" >}}
If your username includes special characters like `@`, you must escape them with a backslash (<code>&#92;</code>) to prevent authentication problems.
If your username includes special characters like `@`, you must escape them with a backslash (` \ `) to prevent authentication problems.
{{< /alert >}}

View File

@ -233,7 +233,7 @@ with the CI/CD configuration in that file.
The artifact path is parsed by GitLab, not the runner, so the path must match the
syntax for the OS running GitLab. If GitLab is running on Linux but using a Windows
runner for testing, the path separator for the trigger job is `/`. Other CI/CD
configuration for jobs that use the Windows runner, like scripts, use <code>&#92;</code>.
configuration for jobs that use the Windows runner, like scripts, use ` \ `.
You cannot use CI/CD variables in an `include` section in a dynamic child pipeline's configuration.
[Issue 378717](https://gitlab.com/gitlab-org/gitlab/-/issues/378717) proposes fixing

View File

@ -4153,7 +4153,7 @@ job:
You can use CI/CD variables to define the description, but some shells
[use different syntax](../variables/_index.md#use-cicd-variables-in-job-scripts)
to reference variables. Similarly, some shells might require special characters
to be escaped. For example, backticks (`` ` ``) might need to be escaped with a backslash (<code>&#92;</code>).
to be escaped. For example, backticks (`` ` ``) might need to be escaped with a backslash (` \ `).
#### `release:ref`

View File

@ -567,53 +567,50 @@ flow of how we construct a Chat prompt:
from original GraphQL request and initializes a new instance of
`Gitlab::Llm::Completions::Chat` and calls `execute` on it
([code](https://gitlab.com/gitlab-org/gitlab/-/blob/55b8eb6ff869e61500c839074f080979cc60f9de/ee/lib/gitlab/llm/completions_factory.rb#L89))
1. `Gitlab::Llm::Completions::Chat#execute` calls `Gitlab::Llm::Chain::Agents::SingleActionExecutor`.
([code](https://gitlab.com/gitlab-org/gitlab/-/blob/d539f64ce6c5bed72ab65294da3bcebdc43f68c6/ee/lib/gitlab/llm/completions/chat.rb#L128-134))
1. `Gitlab::Llm::Chain::Agents::SingleActionExecutor#execute` calls
`execute_streamed_request`, which calls `request`, a method defined in the
`AiDependent` concern
([code](https://gitlab.com/gitlab-org/gitlab/-/blob/7ac19f75bd0ba4db5cfe7030e56c3672e2ccdc88/ee/lib/gitlab/llm/chain/concerns/ai_dependent.rb#L14))
1. The `SingleActionExecutor#prompt_options` method assembles all prompt parameters for the AI gateway request
([code](https://gitlab.com/gitlab-org/gitlab/-/blob/971d07aa37d9f300b108ed66304505f2d7022841/ee/lib/gitlab/llm/chain/agents/single_action_executor.rb#L120-120))
1. `ai_request` is defined in `Llm::Completions::Chat` and evaluates to
`AiGateway`([code](https://gitlab.com/gitlab-org/gitlab/-/blob/971d07aa37d9f300b108ed66304505f2d7022841/ee/lib/gitlab/llm/completions/chat.rb#L51-51))
1. `ai_request.request` routes to `Llm::Chain::Requests::AiGateway#request`,
which calls `ai_client.stream`
([code](https://gitlab.com/gitlab-org/gitlab/-/blob/e88256b1acc0d70ffc643efab99cad9190529312/ee/lib/gitlab/llm/chain/requests/ai_gateway.rb#L20-27))
1. `ai_client.stream` routes to `Gitlab::Llm::AiGateway::Client#stream`, which
makes an API request to the AI gateway `/v2/chat/agent` endpoint
([code](https://gitlab.com/gitlab-org/gitlab/-/blob/e88256b1acc0d70ffc643efab99cad9190529312/ee/lib/gitlab/llm/ai_gateway/client.rb#L64-82))
1. AI gateway receives the request
([code](https://gitlab.com/gitlab-org/modelops/applied-ml/code-suggestions/ai-assist/-/blob/e6f55d143ecb5409e8ca4fefc042e590e5a95158/ai_gateway/api/v2/chat/agent.py#L43-43))
1. AI gateway gets the list of tools available for user
([code](https://gitlab.com/gitlab-org/modelops/applied-ml/code-suggestions/ai-assist/-/blob/e6f55d143ecb5409e8ca4fefc042e590e5a95158/ai_gateway/chat/toolset.py#L43-43))
1. AI GW gets definitions for each tool
([code](https://gitlab.com/gitlab-org/modelops/applied-ml/code-suggestions/ai-assist/-/blob/e6f55d143ecb5409e8ca4fefc042e590e5a95158/ai_gateway/chat/tools/gitlab.py#L11-11))
1. And they are inserted into prompt template alongside other prompt parameters that come from Rails
([code](https://gitlab.com/gitlab-org/modelops/applied-ml/code-suggestions/ai-assist/-/blob/e6f55d143ecb5409e8ca4fefc042e590e5a95158/ai_gateway/agents/definitions/chat/react/base.yml#L14-14))
1. AI gateway makes request to LLM and return response to Rails.
([code](https://gitlab.com/gitlab-org/modelops/applied-ml/code-suggestions/ai-assist/-/blob/e6f55d143ecb5409e8ca4fefc042e590e5a95158/ai_gateway/api/v2/chat/agent.py#L103-103))
1. We've now made our first request to the AI gateway. If the LLM says that the
answer to the first request is a final answer, we
[parse the answer](https://gitlab.com/gitlab-org/gitlab/-/blob/971d07aa37d9f300b108ed66304505f2d7022841/ee/lib/gitlab/llm/chain/parsers/single_action_parser.rb#L41-42)
and stream it ([code](https://gitlab.com/gitlab-org/gitlab/-/blob/971d07aa37d9f300b108ed66304505f2d7022841/ee/lib/gitlab/llm/chain/concerns/ai_dependent.rb#L25-25))
and return it ([code](https://gitlab.com/gitlab-org/gitlab/-/blob/971d07aa37d9f300b108ed66304505f2d7022841/ee/lib/gitlab/llm/chain/agents/single_action_executor.rb#L46-46))
1. If the first answer is not final, the "thoughts" and "picked tools"
from the first LLM request are parsed and then the relevant tool class is
called.
([code](https://gitlab.com/gitlab-org/gitlab/-/blob/971d07aa37d9f300b108ed66304505f2d7022841/ee/lib/gitlab/llm/chain/agents/single_action_executor.rb#L54-54))
1. The tool executor classes also include `Concerns::AiDependent` and use the
included `request` method similar to how the chat executor does
([example](https://gitlab.com/gitlab-org/gitlab/-/blob/70fca6dbec522cb2218c5dcee66caa908c84271d/ee/lib/gitlab/llm/chain/tools/identifier.rb#L8)).
The `request` method uses the same `ai_request` instance
that was injected into the `context` in `Llm::Completions::Chat`. For Chat,
this is `Gitlab::Llm::Chain::Requests::AiGateway`. So, essentially the same
request to the AI gateway is put together but with a different
`prompt` / `PROMPT_TEMPLATE` than for the first request
([Example tool prompt template](https://gitlab.com/gitlab-org/gitlab/-/blob/70fca6dbec522cb2218c5dcee66caa908c84271d/ee/lib/gitlab/llm/chain/tools/issue_identifier/executor.rb#L39-104))
1. If the tool answer is not final, the response is added to `agent_scratchpad`
and the loop in `SingleActionExecutor` starts again, adding the additional
context to the request. It loops to up to 10 times until a final answer is reached.
1. `Gitlab::Llm::Completions::Chat#execute` calls `Gitlab::Duo::Chat::ReactExecutor`.
([code](https://gitlab.com/gitlab-org/gitlab/-/blob/30817374f2feecdaedbd3a0efaad93feaed5e0a0/ee/lib/gitlab/llm/completions/chat.rb#L122-L130))
1. `Gitlab::Duo::Chat::ReactExecutor#execute` calls `#step_forward` which calls `Gitlab::Duo::Chat::StepExecutor#step`
([code](https://gitlab.com/gitlab-org/gitlab/-/blob/30817374f2feecdaedbd3a0efaad93feaed5e0a0/ee/lib/gitlab/duo/chat/react_executor.rb#L235)).
1. `Gitlab::Duo::Chat::StepExecutor#step` calls `Gitlab::Duo::Chat::StepExecutor#perform_agent_request`, which sends a request to the AI Gateway `/v2/chat/agent/` endpoint
([code](https://gitlab.com/gitlab-org/gitlab/-/blob/30817374f2feecdaedbd3a0efaad93feaed5e0a0/ee/lib/gitlab/duo/chat/step_executor.rb#L69)).
1. The AI Gateway `/v2/chat/agent` endpoint receives the request on the `api.v2.agent.chat.agent.chat` function
([code](https://gitlab.com/gitlab-org/modelops/applied-ml/code-suggestions/ai-assist/-/blob/989ead63fae493efab255180a51786b69a403b49/ai_gateway/api/v2/chat/agent.py#L133))
1. `api.v2.agent.chat.agent.chat` creates the `GLAgentRemoteExecutor` through the `gl_agent_remote_executor_factory` ([code](https://gitlab.com/gitlab-org/modelops/applied-ml/code-suggestions/ai-assist/-/blob/989ead63fae493efab255180a51786b69a403b49/ai_gateway/api/v2/chat/agent.py#L166)).
Upon creation of the `GLAgentRemoteExecutor`, the following parameters are passed:
- `tools_registry` - the registry of all available tools; this is passed through the factory ([code](https://gitlab.com/gitlab-org/modelops/applied-ml/code-suggestions/ai-assist/-/blob/989ead63fae493efab255180a51786b69a403b49/ai_gateway/chat/container.py#L35))
- `agent` - a `ReActAgent` object that wraps the prompt information, including the chosen LLM model, prompt template, and so on
1. `api.v2.agent.chat.agent.chat` calls the `GLAgentRemoteExecutor.on_behalf`, which gets the user tools early to raise an exception as soon as possible if an error occurs ([code](https://gitlab.com/gitlab-org/modelops/applied-ml/code-suggestions/ai-assist/-/blob/989ead63fae493efab255180a51786b69a403b49/ai_gateway/chat/executor.py#L56)).
1. `api.v2.agent.chat.agent.chat` calls the `GLAgentRemoteExecutor.stream` ([code](https://gitlab.com/gitlab-org/modelops/applied-ml/code-suggestions/ai-assist/-/blob/989ead63fae493efab255180a51786b69a403b49/ai_gateway/chat/executor.py#L81)).
1. `GLAgentRemoteExecutor.stream` calls `astream` on `agent` (an instance of `ReActAgent`) with inputs such as the messages and the list of available tools ([code](https://gitlab.com/gitlab-org/modelops/applied-ml/code-suggestions/ai-assist/-/blob/989ead63fae493efab255180a51786b69a403b49/ai_gateway/chat/executor.py#L92)).
1. The `ReActAgent` builds the prompts, with the available tools inserted into the system prompt template
([code](https://gitlab.com/gitlab-org/modelops/applied-ml/code-suggestions/ai-assist/-/blob/989ead63fae493efab255180a51786b69a403b49/ai_gateway/prompts/definitions/chat/react/system/1.0.0.jinja)).
1. `ReActAgent.astream` sends a call to the LLM model ([code](https://gitlab.com/gitlab-org/modelops/applied-ml/code-suggestions/ai-assist/-/blob/989ead63fae493efab255180a51786b69a403b49/ai_gateway/chat/agents/react.py#L216))
1. The LLM response is returned to Rails
(code path: [`ReActAgent.astream`](https://gitlab.com/gitlab-org/modelops/applied-ml/code-suggestions/ai-assist/-/blob/989ead63fae493efab255180a51786b69a403b49/ai_gateway/chat/agents/react.py#L209)
-> [`GLAgentRemoteExecutor.stream`](https://gitlab.com/gitlab-org/modelops/applied-ml/code-suggestions/ai-assist/-/blob/989ead63fae493efab255180a51786b69a403b49/ai_gateway/chat/executor.py#L81)
-> [`api.v2.agent.chat.agent.chat`](https://gitlab.com/gitlab-org/modelops/applied-ml/code-suggestions/ai-assist/-/blob/989ead63fae493efab255180a51786b69a403b49/ai_gateway/api/v2/chat/agent.py#L133)
-> Rails)
1. We've now made our first request to the AI gateway. If the LLM says that the answer to the first request is final,
Rails [parses the answer](https://gitlab.com/gitlab-org/gitlab/-/blob/30817374f2feecdaedbd3a0efaad93feaed5e0a0/ee/lib/gitlab/duo/chat/react_executor.rb#L56) and [returns it](https://gitlab.com/gitlab-org/gitlab/-/blob/30817374f2feecdaedbd3a0efaad93feaed5e0a0/ee/lib/gitlab/duo/chat/react_executor.rb#L63) for further response handling by [`Gitlab::Llm::Completions::Chat`](https://gitlab.com/gitlab-org/gitlab/-/blob/30817374f2feecdaedbd3a0efaad93feaed5e0a0/ee/lib/gitlab/llm/completions/chat.rb#L66).
1. If the answer is not final, the "thoughts" and "picked tools" from the first LLM request are parsed and then the relevant tool class is called.
([code](https://gitlab.com/gitlab-org/gitlab/-/blob/30817374f2feecdaedbd3a0efaad93feaed5e0a0/ee/lib/gitlab/duo/chat/react_executor.rb#L207)
| [example tool class](https://gitlab.com/gitlab-org/gitlab/-/blob/971d07aa37d9f300b108ed66304505f2d7022841/ee/lib/gitlab/llm/chain/tools/identifier.rb))
1. The tool executor classes include `Concerns::AiDependent` and use its `request` method.
([code](https://gitlab.com/gitlab-org/gitlab/-/blob/30817374f2feecdaedbd3a0efaad93feaed5e0a0/ee/lib/gitlab/llm/chain/concerns/ai_dependent.rb#L14))
1. The `request` method uses the `ai_request` instance
that was injected into the `context` in `Llm::Completions::Chat`. For Chat,
this is `Gitlab::Llm::Chain::Requests::AiGateway`. ([code](https://gitlab.com/gitlab-org/gitlab/-/blob/971d07aa37d9f300b108ed66304505f2d7022841/ee/lib/gitlab/llm/completions/chat.rb#L42)).
1. The tool indicates that `use_ai_gateway_agent_prompt=true` ([code](https://gitlab.com/gitlab-org/gitlab/-/blob/30817374f2feecdaedbd3a0efaad93feaed5e0a0/ee/lib/gitlab/llm/chain/tools/issue_reader/executor.rb#L121)).
This tells the `ai_request` to send the prompt to the `/v1/prompts/chat` endpoint ([code](https://gitlab.com/gitlab-org/gitlab/-/blob/30817374f2feecdaedbd3a0efaad93feaed5e0a0/ee/lib/gitlab/llm/chain/requests/ai_gateway.rb#L87)).
1. AI Gateway `/v1/prompts/chat` endpoint receives the request on `api.v1.prompts.invoke`
([code](https://gitlab.com/gitlab-org/modelops/applied-ml/code-suggestions/ai-assist/-/blob/989ead63fae493efab255180a51786b69a403b49/ai_gateway/api/v1/prompts/invoke.py#L41)).
1. `api.v1.prompts.invoke` gets the correct tool prompt from the tool prompt registry ([code](https://gitlab.com/gitlab-org/modelops/applied-ml/code-suggestions/ai-assist/-/blob/989ead63fae493efab255180a51786b69a403b49/ai_gateway/api/v1/prompts/invoke.py#L49)).
1. The prompt is called either as a [stream](https://gitlab.com/gitlab-org/modelops/applied-ml/code-suggestions/ai-assist/-/blob/989ead63fae493efab255180a51786b69a403b49/ai_gateway/api/v1/prompts/invoke.py#L86) or as a [non-streamed invocation](https://gitlab.com/gitlab-org/modelops/applied-ml/code-suggestions/ai-assist/-/blob/989ead63fae493efab255180a51786b69a403b49/ai_gateway/api/v1/prompts/invoke.py#L96).
1. If the tool answer is not final, the response is added to `agent_scratchpad` and the loop in `Gitlab::Duo::Chat::ReactExecutor` starts again, adding the additional context to the request. It loops up to 10 times until a final answer is reached, as sketched below. ([code](https://gitlab.com/gitlab-org/gitlab/-/blob/30817374f2feecdaedbd3a0efaad93feaed5e0a0/ee/lib/gitlab/duo/chat/react_executor.rb#L44))
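The overall loop can be summarized with a rough, illustrative sketch; every name here is hypothetical, and the real logic lives in `Gitlab::Duo::Chat::ReactExecutor` and the AI gateway:

```ruby
MAX_ITERATIONS = 10

# Simplified ReAct loop: ask the agent, run the picked tool, feed the
# observation back into the scratchpad, and stop at a final answer.
def react_loop(question, tools)
  scratchpad = []

  MAX_ITERATIONS.times do
    step = request_agent(question, tools, scratchpad) # POST /v2/chat/agent
    return step.answer if step.final?

    tool = tools.fetch(step.picked_tool)
    observation = tool.execute(step.thought) # may call /v1/prompts/chat
    scratchpad << { thought: step.thought, observation: observation }
  end

  raise 'No final answer within the iteration limit'
end
```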
## Interpreting GitLab Duo Chat error codes

View File

@ -241,7 +241,7 @@ For information about writing attribute descriptions, see the [GraphQL API descr
- Declare URLs with the `--url` parameter, and wrap the URL in double quotes (`"`).
- Prefer examples that use a personal access token, and don't pass the
username and password.
- For legibility, use the <code>&#92;</code> character and indentation to break long single-line
- For legibility, use the ` \ ` character and indentation to break long single-line
commands apart into multiple lines.
| Methods | Description |

View File

@ -161,7 +161,7 @@ In most cases the anchors `\A` for beginning of text and `\z` for end of text sh
### Escape sequences in Go
When a character in a string literal or regular expression literal is preceded by a backslash, it is interpreted as part of an escape sequence. For example, the escape sequence `\n` in a string literal corresponds to a single `newline` character, and not the <code>&#92;</code> and `n` characters.
When a character in a string literal or regular expression literal is preceded by a backslash, it is interpreted as part of an escape sequence. For example, the escape sequence `\n` in a string literal corresponds to a single `newline` character, and not the ` \ ` and `n` characters.
There are two Go escape sequences that could produce surprising results. First, `regexp.Compile("\a")` matches the bell character, whereas `regexp.Compile("\\A")` matches the start of text and `regexp.Compile("\\a")` is a Vim (but not Go) regular expression matching any alphabetic character. Second, `regexp.Compile("\b")` matches a backspace, whereas `regexp.Compile("\\b")` matches the start of a word. Confusing one for the other could lead to a regular expression passing or failing much more often than expected, with potential security consequences.

View File

@ -15,7 +15,7 @@ title: Syntax options
| `+` | And | [`display +banner`](https://gitlab.com/search?group_id=9970&project_id=278964&repository_ref=&scope=blobs&search=display+%2Bbanner&snippets=) |
| `-` | Exclude | [`display -banner`](https://gitlab.com/search?group_id=9970&project_id=278964&scope=blobs&search=display+-banner) |
| `*` | Partial | [`bug error 50*`](https://gitlab.com/search?group_id=9970&project_id=278964&repository_ref=&scope=blobs&search=bug+error+50%2A&snippets=) |
| <code>&#92;</code> | Escape | [`\*md`](https://gitlab.com/search?snippets=&scope=blobs&repository_ref=&search=%5C*md&group_id=9970&project_id=278964) |
| ` \ ` | Escape | [`\*md`](https://gitlab.com/search?snippets=&scope=blobs&repository_ref=&search=%5C*md&group_id=9970&project_id=278964) |
| `#` | Issue ID | [`#23456`](https://gitlab.com/search?snippets=&scope=issues&repository_ref=&search=%2323456&group_id=9970&project_id=278964) |
| `!` | Merge request ID | [`!23456`](https://gitlab.com/search?snippets=&scope=merge_requests&repository_ref=&search=%2123456&group_id=9970&project_id=278964) |

View File

@ -6,41 +6,51 @@ description: Set up your self-hosted model GitLab AI gateway
title: Install the GitLab AI gateway
---
The [AI gateway](https://handbook.gitlab.com/handbook/engineering/architecture/design-documents/ai_gateway/) is a standalone service that gives access to AI-native GitLab Duo features.
The [AI gateway](https://handbook.gitlab.com/handbook/engineering/architecture/design-documents/ai_gateway/)
is a standalone service that gives access to AI-native GitLab Duo features.
## Install using Docker
Prerequisites:
- Install a Docker container engine, such as [Docker](https://docs.docker.com/engine/install/#server).
- Use a valid hostname accessible within your network. Do not use `localhost`.
## Install by using Docker
The GitLab AI gateway Docker image contains all necessary code and dependencies
in a single container.
The Docker image for the AI gateway is around 340 MB (compressed) for the `linux/amd64` architecture and requires a minimum of 512 MB of RAM to operate. A GPU is not needed for the GitLab AI gateway. To ensure better performance, especially under heavy usage, consider allocating more disk space, memory, and resources than the minimum requirements. Higher RAM and disk capacity can enhance the AI gateway's efficiency during peak loads.
Prerequisites:
### Find the AI Gateway Release
- Install a Docker container engine, like [Docker](https://docs.docker.com/engine/install/#server).
- Use a valid hostname that is accessible in your network. Do not use `localhost`.
- Ensure you have approximately 340 MB of disk space for the compressed `linux/amd64` image and
a minimum of 512 MB of RAM.
Find the GitLab official Docker image at:
To ensure better performance, especially under heavy usage, consider allocating
more disk space, memory, and resources than the minimum requirements.
Higher RAM and disk capacity can enhance the AI gateway's efficiency during peak loads.
- AI Gateway Docker image on Container Registry:
A GPU is not needed for the GitLab AI gateway.
### Find the AI gateway image
The GitLab official Docker image is available:
- In the container registry:
- [Stable](https://gitlab.com/gitlab-org/modelops/applied-ml/code-suggestions/ai-assist/container_registry/3809284)
- [Nightly](https://gitlab.com/gitlab-org/modelops/applied-ml/code-suggestions/ai-assist/container_registry/8086262)
- AI Gateway Docker image on DockerHub:
- On DockerHub:
- [Stable](https://hub.docker.com/r/gitlab/model-gateway/tags)
- [Nightly](https://hub.docker.com/r/gitlab/model-gateway-self-hosted/tags)
- [Release process for self-hosted AI Gateway](https://gitlab.com/gitlab-org/modelops/applied-ml/code-suggestions/ai-assist/-/blob/main/docs/release.md).
[View the release process for the self-hosted AI gateway](https://gitlab.com/gitlab-org/modelops/applied-ml/code-suggestions/ai-assist/-/blob/main/docs/release.md).
Use the image tag that corresponds to your GitLab version. For example, if your GitLab version is `v17.9.0`, use the `self-hosted-17.9.0-ee` tag. It is critical to ensure that the image version matches your GitLab version to avoid compatibility issues. Nightly builds are available to have access to newer features, but backwards compatibility is not guaranteed.
Use the image tag that corresponds to your GitLab version.
For example, if your GitLab version is `v17.9.0`, use the `self-hosted-17.9.0-ee` tag.
Ensure that the image version matches your GitLab version to avoid compatibility issues.
Newer features are available from nightly builds, but backwards compatibility is not guaranteed.
{{< alert type="note" >}}
Using the `:latest` tag is **not recommended** as it can cause incompatibility if your GitLab version lags behind or jumps ahead of the AI Gateway release. Always use an explicit version tag.
Using the `:latest` tag is **not recommended** because it can cause incompatibility if your GitLab version is behind or ahead of the AI gateway release. Always use an explicit version tag.
{{< /alert >}}
### Start a Container from the Image
### Start a container from the image
1. Run the following command, replacing `<your_gitlab_instance>` and `<your_gitlab_domain>` with your GitLab instance's URL and domain:
@ -64,14 +74,14 @@ Using the `:latest` tag is **not recommended** as it can cause incompatibility i
If you encounter issues loading the PEM file, resulting in errors like `JWKError`, you may need to resolve an SSL certificate error.
To fix this, set the appropriate certificate bundle path in the Docker container by using the following environment variables:
To fix this issue, set the appropriate certificate bundle path in the Docker container by using the following environment variables:
- `SSL_CERT_FILE=/path/to/ca-bundle.pem`
- `REQUESTS_CA_BUNDLE=/path/to/ca-bundle.pem`
Replace `/path/to/ca-bundle.pem` with the actual path to your certificate bundle.
## Docker-NGINX-SSL Setup
## Set up Docker with NGINX and SSL
{{< alert type="note" >}}
@ -85,16 +95,16 @@ You can set up SSL for an AI gateway instance by using Docker,
NGINX as a reverse proxy, and Let's Encrypt for SSL certificates.
NGINX manages the secure connection with external clients, decrypting incoming HTTPS requests before
passing them to the AI Gateway.
passing them to the AI gateway.
Prerequisites:
- Docker and Docker Compose installed
- Registered and configured domain name
### Step 1: Create Configuration Files
### Create configuration files
Create the following files in your working directory:
Start by creating the following files in your working directory.
1. `nginx.conf`:
@ -127,7 +137,7 @@ Create the following files in your working directory:
listen 80;
server_name _;
# Forward all requests to the AI Gateway
# Forward all requests to the AI gateway
location / {
proxy_pass http://gitlab-ai-gateway:5052;
proxy_read_timeout 300s;
@ -163,7 +173,7 @@ Create the following files in your working directory:
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
# Forward all requests to the AI Gateway
# Forward all requests to the AI gateway
location / {
proxy_pass http://gitlab-ai-gateway:5052;
proxy_read_timeout 300s;
@ -173,55 +183,58 @@ Create the following files in your working directory:
}
```
### Step 2: SSL Certificate setup using Let's Encrypt
### Set up SSL certificate by using Let's Encrypt
- For Docker-based NGINX servers, Certbot provides an automated way to implement Let's Encrypt certificates -
see the [guide here](https://phoenixnap.com/kb/letsencrypt-docker).
- Alternatively, you can use [Certbot's manual installation](https://eff-certbot.readthedocs.io/en/stable/using.html#manual)
process if you prefer that approach.
Now set up an SSL certificate:
### Step 3: Create Docker-compose file
- For Docker-based NGINX servers, Certbot
[provides an automated way to implement Let's Encrypt certificates](https://phoenixnap.com/kb/letsencrypt-docker).
- Alternatively, you can use the [Certbot manual installation](https://eff-certbot.readthedocs.io/en/stable/using.html#manual).
1. `docker-compose.yaml`:
### Create Docker-compose file
```yaml
version: '3.8'
Now create a `docker-compose.yaml` file.
services:
nginx-proxy:
image: nginx:alpine
ports:
- "80:80"
- "443:443"
volumes:
- /path/to/nginx.conf:/etc/nginx/nginx.conf:ro
- /path/to/default.conf:/etc/nginx/conf.d/default.conf:ro
- /path/to/fullchain.pem:/etc/nginx/ssl/server.crt:ro
- /path/to/privkey.pem:/etc/nginx/ssl/server.key:ro
networks:
- proxy-network
depends_on:
- gitlab-ai-gateway
```yaml
version: '3.8'
gitlab-ai-gateway:
image: registry.gitlab.com/gitlab-org/modelops/applied-ml/code-suggestions/ai-assist/model-gateway:<ai-gateway-tag>
expose:
- "5052"
environment:
- AIGW_GITLAB_URL=<your_gitlab_instance>
- AIGW_GITLAB_API_URL=https://<your_gitlab_domain>/api/v4/
networks:
- proxy-network
restart: always
services:
nginx-proxy:
image: nginx:alpine
ports:
- "80:80"
- "443:443"
volumes:
- /path/to/nginx.conf:/etc/nginx/nginx.conf:ro
- /path/to/default.conf:/etc/nginx/conf.d/default.conf:ro
- /path/to/fullchain.pem:/etc/nginx/ssl/server.crt:ro
- /path/to/privkey.pem:/etc/nginx/ssl/server.key:ro
networks:
- proxy-network
depends_on:
- gitlab-ai-gateway
networks:
proxy-network:
driver: bridge
```
gitlab-ai-gateway:
image: registry.gitlab.com/gitlab-org/modelops/applied-ml/code-suggestions/ai-assist/model-gateway:<ai-gateway-tag>
expose:
- "5052"
environment:
- AIGW_GITLAB_URL=<your_gitlab_instance>
- AIGW_GITLAB_API_URL=https://<your_gitlab_domain>/api/v4/
networks:
- proxy-network
restart: always
### Step 4: Deployment and validation
networks:
proxy-network:
driver: bridge
```
1. Start the `nginx` and `AIGW` containers and verify if they're running:
### Deploy and validate
Now deploy and validate the solution.
1. Start the `nginx` and `AIGW` containers and verify that they're running:
```shell
docker-compose up
@ -232,7 +245,7 @@ Create the following files in your working directory:
1. Perform the health check and confirm that the AI gateway is accessible.
## Install using the AI gateway Helm chart
## Install by using Helm chart
Prerequisites:
@ -338,7 +351,7 @@ To upgrade the AI gateway, download the newest Docker image tag.
For information on alternative ways to install the AI gateway, see
[issue 463773](https://gitlab.com/gitlab-org/gitlab/-/issues/463773).
## Health Check and Debugging
## Health check and debugging
To debug issues with your self-hosted Duo installation, run the following command:
@ -365,67 +378,67 @@ These tests are performed for offline environments:
| License | Tests whether your license has the ability to access Code Suggestions feature. |
| System exchange | Tests whether Code Suggestions can be used in your instance. If the system exchange assessment fails, users might not be able to use GitLab Duo features. |
## Does the AIGW need to autoscale?
## Does the AI gateway need to autoscale?
Autoscaling is not mandatory but is recommended for environments with variable workloads, high concurrency requirements, or unpredictable usage patterns. In GitLab's production environment:
- Baseline Setup: A single AI Gateway instance with 2 CPU cores and 8 GB RAM can handle approximately 40 concurrent requests.
- Scaling Guidelines: For larger setups, such as an AWS t3.2xlarge instance (8 vCPUs, 32 GB RAM), the gateway can handle up to 160 concurrent requests, equivalent to 4x the baseline setup.
- Request Throughput: GitLab.coms observed usage suggests that 7 RPS (requests per second) per 1000 active users is a reasonable metric for planning.
- Autoscaling Options: Use Kubernetes Horizontal Pod Autoscalers (HPA) or similar mechanisms to dynamically adjust the number of instances based on metrics like CPU, memory utilization, or request latency thresholds.
- Baseline setup: A single AI gateway instance with 2 CPU cores and 8 GB RAM can handle approximately 40 concurrent requests.
- Scaling guidelines: For larger setups, such as an AWS t3.2xlarge instance (8 vCPUs, 32 GB RAM), the gateway can handle up to 160 concurrent requests, equivalent to 4x the baseline setup.
- Request throughput: GitLab.com's observed usage suggests that 7 RPS (requests per second) per 1000 active users is a reasonable metric for planning.
- Autoscaling options: Use Kubernetes Horizontal Pod Autoscalers (HPA) or similar mechanisms to dynamically adjust the number of instances based on metrics like CPU, memory utilization, or request latency thresholds.
## Configuration Examples by Deployment Size
## Configuration examples by deployment size
- Small Deployment:
- Small deployment:
- Single instance with 2 vCPUs and 8 GB RAM.
- Handles up to 40 concurrent requests.
- Teams or organizations with up to 50 users and predictable workloads.
- Fixed instances may suffice; autoscaling can be disabled for cost efficiency.
- Medium Deployment:
- Medium deployment:
- Single AWS t3.2xlarge instance with 8 vCPUs and 32 GB RAM.
- Handles up to 160 concurrent requests.
- Organizations with 50-200 users and moderate concurrency requirements.
- Implement Kubernetes HPA with thresholds for 50% CPU utilization or request latency above 500ms.
- Large Deployment:
- Large deployment:
- Cluster of multiple AWS t3.2xlarge instances or equivalent.
- Each instance handles 160 concurrent requests, scaling to thousands of users with multiple instances.
- Enterprises with over 200 users and variable, high-concurrency workloads.
- Use HPA to scale pods based on real-time demand, combined with node autoscaling for cluster-wide resource adjustments.
## What specs does the AIGW container have access to, and how does resource allocation affect performance?
## What specs does the AI gateway container have access to, and how does resource allocation affect performance?
The AI Gateway operates effectively under the following resource allocations:
The AI gateway operates effectively under the following resource allocations:
- 2 CPU cores and 8 GB of RAM per container.
- Containers typically utilize about 7.39% CPU and proportionate memory in GitLabs production environment, leaving room for growth or handling burst activity.
- Containers typically utilize about 7.39% CPU and proportionate memory in the GitLab production environment, leaving room for growth or handling burst activity.
## Mitigation Strategies for Resource Contention
## Mitigation strategies for resource contention
- Use Kubernetes resource requests and limits to ensure AIGW containers receive guaranteed CPU and memory allocations. For example:
- Use Kubernetes resource requests and limits to ensure AI gateway containers receive guaranteed CPU and memory allocations. For example:
```yaml
resources:
requests:
memory: "16Gi"
cpu: "4"
limits:
memory: "32Gi"
cpu: "8"
```
```yaml
resources:
requests:
memory: "16Gi"
cpu: "4"
limits:
memory: "32Gi"
cpu: "8"
```
- Implement tools like Prometheus and Grafana to track resource utilization (CPU, memory, latency) and detect bottlenecks early.
- Dedicate nodes or instances exclusively to the AI Gateway to prevent resource competition with other services.
- Dedicate nodes or instances exclusively to the AI gateway to prevent resource competition with other services.
## Scaling Strategies
## Scaling strategies
- Use Kubernetes HPA to scale pods based on real-time metrics like:
- Average CPU utilization exceeding 50%.
- Request latency consistently above 500ms.
- Enable node autoscaling to scale infrastructure resources dynamically as pods increase.
## Scaling Recommendations
## Scaling recommendations
| Deployment Size | Instance Type | Resources | Capacity (Concurrent Requests) | Scaling Recommendations |
| Deployment size | Instance type | Resources | Capacity (concurrent requests) | Scaling recommendations |
|------------------|--------------------|------------------------|---------------------------------|---------------------------------------------|
| Small | 2 vCPUs, 8 GB RAM | Single instance | 40 | Fixed deployment; no autoscaling. |
| Medium | AWS t3.2xlarge | Single instance | 160 | HPA based on CPU or latency thresholds. |
@ -455,7 +468,7 @@ You should locate your AI gateway in the same geographic region as your GitLab i
When deploying the AI gateway on OpenShift, you might encounter permission errors due to OpenShift's security model.
By default, the AI Gateway uses `/home/aigateway/.hf` for caching HuggingFace models, which may not be writable in OpenShift's
By default, the AI gateway uses `/home/aigateway/.hf` for caching HuggingFace models, which may not be writable in OpenShift's
security-restricted environment. This can result in permission errors like:
```shell
@ -481,7 +494,7 @@ You can configure this in either of the following ways:
--set "extraEnvironmentVariables[0].value=/var/tmp/huggingface" # Use any writable directory
```
This configuration ensures the AI Gateway can properly cache HuggingFace models while respecting OpenShift's security constraints. The exact directory you choose may depend on your specific OpenShift configuration and security policies.
This configuration ensures the AI gateway can properly cache HuggingFace models while respecting the OpenShift security constraints. The exact directory you choose may depend on your specific OpenShift configuration and security policies.
### Self-signed certificate error

View File

@ -314,7 +314,7 @@ use the information in the failure error logs or the database:
```
When dealing with multiple arguments, such as `[["id"],["id_convert_to_bigint"]]`, escape the
comma between each argument with a backslash <code>&#92;</code> to prevent an invalid character error.
comma between each argument with a backslash ` \ ` to prevent an invalid character error.
For example, to finish the migration from the previous step:
```shell
@ -342,7 +342,7 @@ use the information in the failure error logs or the database:
- `job_arguments`: `[["id"], ["id_convert_to_bigint"]]`
When dealing with multiple arguments, such as `[["id"],["id_convert_to_bigint"]]`, escape the
comma between each argument with a backslash <code>&#92;</code> to prevent an invalid character error.
comma between each argument with a backslash ` \ ` to prevent an invalid character error.
Every comma in the `job_arguments` parameter value must be escaped with a backslash.
For example:

View File

@ -118,18 +118,18 @@ To improve your security, try these features:
| [GitLab Duo Chat](../gitlab_duo_chat/_index.md) | Premium, Ultimate | GitLab Duo Pro or Enterprise, GitLab Duo with Amazon Q | Generally available | Generally available | Generally available | Generally available |
| [GitLab Duo Workflow](../duo_workflow/_index.md) | Ultimate | None | Private beta | N/A | N/A | N/A |
| [Issue Description Generation](../project/issues/managing_issues.md#populate-an-issue-with-issue-description-generation) | Ultimate | GitLab Duo Enterprise | Experiment | N/A | N/A | N/A |
| [Discussion Summary](../discussions/_index.md#summarize-issue-discussions-with-duo-chat) | Ultimate | GitLab Duo Enterprise, GitLab Duo with Amazon Q | Generally available | Generally available | Generally available | N/A |
| [Discussion Summary](../discussions/_index.md#summarize-issue-discussions-with-duo-chat) | Ultimate | GitLab Duo Enterprise, GitLab Duo with Amazon Q | Generally available | Generally available | Generally available | Beta |
| [Code Suggestions](../project/repository/code_suggestions/_index.md) | Premium, Ultimate | GitLab Duo Pro or Enterprise, GitLab Duo with Amazon Q | Generally available | Generally available | Generally available | Generally available |
| [Code Explanation](../project/repository/code_explain.md) | Premium, Ultimate | GitLab Duo Pro or Enterprise, GitLab Duo with Amazon Q | Generally available | Generally available | Generally available | Generally available |
| [Test Generation](../gitlab_duo_chat/examples.md#write-tests-in-the-ide) | Premium, Ultimate | GitLab Duo Pro or Enterprise, GitLab Duo with Amazon Q | Generally available | Generally available | Generally available | Generally available |
| [Refactor Code](../gitlab_duo_chat/examples.md#refactor-code-in-the-ide) | Premium, Ultimate | GitLab Duo Pro or Enterprise, GitLab Duo with Amazon Q | Generally available | Generally available | Generally available | Generally available |
| [Fix Code](../gitlab_duo_chat/examples.md#fix-code-in-the-ide) | Premium, Ultimate | GitLab Duo Pro or Enterprise, GitLab Duo with Amazon Q | Generally available | Generally available | Generally available | N/A |
| [GitLab Duo for the CLI](../../editor_extensions/gitlab_cli/_index.md#gitlab-duo-for-the-cli) | Ultimate | GitLab Duo Enterprise | Generally available | Generally available | Generally available | N/A |
| [Merge Request Summary](../project/merge_requests/duo_in_merge_requests.md#generate-a-description-by-summarizing-code-changes) | Ultimate | GitLab Duo Enterprise | Beta | Beta | N/A | N/A |
| [Merge Request Summary](../project/merge_requests/duo_in_merge_requests.md#generate-a-description-by-summarizing-code-changes) | Ultimate | GitLab Duo Enterprise | Beta | Beta | N/A | Beta |
| [Code Review](../project/merge_requests/duo_in_merge_requests.md#have-gitlab-duo-review-your-code) | Ultimate | GitLab Duo Enterprise | Beta | Beta | Beta | N/A |
| [Code Review Summary](../project/merge_requests/duo_in_merge_requests.md#summarize-a-code-review) | Ultimate | GitLab Duo Enterprise | Experiment | Experiment | N/A | N/A |
| [Merge Commit Message Generation](../project/merge_requests/duo_in_merge_requests.md#generate-a-merge-commit-message) | Ultimate | GitLab Duo Enterprise | Generally available | Generally available | Generally available | N/A |
| [Code Review Summary](../project/merge_requests/duo_in_merge_requests.md#summarize-a-code-review) | Ultimate | GitLab Duo Enterprise | Experiment | Experiment | N/A | Experiment |
| [Merge Commit Message Generation](../project/merge_requests/duo_in_merge_requests.md#generate-a-merge-commit-message) | Ultimate | GitLab Duo Enterprise | Generally available | Generally available | Generally available | Beta |
| [Root Cause Analysis](../gitlab_duo_chat/examples.md#troubleshoot-failed-cicd-jobs-with-root-cause-analysis) | Ultimate | GitLab Duo Enterprise, GitLab Duo with Amazon Q | Generally available | Generally available | Generally available | Beta |
| [Vulnerability Explanation](../application_security/vulnerabilities/_index.md#explaining-a-vulnerability) | Ultimate | GitLab Duo Enterprise, GitLab Duo with Amazon Q | Generally available | Generally available | Generally available | N/A |
| [Vulnerability Resolution](../application_security/vulnerabilities/_index.md#vulnerability-resolution) | Ultimate | GitLab Duo Enterprise, GitLab Duo with Amazon Q | Generally available | Generally available | Generally available | N/A |
| [Vulnerability Resolution](../application_security/vulnerabilities/_index.md#vulnerability-resolution) | Ultimate | GitLab Duo Enterprise, GitLab Duo with Amazon Q | Generally available | Generally available | Generally available | Beta |
| [AI Impact Dashboard](../analytics/ai_impact_analytics.md) | Ultimate | GitLab Duo Enterprise | Generally available | Generally available | N/A | Beta |

View File

@ -343,7 +343,7 @@ However, you cannot mix the wrapping tags:
```
Diff highlighting doesn't work with `` `inline code` ``. If your text includes backticks (`` ` ``), [escape](#escape-characters)
each backtick with a backslash <code>&#92;</code>:
each backtick with a backslash ` \ `:
```markdown
- {+ Just regular text +}
@ -2040,6 +2040,22 @@ A backslash doesn't always escape the character that follows it. The backslash a
In these instances you might need to use the equivalent HTML entity, such as `&#93;` for `]`.
### Use backslash with backticks
When the backslash (` \ `) character is at the end of an inline code sample, the backslash
can escape the last backtick. In this case, add extra spaces around the inline code,
for example:
```markdown
Use the backslash ` \ ` character to escape inline code that ends in a ` backslash\ `.
```
When rendered, the inline code looks like this:
---
Use the backslash ` \ ` character to escape inline code that ends in a ` backslash\ `.
## Footnotes
[View this topic rendered in GitLab](https://gitlab.com/gitlab-org/gitlab/-/blob/master/doc/user/markdown.md#footnotes).

View File

@ -334,9 +334,18 @@ On the destination instance, users with the Owner role for a top-level group can
[accepts the reassignment request](#accept-contribution-reassignment).
- Choose not to reassign contributions and memberships and [keep them assigned to placeholder users](#keep-as-placeholder).
#### Reassigning contributions from multiple placeholder users
All the contributions initially assigned to a single placeholder user can only be reassigned to a single active regular
user on the destination instance. The contributions assigned to a single placeholder user cannot be split among multiple
active regular users.
You can reassign contributions from multiple placeholder users to the same user
on the destination instance if the placeholder users are from:
- Different source instances
- The same source instance and are imported to different top-level groups on the destination instance
If an assigned user becomes inactive before accepting the reassignment request,
the pending reassignment remains linked to the user until they accept it.

View File

@ -62,7 +62,7 @@ You can refine user search with `simple_query_string`.
| `+` | And | [`display +banner`](https://gitlab.com/search?group_id=9970&project_id=278964&repository_ref=&scope=blobs&search=display+%2Bbanner&snippets=) |
| `-` | Exclude | [`display -banner`](https://gitlab.com/search?group_id=9970&project_id=278964&scope=blobs&search=display+-banner) |
| `*` | Partial | [`bug error 50*`](https://gitlab.com/search?group_id=9970&project_id=278964&repository_ref=&scope=blobs&search=bug+error+50%2A&snippets=) |
| <code>&#92;</code> | Escape | [`\*md`](https://gitlab.com/search?snippets=&scope=blobs&repository_ref=&search=%5C*md&group_id=9970&project_id=278964) |
| ` \ ` | Escape | [`\*md`](https://gitlab.com/search?snippets=&scope=blobs&repository_ref=&search=%5C*md&group_id=9970&project_id=278964) |
| `#` | Issue ID | [`#23456`](https://gitlab.com/search?snippets=&scope=issues&repository_ref=&search=%2323456&group_id=9970&project_id=278964) |
| `!` | Merge request ID | [`!23456`](https://gitlab.com/search?snippets=&scope=merge_requests&repository_ref=&search=%2123456&group_id=9970&project_id=278964) |

View File

@ -18,7 +18,7 @@ class CreateMergeRequests < ActiveContext::Migration[1.0]
create_collection :merge_requests, number_of_partitions: 3 do |c|
c.bigint :issue_id, index: true
c.bigint :namespace_id, index: true
c.prefix :traversal_ids
c.keyword :traversal_ids
c.vector :embeddings, dimensions: 768
end
end

View File

@ -13,8 +13,8 @@ module ActiveContext
fields << Field::Bigint.new(name, index: index)
end
def prefix(name)
fields << Field::Prefix.new(name, index: true)
def keyword(name)
fields << Field::Keyword.new(name, index: true)
end
def vector(name, dimensions:, index: true)
@ -31,7 +31,7 @@ module ActiveContext
end
class Bigint < Field; end
class Prefix < Field; end
class Keyword < Field; end
class Vector < Field; end
end
end

View File

@ -64,7 +64,7 @@ module ActiveContext
mappings[field.name] = case field
when Field::Bigint
{ type: 'long' }
when Field::Prefix
when Field::Keyword
{ type: 'keyword' }
when Field::Vector
vector_field_mapping(field)

View File

@ -75,7 +75,7 @@ module ActiveContext
when Field::Bigint
# Bigint is 8 bytes
fixed_columns << [field, 8]
when Field::Prefix
when Field::Keyword
# Text fields are variable width
variable_columns << field
else
@ -93,7 +93,7 @@ module ActiveContext
table.column(field.name, "vector(#{field.options[:dimensions]})")
when Field::Bigint
table.bigint(field.name, **field.options.except(:index))
when Field::Prefix
when Field::Keyword
table.text(field.name, **field.options.except(:index))
else
raise ArgumentError, "Unknown field type: #{field.class}"

View File

@ -1056,6 +1056,7 @@ module API
authorize! :change_namespace, user_project
args = declared_params(include_missing: false)
args[:permission_scope] = :transfer_projects
args[:exact_matches_first] = true
groups = ::Groups::UserGroupsFinder.new(current_user, current_user, args).execute
groups = groups.excluding_groups(user_project.group).with_route

View File

@ -10,7 +10,7 @@ variables:
SECURE_ANALYZERS_PREFIX: "$CI_TEMPLATE_REGISTRY_HOST/security-products"
#
SECRET_DETECTION_IMAGE_SUFFIX: ""
SECRETS_ANALYZER_VERSION: "6"
SECRETS_ANALYZER_VERSION: "7"
SECRET_DETECTION_EXCLUDED_PATHS: ""
.secret-analyzer:

View File

@ -15,7 +15,7 @@ variables:
AST_ENABLE_MR_PIPELINES: "true"
#
SECRET_DETECTION_IMAGE_SUFFIX: ""
SECRETS_ANALYZER_VERSION: "6"
SECRETS_ANALYZER_VERSION: "7"
SECRET_DETECTION_EXCLUDED_PATHS: ""
.secret-analyzer:

View File

@ -130,7 +130,7 @@ eslint:
secrets:
extends: .download_images
variables:
SECURE_BINARIES_ANALYZER_VERSION: "6"
SECURE_BINARIES_ANALYZER_VERSION: "7"
rules:
- if: '$SECURE_BINARIES_DOWNLOAD_IMAGES == "true" && $SECURE_BINARIES_ANALYZERS =~ /\bsecrets\b/'

View File

@ -53679,12 +53679,21 @@ msgstr ""
msgid "Secrets|An error occurred while fetching the Secret manager status. Please try again."
msgstr ""
msgid "Secrets|Are you sure you want to delete secret %{secretName}? This action cannot be undone, and the secret cannot be recovered."
msgstr ""
msgid "Secrets|Created"
msgstr ""
msgid "Secrets|Delete"
msgstr ""
msgid "Secrets|Delete Secret"
msgstr ""
msgid "Secrets|Delete secret"
msgstr ""
msgid "Secrets|Description must be 200 characters or less."
msgstr ""
@ -53736,6 +53745,9 @@ msgstr ""
msgid "Secrets|Rotation reminder"
msgstr ""
msgid "Secrets|Secret %{secretName} has been deleted."
msgstr ""
msgid "Secrets|Secrets"
msgstr ""
@ -53760,6 +53772,9 @@ msgstr ""
msgid "Secrets|The name should be unique within this project."
msgstr ""
msgid "Secrets|To confirm, enter %{secretName}:"
msgstr ""
msgid "Secrets|Use the Secrets Manager to store your sensitive credentials, and then safely use them in your processes."
msgstr ""

View File

@ -12,7 +12,7 @@ RSpec.describe Groups::UserGroupsFinder, feature_category: :groups_and_projects
let_it_be(:public_maintainer_group) { create(:group, name: 'a public maintainer', path: 'a-public-maintainer', parent: root_group) }
let_it_be(:public_owner_group) { create(:group, name: 'a public owner', path: 'a-public-owner') }
subject { described_class.new(current_user, target_user, arguments.merge(search_arguments)).execute }
subject(:result) { described_class.new(current_user, target_user, arguments.merge(search_arguments)).execute }
let(:arguments) { {} }
let(:current_user) { user }
@ -171,6 +171,36 @@ RSpec.describe Groups::UserGroupsFinder, feature_category: :groups_and_projects
end
end
context 'on searching with exact_matches_first' do
let(:search_arguments) { { exact_matches_first: true, search: private_maintainer_group.path } }
let(:other_groups) { [] }
before do
2.times do
new_group = create(:group, :private, path: "1-#{SecureRandom.hex}-#{private_maintainer_group.path}", parent: root_group)
new_group.add_owner(current_user)
other_groups << new_group
end
end
it 'prioritizes exact matches first' do
expect(result.first).to eq(private_maintainer_group)
expect(result[1..]).to match_array(other_groups)
end
context 'when exact_matches_first_project_transfer feature flag is disabled' do
let(:expected_groups) { other_groups + [private_maintainer_group] }
before do
stub_feature_flags(exact_matches_first_project_transfer: false)
end
it 'returns matching groups sorted by namespace path' do
expect(result).to match_array(expected_groups.sort_by(&:path))
end
end
end
it 'returns all groups where the user is a direct member' do
is_expected.to contain_exactly(
public_maintainer_group,

View File

@ -134,42 +134,20 @@ RSpec.describe NotesFinder do
let_it_be(:banned_user) { create(:banned_user).user }
let!(:banned_note) { create(:note_on_issue, project: project, author: banned_user) }
context 'when :hidden_notes feature is not enabled' do
before do
stub_feature_flags(hidden_notes: false)
end
context 'when user is an admin' do
let(:user) { create(:admin) }
context 'when user is not an admin' do
it { is_expected.to include(banned_note) }
end
context 'when @current_user is nil' do
let(:user) { nil }
it { is_expected.to be_empty }
end
it { is_expected.to include(banned_note) }
end
context 'when :hidden_notes feature is enabled' do
before do
stub_feature_flags(hidden_notes: true)
end
context 'when user is not an admin' do
it { is_expected.not_to include(banned_note) }
end
context 'when user is an admin' do
let(:user) { create(:admin) }
context 'when @current_user is nil' do
let(:user) { nil }
it { is_expected.to include(banned_note) }
end
context 'when user is not an admin' do
it { is_expected.not_to include(banned_note) }
end
context 'when @current_user is nil' do
let(:user) { nil }
it { is_expected.to be_empty }
end
it { is_expected.to be_empty }
end
end

View File

@ -84,6 +84,7 @@ issues:
- observability_logs
- observability_traces
- dates_source
- current_status
work_item_type:
- issues
- namespace

View File

@ -1919,22 +1919,8 @@ RSpec.describe Note, feature_category: :team_planning do
let_it_be(:banned_user) { create(:banned_user).user }
let_it_be(:banned_note) { create(:note, author: banned_user) }
context 'when the :hidden_notes feature is disabled' do
before do
stub_feature_flags(hidden_notes: false)
end
it { is_expected.to include(banned_note, note1) }
end
context 'when the :hidden_notes feature is enabled' do
before do
stub_feature_flags(hidden_notes: true)
end
it { is_expected.not_to include(banned_note) }
it { is_expected.to include(note1) }
end
it { is_expected.not_to include(banned_note) }
it { is_expected.to include(note1) }
end
end

View File

@ -448,15 +448,9 @@ RSpec.describe API::Internal::Base, feature_category: :system_access do
let(:actor) { key }
let(:rate_limiter) { double(:rate_limiter, ip: "127.0.0.1", trusted_ip?: false) }
before do
allow(::Gitlab::Auth::IpRateLimiter).to receive(:new).with("127.0.0.1").and_return(rate_limiter)
end
it 'is throttled by rate limiter' do
allow(::Gitlab::ApplicationRateLimiter).to receive(:threshold).and_return(1)
expect(::Gitlab::ApplicationRateLimiter).to receive(:throttled?).with(:gitlab_shell_operation, scope: [action, project.full_path, actor]).twice.and_call_original
expect(::Gitlab::ApplicationRateLimiter).to receive(:throttled?).with(:gitlab_shell_operation, scope: [action, project.full_path, "127.0.0.1"]).and_call_original
request
@ -465,6 +459,7 @@ RSpec.describe API::Internal::Base, feature_category: :system_access do
request
expect(response).to have_gitlab_http_status(:too_many_requests)
expect(json_response['message']['error']).to eq('This endpoint has been requested too many times. Try again later.')
end

View File

@ -66,6 +66,16 @@ RSpec.describe ClickHouse::SyncStrategies::BaseSyncStrategy, feature_category: :
expect(events.size).to eq(4)
end
it 'uses the configured primary_key for the id_for_cursor alias' do
allow(strategy).to receive(:primary_key).and_return(:id)
# exclude the primary key :id from the projections
allow(strategy).to receive(:projections).and_return([:project_id])
expect(execute).to eq({ status: :processed, records_inserted: 4, reached_end_of_table: true })
# cursor is still set to last primary key
expect(ClickHouse::SyncCursor.cursor_for(:events)).to eq(project_event1.id)
end
context 'when new records are inserted while processing' do
it 'does not process new records created during the iteration' do
# Simulating the case when there is an insert during the iteration

View File

@ -2,20 +2,39 @@
require 'spec_helper'
RSpec.describe 'groups/settings/_remove.html.haml' do
RSpec.describe 'groups/settings/_remove.html.haml', feature_category: :groups_and_projects do
let_it_be(:group) { build_stubbed(:group) }
before do
stub_feature_flags(downtier_delayed_deletion: false)
allow(view).to receive(:current_user).and_return(double.as_null_object)
end
describe 'render' do
it 'enables the Remove group button for a group' do
group = build(:group)
context 'when user can :remove_group' do
before do
allow(view).to receive(:can?).with(anything, :remove_group, group).and_return(true)
end
render 'groups/settings/remove', group: group
it 'enables the Remove group button for a group' do
render 'groups/settings/remove', group: group
expect(rendered).to have_selector '[data-button-testid="remove-group-button"]'
expect(rendered).not_to have_selector '[data-button-testid="remove-group-button"].disabled'
expect(rendered).not_to have_selector '[data-testid="group-has-linked-subscription-alert"]'
expect(rendered).to have_selector '[data-button-testid="remove-group-button"]'
expect(rendered).not_to have_selector '[data-button-testid="remove-group-button"].disabled'
expect(rendered).not_to have_selector '[data-testid="group-has-linked-subscription-alert"]'
end
end
context 'when user cannot :remove_group' do
before do
allow(view).to receive(:can?).with(anything, :remove_group, group).and_return(false)
end
it 'does not render the Remove group button for a group' do
output = view.render('groups/settings/remove', group: group)
expect(output).to be_nil
end
end
end
end

File diff suppressed because it is too large

View File

@ -1,61 +0,0 @@
# frozen_string_literal: true
module Sidekiq
module Paginator
TYPE_CACHE = {
"dead" => "zset",
"retry" => "zset",
"schedule" => "zset"
}
def page(key, pageidx = 1, page_size = 25, opts = nil)
current_page = (pageidx.to_i < 1) ? 1 : pageidx.to_i
pageidx = current_page - 1
total_size = 0
items = []
starting = pageidx * page_size
ending = starting + page_size - 1
Sidekiq.redis do |conn|
type = conn.type(key)
rev = opts && opts[:reverse]
case type
when "zset"
total_size, items = conn.multi { |transaction|
transaction.zcard(key)
if rev
transaction.zrange(key, starting, ending, "REV", "withscores")
else
transaction.zrange(key, starting, ending, "withscores")
end
}
[current_page, total_size, items]
when "list"
total_size, items = conn.multi { |transaction|
transaction.llen(key)
if rev
transaction.lrange(key, -ending - 1, -starting - 1)
else
transaction.lrange(key, starting, ending)
end
}
items.reverse! if rev
[current_page, total_size, items]
when "none"
[1, 0, []]
else
raise "can't page a #{type}"
end
end
end
def page_items(items, pageidx = 1, page_size = 25)
current_page = (pageidx.to_i < 1) ? 1 : pageidx.to_i
pageidx = current_page - 1
starting = pageidx * page_size
items = items.to_a
[current_page, items.size, items[starting, page_size]]
end
end
end