Add latest changes from gitlab-org/gitlab@master

This commit is contained in:
GitLab Bot 2023-11-06 03:12:13 +00:00
parent 9cc2aa99c0
commit ae4e6f370a
25 changed files with 861 additions and 57 deletions

View File

@ -1 +1 @@
v16.5.0
v16.6.0

View File

@ -77,7 +77,7 @@ class ProjectsFinder < UnionFinder
# EE would override this to add more filters
def filter_projects(collection)
collection = collection.without_deleted
collection = by_deleted_status(collection)
collection = by_ids(collection)
collection = by_full_paths(collection)
collection = by_personal(collection)
@ -155,6 +155,12 @@ class ProjectsFinder < UnionFinder
params[:min_access_level].present?
end
# Controls visibility of projects that are pending deletion.
#
# Non-admin users always get the collection narrowed to live projects.
# Users with the :admin_all_resources ability may opt in to seeing
# pending-delete projects by passing the `include_pending_delete` param.
def by_deleted_status(items)
  is_admin = current_user&.can?(:admin_all_resources)
  return items.without_deleted unless is_admin

  if params[:include_pending_delete].present?
    items
  else
    items.without_deleted
  end
end
# rubocop: disable CodeReuse/ActiveRecord
def by_ids(items)
items = items.where(id: project_ids_relation) if project_ids_relation

View File

@ -36,6 +36,10 @@ module Ml
# Finds a single model version by its primary key, scoped to the given
# project. Returns nil when no matching record exists (find_by semantics).
def by_project_id_and_id(project_id, id)
  find_by(project_id: project_id, id: id)
end
# Finds a single model version by its parent model's name and the version
# string, with the project_id condition applied both to the joined model
# and to the versions table. Returns nil when nothing matches.
# NOTE(review): the `model:` hash key in find_by must resolve to the
# joined table/alias for this query — confirm it maps as intended.
def by_project_id_name_and_version(project_id, name, version)
  joins(:model).find_by(model: { name: name, project_id: project_id }, project_id: project_id, version: version)
end
end
private

View File

@ -0,0 +1,21 @@
# frozen_string_literal: true

module Ml
  module ModelVersions
    # Query object that looks up a single Ml::ModelVersion within a project
    # by model name and version string. Delegates the actual lookup to
    # Ml::ModelVersion.by_project_id_name_and_version, so `execute` returns
    # the matching record or nil.
    class GetModelVersionService
      def initialize(project, name, version)
        @project = project
        @name = name
        @version = version
      end

      # Runs the lookup; returns the Ml::ModelVersion or nil when not found.
      def execute
        Ml::ModelVersion.by_project_id_name_and_version(project.id, name, version)
      end

      private

      attr_reader :project, :name, :version
    end
  end
end

View File

@ -206,6 +206,7 @@ To add Google Cloud Logging streaming destinations to a top-level group:
1. Select **Secure > Audit events**.
1. On the main area, select **Streams** tab.
1. Select **Add streaming destination** and select **Google Cloud Logging** to show the section for adding destinations.
1. Enter a random string to use as a name for the new destination.
1. Enter the Google project ID, Google client email, and Google private key from previously-created Google Cloud service account key to add to the new destination.
1. Enter a random string to use as a log ID for the new destination. You can use this later to filter log results in Google Cloud.
1. Select **Add** to add the new streaming destination.
@ -237,7 +238,8 @@ To update Google Cloud Logging streaming destinations to a top-level group:
1. Select **Secure > Audit events**.
1. On the main area, select **Streams** tab.
1. Select the Google Cloud Logging stream to expand.
1. Enter the Google project ID and Google client email from previously-created Google Cloud service account key to update on the destination.
1. Enter a random string to use as a name for the destination.
1. Enter the Google project ID and Google client email from previously-created Google Cloud service account key to update the destination.
1. Enter a random string to update the log ID for the destination. You can use this later to filter log results in Google Cloud.
1. Select **Add a new private key** and enter a Google private key to update the private key.
1. Select **Save** to update the streaming destination.
@ -257,6 +259,81 @@ To delete Google Cloud Logging streaming destinations to a top-level group:
1. Select **Delete destination**.
1. Confirm by selecting **Delete destination** in the dialog.
### AWS S3 destinations
> [Introduced](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/132603) in GitLab 16.6.
Manage AWS S3 destinations for top-level groups.
#### Prerequisites
Before setting up AWS S3 streaming audit events, you must:
1. Create an access key for AWS with the appropriate credentials and permissions. This account is used to configure audit log streaming authentication.
For more information, see [Managing access keys](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html?icmpid=docs_iam_console#Using_CreateAccessKey).
1. Create an AWS S3 bucket. This bucket is used to store audit log streaming data. For more information, see [Creating a bucket](https://docs.aws.amazon.com/AmazonS3/latest/userguide/create-bucket-overview.html).
#### Add a new AWS S3 destination
Prerequisites:
- Owner role for a top-level group.
To add AWS S3 streaming destinations to a top-level group:
1. On the left sidebar, select **Search or go to** and find your group.
1. Select **Secure > Audit events**.
1. On the main area, select **Streams** tab.
1. Select **Add streaming destination** and select **AWS S3** to show the section for adding destinations.
1. Enter a random string to use as a name for the new destination.
1. Enter the Access Key ID, Secret Access Key, Bucket Name, and AWS Region from previously-created AWS access key and bucket to add to the new destination.
1. Select **Add** to add the new streaming destination.
#### List AWS S3 destinations
Prerequisites:
- Owner role for a top-level group.
To list AWS S3 streaming destinations for a top-level group:
1. On the left sidebar, select **Search or go to** and find your group.
1. Select **Secure > Audit events**.
1. On the main area, select **Streams** tab.
1. Select the AWS S3 stream to expand and see all the fields.
#### Update an AWS S3 destination
Prerequisites:
- Owner role for a top-level group.
To update AWS S3 streaming destinations to a top-level group:
1. On the left sidebar, select **Search or go to** and find your group.
1. Select **Secure > Audit events**.
1. On the main area, select **Streams** tab.
1. Select the AWS S3 stream to expand.
1. Enter a random string to use as a name for the destination.
1. Enter the Access Key ID, Secret Access Key, Bucket Name, and AWS Region from previously-created AWS access key and bucket to update the destination.
1. Select **Add a new Secret Access Key** and enter an AWS Secret Access Key to update the Secret Access Key.
1. Select **Save** to update the streaming destination.
#### Delete an AWS S3 streaming destination
Prerequisites:
- Owner role for a top-level group.
To delete AWS S3 streaming destinations to a top-level group:
1. On the left sidebar, select **Search or go to** and find your group.
1. Select **Secure > Audit events**.
1. On the main area, select the **Streams** tab.
1. Select the AWS S3 stream to expand.
1. Select **Delete destination**.
1. Confirm by selecting **Delete destination** in the dialog.
## Instance streaming destinations **(ULTIMATE SELF)**
> - [Introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/398107) in GitLab 16.1 [with a flag](../feature_flags.md) named `ff_external_audit_events`. Disabled by default.
@ -448,6 +525,7 @@ To add Google Cloud Logging streaming destinations to an instance:
1. On the left sidebar, select **Monitoring > Audit Events**.
1. On the main area, select **Streams** tab.
1. Select **Add streaming destination** and select **Google Cloud Logging** to show the section for adding destinations.
1. Enter a random string to use as a name for the new destination.
1. Enter the Google project ID, Google client email, and Google private key from previously-created Google Cloud service account key to add to the new destination.
1. Enter a random string to use as a log ID for the new destination. You can use this later to filter log results in Google Cloud.
1. Select **Add** to add the new streaming destination.
@ -479,7 +557,8 @@ To update Google Cloud Logging streaming destinations to an instance:
1. On the left sidebar, select **Monitoring > Audit Events**.
1. On the main area, select **Streams** tab.
1. Select the Google Cloud Logging stream to expand.
1. Enter the Google project ID and Google client email from previously-created Google Cloud service account key to update on the destination.
1. Enter a random string to use as a name for the destination.
1. Enter the Google project ID and Google client email from previously-created Google Cloud service account key to update the destination.
1. Enter a random string to update the log ID for the destination. You can use this later to filter log results in Google Cloud.
1. Select **Add a new private key** and enter a Google private key to update the private key.
1. Select **Save** to update the streaming destination.

View File

@ -57,6 +57,8 @@ GET /projects
| `id_after` | integer | No | Limit results to projects with IDs greater than the specified ID. |
| `id_before` | integer | No | Limit results to projects with IDs less than the specified ID. |
| `imported` | boolean | No | Limit results to projects which were imported from external systems by current user. |
| `include_hidden` **(PREMIUM ALL)** | boolean | No | Include hidden projects. _(administrators only)_ |
| `include_pending_delete` | boolean | No | Include projects pending deletion. _(administrators only)_ |
| `last_activity_after` | datetime | No | Limit results to projects with last activity after specified time. Format: ISO 8601 (`YYYY-MM-DDTHH:MM:SSZ`) |
| `last_activity_before` | datetime | No | Limit results to projects with last activity before specified time. Format: ISO 8601 (`YYYY-MM-DDTHH:MM:SSZ`) |
| `membership` | boolean | No | Limit by projects that the current user is a member of. |

View File

@ -0,0 +1,285 @@
---
status: proposed
creation-date: "2023-11-01"
authors: [ "@rymai" ]
coach: "@DylanGriffith"
approvers: []
owning-stage: "~devops::non_devops"
participating-stages: []
---
# Feature Flags usage in GitLab development and operations
This blueprint builds upon [the Development Feature Flags Architecture blueprint](../feature_flags_development/index.md).
## Summary
Feature flags are critical both in developing and operating GitLab, but in the current state
of the process, they can lead to production issues, and introduce a lot of manual and maintenance work.
The goal of this blueprint is to make the process safer, more maintainable, lightweight, automated and transparent.
## Motivations
### Feature flag use-cases
Feature flags can be used for different purposes:
- De-risking GitLab.com deployments (most feature flags): Allows to quickly enable/disable
a feature flag in production in the event of a production incident.
- Work-in-progress feature: Some features are complex and need to be implemented through several MRs. Until they're fully implemented, it needs
to be hidden from anyone. In that case, the feature flag allows to merge all the changes to the main branch without actually using
the feature yet.
- Beta features: We might
[not be confident we'll be able to scale, support, and maintain a feature](https://about.gitlab.com/handbook/product/gitlab-the-product/#experiment-beta-ga)
in its current form for every designed use case ([example](https://gitlab.com/gitlab-org/gitlab/-/issues/336070#note_1523983444)).
There are also scenarios where a feature is not complete enough to be considered an MVC.
Providing a flag in this case allows engineers and customers to disable the new feature until it's performant enough.
- Operations: Site reliability engineer or Support engineer can use these flags to
disable potentially resource-heavy features in order to bring the instance back to a
more stable and available state. Another example is SaaS-only features.
- Experiment: A/B testing on GitLab.com.
- Worker (special `ops` feature flag): Used for controlling Sidekiq workers behavior, such as deferring Sidekiq jobs.
We need to better categorize our feature flags.
### Production incidents related to feature flags
Feature flags have caused production incidents on GitLab.com ([1](https://gitlab.com/gitlab-com/gl-infra/production/-/issues/5289), [2](https://gitlab.com/gitlab-com/gl-infra/production/-/issues/4155), [3](https://gitlab.com/gitlab-com/gl-infra/production/-/issues/16366)).
We need to prevent this for the sake of GitLab.com stability.
### Technical debt caused by feature flags
Feature flags are also becoming an ever-growing source of technical debt: there are currently
[591 feature flags in the GitLab codebase](../../../user/feature_flags.md).
We need to reduce feature flags count for the sake of long-term maintainability & quality of the GitLab codebase.
## Goal
The goal of this blueprint is to improve the feature flag process by making it:
- safer
- more maintainable
- more lightweight & automated
- more transparent
## Challenges
### Complex feature flag rollout process
The feature flag rollout process is currently:
- Complex: Rollout issues are very manual and include a lot of checkboxes
(including non-relevant checkboxes).
Engineers often don't use these issues, which tend to become stale and forgotten over time.
- Not very transparent: Feature flag changes are logged in several places far from the rollout
issue, which makes it hard to understand the latest feature flag state.
- Far from production processes: Rollout issues are created in the `gitlab-org/gitlab` project
(far from the production issue tracker).
- There is no consistent path to rolling out feature flags: we leave it to the judgement of the
engineer to trade-off between speed and safety. There should be a standardized set of rollout
steps.
### Technical debt and codebase complexity
[The challenges from the Development Feature Flags Architecture blueprint still stand](../feature_flags_development/index.md#challenges).
Additionally, there are new challenges:
- If a feature flag is enabled by default, and is disabled in an on-premise installation,
then when the feature flag is removed, the feature suddenly becomes enabled on the
on-premise instance and cannot be rolled back to the previous behavior.
### Multiple sources of truth for feature flag default states and observability
We currently show the feature flag default states in several places, for different intended audiences:
**GitLab customers**
- [User documentation](../../../user/feature_flags.md):
List all feature flags and their metadata so that GitLab customers can tweak feature flags on
their instance. Also useful for GitLab.com users that want to check the default state of a feature flag.
**Site reliability and Delivery engineers**
- [Internal GitLab.com feature flag state change issues](https://gitlab.com/gitlab-com/gl-infra/feature-flag-log/-/issues):
For each change of a feature flag state on GitLab.com, an issue is created in this project.
- [Internal GitLab.com feature flag state change logs](https://nonprod-log.gitlab.net):
Filter logs with `source: feature` and `env: gprd` to see feature flag state change logs.
**GitLab Engineering & Infra/Quality Directors / VPs, and CTO**
- [Internal Sisense dashboard](https://app.periscopedata.com/app/gitlab/792066/Engineering-::-Feature-Flags):
Feature flag metrics over time, grouped per DevOps groups.
**GitLab Engineering and Product managers**
- ["Feature flags requiring attention" monthly reports](https://gitlab.com/gitlab-org/quality/triage-reports/-/issues/?sort=created_date&state=opened&search=Feature%20flags&in=TITLE&assignee_id=None&first_page_size=100):
Same data as the above Internal Sisense dashboard but for a specific DevOps
group, presented in an issue and assigned to the group's Engineering managers.
**Anyone who wants to check feature flag default states**
- [Unofficial feature flags dashboard](https://samdbeckham.gitlab.io/feature-flags/):
A user-friendly dashboard which provides useful filtering.
This leads to confusion for almost all feature flag stakeholders (Development engineers, Engineering managers, Site reliability, Delivery engineers).
## Proposal
### Improve feature flags implementation and usage
- [Reduce the likelihood of mis-configuration and human-error at the implementation step](https://gitlab.com/groups/gitlab-org/-/epics/11553)
- Remove the "percentage of time" strategy in favor of "percentage of actors"
- [Improve the feature flag development documentation](https://gitlab.com/groups/gitlab-org/-/epics/5324)
### Introduce new feature flag `type`s
It's clear that the `development` feature flag type actually includes several use-cases:
- GitLab.com deployment de-risking. YAML value: `gitlab_com_derisk`.
- Work-in-progress feature. YAML value: `wip`. Once the feature is complete, the feature flag type can be changed to `beta`
if there still are some doubts on the scalability of the feature.
- Beta features. YAML value: `beta`.
Notes:
- These new types replace the broad `development` type, which shouldn't be used anymore in the future.
- Backward-compatibility will be kept until there's no `development` feature flags in the codebase anymore.
### Introduce constraints per feature flag type
Each feature flag type will be assigned specific constraints regarding:
- Allowed values for the `default_enabled` attribute
- Maximum Lifespan (MLS): the duration starting on the introduction of the feature flag (i.e. when it's merged into `master`).
We don't introduce a life span that would start on the global GitLab.com enablement (or `default_enabled: true` when
applicable) so that there's incentive to rollout and delete feature flags as quickly as possible.
The MLS will be enforced through automation, reporting & regular review meetings at the section level.
Following are the constraints for each feature flag type:
- `gitlab_com_derisk`
- `default_enabled` **must not** be set to `true`. This kind of feature flag is meant to lower the risk on GitLab.com, thus
there's no need to keep the flag in the codebase after it's been enabled on GitLab.com.
**`default_enabled: true` will not have any effect for this type of feature flag.**
- Maximum Lifespan: 2 months.
- Additional note: This type of feature flag won't be documented in the [All feature flags in GitLab](../../../user/feature_flags.md)
page given they're short-lived and deployment-related.
- `wip`
- `default_enabled` **must not** be set to `true`. If needed, this type can be changed to `beta` once the feature is complete.
- Maximum Lifespan: 4 months.
- `beta`
- `default_enabled` can be set to `true` so that a feature can be "released" to everyone in Beta with the possibility to disable
it in the case of scalability issues (ideally it should only be disabled for this reason on specific on-premise installations).
- Maximum Lifespan: 6 months.
- `ops`
- `default_enabled` can be set to `true`.
- Maximum Lifespan: Unlimited.
- Additional note: Remember that using this type should follow a conscious decision not to introduce an instance setting.
- `experiment`
- `default_enabled` **must not** be set to `true`.
- Maximum Lifespan: 6 months.
### Introduce a new `feature_issue_url` field
Keeping the URL to the original feature issue will allow automated cross-linking from the rollout
and logging issues. The new field for this information is `feature_issue_url`.
For instance:
```yaml
---
name: auto_devops_banner_disabled
feature_issue_url: https://gitlab.com/gitlab-org/gitlab/-/issues/12345
introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/678910
rollout_issue_url: https://gitlab.com/gitlab-com/gl-infra/production/-/issues/9876
milestone: '16.5'
type: gitlab_com_derisk
group: group::pipeline execution
```
```yaml
---
name: ai_mr_creation
feature_issue_url: https://gitlab.com/gitlab-org/gitlab/-/issues/12345
introduced_by_url: https://gitlab.com/gitlab-org/gitlab-foss/-/merge_requests/14218
rollout_issue_url: https://gitlab.com/gitlab-com/gl-infra/production/-/issues/83652
milestone: '16.3'
type: beta
group: group::code review
default_enabled: true
```
### Streamline the feature flag rollout process
1. (Process) Transition to **create rollout issues in the
[Production issue tracker](https://gitlab.com/gitlab-com/gl-infra/production/-/issues)** and adapt the
template to be closer to the
[Change management issue template](https://gitlab.com/gitlab-com/gl-infra/production/-/blob/master/.gitlab/issue_templates/change_management.md)
(see [this issue](https://gitlab.com/gitlab-com/gl-infra/production/-/issues/2780) for inspiration)
That way, the rollout issue would only concern the actual production changes (i.e. enablement/disablement
of the flag on production) and should be closed as soon as the production change is confirmed to work as expected.
1. (Automation) Automate most rollout steps, such as:
- (Done) [Let the author know that their feature has been deployed to staging / canary / production environments](https://gitlab.com/gitlab-org/quality/triage-ops/-/issues/1403)
- (Done) [Cross-link actual feature flag state change (from Chatops project) to rollout issues](https://gitlab.com/gitlab-org/gitlab/-/issues/290770)
- (Done) [Let the author know that their `default_enabled: true` MR has been deployed to production and that the feature flag can be removed from production](https://gitlab.com/gitlab-org/quality/triage-ops/-/merge_requests/2482)
- Automate the creation of rollout issues when a feature flag is first introduced in a merge request,
and provide a diff suggestion to fill the `rollout_issue_url` field (Danger)
- Check and enforce feature flag definition constraints in merge requests (Danger)
- Provide a diff suggestion to correct the `milestone` field when it's not the same value as
the MR milestone (Danger)
- Upon feature flag state change, notify on Slack the group responsible for it (chatops)
- 7 days before the Maximum Lifespan of a feature flag is reached, automatically create a "cleanup MR" with the group label set, and
assigned to the feature flag author (if they're still with GitLab). We could take advantage of the [automation of repetitive developer tasks](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/134487)
- Enforce Maximum Lifespan of feature flags through automated reporting & regular review at the section level
1. (Documentation/process) Ensure the rollout DRI stays online for a few hours after enabling a feature flag (ideally they'd enable the flag at the
beginning of their day) in case of any issue with the feature flag
1. (Process) Provide a standardized set of rollout steps. Trade-offs to consider include:
- Likelihood of errors occurring
- Total actors (users / requests / projects / groups) affected by the feature flag rollout,
e.g. it will be bad if 100,000 users cannot log in when we roll out for 1%
- How long to wait between each step. Some feature flags only need to wait 10 minutes per step, some
flags should wait 24 hours. Ideally there should be automation to actively verify there
is no adverse effect for each step.
### Provide better SSOT for the feature flag default states and current states & state changes on GitLab.com
**GitLab customers**
- [User documentation](../../../user/feature_flags.md):
Keep the current page but add filtering and sorting, similarly to the
[unofficial feature flags dashboard](https://samdbeckham.gitlab.io/feature-flags/).
**Site reliability and Delivery engineers**
We [assessed the usefulness of feature flag state change logging strategies](https://gitlab.com/gitlab-org/quality/engineering-productivity/team/-/issues/309)
and it appears that both
[internal GitLab.com feature flag state change issues](https://gitlab.com/gitlab-com/gl-infra/feature-flag-log/-/issues)
and [internal GitLab.com feature flag state change logs](https://nonprod-log.gitlab.net) are useful for different
audiences.
**GitLab Engineering & Infra/Quality Directors / VPs, and CTO**
- [Internal Sisense dashboard](https://app.periscopedata.com/app/gitlab/792066/Engineering-::-Feature-Flags):
Streamline the current dashboard to be more useful for its stakeholders.
**GitLab Engineering and Product managers**
- ["Feature flags requiring attention" monthly reports](https://gitlab.com/gitlab-org/quality/triage-reports/-/issues/?sort=created_date&state=opened&search=Feature%20flags&in=TITLE&assignee_id=None&first_page_size=100):
Make the current reports more actionable by linking to automatically created MRs for removing feature flags as well as improving documentation and best-practices around feature flags.
## Iterations
This work is being done as part of dedicated epic:
[Improve internal usage of Feature Flags](https://gitlab.com/groups/gitlab-org/-/epics/3551).
This epic describes the meta reasons for making these changes.
## Resources
- [What Are Feature Flags?](https://launchdarkly.com/blog/what-are-feature-flags/#:~:text=Feature%20flags%20are%20a%20software,portions%20of%20code%20are%20executed)
- [Feature Flags Best Practices](https://featureflags.io/feature-flags-best-practices/)
- [Short-lived or Long-lived Flags? Explaining Feature Flag lifespans](https://configcat.com/blog/2022/07/08/how-long-should-you-keep-feature-flags/)

View File

@ -0,0 +1,17 @@
# frozen_string_literal: true

module API
  module Entities
    module Ml
      module Mlflow
        module ModelVersions
          module Responses
            # Response envelope for MLflow's "get model version" endpoint:
            # wraps one serialized model version under the `model_version` key.
            class Get < Grape::Entity
              expose :model_version, with: Types::ModelVersion
            end
          end
        end
      end
    end
  end
end

View File

@ -0,0 +1,85 @@
# frozen_string_literal: true

module API
  module Entities
    module Ml
      module Mlflow
        module ModelVersions
          module Types
            # Serializes an Ml::ModelVersion into the shape the MLflow REST
            # API expects for a ModelVersion object. Several MLflow fields
            # have no GitLab equivalent and are exposed as fixed stub values
            # via the private readers below.
            class ModelVersion < Grape::Entity
              expose :name
              expose :version
              expose :creation_timestamp, documentation: { type: Integer }
              expose :last_updated_timestamp, documentation: { type: Integer }
              expose :user_id
              expose :current_stage
              expose :description
              expose :source
              expose :run_id
              expose :status
              expose :status_message
              expose :metadata
              expose :run_link
              expose :aliases, documentation: { is_array: true, type: String }

              private

              # The version's display name comes from its parent model.
              def name
                object.model.name
              end

              # MLflow expects integer epoch seconds, not a Time object.
              def creation_timestamp
                object.created_at.to_i
              end

              def last_updated_timestamp
                object.updated_at.to_i
              end

              # Not tracked by GitLab; always nil.
              def user_id
                nil
              end

              # GitLab has no stage concept; every version reports "development".
              def current_stage
                "development"
              end

              def description
                ""
              end

              # Path to the version's artifacts in the ml_models package
              # registry. NOTE(review): "(id)" is emitted literally — confirm
              # clients are expected to substitute the project ID themselves.
              def source
                model_name = object.model.name
                "api/v4/projects/(id)/packages/ml_models/#{model_name}/model_version/"
              end

              def run_id
                ""
              end

              # Versions are always reported as immediately usable.
              def status
                "READY"
              end

              def status_message
                ""
              end

              def metadata
                []
              end

              def run_link
                ""
              end

              def aliases
                []
              end
            end
          end
        end
      end
    end
  end
end

View File

@ -0,0 +1,18 @@
# frozen_string_literal: true
module API
module Entities
module Ml
module Mlflow
module ModelVersions
module Types
class ModelVersionTag < Grape::Entity
expose :key
expose :value
end
end
end
end
end
end
end

View File

@ -141,7 +141,7 @@ module API
def find_project(id)
return unless id
projects = Project.without_deleted.not_hidden
projects = find_project_scopes
if id.is_a?(Integer) || id =~ INTEGER_ID_REGEX
projects.find_by(id: id)
@ -151,6 +151,11 @@ module API
end
# rubocop: enable CodeReuse/ActiveRecord
# Can be overridden by API endpoints to widen or narrow the set of
# projects that `find_project` searches. Default: exclude soft-deleted
# and hidden projects.
def find_project_scopes
  Project.without_deleted.not_hidden
end
def find_project!(id)
project = find_project(id)
@ -768,6 +773,7 @@ module API
finder_params[:id_before] = sanitize_id_param(params[:id_before]) if params[:id_before]
finder_params[:updated_after] = declared_params[:updated_after] if declared_params[:updated_after]
finder_params[:updated_before] = declared_params[:updated_before] if declared_params[:updated_before]
finder_params[:include_pending_delete] = declared_params[:include_pending_delete] if declared_params[:include_pending_delete]
finder_params
end

View File

@ -12,6 +12,10 @@ module API
unauthorized! unless can?(current_user, :write_model_experiments, user_project)
end
# Halts the request with 404 when the current user lacks read access to
# the project's model registry. NOTE(review): 404 (rather than 401/403)
# presumably avoids revealing the project's existence — confirm intent.
def check_api_model_registry_read!
  not_found! unless can?(current_user, :read_model_registry, user_project)
end
def resource_not_found!
render_structured_api_error!({ error_code: 'RESOURCE_DOES_NOT_EXIST' }, 404)
end

View File

@ -41,6 +41,7 @@ module API
end
namespace MLFLOW_API_PREFIX do
mount ::API::Ml::Mlflow::Experiments
mount ::API::Ml::Mlflow::ModelVersions
mount ::API::Ml::Mlflow::Runs
mount ::API::Ml::Mlflow::RegisteredModels
end

View File

@ -0,0 +1,32 @@
# frozen_string_literal: true

module API
  module Ml
    module Mlflow
      # MLflow-compatible REST endpoints for model versions.
      # See https://mlflow.org/docs/2.6.0/rest-api.html#get-modelversion
      class ModelVersions < ::API::Base
        feature_category :mlops

        resource :model_versions do
          desc 'Fetch model version by name and version' do
            success Entities::Ml::Mlflow::ModelVersions::Responses::Get
            detail 'https://mlflow.org/docs/2.6.0/rest-api.html#get-modelversion'
          end
          params do
            requires :name, type: String, desc: 'Model version name'
            requires :version, type: String, desc: 'Model version number'
          end
          get 'get', urgency: :low do
            check_api_model_registry_read!
            # Defensive double-check: the `requires` declarations above
            # already reject requests missing either param with a 400.
            resource_not_found! unless params[:name] && params[:version]

            model_version = ::Ml::ModelVersions::GetModelVersionService.new(
              user_project, params[:name], params[:version]
            ).execute

            resource_not_found! unless model_version

            response = { model_version: model_version }
            present response, with: Entities::Ml::Mlflow::ModelVersions::Responses::Get
          end
        end
      end
    end
  end
end

View File

@ -8,6 +8,16 @@ module API
feature_category :gitaly
helpers do
  extend ::Gitlab::Utils::Override

  # Widens `find_project` to every project — including hidden and
  # pending-delete ones — so repository storage moves can still target them.
  override :find_project_scopes
  def find_project_scopes
    Project
  end
end
resource :project_repository_storage_moves do
desc 'Get a list of all project repository storage moves' do
detail 'This feature was introduced in GitLab 13.0.'

View File

@ -159,6 +159,7 @@ module API
optional :topic_id, type: Integer, desc: 'Limit results to projects with the assigned topic given by the topic ID'
optional :updated_before, type: DateTime, desc: 'Return projects updated before the specified datetime. Format: ISO 8601 YYYY-MM-DDTHH:MM:SSZ'
optional :updated_after, type: DateTime, desc: 'Return projects updated after the specified datetime. Format: ISO 8601 YYYY-MM-DDTHH:MM:SSZ'
optional :include_pending_delete, type: Boolean, desc: 'Include projects in pending delete state. Can only be set by admins'
use :optional_filter_params_ee
end

View File

@ -34,10 +34,14 @@ module API
helpers do
# rubocop: disable CodeReuse/ActiveRecord
def reorder_users(users)
if params[:order_by] && params[:sort]
users.reorder(order_options_with_tie_breaker)
else
# Users#search orders by exact matches and handles pagination,
# so we should prioritize that.
if params[:search]
users
else
# Note that params[:order_by] and params[:sort] will always be present and
# default to "id" and "desc" as defined in `sort_params`.
users.reorder(order_options_with_tie_breaker)
end
end
# rubocop: enable CodeReuse/ActiveRecord

View File

@ -425,13 +425,30 @@ RSpec.describe ProjectsFinder, feature_category: :groups_and_projects do
it { is_expected.to match_array([internal_project]) }
end
describe 'always filters by without_deleted' do
describe 'filters by without_deleted by default' do
let_it_be(:pending_delete_project) { create(:project, :public, pending_delete: true) }
it 'returns projects that are not pending_delete' do
expect(subject).not_to include(pending_delete_project)
expect(subject).to include(public_project, internal_project)
end
context 'when include_pending_delete param is provided' do
let(:params) { { include_pending_delete: true } }
it 'returns projects that are not pending_delete' do
expect(subject).not_to include(pending_delete_project)
expect(subject).to include(public_project, internal_project)
end
context 'when user is an admin', :enable_admin_mode do
let(:current_user) { create(:admin) }
it 'also return pending_delete projects' do
expect(subject).to include(public_project, internal_project, pending_delete_project)
end
end
end
end
describe 'filter by last_activity_before' do

View File

@ -0,0 +1,86 @@
# frozen_string_literal: true
require 'spec_helper'
# Request specs for the MLflow-compatible model versions endpoint.
RSpec.describe API::Ml::Mlflow::ModelVersions, feature_category: :mlops do
  let_it_be(:project) { create(:project) }
  let_it_be(:developer) { create(:user).tap { |u| project.add_developer(u) } }
  let_it_be(:another_project) { build(:project).tap { |p| p.add_developer(developer) } }
  let_it_be(:name) { 'a-model-name' }
  let_it_be(:version) { '0.0.1' }
  let_it_be(:model) { create(:ml_models, project: project, name: name) }
  let_it_be(:model_version) { create(:ml_model_versions, project: project, model: model, version: version) }

  # One token per authorization scenario exercised by the shared examples.
  let_it_be(:tokens) do
    {
      write: create(:personal_access_token, scopes: %w[read_api api], user: developer),
      read: create(:personal_access_token, scopes: %w[read_api], user: developer),
      no_access: create(:personal_access_token, scopes: %w[read_user], user: developer),
      different_user: create(:personal_access_token, scopes: %w[read_api api], user: build(:user))
    }
  end

  let(:current_user) { developer }
  let(:access_token) { tokens[:write] }
  let(:headers) { { 'Authorization' => "Bearer #{access_token.token}" } }
  let(:project_id) { project.id }
  let(:default_params) { {} }
  let(:params) { default_params }
  let(:request) { get api(route), params: params, headers: headers }
  let(:json_response) { Gitlab::Json.parse(api_response.body) }

  # Performs the request and exposes the rack response for matchers.
  subject(:api_response) do
    request
    response
  end

  describe 'GET /projects/:id/ml/mlflow/api/2.0/mlflow/model_versions/get' do
    let(:route) do
      "/projects/#{project_id}/ml/mlflow/api/2.0/mlflow/model_versions/get?name=#{name}&version=#{version}"
    end

    it 'returns the model version', :aggregate_failures do
      is_expected.to have_gitlab_http_status(:ok)
      expect(json_response['model_version']).not_to be_nil
      expect(json_response['model_version']['name']).to eq(name)
      expect(json_response['model_version']['version']).to eq(version)
    end

    describe 'Error States' do
      context 'when has access' do
        context 'and model name in incorrect' do
          let(:route) do
            "/projects/#{project_id}/ml/mlflow/api/2.0/mlflow/model_versions/get?name=--&version=#{version}"
          end

          it_behaves_like 'MLflow|Not Found - Resource Does Not Exist'
        end

        context 'and version in incorrect' do
          let(:route) do
            "/projects/#{project_id}/ml/mlflow/api/2.0/mlflow/model_versions/get?name=#{name}&version=--"
          end

          it_behaves_like 'MLflow|Not Found - Resource Does Not Exist'
        end

        context 'when user lacks read_model_registry rights' do
          before do
            allow(Ability).to receive(:allowed?).and_call_original
            allow(Ability).to receive(:allowed?)
              .with(current_user, :read_model_registry, project)
              .and_return(false)
          end

          it "is Not Found" do
            is_expected.to have_gitlab_http_status(:not_found)
          end
        end
      end

      it_behaves_like 'MLflow|shared model registry error cases'
      it_behaves_like 'MLflow|Requires read_api scope'
    end
  end
end

View File

@ -8,5 +8,29 @@ RSpec.describe API::ProjectRepositoryStorageMoves, feature_category: :gitaly do
let_it_be(:storage_move) { create(:project_repository_storage_move, :scheduled, container: container) }
let(:repository_storage_move_factory) { :project_repository_storage_move }
let(:bulk_worker_klass) { Projects::ScheduleBulkRepositoryShardMovesWorker }
# Hidden projects must still support repository storage moves.
context 'when project is hidden' do
  let_it_be(:container) { create(:project, :hidden) }
  let_it_be(:storage_move) { create(:project_repository_storage_move, :scheduled, container: container) }

  it_behaves_like 'get single container repository storage move' do
    let(:container_id) { container.id }
    let(:url) { "/projects/#{container_id}/repository_storage_moves/#{repository_storage_move_id}" }
  end

  it_behaves_like 'post single container repository storage move'
end
# Projects marked pending_delete must still support repository storage moves.
context 'when project is pending delete' do
  let_it_be(:container) { create(:project, pending_delete: true) }
  let_it_be(:storage_move) { create(:project_repository_storage_move, :scheduled, container: container) }

  it_behaves_like 'get single container repository storage move' do
    let(:container_id) { container.id }
    let(:url) { "/projects/#{container_id}/repository_storage_moves/#{repository_storage_move_id}" }
  end

  it_behaves_like 'post single container repository storage move'
end
end
end

View File

@ -286,6 +286,32 @@ RSpec.describe API::Projects, :aggregate_failures, feature_category: :groups_and
expect(json_response.map { |p| p['id'] }).not_to include(project.id)
end
context 'when user requests pending_delete projects' do
  let(:params) { { include_pending_delete: true } }

  before do
    project.update!(pending_delete: true)
  end

  # Regular users never see pending_delete projects, even when requested.
  it 'does not return projects marked for deletion' do
    get api(path, user), params: params

    expect(response).to have_gitlab_http_status(:ok)
    expect(json_response).to be_an Array
    expect(json_response.map { |p| p['id'] }).not_to include(project.id)
  end

  context 'when user is an admin' do
    it 'returns projects marked for deletion' do
      get api(path, admin, admin_mode: true), params: params

      expect(response).to have_gitlab_http_status(:ok)
      expect(json_response).to be_an Array
      expect(json_response.map { |p| p['id'] }).to include(project.id)
    end
  end
end
it 'does not include open_issues_count if issues are disabled' do
project.project_feature.update_attribute(:issues_access_level, ProjectFeature::DISABLED)

View File

@ -227,6 +227,19 @@ RSpec.describe API::Users, :aggregate_failures, feature_category: :user_profile
end
end
context 'with search parameter' do
  let_it_be(:first_user) { create(:user, username: 'a-user') }
  let_it_be(:second_user) { create(:user, username: 'a-user2') }

  # Exact username matches come first in search results.
  it 'prioritizes username match' do
    get api(path, user, admin_mode: true), params: { search: first_user.username }

    expect(response).to have_gitlab_http_status(:ok)
    expect(json_response.first['username']).to eq('a-user')
    expect(json_response.second['username']).to eq('a-user2')
  end
end
context 'N+1 queries' do
before do
create_list(:user, 2)

View File

@ -0,0 +1,28 @@
# frozen_string_literal: true
require 'spec_helper'
# Unit specs for the service that looks up a model version by
# project, model name, and version string.
RSpec.describe Ml::ModelVersions::GetModelVersionService, feature_category: :mlops do
  let_it_be(:existing_version) { create(:ml_model_versions) }
  let_it_be(:another_project) { create(:project) }

  subject(:model_version) { described_class.new(project, name, version).execute }

  describe '#execute' do
    context 'when model version exists' do
      let(:project) { existing_version.project }
      let(:name) { existing_version.name }
      let(:version) { existing_version.version }

      it { is_expected.to eq(existing_version) }
    end

    context 'when model version does not exist' do
      let(:project) { existing_version.project }
      let(:name) { 'a_new_model' }
      let(:version) { '2.0.0' }

      it { is_expected.to be_nil }
    end
  end
end

View File

@ -74,6 +74,37 @@ RSpec.shared_examples 'MLflow|shared error cases' do
end
end
# Shared error cases for MLflow model registry endpoints: requests fail
# when unauthenticated, when the user has no project access, and when
# the model registry ability is denied.
RSpec.shared_examples 'MLflow|shared model registry error cases' do
  context 'when not authenticated' do
    let(:headers) { {} }

    it "is Unauthorized" do
      is_expected.to have_gitlab_http_status(:unauthorized)
    end
  end

  context 'when user does not have access' do
    let(:access_token) { tokens[:different_user] }

    it "is Not Found" do
      is_expected.to have_gitlab_http_status(:not_found)
    end
  end

  context 'when model registry is unavailable' do
    before do
      allow(Ability).to receive(:allowed?).and_call_original
      allow(Ability).to receive(:allowed?)
        .with(current_user, :read_model_registry, project)
        .and_return(false)
    end

    it "is Not Found" do
      is_expected.to have_gitlab_http_status(:not_found)
    end
  end
end
RSpec.shared_examples 'MLflow|Bad Request on missing required' do |keys|
keys.each do |key|
context "when \"#{key}\" is missing" do

View File

@ -80,56 +80,9 @@ RSpec.shared_examples 'repository_storage_moves API' do |container_type|
end
end
describe "GET /#{container_type}/:id/repository_storage_moves" do
let(:container_id) { container.id }
shared_examples 'post single container repository storage move' do
let(:url) { "/#{container_type}/#{container_id}/repository_storage_moves" }
it_behaves_like 'get container repository storage move list'
context 'non-existent container' do
let(:container_id) { non_existing_record_id }
it 'returns not found' do
get api(url, user, admin_mode: user.admin?)
expect(response).to have_gitlab_http_status(:not_found)
end
end
end
describe "GET /#{container_type}/:id/repository_storage_moves/:repository_storage_move_id" do
let(:container_id) { container.id }
let(:url) { "/#{container_type}/#{container_id}/repository_storage_moves/#{repository_storage_move_id}" }
it_behaves_like 'get single container repository storage move'
context 'non-existent container' do
let(:container_id) { non_existing_record_id }
let(:repository_storage_move_id) { storage_move.id }
it 'returns not found' do
get api(url, user, admin_mode: user.admin?)
expect(response).to have_gitlab_http_status(:not_found)
end
end
end
describe "GET /#{container_type.singularize}_repository_storage_moves" do
it_behaves_like 'get container repository storage move list' do
let(:url) { "/#{container_type.singularize}_repository_storage_moves" }
end
end
describe "GET /#{container_type.singularize}_repository_storage_moves/:repository_storage_move_id" do
it_behaves_like 'get single container repository storage move' do
let(:url) { "/#{container_type.singularize}_repository_storage_moves/#{repository_storage_move_id}" }
end
end
describe "POST /#{container_type}/:id/repository_storage_moves", :aggregate_failures do
let(:container_id) { container.id }
let(:url) { "/#{container_type}/#{container_id}/repository_storage_moves" }
let(:destination_storage_name) { 'test_second_storage' }
def create_container_repository_storage_move
@ -186,6 +139,57 @@ RSpec.shared_examples 'repository_storage_moves API' do |container_type|
end
end
describe "GET /#{container_type}/:id/repository_storage_moves" do
let(:container_id) { container.id }
let(:url) { "/#{container_type}/#{container_id}/repository_storage_moves" }
it_behaves_like 'get container repository storage move list'
context 'non-existent container' do
let(:container_id) { non_existing_record_id }
it 'returns not found' do
get api(url, user, admin_mode: user.admin?)
expect(response).to have_gitlab_http_status(:not_found)
end
end
end
describe "GET /#{container_type}/:id/repository_storage_moves/:repository_storage_move_id" do
let(:container_id) { container.id }
let(:url) { "/#{container_type}/#{container_id}/repository_storage_moves/#{repository_storage_move_id}" }
it_behaves_like 'get single container repository storage move'
context 'non-existent container' do
let(:container_id) { non_existing_record_id }
let(:repository_storage_move_id) { storage_move.id }
it 'returns not found' do
get api(url, user, admin_mode: user.admin?)
expect(response).to have_gitlab_http_status(:not_found)
end
end
end
describe "GET /#{container_type.singularize}_repository_storage_moves" do
it_behaves_like 'get container repository storage move list' do
let(:url) { "/#{container_type.singularize}_repository_storage_moves" }
end
end
describe "GET /#{container_type.singularize}_repository_storage_moves/:repository_storage_move_id" do
it_behaves_like 'get single container repository storage move' do
let(:url) { "/#{container_type.singularize}_repository_storage_moves/#{repository_storage_move_id}" }
end
end
describe "POST /#{container_type}/:id/repository_storage_moves", :aggregate_failures do
it_behaves_like 'post single container repository storage move'
end
describe "POST /#{container_type.singularize}_repository_storage_moves" do
let(:url) { "/#{container_type.singularize}_repository_storage_moves" }
let(:source_storage_name) { 'default' }