Add latest changes from gitlab-org/gitlab@master

GitLab Bot 2023-11-07 09:08:54 +00:00
parent 4be549b5eb
commit dfa6eac075
37 changed files with 1004 additions and 468 deletions

View File

@ -6,5 +6,4 @@ Lint/RedundantDirGlobSort:
- 'config/application.rb'
- 'ee/spec/spec_helper.rb'
- 'qa/qa/specs/spec_helper.rb'
- 'rubocop/rubocop.rb'
- 'spec/spec_helper.rb'

View File

@ -612,7 +612,7 @@ gem 'cvss-suite', '~> 3.0.1', require: 'cvss_suite' # rubocop:todo Gemfile/Missi
gem 'arr-pm', '~> 0.0.12' # rubocop:todo Gemfile/MissingFeatureCategory
# Remote Development
gem 'devfile', '~> 0.0.23.pre.alpha1' # rubocop:todo Gemfile/MissingFeatureCategory
gem 'devfile', '~> 0.0.24.pre.alpha1', feature_category: :remote_development
# Apple plist parsing
gem 'CFPropertyList', '~> 3.0.0' # rubocop:todo Gemfile/MissingFeatureCategory

View File

@ -111,9 +111,9 @@
{"name":"deprecation_toolkit","version":"1.5.1","platform":"ruby","checksum":"a8a1ab1a19ae40ea12560b65010e099f3459ebde390b76621ef0c21c516a04ba"},
{"name":"derailed_benchmarks","version":"2.1.2","platform":"ruby","checksum":"eaadc6206ceeb5538ff8f5e04a0023d54ebdd95d04f33e8960fb95a5f189a14f"},
{"name":"descendants_tracker","version":"0.0.4","platform":"ruby","checksum":"e9c41dd4cfbb85829a9301ea7e7c48c2a03b26f09319db230e6479ccdc780897"},
{"name":"devfile","version":"0.0.23.pre.alpha1","platform":"arm64-darwin","checksum":"eec9ed97436cd5e9d456e270da979faeecbdeef42ee75ef9b39b45001c2399fb"},
{"name":"devfile","version":"0.0.23.pre.alpha1","platform":"ruby","checksum":"fba2c679cbafb03da153f73f55a346ae01f4921383575e1f7cda269e7e67e40a"},
{"name":"devfile","version":"0.0.23.pre.alpha1","platform":"x86_64-linux","checksum":"30e31b39599b7823673f5386f8bf19b7cb2b959c7f34a16704893db437d42094"},
{"name":"devfile","version":"0.0.24.pre.alpha1","platform":"arm64-darwin","checksum":"4954bf498772dbf534da0638bc59023234fed7423c72c85f21b6504ee4c65482"},
{"name":"devfile","version":"0.0.24.pre.alpha1","platform":"ruby","checksum":"72bbfc26edb519902d5c68e07188e0a3d699a1866392fa1497e5b7f3abb36600"},
{"name":"devfile","version":"0.0.24.pre.alpha1","platform":"x86_64-linux","checksum":"d121b1094aa3a24c29592a83c629ee640920e0196711dd06f27b6fa9b1ced609"},
{"name":"device_detector","version":"1.0.0","platform":"ruby","checksum":"b800fb3150b00c23e87b6768011808ac1771fffaae74c3238ebaf2b782947a7d"},
{"name":"devise","version":"4.8.1","platform":"ruby","checksum":"fdd48bbe79a89e7c1152236a70479842ede48bea4fa7f4f2d8da1f872559803e"},
{"name":"devise-two-factor","version":"4.1.1","platform":"ruby","checksum":"c95f5b07533e62217aaed3c386874d94e2d472fb5f2b6598afe8600fc17a8b95"},

View File

@ -442,7 +442,7 @@ GEM
thor (>= 0.19, < 2)
descendants_tracker (0.0.4)
thread_safe (~> 0.3, >= 0.3.1)
devfile (0.0.23.pre.alpha1)
devfile (0.0.24.pre.alpha1)
device_detector (1.0.0)
devise (4.8.1)
bcrypt (~> 3.0)
@ -1797,7 +1797,7 @@ DEPENDENCIES
declarative_policy (~> 1.1.0)
deprecation_toolkit (~> 1.5.1)
derailed_benchmarks
devfile (~> 0.0.23.pre.alpha1)
devfile (~> 0.0.24.pre.alpha1)
device_detector
devise (~> 4.8.1)
devise-pbkdf2-encryptable (~> 0.0.0)!

View File

@ -26,8 +26,6 @@ module BulkImports
start_export!
export.batches.destroy_all # rubocop: disable Cop/DestroyAll
enqueue_batch_exports
rescue StandardError => e
fail_export!(e)
ensure
FinishBatchedRelationExportWorker.perform_async(export.id)
end
@ -81,11 +79,5 @@ module BulkImports
def find_or_create_batch(batch_number)
export.batches.find_or_create_by!(batch_number: batch_number) # rubocop:disable CodeReuse/ActiveRecord
end
def fail_export!(exception)
Gitlab::ErrorTracking.track_exception(exception, portable_id: portable.id, portable_type: portable.class.name)
export.update!(status_event: 'fail_op', error: exception.message.truncate(255))
end
end
end

View File

@ -19,8 +19,6 @@ module BulkImports
upload_compressed_file
finish_batch!
rescue StandardError => e
fail_batch!(e)
ensure
FileUtils.remove_entry(export_path)
end
@ -72,12 +70,6 @@ module BulkImports
batch.update!(status_event: 'finish', objects_count: exported_objects_count, error: nil)
end
def fail_batch!(exception)
Gitlab::ErrorTracking.track_exception(exception, portable_id: portable.id, portable_type: portable.class.name)
batch.update!(status_event: 'fail_op', error: exception.message.truncate(255))
end
def exported_filepath
File.join(export_path, exported_filename)
end

View File

@ -42,8 +42,6 @@ module BulkImports
yield export
finish_export!(export)
rescue StandardError => e
fail_export!(export, e)
end
def export_service
@ -87,12 +85,6 @@ module BulkImports
export.update!(status_event: 'finish', batched: false, error: nil)
end
def fail_export!(export, exception)
Gitlab::ErrorTracking.track_exception(exception, portable_id: portable.id, portable_type: portable.class.name)
export&.update(status_event: 'fail_op', error: exception.class, batched: false)
end
def exported_filepath
File.join(export_path, export_service.exported_filename)
end

View File

@ -8,7 +8,7 @@ module ServiceDesk
def execute
return error_feature_flag_disabled unless Feature.enabled?(:service_desk_custom_email, project)
return error_parameter_missing if settings.blank? || verification.blank?
return error_already_finished if already_finished_and_no_mail?
return error_already_finished if verification.finished?
return error_already_failed if already_failed_and_no_mail?
verification_error = verify
@ -39,10 +39,6 @@ module ServiceDesk
@verification ||= settings.custom_email_verification
end
def already_finished_and_no_mail?
verification.finished? && mail.blank?
end
def already_failed_and_no_mail?
verification.failed? && mail.blank?
end

View File

@ -7,7 +7,16 @@ module BulkImports
idempotent!
data_consistency :always # rubocop:disable SidekiqLoadBalancing/WorkerDataConsistency
feature_category :importers
sidekiq_options status_expiration: StuckExportJobsWorker::EXPORT_JOBS_EXPIRATION
sidekiq_options status_expiration: StuckExportJobsWorker::EXPORT_JOBS_EXPIRATION, retry: 3
sidekiq_retries_exhausted do |job, exception|
batch = BulkImports::ExportBatch.find(job['args'][1])
portable = batch.export.portable
Gitlab::ErrorTracking.track_exception(exception, portable_id: portable.id, portable_type: portable.class.name)
batch.update!(status_event: 'fail_op', error: exception.message.truncate(255))
end
def perform(user_id, batch_id)
@user = User.find(user_id)
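The failure handling that the earlier hunks removed from the service objects now lives in this `sidekiq_retries_exhausted` hook, which Sidekiq invokes once after the final retry fails. A minimal sketch of the pattern, with an illustrative `Export` model standing in for the GitLab classes:

```ruby
require 'sidekiq'

# Sketch only: `Export` is a hypothetical model, not GitLab code.
class ExampleExportWorker
  include Sidekiq::Worker

  sidekiq_options retry: 3

  # Runs once, after all retries are exhausted. The block executes at
  # class level, so any helpers it calls must be class methods.
  sidekiq_retries_exhausted do |job, exception|
    export = Export.find(job['args'].first)
    export.update!(status: 'failed', error: exception.message[0, 255])
  end

  def perform(export_id)
    Export.find(export_id).run! # transient failures are retried up to 3 times
  end
end
```

Because transient errors now surface to Sidekiq instead of being rescued inside the service, a job gets three attempts before it is marked failed, rather than failing permanently on the first error.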

View File

@ -10,12 +10,27 @@ module BulkImports
loggable_arguments 2, 3
data_consistency :always
feature_category :importers
sidekiq_options status_expiration: StuckExportJobsWorker::EXPORT_JOBS_EXPIRATION
sidekiq_options status_expiration: StuckExportJobsWorker::EXPORT_JOBS_EXPIRATION, retry: 3
worker_resource_boundary :memory
sidekiq_retries_exhausted do |job, exception|
_user_id, portable_id, portable_type, relation, batched = job['args']
portable = portable(portable_id, portable_type)
export = portable.bulk_import_exports.find_by_relation(relation)
Gitlab::ErrorTracking.track_exception(exception, portable_id: portable_id, portable_type: portable.class.name)
export.update!(status_event: 'fail_op', error: exception.message.truncate(255), batched: batched)
end
def self.portable(portable_id, portable_class)
portable_class.classify.constantize.find(portable_id)
end
def perform(user_id, portable_id, portable_class, relation, batched = false)
user = User.find(user_id)
portable = portable(portable_id, portable_class)
portable = self.class.portable(portable_id, portable_class)
config = BulkImports::FileTransfer.config_for(portable)
log_extra_metadata_on_done(:relation, relation)
@ -27,11 +42,5 @@ module BulkImports
RelationExportService.new(user, portable, relation, jid).execute
end
end
private
def portable(portable_id, portable_class)
portable_class.classify.constantize.find(portable_id)
end
end
end

View File

@ -0,0 +1,8 @@
---
name: oidc_issuer_url
introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/135049
rollout_issue_url: https://gitlab.com/gitlab-org/gitlab/-/issues/429855
milestone: '16.6'
type: development
group: group::pipeline security
default_enabled: false

View File

@ -0,0 +1,24 @@
# frozen_string_literal: true
class InsertNewUltimateTrialPlanIntoPlans < Gitlab::Database::Migration[2.2]
milestone '16.6'
disable_ddl_transaction!
restrict_gitlab_migration gitlab_schema: :gitlab_main
def up
execute <<~SQL
INSERT INTO plans (name, title, created_at, updated_at)
VALUES ('ultimate_trial_paid_customer', 'Ultimate Trial for Paid Customer', current_timestamp, current_timestamp)
SQL
end
def down
# NOTE: We have a uniqueness constraint for the 'name' column in 'plans'
execute <<~SQL
DELETE FROM plans
WHERE name = 'ultimate_trial_paid_customer'
SQL
end
end
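The `up` step relies on the uniqueness constraint on `plans.name` (the constraint the `down` comment mentions) to guarantee a single row. A hedged variant that also tolerates re-running `up` against an existing row, assuming that unique index exists:

```ruby
# Illustrative alternative, not the committed code. Assumes a unique
# index on plans.name; ON CONFLICT (name) requires one to exist.
def up
  execute <<~SQL
    INSERT INTO plans (name, title, created_at, updated_at)
    VALUES ('ultimate_trial_paid_customer', 'Ultimate Trial for Paid Customer', current_timestamp, current_timestamp)
    ON CONFLICT (name) DO NOTHING
  SQL
end
```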

View File

@ -0,0 +1 @@
07c4a447b3888046333b0b8fa237411783fc031ea9943520f716ea0c00ed964f

View File

@ -261,7 +261,11 @@ To delete Google Cloud Logging streaming destinations to a top-level group:
### AWS S3 destinations
> [Introduced](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/132603) in GitLab 16.6.
> [Introduced](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/132603) in GitLab 16.6 [with a flag](../feature_flags.md) named `allow_streaming_audit_events_to_amazon_s3`. Enabled by default.
FLAG:
On self-managed GitLab, by default this feature is available. To hide the feature per group, an administrator can [disable the feature flag](../feature_flags.md) named `allow_streaming_audit_events_to_amazon_s3`.
On GitLab.com, this feature is available.
Manage AWS S3 destinations for top-level groups.
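To scope the flag to a single group from a Rails console, the standard feature-flag API accepts a group actor. A sketch (the group path is a placeholder):

```ruby
# Rails console on the self-managed instance; 'my-group' is illustrative.
group = Group.find_by_full_path('my-group')
Feature.disable(:allow_streaming_audit_events_to_amazon_s3, group)

# To restore the default behavior for that group:
Feature.enable(:allow_streaming_audit_events_to_amazon_s3, group)
```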

View File

@ -13951,6 +13951,18 @@ Represents the YAML definitions for audit events defined in `ee/config/audit_eve
| <a id="auditeventdefinitionsavedtodatabase"></a>`savedToDatabase` | [`Boolean!`](#boolean) | Indicates if the event is saved to PostgreSQL database. |
| <a id="auditeventdefinitionstreamed"></a>`streamed` | [`Boolean!`](#boolean) | Indicates if the event is streamed to an external destination. |
### `AuditEventStreamingHTTPNamespaceFilter`
Represents a subgroup or project filter that belongs to an external audit event streaming destination.
#### Fields
| Name | Type | Description |
| ---- | ---- | ----------- |
| <a id="auditeventstreaminghttpnamespacefilterexternalauditeventdestination"></a>`externalAuditEventDestination` | [`ExternalAuditEventDestination!`](#externalauditeventdestination) | Destination to which the filter belongs. |
| <a id="auditeventstreaminghttpnamespacefilterid"></a>`id` | [`ID!`](#id) | ID of the filter. |
| <a id="auditeventstreaminghttpnamespacefilternamespace"></a>`namespace` | [`Namespace!`](#namespace) | Group or project namespace the filter belongs to. |
### `AuditEventStreamingHeader`
Represents an HTTP header key/value that belongs to an audit streaming destination.
@ -17621,6 +17633,7 @@ Represents an external resource to send audit events to.
| <a id="externalauditeventdestinationheaders"></a>`headers` | [`AuditEventStreamingHeaderConnection!`](#auditeventstreamingheaderconnection) | List of additional HTTP headers sent with each event. (see [Connections](#connections)) |
| <a id="externalauditeventdestinationid"></a>`id` | [`ID!`](#id) | ID of the destination. |
| <a id="externalauditeventdestinationname"></a>`name` | [`String!`](#string) | Name of the external destination to send audit events to. |
| <a id="externalauditeventdestinationnamespacefilter"></a>`namespaceFilter` | [`AuditEventStreamingHTTPNamespaceFilter`](#auditeventstreaminghttpnamespacefilter) | List of subgroup or project filters for the destination. |
| <a id="externalauditeventdestinationverificationtoken"></a>`verificationToken` | [`String!`](#string) | Verification token to validate source of event. |
### `ExternalIssue`

View File

@ -106,7 +106,7 @@ the tiers are no longer mentioned in GitLab documentation:
- [Filtering merge requests](../user/project/merge_requests/index.md#filter-the-list-of-merge-requests) by "approved by"
- [Advanced search (Elasticsearch)](../user/search/advanced_search.md)
- [Service Desk](../user/project/service_desk/index.md)
- [Storage usage statistics](../user/usage_quotas.md#storage-usage-statistics)
- [Storage usage statistics](../user/usage_quotas.md)
The following developer features continue to be available to Starter and
Bronze-level subscribers:

View File

@ -7,11 +7,6 @@ info: To determine the technical writer assigned to the Stage/Group associated w
# Container Scanning **(FREE ALL)**
> - Improved support for FIPS [introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/263482) in GitLab 13.6 by upgrading `CS_MAJOR_VERSION` from `2` to `3`.
> - Integration with Trivy [introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/322656) in GitLab 13.9 by upgrading `CS_MAJOR_VERSION` from `3` to `4`.
> - Integration with Clair [deprecated](https://gitlab.com/gitlab-org/gitlab/-/issues/321451) in GitLab 13.9.
> - Default container scanning with Trivy [introduced](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/61850) in GitLab 14.0.
> - Integration with Grype as an alternative scanner [introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/326279) in GitLab 14.0.
> - [Changed](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/86092) the major analyzer version from `4` to `5` in GitLab 15.0.
> - [Moved](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/86783) from GitLab Ultimate to GitLab Free in 15.0.
> - Container Scanning variables that reference Docker [renamed](https://gitlab.com/gitlab-org/gitlab/-/issues/357264) in GitLab 15.4.
@ -61,21 +56,21 @@ information directly in the merge request.
| Capability | In Free and Premium | In Ultimate |
| --- | ------ | ------ |
| [Configure Scanners](#configuration) | Yes | Yes |
| Customize Settings ([Variables](#available-cicd-variables), [Overriding](#overriding-the-container-scanning-template), [offline environment support](#running-container-scanning-in-an-offline-environment), etc) | Yes | Yes |
| [View JSON Report](#reports-json-format) as a CI job artifact | Yes | Yes |
| Generation of a JSON report of [dependencies](#dependency-list) as a CI job artifact | Yes | Yes |
| Ability to enable container scanning via an MR in the GitLab UI | Yes | Yes |
| [UBI Image Support](#fips-enabled-images) | Yes | Yes |
| Support for Trivy | Yes | Yes |
| Support for Grype | Yes | Yes |
| [Configure Scanners](#configuration) | **{check-circle}** Yes | **{check-circle}** Yes |
| Customize Settings ([Variables](#available-cicd-variables), [Overriding](#overriding-the-container-scanning-template), [offline environment support](#running-container-scanning-in-an-offline-environment), etc) | **{check-circle}** Yes | **{check-circle}** Yes |
| [View JSON Report](#reports-json-format) as a CI job artifact | **{check-circle}** Yes | **{check-circle}** Yes |
| Generation of a JSON report of [dependencies](#dependency-list) as a CI job artifact | **{check-circle}** Yes | **{check-circle}** Yes |
| Ability to enable container scanning via an MR in the GitLab UI | **{check-circle}** Yes | **{check-circle}** Yes |
| [UBI Image Support](#fips-enabled-images) | **{check-circle}** Yes | **{check-circle}** Yes |
| Support for Trivy | **{check-circle}** Yes | **{check-circle}** Yes |
| Support for Grype | **{check-circle}** Yes | **{check-circle}** Yes |
| Inclusion of GitLab Advisory Database | Limited to the time-delayed content from GitLab [advisories-communities](https://gitlab.com/gitlab-org/advisories-community/) project | Yes - all the latest content from [Gemnasium DB](https://gitlab.com/gitlab-org/security-products/gemnasium-db) |
| Presentation of Report data in Merge Request and Security tab of the CI pipeline job | No | Yes |
| [Interaction with Vulnerabilities](#interacting-with-the-vulnerabilities) such as merge request approvals | No | Yes |
| [Solutions for vulnerabilities (auto-remediation)](#solutions-for-vulnerabilities-auto-remediation) | No | Yes |
| Support for the [vulnerability allow list](#vulnerability-allowlisting) | No | Yes |
| [Access to Security Dashboard page](#security-dashboard) | No | Yes |
| [Access to Dependency List page](../dependency_list/index.md) | No | Yes |
| Presentation of Report data in Merge Request and Security tab of the CI pipeline job | **{dotted-circle}** No | **{check-circle}** Yes |
| [Interaction with Vulnerabilities](#interacting-with-the-vulnerabilities) such as merge request approvals | **{dotted-circle}** No | **{check-circle}** Yes |
| [Solutions for vulnerabilities (auto-remediation)](#solutions-for-vulnerabilities-auto-remediation) | **{dotted-circle}** No | **{check-circle}** Yes |
| Support for the [vulnerability allow list](#vulnerability-allowlisting) | **{dotted-circle}** No | **{check-circle}** Yes |
| [Access to Security Dashboard page](#security-dashboard) | **{dotted-circle}** No | **{check-circle}** Yes |
| [Access to Dependency List page](../dependency_list/index.md) | **{dotted-circle}** No | **{check-circle}** Yes |
## Prerequisites
@ -278,28 +273,28 @@ including a large number of false positives.
| `CS_DOCKERFILE_PATH` | `Dockerfile` | The path to the `Dockerfile` to use for generating remediations. By default, the scanner looks for a file named `Dockerfile` in the root directory of the project. You should configure this variable only if your `Dockerfile` is in a non-standard location, such as a subdirectory. See [Solutions for vulnerabilities](#solutions-for-vulnerabilities-auto-remediation) for more details. | All |
| `CS_QUIET` | `""` | If set, this variable disables output of the [vulnerabilities table](#container-scanning-job-log-format) in the job log. [Introduced](https://gitlab.com/gitlab-org/security-products/analyzers/container-scanning/-/merge_requests/50) in GitLab 15.1. | All |
| `CS_TRIVY_JAVA_DB` | `"ghcr.io/aquasecurity/trivy-java-db"` | Specify an alternate location for the [trivy-java-db](https://github.com/aquasecurity/trivy-java-db) vulnerability database. | Trivy |
| `SECURE_LOG_LEVEL` | `info` | Set the minimum logging level. Messages of this logging level or higher are output. From highest to lowest severity, the logging levels are: `fatal`, `error`, `warn`, `info`, `debug`. [Introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/10880) in GitLab 13.1. | All |
| `SECURE_LOG_LEVEL` | `info` | Set the minimum logging level. Messages of this logging level or higher are output. From highest to lowest severity, the logging levels are: `fatal`, `error`, `warn`, `info`, `debug`. | All |
### Supported distributions
Support depends on which scanner is used:
| Distribution | Grype | Trivy |
| -------------- | ----- | ----- |
| Alma Linux | | ✅ |
| Alpine Linux | ✅ | ✅ |
| Amazon Linux | ✅ | ✅ |
| BusyBox | ✅ | |
| CentOS | ✅ | ✅ |
| CBL-Mariner | | ✅ |
| Debian | ✅ | ✅ |
| Distroless | ✅ | ✅ |
| Oracle Linux | ✅ | ✅ |
| Photon OS | | ✅ |
| Red Hat (RHEL) | ✅ | ✅ |
| Rocky Linux | | ✅ |
| SUSE | | ✅ |
| Ubuntu | ✅ | ✅ |
| Distribution | Grype | Trivy |
|----------------|------------------------|------------------------|
| Alma Linux | **{dotted-circle}** No | **{check-circle}** Yes |
| Alpine Linux | **{check-circle}** Yes | **{check-circle}** Yes |
| Amazon Linux | **{check-circle}** Yes | **{check-circle}** Yes |
| BusyBox | **{check-circle}** Yes | **{dotted-circle}** No |
| CentOS | **{check-circle}** Yes | **{check-circle}** Yes |
| CBL-Mariner | **{dotted-circle}** No | **{check-circle}** Yes |
| Debian | **{check-circle}** Yes | **{check-circle}** Yes |
| Distroless | **{check-circle}** Yes | **{check-circle}** Yes |
| Oracle Linux | **{check-circle}** Yes | **{check-circle}** Yes |
| Photon OS | **{dotted-circle}** No | **{check-circle}** Yes |
| Red Hat (RHEL) | **{check-circle}** Yes | **{check-circle}** Yes |
| Rocky Linux | **{dotted-circle}** No | **{check-circle}** Yes |
| SUSE | **{dotted-circle}** No | **{check-circle}** Yes |
| Ubuntu | **{check-circle}** Yes | **{check-circle}** Yes |
#### FIPS-enabled images
@ -747,24 +742,24 @@ All analyzer images are [updated daily](https://gitlab.com/gitlab-org/security-p
The images use data from upstream advisory databases depending on which scanner is used:
| Data Source | Trivy | Grype |
| ------------------------------ | ----- | ----- |
| AlmaLinux Security Advisory | ✅ | ✅ |
| Amazon Linux Security Center | ✅ | ✅ |
| Arch Linux Security Tracker | ✅ | |
| SUSE CVRF | ✅ | ✅ |
| CWE Advisories | ✅ | |
| Debian Security Bug Tracker | ✅ | ✅ |
| GitHub Security Advisory | ✅ | ✅ |
| Go Vulnerability Database | ✅ | |
| CBL-Mariner Vulnerability Data | ✅ | |
| NVD | ✅ | ✅ |
| OSV | ✅ | |
| Red Hat OVAL v2 | ✅ | ✅ |
| Red Hat Security Data API | ✅ | ✅ |
| Photon Security Advisories | ✅ | |
| Rocky Linux UpdateInfo | ✅ | |
| Ubuntu CVE Tracker (only data sources from mid 2021 and later) | ✅ | ✅ |
| Data Source | Trivy | Grype |
|----------------------------------------------------------------|------------------------|------------------------|
| AlmaLinux Security Advisory | **{check-circle}** Yes | **{check-circle}** Yes |
| Amazon Linux Security Center | **{check-circle}** Yes | **{check-circle}** Yes |
| Arch Linux Security Tracker | **{check-circle}** Yes | **{dotted-circle}** No |
| SUSE CVRF | **{check-circle}** Yes | **{check-circle}** Yes |
| CWE Advisories | **{check-circle}** Yes | **{dotted-circle}** No |
| Debian Security Bug Tracker | **{check-circle}** Yes | **{check-circle}** Yes |
| GitHub Security Advisory | **{check-circle}** Yes | **{check-circle}** Yes |
| Go Vulnerability Database | **{check-circle}** Yes | **{dotted-circle}** No |
| CBL-Mariner Vulnerability Data | **{check-circle}** Yes | **{dotted-circle}** No |
| NVD | **{check-circle}** Yes | **{check-circle}** Yes |
| OSV | **{check-circle}** Yes | **{dotted-circle}** No |
| Red Hat OVAL v2 | **{check-circle}** Yes | **{check-circle}** Yes |
| Red Hat Security Data API | **{check-circle}** Yes | **{check-circle}** Yes |
| Photon Security Advisories | **{check-circle}** Yes | **{dotted-circle}** No |
| Rocky Linux UpdateInfo | **{check-circle}** Yes | **{dotted-circle}** No |
| Ubuntu CVE Tracker (only data sources from mid 2021 and later) | **{check-circle}** Yes | **{check-circle}** Yes |
In addition to the sources provided by these scanners, GitLab maintains the following vulnerability databases:

View File

@ -15,14 +15,61 @@ if you add a large number of images or tags:
You should delete unnecessary images and tags and set up a [cleanup policy](#cleanup-policy)
to automatically manage your container registry usage.
## Check Container Registry storage use
## Check Container Registry storage use **(FREE SAAS)**
The Usage Quotas page (**Settings > Usage Quotas > Storage**) displays storage usage for Packages.
This page includes the [Container Registry usage](../../usage_quotas.md#container-registry-usage), which is only available on GitLab.com.
Measuring usage is only possible on the new version of the GitLab Container Registry backed by a
metadata database, which has been [available on GitLab.com](https://gitlab.com/groups/gitlab-org/-/epics/5523) since GitLab 15.7.
For information on the planned availability for self-managed instances, see [epic 5521](https://gitlab.com/groups/gitlab-org/-/epics/5521).
## How container registry usage is calculated
Image layers stored in the Container Registry are deduplicated at the root namespace level.
An image is only counted once if:
- You tag the same image more than once in the same repository.
- You tag the same image across distinct repositories under the same root namespace.
An image layer is only counted once if:
- You share the image layer across multiple images in the same container repository, project, or group.
- You share the image layer across different repositories.
Only layers that are referenced by tagged images are accounted for. Untagged images and any layers
referenced exclusively by them are subject to [online garbage collection](../container_registry/delete_container_registry_images.md#garbage-collection).
Untagged image layers are automatically deleted after 24 hours if they remain unreferenced during that period.
Image layers are stored on the storage backend in the original (usually compressed) format. This
means that the measured size for any given image layer should match the size displayed on the
corresponding [image manifest](https://github.com/opencontainers/image-spec/blob/main/manifest.md#example-image-manifest).
Namespace usage is refreshed a few minutes after a tag is pushed or deleted from any container repository under the namespace.
### Delayed refresh
It is not possible to calculate container registry usage
with maximum precision in real time for extremely large namespaces (about 1% of namespaces).
To enable maintainers of these namespaces to see their usage, there is a delayed fallback mechanism.
See [epic 9413](https://gitlab.com/groups/gitlab-org/-/epics/9413) for more details.
If the usage for a namespace cannot be calculated with precision, GitLab falls back to the delayed method.
In the delayed method, the displayed usage size is the sum of **all** unique image layers
in the namespace. Untagged image layers are not ignored. As a result,
the displayed usage size might not change significantly after deleting tags. Instead,
the size value only changes when:
- An automated [garbage collection process](../container_registry/delete_container_registry_images.md#garbage-collection)
runs and deletes untagged image layers. After a user deletes a tag, a garbage collection run
is scheduled to start 24 hours later. During that run, images that were previously tagged
are analyzed and their layers deleted if not referenced by any other tagged image.
If any layers are deleted, the namespace usage is updated.
- The namespace's registry usage shrinks enough that GitLab can measure it with maximum precision.
As usage for namespaces shrinks to be under the [limits](../../../user/usage_quotas.md#namespace-storage-limit),
the measurement switches automatically from delayed to precise usage measurement.
There is no place in the UI to determine which measurement method is being used,
but [issue 386468](https://gitlab.com/gitlab-org/gitlab/-/issues/386468) proposes to improve this.
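The two measurement methods differ only in which layers they count: precise measurement sums unique layers reachable from tagged images, while the delayed fallback sums all unique layers, tagged or not. A toy sketch of the difference, with invented data structures:

```ruby
# Toy model: a repository maps tag names to arrays of layers; a layer
# has a digest and a compressed size. All names here are illustrative.
Layer = Struct.new(:digest, :size)

def precise_usage(repositories)
  repositories
    .flat_map { |repo| repo[:tags].values.flatten } # layers of tagged images only
    .uniq(&:digest)                                 # deduplicated per root namespace
    .sum(&:size)
end

def delayed_usage(all_layers)
  all_layers.uniq(&:digest).sum(&:size) # untagged layers are counted too
end

layers = [Layer.new('sha256:aaa', 10), Layer.new('sha256:bbb', 20)]
repos  = [{ tags: { 'latest' => [layers[0]] } }]

precise_usage(repos)  # => 10 (only the tagged layer)
delayed_usage(layers) # => 30 (every unique layer in the namespace)
```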
## Cleanup policy
> - [Renamed](https://gitlab.com/gitlab-org/gitlab/-/issues/218737) from "expiration policy" to "cleanup policy" in GitLab 13.2.

View File

@ -27,7 +27,7 @@ To restore a namespace to its standard state, you can:
- [Purchase a paid tier](https://about.gitlab.com/pricing/).
- For exceeded storage quota:
- [Purchase more storage for the namespace](../subscriptions/gitlab_com/index.md#purchase-more-storage-and-transfer).
- [Manage your storage usage](usage_quotas.md#manage-your-storage-usage).
- [Manage your storage usage](usage_quotas.md#manage-storage-usage).
## Restricted actions

View File

@ -5,7 +5,7 @@ group: Utilization
info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://about.gitlab.com/handbook/product/ux/technical-writing/#assignments
---
# Storage usage quota **(FREE ALL)**
# Storage **(FREE ALL)**
Storage usage statistics are available for projects and namespaces. You can use that information to
manage storage usage within the applicable quotas.
@ -13,8 +13,8 @@ manage storage usage within the applicable quotas.
Statistics include:
- Storage usage across projects in a namespace.
- Storage usage that exceeds the storage quota.
- Available purchased storage.
- Storage usage that exceeds the storage SaaS limit or [self-managed storage quota](../administration/settings/account_and_limit_settings.md#repository-size-limit).
- Available purchased storage for SaaS.
Storage and network usage are calculated with the binary measurement system (1024 unit multiples).
Storage usage is displayed in kibibytes (KiB), mebibytes (MiB),
@ -30,87 +30,33 @@ you might see references to `KB`, `MB`, and `GB` in the UI and documentation.
Prerequisites:
- To view storage usage for a project, you must have at least the Maintainer role for the project or Owner role for the namespace.
- To view storage usage for a namespace, you must have the Owner role for the namespace.
- To view storage usage for a group namespace, you must have the Owner role for the namespace.
1. On the left sidebar, select **Search or go to** and find your project or group.
1. On the left sidebar, select **Settings > Usage Quotas**.
1. Select the **Storage** tab.
1. Select the **Storage** tab to see namespace storage usage.
1. To view storage usage for a project, select one of the projects from the table at the bottom of the **Storage** tab of the **Usage Quotas** page.
Select any title to view details. The information on this page
is updated every 90 minutes.
The information on the **Usage Quotas** page is updated every 90 minutes.
If your namespace shows `'Not applicable.'`, push a commit to any project in the
namespace to recalculate the storage.
### Container Registry usage **(FREE SAAS)**
### View project fork storage usage **(FREE SAAS)**
Container Registry usage is available only for GitLab.com. This feature requires a
[new version](https://about.gitlab.com/blog/2022/04/12/next-generation-container-registry/)
of the GitLab Container Registry. To learn about the proposed release for self-managed
installations, see [epic 5521](https://gitlab.com/groups/gitlab-org/-/epics/5521).
A cost factor is applied to the storage consumed by project forks so that forks consume less namespace storage than their actual size.
#### How container registry usage is calculated
To view the amount of namespace storage the fork has used:
Image layers stored in the Container Registry are deduplicated at the root namespace level.
1. On the left sidebar, select **Search or go to** and find your project or group.
1. On the left sidebar, select **Settings > Usage Quotas**.
1. Select the **Storage** tab. The **Total** column displays the amount of namespace storage used by the fork as a portion of the actual size of the fork on disk.
An image is only counted once if:
The cost factor applies to the project repository, LFS objects, job artifacts, packages, snippets, and the wiki.
- You tag the same image more than once in the same repository.
- You tag the same image across distinct repositories under the same root namespace.
The cost factor does not apply to private forks in namespaces on the Free plan.
An image layer is only counted once if:
- You share the image layer across multiple images in the same container repository, project, or group.
- You share the image layer across different repositories.
Only layers that are referenced by tagged images are accounted for. Untagged images and any layers
referenced exclusively by them are subject to [online garbage collection](packages/container_registry/delete_container_registry_images.md#garbage-collection).
Untagged image layers are automatically deleted after 24 hours if they remain unreferenced during that period.
Image layers are stored on the storage backend in the original (usually compressed) format. This
means that the measured size for any given image layer should match the size displayed on the
corresponding [image manifest](https://github.com/opencontainers/image-spec/blob/main/manifest.md#example-image-manifest).
Namespace usage is refreshed a few minutes after a tag is pushed or deleted from any container repository under the namespace.
#### Delayed refresh
It is not possible to calculate [container registry usage](#container-registry-usage)
with maximum precision in real time for extremely large namespaces (about 1% of namespaces).
To enable maintainers of these namespaces to see their usage, there is a delayed fallback mechanism.
See [epic 9413](https://gitlab.com/groups/gitlab-org/-/epics/9413) for more details.
If the usage for a namespace cannot be calculated with precision, GitLab falls back to the delayed method.
In the delayed method, the displayed usage size is the sum of **all** unique image layers
in the namespace. Untagged image layers are not ignored. As a result,
the displayed usage size might not change significantly after deleting tags. Instead,
the size value only changes when:
- An automated [garbage collection process](packages/container_registry/delete_container_registry_images.md#garbage-collection)
runs and deletes untagged image layers. After a user deletes a tag, a garbage collection run
is scheduled to start 24 hours later. During that run, images that were previously tagged
are analyzed and their layers deleted if not referenced by any other tagged image.
If any layers are deleted, the namespace usage is updated.
- The namespace's registry usage shrinks enough that GitLab can measure it with maximum precision.
As usage for namespaces shrinks to be under the [limits](#namespace-storage-limit),
the measurement switches automatically from delayed to precise usage measurement.
There is no place in the UI to determine which measurement method is being used,
but [issue 386468](https://gitlab.com/gitlab-org/gitlab/-/issues/386468) proposes to improve this.
### Storage usage statistics
> - [Introduced](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/68898) project-level graph in GitLab 14.4 [with a flag](../administration/feature_flags.md) named `project_storage_ui`. Disabled by default.
> - Enabled on GitLab.com in GitLab 14.4.
> - Enabled on self-managed in GitLab 14.5.
> - [Feature flag removed](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/71270) in GitLab 14.5.
The following storage usage statistics are available to a maintainer:
- Total namespace storage used: Total amount of storage used across projects in this namespace.
- Total excess storage used: Total amount of storage used that exceeds their allocated storage.
- Purchased storage available: Total storage that has been purchased but is not yet used.
## Manage your storage usage
## Manage storage usage
To manage your storage, if you are a namespace Owner you can [purchase more storage for the namespace](../subscriptions/gitlab_com/index.md#purchase-more-storage-and-transfer).
@ -126,14 +72,16 @@ Depending on your role, you can also use the following methods to manage or redu
To automate storage usage analysis and management, see the [storage management automation](storage_management_automation.md) documentation.
## Manage your transfer usage
## Set usage quotas **(FREE SELF)**
Depending on your role, to manage your transfer usage you can [reduce Container Registry data transfers](packages/container_registry/reduce_container_registry_data_transfer.md).
There are no application limits on the amount of storage and transfer for self-managed instances. Administrators are responsible for the underlying infrastructure costs, and can set [repository size limits](../administration/settings/account_and_limit_settings.md#repository-size-limit) to manage repository sizes.
## Project storage limit
## Storage limits **(FREE SAAS)**
Projects on GitLab SaaS have a 10 GiB storage limit on their Git repository and LFS storage.
After namespace-level storage limits are applied, the project limit is removed. A namespace has either a namespace-level storage limit or a project-level storage limit, but not both.
### Project storage limit
Projects on GitLab SaaS have a 10 GiB storage limit on their Git repository and LFS storage. These project storage limits
will be removed before namespace storage limits are applied to GitLab SaaS.
When a project's repository and LFS reaches the quota, the project is set to a read-only state.
You cannot push changes to a read-only project. To monitor the size of each
@ -141,7 +89,7 @@ repository in a namespace, including a breakdown for each project,
[view storage usage](#view-storage-usage). To allow a project's repository and LFS to exceed the free quota
you must purchase additional storage. For more details, see [Excess storage usage](#excess-storage-usage).
### Excess storage usage
#### Excess storage usage
Excess storage usage is the amount that a project's repository and LFS exceeds the [project storage limit](#project-storage-limit). If no
purchased storage is available the project is set to a read-only state. You cannot push changes to a read-only project.
@ -185,12 +133,19 @@ available decreases. All projects no longer have the read-only status because 40
| Yellow | 5 GiB | 0 GiB | 10 GiB | Not read-only |
| **Totals** | **45 GiB** | **10 GiB** | - | - |
## Namespace storage limit
### Namespace storage limit **(FREE SAAS)**
Namespaces on GitLab SaaS have a storage limit. For more information, see our [pricing page](https://about.gitlab.com/pricing/).
GitLab plans to enforce a storage limit for namespaces on GitLab SaaS. For more information, see
the FAQs for the following tiers:
After namespace storage limits are enforced, you can view them on the **Usage Quotas** page.
For more information about the namespace storage limit enforcement, see the FAQ pages for the [Free](https://about.gitlab.com/pricing/faq-efficient-free-tier/#storage-limits-on-gitlab-saas-free-tier) and [Paid](https://about.gitlab.com/pricing/faq-paid-storage-transfer/) tiers.
- [Free tier](https://about.gitlab.com/pricing/faq-efficient-free-tier/#storage-limits-on-gitlab-saas-free-tier).
- [Premium and Ultimate](https://about.gitlab.com/pricing/faq-paid-storage-transfer/).
Namespaces on GitLab SaaS have a [10 GiB project limit](#project-storage-limit) with a soft limit on
namespace storage. Soft storage limits are limits that GitLab has not yet enforced, and they will become
hard limits after namespace storage limits apply. To prevent your namespace from becoming
[read-only](../user/read_only_namespaces.md) when that happens,
ensure that your namespace storage stays within the soft storage limit.
Namespace storage limits do not apply to self-managed deployments, but administrators can [manage the repository size](../administration/settings/account_and_limit_settings.md#repository-size-limit).
@ -209,13 +164,13 @@ If your total namespace storage exceeds the available namespace storage quota, a
To notify you that you have nearly exceeded your namespace storage quota:
- In the command-line interface, a notification displays after each `git push` action when you've reached 95% and 100% of your namespace storage quota.
- In the GitLab UI, a notification displays when you've reached 75%, 95%, and 100% of your namespace storage quota.
- In the command-line interface, a notification displays after each `git push` action when your namespace has reached between 95% and 100%+ of your namespace storage quota.
- In the GitLab UI, a notification displays when your namespace has reached between 75% and 100%+ of your namespace storage quota.
- GitLab sends an email to members with the Owner role to notify them when namespace storage usage is at 70%, 85%, 95%, and 100%.
To prevent exceeding the namespace storage limit, you can:
- [Manage your storage usage](#manage-your-storage-usage).
- [Manage your storage usage](#manage-storage-usage).
- If you meet the eligibility requirements, you can apply for:
- [GitLab for Education](https://about.gitlab.com/solutions/education/join/)
- [GitLab for Open Source](https://about.gitlab.com/solutions/open-source/join/)
@ -225,16 +180,8 @@ To prevent exceeding the namespace storage limit, you can:
- [Start a trial](https://about.gitlab.com/free-trial/) or [upgrade to GitLab Premium or Ultimate](https://about.gitlab.com/pricing/), which include higher limits and features to enable growing teams to ship faster without sacrificing on quality.
- [Talk to an expert](https://page.gitlab.com/usage_limits_help.html) for more information about your options.
### View project fork storage usage
## Related topics
A cost factor is applied to the storage consumed by project forks so that forks consume less namespace storage than their actual size.
To view the amount of namespace storage the fork has used:
1. On the left sidebar, select **Search or go to** and find your project or group.
1. On the left sidebar, select **Settings > Usage Quotas**.
1. Select the **Storage** tab. The **Total** column displays the amount of namespace storage used by the fork as a portion of the actual size of the fork on disk.
The cost factor applies to the project repository, LFS objects, job artifacts, packages, snippets, and the wiki.
The cost factor does not apply to private forks in namespaces on the Free plan.
- [Automate storage management](storage_management_automation.md)
- [Purchase storage and transfer](../subscriptions/gitlab_com/index.md#purchase-more-storage-and-transfer)
- [Transfer usage](packages/container_registry/reduce_container_registry_data_transfer.md)

View File

@ -25,7 +25,7 @@ module Gitlab
def reserved_claims
super.merge({
iss: Settings.gitlab.base_url,
iss: Feature.enabled?(:oidc_issuer_url) ? Gitlab.config.gitlab.url : Settings.gitlab.base_url,
sub: "project_path:#{project.full_path}:ref_type:#{ref_type}:ref:#{source_ref}",
aud: aud
}.compact)
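With the flag enabled, tokens minted here carry the instance URL as the issuer. A sketch of inspecting the claim from a CI job's ID token with the `jwt` gem (the env var name is a placeholder, and signature verification is skipped for brevity):

```ruby
require 'jwt'

# Decode without verifying the signature, just to read the claims.
payload, _header = JWT.decode(ENV['ID_TOKEN'], nil, false)
payload['iss'] # e.g. "https://gitlab.example.com" when oidc_issuer_url is enabled
```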

View File

@ -8,7 +8,8 @@ module Gitlab
include ::Gitlab::Database::MigrationHelpers
include ::Gitlab::Database::MigrationHelpers::LooseForeignKeyHelpers
ALLOWED_TABLES = %w[audit_events web_hook_logs].freeze
ALLOWED_TABLES = %w[audit_events web_hook_logs merge_request_diff_files merge_request_diff_commits].freeze
ERROR_SCOPE = 'table partitioning'
MIGRATION_CLASS_NAME = "::#{module_parent_name}::BackfillPartitionedTable"
@ -16,6 +17,60 @@ module Gitlab
BATCH_INTERVAL = 2.minutes.freeze
BATCH_SIZE = 50_000
SUB_BATCH_SIZE = 2_500
PARTITION_BUFFER = 6
MIN_ID = 1
# Creates a partitioned copy of an existing table, using a RANGE partitioning strategy on a int/bigint column.
# One partition is created per partition_size between 1 and MAX(column_name). Also installs a trigger on
# the original table to copy writes into the partitioned table. To copy over historic data from before creation
# of the partitioned table, use the `enqueue_partitioning_data_migration` helper in a post-deploy migration.
# Note: If the original table is empty, the system creates 6 partitions in the new table.
#
# A copy of the original table is required as PG currently does not support partitioning existing tables.
#
# Example:
#
# partition_table_by_int_range :merge_request_diff_commits, :merge_request_diff_id, partition_size: 500, primary_key: ['merge_request_diff_id', 'relative_order']
#
# Options are:
#   :partition_size - an integer specifying the partition size
#   :primary_key - an array specifying the primary key of the new table
#
# Note: The system always adds a buffer of 6 partitions.
def partition_table_by_int_range(table_name, column_name, partition_size:, primary_key:)
Gitlab::Database::QueryAnalyzers::RestrictAllowedSchemas.require_ddl_mode!
assert_table_is_allowed(table_name)
assert_not_in_transaction_block(scope: ERROR_SCOPE)
current_primary_key = Array.wrap(connection.primary_key(table_name))
raise "primary key not defined for #{table_name}" if current_primary_key.blank?
partition_column = find_column_definition(table_name, column_name)
raise "partition column #{column_name} does not exist on #{table_name}" if partition_column.nil?
primary_key = Array.wrap(primary_key).map(&:to_s)
raise "the partition column must be part of the primary key" unless primary_key.include?(column_name.to_s)
primary_key_objects = connection.columns(table_name).select { |column| primary_key.include?(column.name) }
raise 'partition_size must be greater than 1' unless partition_size > 1
max_id = Gitlab::Database::QueryAnalyzers::RestrictAllowedSchemas.with_suppressed do
Gitlab::Database::QueryAnalyzers::GitlabSchemasValidateConnection.with_suppressed do
define_batchable_model(table_name, connection: connection).maximum(column_name) || partition_size * PARTITION_BUFFER
end
end
partitioned_table_name = make_partitioned_table_name(table_name)
with_lock_retries do
create_range_id_partitioned_copy(table_name, partitioned_table_name, partition_column, primary_key_objects)
create_int_range_partitions(partitioned_table_name, partition_size, MIN_ID, max_id)
create_trigger_to_sync_tables(table_name, partitioned_table_name, current_primary_key)
end
end
# Creates a partitioned copy of an existing table, using a RANGE partitioning strategy on a timestamp column.
# One partition is created per month between the given `min_date` and `max_date`. Also installs a trigger on
@ -332,6 +387,34 @@ module Gitlab
connection.columns(table).find { |c| c.name == column.to_s }
end
def create_range_id_partitioned_copy(source_table_name, partitioned_table_name, partition_column, primary_keys)
if table_exists?(partitioned_table_name)
Gitlab::AppLogger.warn "Partitioned table not created because it already exists" \
" (this may be due to an aborted migration or similar): table_name: #{partitioned_table_name} "
return
end
tmp_partitioning_column_name = "#{partition_column.name}_tmp"
temporary_columns = primary_keys.map { |key| "#{key.name}_tmp" }.join(", ")
temporary_columns_statement = build_temporary_columns_statement(primary_keys)
transaction do
execute(<<~SQL)
CREATE TABLE #{partitioned_table_name} (
LIKE #{source_table_name} INCLUDING ALL EXCLUDING INDEXES,
#{temporary_columns_statement},
PRIMARY KEY (#{temporary_columns})
) PARTITION BY RANGE (#{tmp_partitioning_column_name})
SQL
primary_keys.each do |key|
remove_column(partitioned_table_name, key.name)
rename_column(partitioned_table_name, "#{key.name}_tmp", key.name)
end
end
end
def create_range_partitioned_copy(source_table_name, partitioned_table_name, partition_column, primary_key)
if table_exists?(partitioned_table_name)
Gitlab::AppLogger.warn "Partitioned table not created because it already exists" \
@ -382,6 +465,20 @@ module Gitlab
end
end
def create_int_range_partitions(table_name, partition_size, min_id, max_id)
lower_bound = min_id
upper_bound = min_id + partition_size
end_id = max_id + PARTITION_BUFFER * partition_size # Adds a buffer of 6 partitions
while lower_bound < end_id
create_range_partition_safely("#{table_name}_#{lower_bound}", table_name, lower_bound, upper_bound)
lower_bound += partition_size
upper_bound += partition_size
end
end
def to_sql_date_literal(date)
connection.quote(date.strftime('%Y-%m-%d'))
end
@ -411,19 +508,23 @@ module Gitlab
return
end
unique_key = Array.wrap(unique_key)
delimiter = ",\n "
column_names = connection.columns(partitioned_table_name).map(&:name)
set_statements = build_set_statements(column_names, unique_key)
insert_values = column_names.map { |name| "NEW.#{name}" }
delete_where_statement = unique_key.map { |unique_key| "#{unique_key} = OLD.#{unique_key}" }.join(' AND ')
update_where_statement = unique_key.map { |unique_key| "#{partitioned_table_name}.#{unique_key} = NEW.#{unique_key}" }.join(' AND ')
create_trigger_function(name, replace: false) do
<<~SQL
IF (TG_OP = 'DELETE') THEN
DELETE FROM #{partitioned_table_name} where #{unique_key} = OLD.#{unique_key};
DELETE FROM #{partitioned_table_name} where #{delete_where_statement};
ELSIF (TG_OP = 'UPDATE') THEN
UPDATE #{partitioned_table_name}
SET #{set_statements.join(delimiter)}
WHERE #{partitioned_table_name}.#{unique_key} = NEW.#{unique_key};
WHERE #{update_where_statement};
ELSIF (TG_OP = 'INSERT') THEN
INSERT INTO #{partitioned_table_name} (#{column_names.join(delimiter)})
VALUES (#{insert_values.join(delimiter)});
@ -433,8 +534,16 @@ module Gitlab
end
end
def build_temporary_columns_statement(columns)
columns.map do |column|
type = column.name == 'id' || column.name.end_with?('_id') ? 'bigint' : column.sql_type
"#{column.name}_tmp #{type} NOT NULL"
end.join(", ")
end
def build_set_statements(column_names, unique_key)
column_names.reject { |name| name == unique_key }.map { |name| "#{name} = NEW.#{name}" }
column_names.reject { |name| unique_key.include?(name) }.map { |name| "#{name} = NEW.#{name}" }
end
def create_sync_trigger(table_name, trigger_name, function_name)
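Based on the helper's own documentation, a migration adopting it would look roughly like the sketch below; the class name and milestone are placeholders, and `drop_partitioned_table_for` is the existing cleanup helper from the same module:

```ruby
# frozen_string_literal: true

# Hypothetical migration illustrating partition_table_by_int_range.
class PartitionMergeRequestDiffCommits < Gitlab::Database::Migration[2.2]
  include Gitlab::Database::PartitioningMigrationHelpers

  milestone '16.6'
  disable_ddl_transaction! # the helper asserts it is not inside a transaction

  def up
    # One partition per 500 merge_request_diff_id values, plus the
    # 6-partition buffer, with sync triggers installed on the source table.
    partition_table_by_int_range(
      :merge_request_diff_commits,
      :merge_request_diff_id,
      partition_size: 500,
      primary_key: %w[merge_request_diff_id relative_order]
    )
  end

  def down
    drop_partitioned_table_for(:merge_request_diff_commits)
  end
end
```

Historic rows still have to be copied separately with `enqueue_partitioning_data_migration`, as the method comment notes; the trigger only mirrors writes made after the copy table exists.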

View File

@ -5,6 +5,28 @@ module Gitlab
class PathTraversalCheck
PATH_TRAVERSAL_MESSAGE = 'Potential path traversal attempt detected'
EXCLUDED_EXACT_PATHS = %w[/search].freeze
EXCLUDED_PATH_PREFIXES = %w[/search/].freeze
EXCLUDED_API_PATHS = %w[/search].freeze
EXCLUDED_PROJECT_API_PATHS = %w[/search].freeze
EXCLUDED_GROUP_API_PATHS = %w[/search].freeze
API_PREFIX = %r{/api/[^/]+}
API_SUFFIX = %r{(?:\.[^/]+)?}
EXCLUDED_API_PATHS_REGEX = [
EXCLUDED_API_PATHS.map do |path|
%r{\A#{API_PREFIX}#{path}#{API_SUFFIX}\z}
end.freeze,
EXCLUDED_PROJECT_API_PATHS.map do |path|
%r{\A#{API_PREFIX}/projects/[^/]+(?:/-)?#{path}#{API_SUFFIX}\z}
end.freeze,
EXCLUDED_GROUP_API_PATHS.map do |path|
%r{\A#{API_PREFIX}/groups/[^/]+(?:/-)?#{path}#{API_SUFFIX}\z}
end.freeze
].flatten.freeze
def initialize(app)
@app = app
end
@ -14,7 +36,8 @@ module Gitlab
log_params = {}
execution_time = measure_execution_time do
check(env, log_params)
request = ::Rack::Request.new(env.dup)
check(request, log_params) unless excluded?(request)
end
log_params[:duration_ms] = execution_time.round(5) if execution_time
@ -37,18 +60,25 @@ module Gitlab
end
end
def check(env, log_params)
request = ::Rack::Request.new(env.dup)
fullpath = request.fullpath
decoded_fullpath = CGI.unescape(fullpath)
def check(request, log_params)
decoded_fullpath = CGI.unescape(request.fullpath)
::Gitlab::PathTraversal.check_path_traversal!(decoded_fullpath, skip_decoding: true)
rescue ::Gitlab::PathTraversal::PathTraversalAttackError
log_params[:method] = request.request_method
log_params[:fullpath] = fullpath
log_params[:fullpath] = request.fullpath
log_params[:message] = PATH_TRAVERSAL_MESSAGE
end
def excluded?(request)
path = request.path
return true if path.in?(EXCLUDED_EXACT_PATHS)
return true if EXCLUDED_PATH_PREFIXES.any? { |p| path.start_with?(p) }
return true if EXCLUDED_API_PATHS_REGEX.any? { |r| path.match?(r) }
false
end
def log(payload)
Gitlab::AppLogger.warn(
payload.merge(class_name: self.class.name)
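The three regex groups exclude the search endpoints on the plain, project-scoped, and group-scoped API routes, allowing the optional `/-` separator and an optional format suffix. A quick sketch of what the project-scoped pattern accepts (paths invented for illustration):

```ruby
# Rebuilding one of the composed exclusion regexes by hand.
api_prefix = %r{/api/[^/]+}
api_suffix = %r{(?:\.[^/]+)?}
project_search = %r{\A#{api_prefix}/projects/[^/]+(?:/-)?/search#{api_suffix}\z}

project_search.match?('/api/v4/projects/42/search')        # => true
project_search.match?('/api/v4/projects/42/-/search.json') # => true
project_search.match?('/api/v4/projects/42/files')         # => false
```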

View File

@ -59,7 +59,7 @@
"@gitlab/favicon-overlay": "2.0.0",
"@gitlab/fonts": "^1.3.0",
"@gitlab/svgs": "3.69.0",
"@gitlab/ui": "67.5.1",
"@gitlab/ui": "67.5.2",
"@gitlab/visual-review-tools": "1.7.3",
"@gitlab/web-ide": "0.0.1-dev-20231004090414",
"@mattiasbuelens/web-streams-adapter": "^0.1.0",

View File

@ -1,7 +1,11 @@
# rubocop:disable Naming/FileName
# frozen_string_literal: true
# Load ActiveSupport to ensure that core extensions like `Enumerable#exclude?`
# are available in cop rules like `Performance/CollectionLiteralInLoop`.
require 'active_support/all'
# Auto-require all cops under `rubocop/cop/**/*.rb`
Dir[File.join(__dir__, 'cop', '**', '*.rb')].sort.each { |file| require file }
Dir[File.join(__dir__, 'cop', '**', '*.rb')].each { |file| require file }
# rubocop:enable Naming/FileName
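Dropping `.sort` is safe because `Dir.glob` (and the `Dir[]` shorthand) returns sorted results by default since Ruby 3.0, which is what the `Lint/RedundantDirGlobSort` cop flags; the first hunk in this commit removes this file from that cop's exclusion list. For example:

```ruby
# Since Ruby 3.0, glob results come back sorted unless sort: false is passed.
files = Dir[File.join(__dir__, 'cop', '**', '*.rb')]
files == files.sort # => true

Dir.glob('*.rb', sort: false) # opt back into filesystem order
```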

View File

@ -33,14 +33,6 @@ RSpec.describe Gitlab::Ci::JwtV2, feature_category: :continuous_integration do
describe '#payload' do
subject(:payload) { ci_job_jwt_v2.payload }
it 'has correct values for the standard JWT attributes' do
aggregate_failures do
expect(payload[:iss]).to eq(Settings.gitlab.base_url)
expect(payload[:aud]).to eq(Settings.gitlab.base_url)
expect(payload[:sub]).to eq("project_path:#{project.full_path}:ref_type:branch:ref:#{pipeline.source_ref}")
end
end
it 'includes user identities when enabled' do
expect(user).to receive(:pass_user_identities_to_ci_jwt).and_return(true)
identities = payload[:user_identities].map { |identity| identity.slice(:extern_uid, :provider) }
@ -53,6 +45,34 @@ RSpec.describe Gitlab::Ci::JwtV2, feature_category: :continuous_integration do
expect(payload).not_to include(:user_identities)
end
context 'when oidc_issuer_url is disabled' do
before do
stub_feature_flags(oidc_issuer_url: false)
end
it 'has correct values for the standard JWT attributes' do
aggregate_failures do
expect(payload[:iss]).to eq(Settings.gitlab.base_url)
expect(payload[:aud]).to eq(Settings.gitlab.base_url)
expect(payload[:sub]).to eq("project_path:#{project.full_path}:ref_type:branch:ref:#{pipeline.source_ref}")
end
end
end
context 'when oidc_issuer_url is enabled' do
before do
stub_feature_flags(oidc_issuer_url: true)
end
it 'has correct values for the standard JWT attributes' do
aggregate_failures do
expect(payload[:iss]).to eq(Gitlab.config.gitlab.url)
expect(payload[:aud]).to eq(Settings.gitlab.base_url)
expect(payload[:sub]).to eq("project_path:#{project.full_path}:ref_type:branch:ref:#{pipeline.source_ref}")
end
end
end
context 'when given an aud' do
let(:aud) { 'AWS' }

View File

@ -2,6 +2,173 @@
require 'spec_helper'
RSpec.shared_examples "a measurable object" do
context 'when the table is not allowed' do
let(:source_table) { :_test_this_table_is_not_allowed }
it 'raises an error' do
expect(migration).to receive(:assert_table_is_allowed).with(source_table).and_call_original
expect do
subject
end.to raise_error(/#{source_table} is not allowed for use/)
end
end
context 'when run inside a transaction block' do
it 'raises an error' do
expect(migration).to receive(:transaction_open?).and_return(true)
expect do
subject
end.to raise_error(/can not be run inside a transaction/)
end
end
context 'when the given table does not have a primary key' do
it 'raises an error' do
migration.execute(<<~SQL)
ALTER TABLE #{source_table}
DROP CONSTRAINT #{source_table}_pkey
SQL
expect do
subject
end.to raise_error(/primary key not defined for #{source_table}/)
end
end
it 'creates the partitioned table with the same non-key columns' do
subject
copied_columns = filter_columns_by_name(connection.columns(partitioned_table), new_primary_key)
original_columns = filter_columns_by_name(connection.columns(source_table), new_primary_key)
expect(copied_columns).to match_array(original_columns)
end
it 'removes the default from the primary key column' do
subject
pk_column = connection.columns(partitioned_table).find { |c| c.name == old_primary_key }
expect(pk_column.default_function).to be_nil
end
describe 'constructing the partitioned table' do
it 'creates a table partitioned by the proper column' do
subject
expect(connection.table_exists?(partitioned_table)).to be(true)
expect(connection.primary_key(partitioned_table)).to eq(new_primary_key)
expect_table_partitioned_by(partitioned_table, [partition_column_name])
end
it 'requires the migration helper to be run in DDL mode' do
expect(Gitlab::Database::QueryAnalyzers::RestrictAllowedSchemas).to receive(:require_ddl_mode!)
subject
expect(connection.table_exists?(partitioned_table)).to be(true)
expect(connection.primary_key(partitioned_table)).to eq(new_primary_key)
expect_table_partitioned_by(partitioned_table, [partition_column_name])
end
it 'changes the primary key datatype to bigint' do
subject
pk_column = connection.columns(partitioned_table).find { |c| c.name == old_primary_key }
expect(pk_column.sql_type).to eq('bigint')
end
it 'removes the default from the primary key column' do
subject
pk_column = connection.columns(partitioned_table).find { |c| c.name == old_primary_key }
expect(pk_column.default_function).to be_nil
end
it 'creates the partitioned table with the same non-key columns' do
subject
copied_columns = filter_columns_by_name(connection.columns(partitioned_table), new_primary_key)
original_columns = filter_columns_by_name(connection.columns(source_table), new_primary_key)
expect(copied_columns).to match_array(original_columns)
end
end
describe 'keeping data in sync with the partitioned table' do
before do
partitioned_model.primary_key = :id
partitioned_model.table_name = partitioned_table
end
it 'creates a trigger function on the original table' do
expect_function_not_to_exist(function_name)
expect_trigger_not_to_exist(source_table, trigger_name)
subject
expect_function_to_exist(function_name)
expect_valid_function_trigger(source_table, trigger_name, function_name, after: %w[delete insert update])
end
it 'syncs inserts to the partitioned tables' do
subject
expect(partitioned_model.count).to eq(0)
first_record = source_model.create!(name: 'Bob', age: 20, created_at: timestamp, external_id: 1, updated_at: timestamp)
second_record = source_model.create!(name: 'Alice', age: 30, created_at: timestamp, external_id: 2, updated_at: timestamp)
expect(partitioned_model.count).to eq(2)
expect(partitioned_model.find(first_record.id).attributes).to eq(first_record.attributes)
expect(partitioned_model.find(second_record.id).attributes).to eq(second_record.attributes)
end
it 'syncs updates to the partitioned tables' do
subject
first_record = source_model.create!(name: 'Bob', age: 20, created_at: timestamp, external_id: 1, updated_at: timestamp)
second_record = source_model.create!(name: 'Alice', age: 30, created_at: timestamp, external_id: 2, updated_at: timestamp)
expect(partitioned_model.count).to eq(2)
first_copy = partitioned_model.find(first_record.id)
second_copy = partitioned_model.find(second_record.id)
expect(first_copy.attributes).to eq(first_record.attributes)
expect(second_copy.attributes).to eq(second_record.attributes)
first_record.update!(age: 21, updated_at: timestamp + 1.hour, external_id: 3)
expect(partitioned_model.count).to eq(2)
expect(first_copy.reload.attributes).to eq(first_record.attributes)
expect(second_copy.reload.attributes).to eq(second_record.attributes)
end
it 'syncs deletes to the partitioned tables' do
subject
first_record = source_model.create!(name: 'Bob', age: 20, created_at: timestamp, external_id: 1, updated_at: timestamp)
second_record = source_model.create!(name: 'Alice', age: 30, created_at: timestamp, external_id: 2, updated_at: timestamp)
expect(partitioned_model.count).to eq(2)
first_record.destroy!
expect(partitioned_model.count).to eq(1)
expect(partitioned_model.find_by_id(first_record.id)).to be_nil
expect(partitioned_model.find(second_record.id).attributes).to eq(second_record.attributes)
end
end
end
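The three sync examples above pin down the trigger contract: every insert, update, and delete on the source table must be mirrored into the partitioned copy. For orientation, here is a minimal sketch of such a trigger using the columns from these specs; all object names are illustrative, and the helper generates its own SQL:

execute(<<~SQL)
  CREATE OR REPLACE FUNCTION _example_sync_function()
  RETURNS TRIGGER AS $$
  BEGIN
    IF TG_OP = 'DELETE' THEN
      DELETE FROM _example_partitioned_table WHERE id = OLD.id;
      RETURN OLD;
    ELSIF TG_OP = 'UPDATE' THEN
      UPDATE _example_partitioned_table
      SET name = NEW.name, age = NEW.age, external_id = NEW.external_id,
          created_at = NEW.created_at, updated_at = NEW.updated_at
      WHERE id = NEW.id;
      RETURN NEW;
    ELSE
      INSERT INTO _example_partitioned_table (id, name, age, external_id, created_at, updated_at)
      VALUES (NEW.id, NEW.name, NEW.age, NEW.external_id, NEW.created_at, NEW.updated_at);
      RETURN NEW;
    END IF;
  END;
  $$ LANGUAGE plpgsql;

  CREATE TRIGGER _example_sync_trigger
  AFTER INSERT OR UPDATE OR DELETE ON _example_source_table
  FOR EACH ROW EXECUTE FUNCTION _example_sync_function();
SQL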
RSpec.describe Gitlab::Database::PartitioningMigrationHelpers::TableManagementHelpers, feature_category: :database do
include Database::PartitioningHelpers
include Database::TriggerHelpers
@ -18,6 +185,7 @@ RSpec.describe Gitlab::Database::PartitioningMigrationHelpers::TableManagementHelpers, feature_category: :database do
let(:partitioned_table) { :_test_migration_partitioned_table }
let(:function_name) { :_test_migration_function_name }
let(:trigger_name) { :_test_migration_trigger_name }
let(:partition_column2) { 'external_id' }
let(:partition_column) { 'created_at' }
let(:min_date) { Date.new(2019, 12) }
let(:max_date) { Date.new(2020, 3) }
@ -29,6 +197,7 @@ RSpec.describe Gitlab::Database::PartitioningMigrationHelpers::TableManagementHelpers, feature_category: :database do
migration.create_table source_table do |t|
t.string :name, null: false
t.integer :age, null: false
t.integer partition_column2
t.datetime partition_column
t.datetime :updated_at
end
@ -127,32 +296,189 @@ RSpec.describe Gitlab::Database::PartitioningMigrationHelpers::TableManagementHelpers, feature_category: :database do
end
end
describe '#partition_table_by_int_range' do
let(:old_primary_key) { 'id' }
let(:new_primary_key) { ['id', partition_column2] }
let(:partition_column_name) { partition_column2 }
let(:partitioned_model) { Class.new(ActiveRecord::Base) }
let(:timestamp) { Time.utc(2019, 12, 1, 12).round }
let(:partition_size) { 500 }
subject { migration.partition_table_by_int_range(source_table, partition_column2, partition_size: partition_size, primary_key: ['id', partition_column2]) }
include_examples "a measurable object"
context 'when simulating the merge_request_diff_commits migration' do
let(:table_name) { '_test_merge_request_diff_commits' }
let(:partition_column_name) { 'relative_order' }
let(:partition_size) { 2 }
let(:partitions) do
{
'1' => %w[1 3],
'3' => %w[3 5],
'5' => %w[5 7],
'7' => %w[7 9],
'9' => %w[9 11],
'11' => %w[11 13]
}
end
let(:buffer_partitions) do
{
'13' => %w[13 15],
'15' => %w[15 17],
'17' => %w[17 19],
'19' => %w[19 21],
'21' => %w[21 23],
'23' => %w[23 25]
}
end
let(:new_table_definition) do
{
new_path: { default: 'test', null: true, sql_type: 'text' },
merge_request_diff_id: { default: nil, null: false, sql_type: 'bigint' },
relative_order: { default: nil, null: false, sql_type: 'integer' }
}
end
let(:primary_key) { %w[merge_request_diff_id relative_order] }
before do
migration.create_table table_name, primary_key: primary_key do |t|
t.integer :merge_request_diff_id, null: false, default: 1
t.integer :relative_order, null: false
t.text :new_path, null: true, default: 'test'
end
source_model.table_name = table_name
end
it 'creates the partitions' do
migration.partition_table_by_int_range(table_name, partition_column_name, partition_size: partition_size, primary_key: primary_key)
expect_range_partitions_for(partitioned_table, partitions.merge(buffer_partitions))
end
it 'creates a composite primary key' do
migration.partition_table_by_int_range(table_name, partition_column_name, partition_size: partition_size, primary_key: primary_key)
expect(connection.primary_key(:_test_migration_partitioned_table)).to eql(%w[merge_request_diff_id relative_order])
end
it 'applies the correct column schema for the new table' do
migration.partition_table_by_int_range(table_name, partition_column_name, partition_size: partition_size, primary_key: primary_key)
columns = connection.columns(:_test_migration_partitioned_table)
columns.each do |column|
column_name = column.name.to_sym
expect(column.default).to eql(new_table_definition[column_name][:default])
expect(column.null).to eql(new_table_definition[column_name][:null])
expect(column.sql_type).to eql(new_table_definition[column_name][:sql_type])
end
end
it 'creates multiple partitions' do
migration.partition_table_by_int_range(table_name, partition_column_name, partition_size: 500, primary_key: primary_key)
expect_range_partitions_for(partitioned_table, {
'1' => %w[1 501],
'501' => %w[501 1001],
'1001' => %w[1001 1501],
'1501' => %w[1501 2001],
'2001' => %w[2001 2501],
'2501' => %w[2501 3001],
'3001' => %w[3001 3501],
'3501' => %w[3501 4001],
'4001' => %w[4001 4501],
'4501' => %w[4501 5001],
'5001' => %w[5001 5501],
'5501' => %w[5501 6001]
})
end
context 'when the table is not empty' do
before do
source_model.create!(merge_request_diff_id: 1, relative_order: 7, new_path: 'new_path')
end
let(:partition_size) { 2 }
let(:partitions) do
{
'1' => %w[1 3],
'3' => %w[3 5],
'5' => %w[5 7]
}
end
let(:buffer_partitions) do
{
'7' => %w[7 9],
'9' => %w[9 11],
'11' => %w[11 13],
'13' => %w[13 15],
'15' => %w[15 17],
'17' => %w[17 19]
}
end
it 'defaults the min_id to 1 and the max_id to 7' do
migration.partition_table_by_int_range(table_name, partition_column_name, partition_size: partition_size, primary_key: primary_key)
expect_range_partitions_for(partitioned_table, partitions.merge(buffer_partitions))
end
end
end
context 'when an invalid partition column is given' do
let(:invalid_column) { :_this_is_not_real }
it 'raises an error' do
expect do
migration.partition_table_by_int_range(source_table, invalid_column, partition_size: partition_size, primary_key: ['id'])
end.to raise_error(/partition column #{invalid_column} does not exist/)
end
end
context 'when partition_size is not greater than 1' do
let(:partition_size) { 1 }
it 'raises an error' do
expect do
subject
end.to raise_error(/partition_size must be greater than 1/)
end
end
context 'when the partitioned table already exists' do
before do
migration.send(:create_range_id_partitioned_copy, source_table,
migration.send(:make_partitioned_table_name, source_table),
connection.columns(source_table).find { |c| c.name == partition_column2 },
connection.columns(source_table).select { |c| new_primary_key.include?(c.name) })
end
it 'logs a warning and does not raise an error' do
expect(Gitlab::AppLogger).to receive(:warn).with(/Partitioned table not created because it already exists/)
expect { subject }.not_to raise_error
end
end
end
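The 'when the table is not empty' context above fixes how the bounds are derived: min_id defaults to 1, max_id comes from the largest existing value of the partition column, covering partitions are partition_size wide, and a fixed number of empty buffer partitions is appended past the current maximum. A standalone sketch of that calculation follows; the method name and the BUFFER_PARTITIONS constant are assumptions made for illustration, not the helper's actual API:

# Sketch only; the helper computes bounds internally.
BUFFER_PARTITIONS = 6

def int_range_partition_bounds(min_id, max_id, partition_size)
  bounds = []
  lower = min_id

  # Partitions covering existing rows (upper bounds are exclusive).
  while lower < max_id
    bounds << [lower, lower + partition_size]
    lower += partition_size
  end

  # Empty buffer partitions for rows inserted after the migration.
  BUFFER_PARTITIONS.times do
    bounds << [lower, lower + partition_size]
    lower += partition_size
  end

  bounds
end

int_range_partition_bounds(1, 7, 2)
# => [[1, 3], [3, 5], [5, 7], [7, 9], [9, 11], [11, 13], [13, 15], [15, 17], [17, 19]]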
describe '#partition_table_by_date' do
let(:partition_column) { 'created_at' }
let(:old_primary_key) { 'id' }
let(:new_primary_key) { [old_primary_key, partition_column] }
let(:partition_column_name) { 'created_at' }
let(:partitioned_model) { Class.new(ActiveRecord::Base) }
let(:timestamp) { Time.utc(2019, 12, 1, 12).round }
context 'when the table is not allowed' do
let(:source_table) { :_test_this_table_is_not_allowed }
subject { migration.partition_table_by_date source_table, partition_column, min_date: min_date, max_date: max_date }
it 'raises an error' do
expect(migration).to receive(:assert_table_is_allowed).with(source_table).and_call_original
expect do
migration.partition_table_by_date source_table, partition_column, min_date: min_date, max_date: max_date
end.to raise_error(/#{source_table} is not allowed for use/)
end
end
context 'when run inside a transaction block' do
it 'raises an error' do
expect(migration).to receive(:transaction_open?).and_return(true)
expect do
migration.partition_table_by_date source_table, partition_column, min_date: min_date, max_date: max_date
end.to raise_error(/can not be run inside a transaction/)
end
end
include_examples "a measurable object"
context 'when the max_date is less than the min_date' do
let(:max_date) { Time.utc(2019, 6) }
@ -174,19 +500,6 @@ RSpec.describe Gitlab::Database::PartitioningMigrationHelpers::TableManagementHelpers, feature_category: :database do
end
end
context 'when the given table does not have a primary key' do
it 'raises an error' do
migration.execute(<<~SQL)
ALTER TABLE #{source_table}
DROP CONSTRAINT #{source_table}_pkey
SQL
expect do
migration.partition_table_by_date source_table, partition_column, min_date: min_date, max_date: max_date
end.to raise_error(/primary key not defined for #{source_table}/)
end
end
context 'when an invalid partition column is given' do
let(:invalid_column) { :_this_is_not_real }
@ -198,34 +511,6 @@ RSpec.describe Gitlab::Database::PartitioningMigrationHelpers::TableManagementHelpers, feature_category: :database do
end
describe 'constructing the partitioned table' do
it 'creates a table partitioned by the proper column' do
migration.partition_table_by_date source_table, partition_column, min_date: min_date, max_date: max_date
expect(connection.table_exists?(partitioned_table)).to be(true)
expect(connection.primary_key(partitioned_table)).to eq(new_primary_key)
expect_table_partitioned_by(partitioned_table, [partition_column])
end
it 'requires the migration helper to be run in DDL mode' do
expect(Gitlab::Database::QueryAnalyzers::RestrictAllowedSchemas).to receive(:require_ddl_mode!)
migration.partition_table_by_date source_table, partition_column, min_date: min_date, max_date: max_date
expect(connection.table_exists?(partitioned_table)).to be(true)
expect(connection.primary_key(partitioned_table)).to eq(new_primary_key)
expect_table_partitioned_by(partitioned_table, [partition_column])
end
it 'changes the primary key datatype to bigint' do
migration.partition_table_by_date source_table, partition_column, min_date: min_date, max_date: max_date
pk_column = connection.columns(partitioned_table).find { |c| c.name == old_primary_key }
expect(pk_column.sql_type).to eq('bigint')
end
context 'with a non-integer primary key datatype' do
before do
connection.create_table non_int_table, id: false do |t|
@ -248,23 +533,6 @@ RSpec.describe Gitlab::Database::PartitioningMigrationHelpers::TableManagementHelpers, feature_category: :database do
end
end
it 'removes the default from the primary key column' do
migration.partition_table_by_date source_table, partition_column, min_date: min_date, max_date: max_date
pk_column = connection.columns(partitioned_table).find { |c| c.name == old_primary_key }
expect(pk_column.default_function).to be_nil
end
it 'creates the partitioned table with the same non-key columns' do
migration.partition_table_by_date source_table, partition_column, min_date: min_date, max_date: max_date
copied_columns = filter_columns_by_name(connection.columns(partitioned_table), new_primary_key)
original_columns = filter_columns_by_name(connection.columns(source_table), new_primary_key)
expect(copied_columns).to match_array(original_columns)
end
it 'creates a partition spanning over each month in the range given' do
migration.partition_table_by_date source_table, partition_column, min_date: min_date, max_date: max_date
@ -350,75 +618,6 @@ RSpec.describe Gitlab::Database::PartitioningMigrationHelpers::TableManagementHelpers, feature_category: :database do
end
end
end
describe 'keeping data in sync with the partitioned table' do
let(:partitioned_model) { Class.new(ActiveRecord::Base) }
let(:timestamp) { Time.utc(2019, 12, 1, 12).round }
before do
partitioned_model.primary_key = :id
partitioned_model.table_name = partitioned_table
end
it 'creates a trigger function on the original table' do
expect_function_not_to_exist(function_name)
expect_trigger_not_to_exist(source_table, trigger_name)
migration.partition_table_by_date source_table, partition_column, min_date: min_date, max_date: max_date
expect_function_to_exist(function_name)
expect_valid_function_trigger(source_table, trigger_name, function_name, after: %w[delete insert update])
end
it 'syncs inserts to the partitioned tables' do
migration.partition_table_by_date source_table, partition_column, min_date: min_date, max_date: max_date
expect(partitioned_model.count).to eq(0)
first_record = source_model.create!(name: 'Bob', age: 20, created_at: timestamp, updated_at: timestamp)
second_record = source_model.create!(name: 'Alice', age: 30, created_at: timestamp, updated_at: timestamp)
expect(partitioned_model.count).to eq(2)
expect(partitioned_model.find(first_record.id).attributes).to eq(first_record.attributes)
expect(partitioned_model.find(second_record.id).attributes).to eq(second_record.attributes)
end
it 'syncs updates to the partitioned tables' do
migration.partition_table_by_date source_table, partition_column, min_date: min_date, max_date: max_date
first_record = source_model.create!(name: 'Bob', age: 20, created_at: timestamp, updated_at: timestamp)
second_record = source_model.create!(name: 'Alice', age: 30, created_at: timestamp, updated_at: timestamp)
expect(partitioned_model.count).to eq(2)
first_copy = partitioned_model.find(first_record.id)
second_copy = partitioned_model.find(second_record.id)
expect(first_copy.attributes).to eq(first_record.attributes)
expect(second_copy.attributes).to eq(second_record.attributes)
first_record.update!(age: 21, updated_at: timestamp + 1.hour)
expect(partitioned_model.count).to eq(2)
expect(first_copy.reload.attributes).to eq(first_record.attributes)
expect(second_copy.reload.attributes).to eq(second_record.attributes)
end
it 'syncs deletes to the partitioned tables' do
migration.partition_table_by_date source_table, partition_column, min_date: min_date, max_date: max_date
first_record = source_model.create!(name: 'Bob', age: 20, created_at: timestamp, updated_at: timestamp)
second_record = source_model.create!(name: 'Alice', age: 30, created_at: timestamp, updated_at: timestamp)
expect(partitioned_model.count).to eq(2)
first_record.destroy!
expect(partitioned_model.count).to eq(1)
expect(partitioned_model.find_by_id(first_record.id)).to be_nil
expect(partitioned_model.find(second_record.id).attributes).to eq(second_record.attributes)
end
end
end
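For reference, this is the call shape these specs exercise in a migration. A hedged usage sketch, where the class name, table name, and date range are made up:

class PartitionExampleTable < Gitlab::Database::Migration[2.1]
  include Gitlab::Database::PartitioningMigrationHelpers

  # The helper refuses to run inside a transaction block, per the spec above.
  disable_ddl_transaction!

  def up
    partition_table_by_date :_example_events, :created_at,
      min_date: Date.new(2019, 12), max_date: Date.new(2020, 3)
  end

  def down
    drop_partitioned_table_for :_example_events
  end
end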
describe '#drop_partitioned_table_for' do

View File

@ -55,6 +55,34 @@ RSpec.describe ::Gitlab::Middleware::PathTraversalCheck, feature_category: :shared do
end
end
shared_examples 'excluded path' do
it 'measures and logs the execution time' do
expect(::Gitlab::PathTraversal)
.not_to receive(:check_path_traversal!)
expect(::Gitlab::AppLogger)
.to receive(:warn)
.with({ class_name: described_class.name, duration_ms: instance_of(Float) })
.and_call_original
expect(subject).to eq(fake_response)
end
context 'with log_execution_time_path_traversal_middleware disabled' do
before do
stub_feature_flags(log_execution_time_path_traversal_middleware: false)
end
it 'does nothing' do
expect(::Gitlab::PathTraversal)
.not_to receive(:check_path_traversal!)
expect(::Gitlab::AppLogger)
.not_to receive(:warn)
expect(subject).to eq(fake_response)
end
end
end
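Read together, the shared examples describe a control flow like the sketch below; helper names such as excluded? are assumptions for illustration, not the middleware's actual code:

def call(env)
  start = ::Gitlab::Metrics::System.monotonic_time
  request = ::Rack::Request.new(env)

  begin
    # Excluded (global search) paths skip the check entirely.
    ::Gitlab::PathTraversal.check_path_traversal!(request.fullpath) unless excluded?(request.path)
  rescue ::Gitlab::PathTraversal::PathTraversalAttackError
    ::Gitlab::AppLogger.warn(class_name: self.class.name, message: 'Potential path traversal attempt detected')
  end

  if Feature.enabled?(:log_execution_time_path_traversal_middleware)
    duration_ms = (::Gitlab::Metrics::System.monotonic_time - start) * 1000
    ::Gitlab::AppLogger.warn(class_name: self.class.name, duration_ms: duration_ms)
  end

  @app.call(env)
end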
shared_examples 'path traversal' do
it 'logs the problem and measures the execution time' do
expect(::Gitlab::PathTraversal)
@ -112,23 +140,90 @@ RSpec.describe ::Gitlab::Middleware::PathTraversalCheck, feature_category: :shared do
let(:method) { 'get' }
where(:path, :query_params, :shared_example_name) do
'/foo/bar' | {} | 'no issue'
'/foo/../bar' | {} | 'path traversal'
'/foo%2Fbar' | {} | 'no issue'
'/foo%2F..%2Fbar' | {} | 'path traversal'
'/foo%252F..%252Fbar' | {} | 'no issue'
'/foo/bar' | { x: 'foo' } | 'no issue'
'/foo/bar' | { x: 'foo/../bar' } | 'path traversal'
'/foo/bar' | { x: 'foo%2Fbar' } | 'no issue'
'/foo/bar' | { x: 'foo%2F..%2Fbar' } | 'no issue'
'/foo/bar' | { x: 'foo%252F..%252Fbar' } | 'no issue'
'/foo%2F..%2Fbar' | { x: 'foo%252F..%252Fbar' } | 'path traversal'
'/foo/bar' | {} | 'no issue'
'/foo/../bar' | {} | 'path traversal'
'/foo%2Fbar' | {} | 'no issue'
'/foo%2F..%2Fbar' | {} | 'path traversal'
'/foo%252F..%252Fbar' | {} | 'no issue'
'/foo/bar' | { x: 'foo' } | 'no issue'
'/foo/bar' | { x: 'foo/../bar' } | 'path traversal'
'/foo/bar' | { x: 'foo%2Fbar' } | 'no issue'
'/foo/bar' | { x: 'foo%2F..%2Fbar' } | 'no issue'
'/foo/bar' | { x: 'foo%252F..%252Fbar' } | 'no issue'
'/foo%2F..%2Fbar' | { x: 'foo%252F..%252Fbar' } | 'path traversal'
end
with_them do
it_behaves_like params[:shared_example_name]
end
context 'for global search excluded paths' do
excluded_paths = %w[
/search
/search/count
/api/v4/search
/api/v4/search.json
/api/v4/projects/4/search
/api/v4/projects/4/search.json
/api/v4/projects/4/-/search
/api/v4/projects/4/-/search.json
/api/v4/projects/my%2Fproject/search
/api/v4/projects/my%2Fproject/search.json
/api/v4/projects/my%2Fproject/-/search
/api/v4/projects/my%2Fproject/-/search.json
/api/v4/groups/4/search
/api/v4/groups/4/search.json
/api/v4/groups/4/-/search
/api/v4/groups/4/-/search.json
/api/v4/groups/my%2Fgroup/search
/api/v4/groups/my%2Fgroup/search.json
/api/v4/groups/my%2Fgroup/-/search
/api/v4/groups/my%2Fgroup/-/search.json
]
query_params_with_no_path_traversal = [
{},
{ x: 'foo' },
{ x: 'foo%2F..%2Fbar' },
{ x: 'foo%252F..%252Fbar' }
]
query_params_with_path_traversal = [
{ x: 'foo/../bar' }
]
excluded_paths.each do |excluded_path|
(query_params_with_no_path_traversal + query_params_with_path_traversal).each do |qp|
context "for excluded path #{excluded_path} with query params #{qp}" do
let(:query_params) { qp }
let(:path) { excluded_path }
it_behaves_like 'excluded path'
end
end
non_excluded_path = excluded_path.gsub('search', 'searchtest')
query_params_with_no_path_traversal.each do |qp|
context "for non excluded path #{non_excluded_path} with query params #{qp}" do
let(:query_params) { qp }
let(:path) { non_excluded_path }
it_behaves_like 'no issue'
end
end
query_params_with_path_traversal.each do |qp|
context "for non excluded path #{non_excluded_path} with query params #{qp}" do
let(:query_params) { qp }
let(:path) { non_excluded_path }
it_behaves_like 'path traversal'
end
end
end
end
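The excluded paths enumerated above are the global-search endpoints. As a sketch of the matching rule they imply, here is a regex built from this list; the middleware defines its own exclusion constant, so the pattern is an assumption:

EXCLUDED_SEARCH_PATHS = %r{
  \A(
    /search(/count)?
    | /api/v4/search(\.json)?
    | /api/v4/(projects|groups)/[^/]+(/-)?/search(\.json)?
  )\z
}x

EXCLUDED_SEARCH_PATHS.match?('/api/v4/groups/my%2Fgroup/-/search')     # => true
EXCLUDED_SEARCH_PATHS.match?('/api/v4/groups/my%2Fgroup/-/searchtest') # => false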
context 'with an issues search path' do
let(:query_params) { {} }
let(:path) do
@ -149,6 +244,7 @@ RSpec.describe ::Gitlab::Middleware::PathTraversalCheck, feature_category: :shared do
'/foo%2Fbar' | {} | 'no issue'
'/foo%2F..%2Fbar' | {} | 'path traversal'
'/foo%252F..%252Fbar' | {} | 'no issue'
'/foo/bar' | { x: 'foo' } | 'no issue'
'/foo/bar' | { x: 'foo/../bar' } | 'no issue'
'/foo/bar' | { x: 'foo%2Fbar' } | 'no issue'
@ -160,6 +256,59 @@ RSpec.describe ::Gitlab::Middleware::PathTraversalCheck, feature_category: :shar
with_them do
it_behaves_like params[:shared_example_name]
end
context 'for global search excluded paths' do
excluded_paths = %w[
/search
/search/count
/api/v4/search
/api/v4/search.json
/api/v4/projects/4/search
/api/v4/projects/4/search.json
/api/v4/projects/4/-/search
/api/v4/projects/4/-/search.json
/api/v4/projects/my%2Fproject/search
/api/v4/projects/my%2Fproject/search.json
/api/v4/projects/my%2Fproject/-/search
/api/v4/projects/my%2Fproject/-/search.json
/api/v4/groups/4/search
/api/v4/groups/4/search.json
/api/v4/groups/4/-/search
/api/v4/groups/4/-/search.json
/api/v4/groups/my%2Fgroup/search
/api/v4/groups/my%2Fgroup/search.json
/api/v4/groups/my%2Fgroup/-/search
/api/v4/groups/my%2Fgroup/-/search.json
]
all_query_params = [
{},
{ x: 'foo' },
{ x: 'foo%2F..%2Fbar' },
{ x: 'foo%252F..%252Fbar' },
{ x: 'foo/../bar' }
]
excluded_paths.each do |excluded_path|
all_query_params.each do |qp|
context "for excluded path #{excluded_path} with query params #{qp}" do
let(:query_params) { qp }
let(:path) { excluded_path }
it_behaves_like 'excluded path'
end
non_excluded_path = excluded_path.gsub('search', 'searchtest')
context "for non excluded path #{non_excluded_path} with query params #{qp}" do
let(:query_params) { qp }
let(:path) { non_excluded_path }
it_behaves_like 'no issue'
end
end
end
end
end
end
@ -179,6 +328,12 @@ RSpec.describe ::Gitlab::Middleware::PathTraversalCheck, feature_category: :shared do
'/foo/bar' | { x: 'foo%2Fbar' }
'/foo/bar' | { x: 'foo%2F..%2Fbar' }
'/foo/bar' | { x: 'foo%252F..%252Fbar' }
'/search' | { x: 'foo/../bar' }
'/search' | { x: 'foo%2F..%2Fbar' }
'/search' | { x: 'foo%252F..%252Fbar' }
'%2Fsearch' | { x: 'foo/../bar' }
'%2Fsearch' | { x: 'foo%2F..%2Fbar' }
'%2Fsearch' | { x: 'foo%252F..%252Fbar' }
end
with_them do

View File

@ -0,0 +1,13 @@
# frozen_string_literal: true
# No spec helper is `require`d here: `fast_spec_helper` pulls in
# `active_support/all` itself, and this spec must verify that `rubocop/rubocop`
# loads it on its own.
require 'rubocop'
require_relative '../../rubocop/rubocop'
RSpec.describe 'rubocop/rubocop', feature_category: :tooling do
it 'loads activesupport to enhance Enumerable' do
expect(Enumerable.instance_methods).to include(:exclude?)
end
end
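In isolation, the behavior under test: plain Ruby's Enumerable has no exclude?, and ActiveSupport's core extensions add it.

[1, 2, 3].respond_to?(:exclude?) # => false until ActiveSupport is loaded

require 'active_support/core_ext/enumerable'

[1, 2, 3].exclude?(4) # => true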

View File

@ -71,29 +71,6 @@ RSpec.describe BulkImports::BatchedRelationExportService, feature_category: :importers do
expect(export.batches.count).to eq(0)
end
end
context 'when exception occurs' do
it 'tracks exception and marks export as failed' do
allow_next_instance_of(BulkImports::Export) do |export|
allow(export).to receive(:update!).and_call_original
allow(export)
.to receive(:update!)
.with(status_event: 'finish', total_objects_count: 0, batched: true, batches_count: 0, jid: jid, error: nil)
.and_raise(StandardError, 'Error!')
end
expect(Gitlab::ErrorTracking)
.to receive(:track_exception)
.with(StandardError, portable_id: portable.id, portable_type: portable.class.name)
service.execute
export = portable.bulk_import_exports.first
expect(export.reload.failed?).to eq(true)
end
end
end
describe '.cache_key' do

View File

@ -34,10 +34,7 @@ RSpec.describe BulkImports::RelationBatchExportService, feature_category: :importers do
end
it 'removes exported contents after export' do
double = instance_double(BulkImports::FileTransfer::ProjectConfig, export_path: 'foo')
allow(BulkImports::FileTransfer).to receive(:config_for).and_return(double)
allow(double).to receive(:export_service_for).and_raise(StandardError, 'Error!')
allow(subject).to receive(:export_path).and_return('foo')
allow(FileUtils).to receive(:remove_entry)
expect(FileUtils).to receive(:remove_entry).with('foo')
@ -53,29 +50,10 @@ RSpec.describe BulkImports::RelationBatchExportService, feature_category: :impor
allow(subject).to receive(:export_path).and_return('foo')
allow(FileUtils).to receive(:remove_entry)
expect(FileUtils).to receive(:touch).with('foo/milestones.ndjson')
expect(FileUtils).to receive(:touch).with('foo/milestones.ndjson').and_call_original
subject.execute
end
end
context 'when exception occurs' do
before do
allow(service).to receive(:gzip).and_raise(StandardError, 'Error!')
end
it 'marks batch as failed' do
expect(Gitlab::ErrorTracking)
.to receive(:track_exception)
.with(StandardError, portable_id: project.id, portable_type: 'Project')
service.execute
batch.reload
expect(batch.failed?).to eq(true)
expect(batch.objects_count).to eq(0)
expect(batch.error).to eq('Error!')
end
end
end
end

View File

@ -59,7 +59,7 @@ RSpec.describe BulkImports::RelationExportService, feature_category: :importers do
let(:relation) { 'milestones' }
it 'creates empty file on disk' do
expect(FileUtils).to receive(:touch).with("#{export_path}/#{relation}.ndjson")
expect(FileUtils).to receive(:touch).with("#{export_path}/#{relation}.ndjson").and_call_original
subject.execute
end
@ -118,39 +118,6 @@ RSpec.describe BulkImports::RelationExportService, feature_category: :importers do
end
end
context 'when exception occurs during export' do
shared_examples 'tracks exception' do |exception_class|
it 'tracks exception' do
expect(Gitlab::ErrorTracking)
.to receive(:track_exception)
.with(exception_class, portable_id: group.id, portable_type: group.class.name)
.and_call_original
subject.execute
end
end
before do
allow_next_instance_of(BulkImports::ExportUpload) do |upload|
allow(upload).to receive(:save!).and_raise(StandardError)
end
end
it 'marks export as failed' do
subject.execute
expect(export.reload.failed?).to eq(true)
end
include_examples 'tracks exception', StandardError
context 'when passed relation is not supported' do
let(:relation) { 'unsupported' }
include_examples 'tracks exception', ActiveRecord::RecordInvalid
end
end
context 'when export was batched' do
let(:relation) { 'milestones' }
let(:export) { create(:bulk_import_export, group: group, relation: relation, batched: true, batches_count: 2) }

View File

@ -22,6 +22,7 @@ RSpec.describe ServiceDesk::CustomEmailVerifications::UpdateService, feature_category: :service_desk do
end
let(:expected_error_message) { error_parameter_missing }
let(:expected_custom_email_enabled) { false }
let(:logger_params) { { category: 'custom_email_verification' } }
before do
@ -30,7 +31,7 @@ RSpec.describe ServiceDesk::CustomEmailVerifications::UpdateService, feature_category: :service_desk do
end
shared_examples 'a failing verification process' do |expected_error_identifier|
it 'refuses to verify and sends result emails' do
it 'refuses to verify and sends result emails', :aggregate_failures do
expect(Notify).to receive(:service_desk_verification_result_email).twice
expect(Gitlab::AppLogger).to receive(:info).with(logger_params.merge(
@ -52,7 +53,7 @@ RSpec.describe ServiceDesk::CustomEmailVerifications::UpdateService, feature_category: :service_desk do
end
shared_examples 'an early exit from the verification process' do |expected_state|
it 'exits early' do
it 'exits early', :aggregate_failures do
expect(Notify).to receive(:service_desk_verification_result_email).exactly(0).times
expect(Gitlab::AppLogger).to receive(:warn).with(logger_params.merge(
@ -65,7 +66,7 @@ RSpec.describe ServiceDesk::CustomEmailVerifications::UpdateService, feature_category: :service_desk do
verification.reset
expect(response).to be_error
expect(settings).not_to be_custom_email_enabled
expect(settings.custom_email_enabled).to eq expected_custom_email_enabled
expect(verification.state).to eq expected_state
end
end
@ -179,6 +180,26 @@ RSpec.describe ServiceDesk::CustomEmailVerifications::UpdateService, feature_category: :service_desk do
it_behaves_like 'a failing verification process', 'mail_not_received_within_timeframe'
end
context 'when already verified' do
let(:expected_error_message) { error_already_finished }
before do
verification.mark_as_finished!
end
it_behaves_like 'an early exit from the verification process', 'finished'
context 'when enabled' do
let(:expected_custom_email_enabled) { true }
before do
settings.update!(custom_email_enabled: true)
end
it_behaves_like 'an early exit from the verification process', 'finished'
end
end
end
end
end

View File

@ -23,4 +23,21 @@ RSpec.describe BulkImports::RelationBatchExportWorker, feature_category: :importers do
end
end
end
describe '.sidekiq_retries_exhausted' do
let(:job) { { 'args' => job_args } }
it 'sets export status to failed and tracks the exception' do
portable = batch.export.portable
expect(Gitlab::ErrorTracking)
.to receive(:track_exception)
.with(kind_of(StandardError), portable_id: portable.id, portable_type: portable.class.name)
described_class.sidekiq_retries_exhausted_block.call(job, StandardError.new('*' * 300))
expect(batch.reload.failed?).to eq(true)
expect(batch.error.size).to eq(255)
end
end
end
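This spec and the RelationExportWorker spec below assert the same retries-exhausted contract: report the exception and persist a failure whose stored error is capped at 255 characters. A sketch of such a hook follows; the record lookup and the status-event name are assumptions, not the worker's actual code:

sidekiq_retries_exhausted do |job, exception|
  batch = BulkImports::ExportBatch.find(job['args'].first)
  portable = batch.export.portable

  Gitlab::ErrorTracking.track_exception(
    exception,
    portable_id: portable.id,
    portable_type: portable.class.name
  )

  # String#truncate(255) caps long messages at exactly 255 characters,
  # matching the batch.error.size expectation above.
  batch.update!(status_event: 'fail_op', error: exception.message.truncate(255))
end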

View File

@ -63,4 +63,20 @@ RSpec.describe BulkImports::RelationExportWorker, feature_category: :importers do
end
end
end
describe '.sidekiq_retries_exhausted' do
let(:job) { { 'args' => job_args } }
let!(:export) { create(:bulk_import_export, group: group, relation: relation) }
it 'sets export status to failed and tracks the exception' do
expect(Gitlab::ErrorTracking)
.to receive(:track_exception)
.with(kind_of(StandardError), portable_id: group.id, portable_type: group.class.name)
described_class.sidekiq_retries_exhausted_block.call(job, StandardError.new('*' * 300))
expect(export.reload.failed?).to eq(true)
expect(export.error.size).to eq(255)
end
end
end

View File

@ -480,7 +480,9 @@ RSpec.describe 'Every Sidekiq worker', feature_category: :shared do
'X509CertificateRevokeWorker' => 3,
'ComplianceManagement::MergeRequests::ComplianceViolationsWorker' => 3,
'Zoekt::IndexerWorker' => 2,
'Issuable::RelatedLinksCreateWorker' => 3
'Issuable::RelatedLinksCreateWorker' => 3,
'BulkImports::RelationBatchExportWorker' => 3,
'BulkImports::RelationExportWorker' => 3
}.merge(extra_retry_exceptions)
end

View File

@ -1274,10 +1274,10 @@
resolved "https://registry.yarnpkg.com/@gitlab/svgs/-/svgs-3.69.0.tgz#bf76b8ffbe72a783807761a38abe8aaedcfe8c12"
integrity sha512-Zu8Fcjhi3Bk26jZOptcD5F4SHWC7/KuAe00NULViCeswKdoda1k19B+9oCSbsbxY7vMoFuD20kiCJdBCpxb3HA==
"@gitlab/ui@67.5.1":
version "67.5.1"
resolved "https://registry.yarnpkg.com/@gitlab/ui/-/ui-67.5.1.tgz#e331b34fa920f8f26ab0635dc84d15cbd4ac29e3"
integrity sha512-nsIlkZlU9Sig/KyVMkiHUp2Mcg/H2h4U5wN7ROnTv5heLhCP5oCvRpzGuazzhR/oemPqnKv/53ySAosDokhlmA==
"@gitlab/ui@67.5.2":
version "67.5.2"
resolved "https://registry.yarnpkg.com/@gitlab/ui/-/ui-67.5.2.tgz#9a8d6008353b7250c1686a2a20fcae6630c97213"
integrity sha512-Qtkh9AEHjPrST/rl4SeY7Wh/ZB68GQO9jjM4Jll+3JicX3prg55uuFd4PATcq71DbXyIySZuDI/DrltgkhWByw==
dependencies:
"@floating-ui/dom" "1.2.9"
bootstrap-vue "2.23.1"