diff --git a/app/models/project_export_job.rb b/app/models/project_export_job.rb index 029d40f8db4..65f40732bf8 100644 --- a/app/models/project_export_job.rb +++ b/app/models/project_export_job.rb @@ -18,6 +18,7 @@ class ProjectExportJob < ApplicationRecord }.freeze scope :prunable, -> { where("updated_at < ?", EXPIRES_IN.ago) } + scope :order_by_updated_at, -> { order(:updated_at, :id) } state_machine :status, initial: :queued do event :start do diff --git a/app/services/projects/import_export/prune_expired_export_jobs_service.rb b/app/services/projects/import_export/prune_expired_export_jobs_service.rb index 363357c08ac..1a9daf1d12c 100644 --- a/app/services/projects/import_export/prune_expired_export_jobs_service.rb +++ b/app/services/projects/import_export/prune_expired_export_jobs_service.rb @@ -3,36 +3,46 @@ module Projects module ImportExport class PruneExpiredExportJobsService + BATCH_SIZE = 1000 + class << self def execute - prunable_jobs = ProjectExportJob.prunable - - delete_uploads_for_expired_jobs(prunable_jobs) - delete_expired_jobs(prunable_jobs) + delete_uploads_for_expired_jobs + delete_expired_jobs end private - def delete_expired_jobs(prunable_jobs) - prunable_jobs.each_batch do |relation| - relation.delete_all + def delete_expired_jobs + loop do + deleted_count = ProjectExportJob.prunable.limit(BATCH_SIZE).delete_all + break if deleted_count == 0 end end - def delete_uploads_for_expired_jobs(prunable_jobs) - prunable_uploads = get_uploads_for_prunable_jobs(prunable_jobs) - prunable_upload_keys = prunable_uploads.begin_fast_destroy + def delete_uploads_for_expired_jobs + prunable_scope = ProjectExportJob.prunable.select(:id, :updated_at) + iterator = Gitlab::Pagination::Keyset::Iterator.new(scope: prunable_scope.order_by_updated_at) - prunable_uploads.each_batch do |relation| - relation.delete_all + iterator.each_batch(of: BATCH_SIZE) do |prunable_job_batch_scope| + prunable_job_batch = prunable_job_batch_scope.to_a + + loop do + prunable_uploads 
= uploads_for_expired_jobs(prunable_job_batch) + prunable_upload_keys = prunable_uploads.begin_fast_destroy + + deleted_count = prunable_uploads.delete_all + + break if deleted_count == 0 + + Upload.finalize_fast_destroy(prunable_upload_keys) + end end - - Upload.finalize_fast_destroy(prunable_upload_keys) end - def get_uploads_for_prunable_jobs(prunable_jobs) + def uploads_for_expired_jobs(prunable_jobs) prunable_export_uploads = Projects::ImportExport::RelationExportUpload - .for_project_export_jobs(prunable_jobs.select(:id)) + .for_project_export_jobs(prunable_jobs.map(&:id)) Upload.for_model_type_and_id( Projects::ImportExport::RelationExportUpload, diff --git a/db/migrate/20240221100733_add_index_on_updated_at_and_id_to_project_export_jobs.rb b/db/migrate/20240221100733_add_index_on_updated_at_and_id_to_project_export_jobs.rb new file mode 100644 index 00000000000..bf3f3891be1 --- /dev/null +++ b/db/migrate/20240221100733_add_index_on_updated_at_and_id_to_project_export_jobs.rb @@ -0,0 +1,18 @@ +# frozen_string_literal: true + +class AddIndexOnUpdatedAtAndIdToProjectExportJobs < Gitlab::Database::Migration[2.2] + disable_ddl_transaction! 
+ + milestone '16.10' + + TABLE_NAME = :project_export_jobs + INDEX_NAME = 'index_project_export_jobs_on_updated_at_and_id' + + def up + add_concurrent_index TABLE_NAME, [:updated_at, :id], name: INDEX_NAME + end + + def down + remove_concurrent_index_by_name TABLE_NAME, INDEX_NAME + end +end diff --git a/db/schema_migrations/20240221100733 b/db/schema_migrations/20240221100733 new file mode 100644 index 00000000000..a03c2c1ac05 --- /dev/null +++ b/db/schema_migrations/20240221100733 @@ -0,0 +1 @@ +05f5baf54474c621e51cdab4fe58ceea6a3bd12253e53d0ccb35f4a4f18461a7 \ No newline at end of file diff --git a/db/structure.sql b/db/structure.sql index 4ad48e8acde..837e0971fe5 100644 --- a/db/structure.sql +++ b/db/structure.sql @@ -26280,6 +26280,8 @@ CREATE INDEX index_project_export_jobs_on_project_id_and_status ON project_expor CREATE INDEX index_project_export_jobs_on_status ON project_export_jobs USING btree (status); +CREATE INDEX index_project_export_jobs_on_updated_at_and_id ON project_export_jobs USING btree (updated_at, id); + CREATE INDEX index_project_feature_usages_on_project_id ON project_feature_usages USING btree (project_id); CREATE UNIQUE INDEX index_project_features_on_project_id ON project_features USING btree (project_id); diff --git a/doc/administration/backup_restore/backup_large_reference_architectures.md b/doc/administration/backup_restore/backup_large_reference_architectures.md index 1af8c7348de..2a1c597e1d9 100644 --- a/doc/administration/backup_restore/backup_large_reference_architectures.md +++ b/doc/administration/backup_restore/backup_large_reference_architectures.md @@ -103,81 +103,72 @@ Configure AWS Backup to back up S3 data. This can be done at the same time when ### Configure backup of Git repositories -NOTE: -There is a feature proposal to add the ability to back up repositories directly from Gitaly to object storage. See [epic 10077](https://gitlab.com/groups/gitlab-org/-/epics/10077). 
+If using Cloud native hybrid, provision a VM running the GitLab Linux package: -- Linux package (Omnibus): +1. Spin up a VM with 8 vCPU and 7.2 GB memory. This node will be used to back up Git repositories. + Adding support for Gitaly server-side backups to `backup-utility` is proposed in + [issue 438393](https://gitlab.com/gitlab-org/gitlab/-/issues/438393), which would + remove the need to provision a VM. +1. Configure the node as another GitLab Rails node as defined in your [reference architecture](../reference_architectures/index.md). + As with other GitLab Rails nodes, this node must have access to your main PostgreSQL database, Redis, object storage, and Gitaly Cluster. Find + your reference architecture and see the **Configure GitLab Rails** section for an example of how to set the server up. You might need to translate + some [Helm chart values](https://docs.gitlab.com/charts/charts/globals.html) to the equivalent ones for Linux package installations. + Note that [a Praefect node cannot be used to back up Git data](https://gitlab.com/gitlab-org/gitlab/-/issues/396343#note_1385950340). + It must be a GitLab Rails node. +1. Ensure the GitLab application isn't running on this node by disabling most services: - We will continue to use the [backup command](backup_gitlab.md#backup-command) to back up Git repositories. + 1. Edit `/etc/gitlab/gitlab.rb` to ensure the following services are disabled. + `roles(['application_role'])` disables Redis, PostgreSQL, and Consul, and + is the basis of the reference architecture Rails node definition. - If utilization is low enough, you can run it from an existing GitLab Rails node. Otherwise, spin up another node. + ```ruby + roles(['application_role']) + gitlab_workhorse['enable'] = false + puma['enable'] = false + sidekiq['enable'] = false + gitlab_kas['enable'] = false + gitaly['enable'] = false + prometheus_monitoring['enable'] = false + ``` -- Cloud native hybrid: + 1. 
Reconfigure GitLab: - [The `backup-utility` command in a `toolbox` pod fails when there is a large amount of data](https://gitlab.com/gitlab-org/gitlab/-/issues/396343#note_1352989908). In this case, you must run the [backup command](backup_gitlab.md#backup-command) to back up Git repositories, and you must run it in a VM running the GitLab Linux package: + ```shell + sudo gitlab-ctl reconfigure + ``` - 1. Spin up a VM with 8 vCPU and 7.2 GB memory. This node will be used to back up Git repositories. Note that - [a Praefect node cannot be used to back up Git data](https://gitlab.com/gitlab-org/gitlab/-/issues/396343#note_1385950340). - 1. Configure the node as another **GitLab Rails (webservice)** node as defined in your [reference architecture](../reference_architectures/index.md). - As with other GitLab Rails nodes, this node must have access to your main PostgreSQL database, Redis, object storage, and Gitaly Cluster. Find your reference architecture and see the "Configure GitLab Rails" section of an example how to set the server up. You might need to translate some [Helm chart values](https://docs.gitlab.com/charts/charts/globals.html) to the Linux package equivalent ones. - 1. Ensure the GitLab application isn't running on this node by disabling most services: + 1. The only service that should be left is `logrotate`. To verify that `logrotate` is the only remaining service, run: - 1. Edit `/etc/gitlab/gitlab.rb` to ensure the following services are disabled. - `roles(['application_role'])` disables Redis, PostgreSQL, and Consul, and - is the basis of the reference architecture Rails node definition. + ```shell + gitlab-ctl status + ``` - ```ruby - roles(['application_role']) - gitlab_workhorse['enable'] = false - puma['enable'] = false - sidekiq['enable'] = false - gitlab_kas['enable'] = false - gitaly['enable'] = false - prometheus_monitoring['enable'] = false - ``` - - 1. Reconfigure GitLab: - - ```shell - sudo gitlab-ctl reconfigure - ``` - - 1. 
The only service that should be left is `logrotate`, you can verify with: - - ```shell - gitlab-ctl status - ``` - - There is [a feature request](https://gitlab.com/gitlab-org/omnibus-gitlab/-/issues/6823) for a role in the Linux package - that meets these requirements. + [Issue 6823](https://gitlab.com/gitlab-org/omnibus-gitlab/-/issues/6823) proposes to add a role in the Linux package that meets these requirements. To back up the Git repositories: -1. Ensure that the GitLab Rails node has enough attached storage to store Git repositories and an archive of the Git repositories. Additionally, the contents of forked repositories are duplicated into their forks during backup. - For example, if you have 5 GB worth of Git repositories and two forks of a 1 GB repository, then you require at least 14 GB of attached storage to account for: - - 7 GB of Git data. - - A 7 GB archive file of all Git data. -1. SSH into the GitLab Rails node. -1. [Configure uploading backups to remote cloud storage](backup_gitlab.md#upload-backups-to-a-remote-cloud-storage). -1. [Configure AWS Backup](https://docs.aws.amazon.com/aws-backup/latest/devguide/creating-a-backup-plan.html) for this bucket, or use a bucket in the same account and region as your production data object storage buckets, and ensure this bucket is included in your [preexisting AWS Backup](#configure-backup-of-object-storage-data). -1. Run the [backup command](backup_gitlab.md#backup-command), skipping PostgreSQL data: +1. [Configure a server-side backup destination in all Gitaly nodes](../gitaly/configure_gitaly.md#configure-server-side-backups). +1. Add the destination bucket to your [backups of object storage data](#configure-backup-of-object-storage-data). +1. Take a full backup of your Git data. 
Use the `REPOSITORIES_SERVER_SIDE` variable, and skip PostgreSQL data: ```shell - sudo gitlab-backup create SKIP=db + sudo gitlab-backup create REPOSITORIES_SERVER_SIDE=true SKIP=db ``` - The resulting tar file will include only the Git repositories and some metadata. Blobs such as uploads, artifacts, and LFS do not need to be explicitly skipped, because the command does not back up object storage by default. The tar file will be created in the [`/var/opt/gitlab/backups` directory](https://docs.gitlab.com/omnibus/settings/backups.html#creating-an-application-backup) and [the filename will end in `_gitlab_backup.tar`](index.md#backup-id). + This causes Gitaly nodes to upload the Git data and some metadata to remote storage. Blobs such as uploads, artifacts, and LFS do not need to be explicitly skipped, because the `gitlab-backup` command does not back up object storage by default. - Since we configured uploading backups to remote cloud storage, the tar file will be uploaded to the remote region and deleted from disk. - -1. Note the [backup ID](index.md#backup-id) of the backup file for the next step. For example, if the backup archive name is `1493107454_2018_04_25_10.6.4-ce_gitlab_backup.tar`, the backup ID is `1493107454_2018_04_25_10.6.4-ce`. -1. Run the [backup command](backup_gitlab.md#backup-command) again, this time specifying [incremental backup of Git repositories](backup_gitlab.md#incremental-repository-backups), and the backup ID of the source backup file. Using the example ID from the previous step, the command is: +1. Note the [backup ID](index.md#backup-id) of the backup, which is needed for the next step. For example, if the backup command outputs + `2024-02-22 02:17:47 UTC -- Backup 1708568263_2024_02_22_16.9.0-ce is done.`, then the backup ID is `1708568263_2024_02_22_16.9.0-ce`. +1. Check that the full backup created data in the backup bucket. +1. 
Run the [backup command](backup_gitlab.md#backup-command) again, this time specifying [incremental backup of Git repositories](backup_gitlab.md#incremental-repository-backups), and a backup ID. Using the example ID from the previous step, the command is: ```shell - sudo gitlab-backup create SKIP=db INCREMENTAL=yes PREVIOUS_BACKUP=1493107454_2018_04_25_10.6.4-ce + sudo gitlab-backup create REPOSITORIES_SERVER_SIDE=true SKIP=db INCREMENTAL=yes PREVIOUS_BACKUP=1708568263_2024_02_22_16.9.0-ce ``` The value of `PREVIOUS_BACKUP` is not used by this command, but the option is required. [Issue 429141](https://gitlab.com/gitlab-org/gitlab/-/issues/429141) proposes removing this unnecessary requirement. +1. Check that the incremental backup succeeded, and added data to object storage. 1. [Configure cron to make daily backups](backup_gitlab.md#configuring-cron-to-make-daily-backups). Edit the crontab for the `root` user: ```shell @@ -185,10 +176,11 @@ To back up the Git repositories: crontab -e ``` -1. There, add the following line to schedule the backup for everyday at 2 AM: +1. There, add the following lines to schedule the backup for every day at 2 AM.
To limit the number of increments needed to restore a backup, a full backup of Git repositories will be taken on the first of each month, and the rest of the days will take an incremental backup: ```plaintext - 0 2 * * * /opt/gitlab/bin/gitlab-backup create SKIP=db INCREMENTAL=yes PREVIOUS_BACKUP=1493107454_2018_04_25_10.6.4-ce CRON=1 + 0 2 1 * * /opt/gitlab/bin/gitlab-backup create REPOSITORIES_SERVER_SIDE=true SKIP=db CRON=1 + 0 2 2-31 * * /opt/gitlab/bin/gitlab-backup create REPOSITORIES_SERVER_SIDE=true SKIP=db INCREMENTAL=yes PREVIOUS_BACKUP=1708568263_2024_02_22_16.9.0-ce CRON=1 ``` ### Configure backup of configuration files diff --git a/doc/api/merge_request_approvals.md b/doc/api/merge_request_approvals.md index 9e55523902e..357176aad22 100644 --- a/doc/api/merge_request_approvals.md +++ b/doc/api/merge_request_approvals.md @@ -105,7 +105,7 @@ Supported attributes: | `name` | string | Yes | The name of the approval rule. | | `group_ids` | array | No | The IDs of groups as approvers. | | `report_type` | string | No | The report type required when the rule type is `report_approver`. The supported report types are `license_scanning` [(Deprecated in GitLab 15.9)](../update/deprecations.md#license-check-and-the-policies-tab-on-the-license-compliance-page) and `code_coverage`. | -| `rule_type` | string | No | The type of rule. `any_approver` is a pre-configured default rule with `approvals_required` at `0`. Other rules are `regular` and `report_approver`. | +| `rule_type` | string | No | The type of rule. `any_approver` is a pre-configured default rule with `approvals_required` at `0`. Other rules are `regular` (used for regular [merge request approval rules](../../ee/user/project/merge_requests/approvals/rules.md)) and `report_approver`.
`report_approver` is used automatically when an approval rule is created from configured and enabled [merge request approval policies](../../ee/user/application_security/policies/scan-result-policies.md) and should not be used to create approval rules with this API. | | `user_ids` | array | No | The IDs of users as approvers. | Example request: @@ -179,7 +179,7 @@ Supported attributes: | `approvals_required` | string | No | The number of required approvals for this rule. | | `group_ids` | integer | No | The IDs of users as approvers. | | `name` | string | No | The name of the approval rule. | -| `rule_type` | array | No | The type of rule. `any_approver` is a pre-configured default rule with `approvals_required` at `0`. Other rules are `regular` and `report_approver`. | +| `rule_type` | array | No | The type of rule. `any_approver` is a pre-configured default rule with `approvals_required` at `0`. Other rules are `regular` (used for regular [merge request approval rules](../../ee/user/project/merge_requests/approvals/rules.md)) and `report_approver`. `report_approver` is used automatically when an approval rule is created from configured and enabled [merge request approval policies](../../ee/user/application_security/policies/scan-result-policies.md) and should not be used to create approval rules with this API. | | `user_ids` | array | No | The IDs of groups as approvers. | Example request: @@ -535,7 +535,7 @@ Supported attributes: | `group_ids` | Array | No | The IDs of groups as approvers. | | `protected_branch_ids` | Array | No | The IDs of protected branches to scope the rule by. To identify the ID, [use the API](protected_branches.md#list-protected-branches). | | `report_type` | string | No | The report type required when the rule type is `report_approver`. The supported report types are `license_scanning` [(Deprecated in GitLab 15.9)](../update/deprecations.md#license-check-and-the-policies-tab-on-the-license-compliance-page) and `code_coverage`.
| -| `rule_type` | string | No | The type of rule. `any_approver` is a pre-configured default rule with `approvals_required` at `0`. Other rules are `regular` and `report_approver`. | +| `rule_type` | string | No | The type of rule. `any_approver` is a pre-configured default rule with `approvals_required` at `0`. Other rules are `regular` (used for regular [merge request approval rules](../../ee/user/project/merge_requests/approvals/rules.md)) and `report_approver`. `report_approver` is used automatically when an approval rule is created from configured and enabled [merge request approval policies](../../ee/user/application_security/policies/scan-result-policies.md) and should not be used to create approval rule with this API. | | `user_ids` | Array | No | The IDs of users as approvers. If you provide both `user_ids` and `usernames`, both lists of users are added. | | `usernames` | string array | No | The usernames of approvers for this rule (same as `user_ids` but requires a list of usernames). If you provide both `user_ids` and `usernames`, both lists of users are added. | diff --git a/doc/operations/error_tracking.md b/doc/operations/error_tracking.md index cff4523c60b..5383c3670b5 100644 --- a/doc/operations/error_tracking.md +++ b/doc/operations/error_tracking.md @@ -174,7 +174,7 @@ Prerequisites: To enable the Sentry integration: 1. Sign up to Sentry.io or [deploy your own](#deploying-sentry) Sentry instance. -1. [Create a new Sentry project](https://docs.sentry.io/product/sentry-basics/guides/integrate-frontend/create-new-project/). +1. [Create a new Sentry project](https://docs.sentry.io/product/sentry-basics/integrate-frontend/create-new-project/). For each GitLab project that you want to integrate, you should create a new Sentry project. 1. Find or generate a [Sentry auth token](https://docs.sentry.io/api/auth/#auth-tokens). For the SaaS version of Sentry, you can find or generate the auth token at [https://sentry.io/api/](https://sentry.io/api/). 
@@ -201,7 +201,7 @@ You can now visit **Monitor > Error Tracking** in your project's sidebar to ### Sentry's GitLab integration You might also want to enable Sentry's GitLab integration by following the steps -in the [Sentry documentation](https://docs.sentry.io/product/integrations/gitlab/). +in the [Sentry documentation](https://docs.sentry.io/product/integrations/source-code-mgmt/gitlab/). ### Enable GitLab Runner diff --git a/doc/operations/tracing.md b/doc/operations/tracing.md index 59c668f7afe..51ccbd6086f 100644 --- a/doc/operations/tracing.md +++ b/doc/operations/tracing.md @@ -21,7 +21,7 @@ The feature is not ready for production use. With distributed tracing, you can troubleshoot application performance issues by inspecting how a request moves through different services and systems, the timing of each operation, and any errors or logs as they occur. Tracing is particularly useful in the context of microservice applications, which group multiple independent services collaborating to fulfill user requests. -This feature is in [Beta](../policy/experiment-beta-support.md). For more information, see the [group direction page](https://about.gitlab.com/direction/analytics/observability/). To leave feedback about tracing bugs or functionality, comment in the [feedback issue](https://gitlab.com/gitlab-org/opstrace/opstrace/-/issues/2590) or open a [new issue](https://gitlab.com/gitlab-org/opstrace/opstrace/-/issues/new). +This feature is in [Beta](../policy/experiment-beta-support.md). For more information, see the [group direction page](https://about.gitlab.com/direction/monitor/observability/). To leave feedback about tracing bugs or functionality, comment in the [feedback issue](https://gitlab.com/gitlab-org/opstrace/opstrace/-/issues/2590) or open a [new issue](https://gitlab.com/gitlab-org/opstrace/opstrace/-/issues/new). 
## Configure distributed tracing for a project diff --git a/doc/policy/experiment-beta-support.md b/doc/policy/experiment-beta-support.md index e35dd3229fd..dfb4dc129d1 100644 --- a/doc/policy/experiment-beta-support.md +++ b/doc/policy/experiment-beta-support.md @@ -108,12 +108,12 @@ by following the [Production Readiness Review process](https://handbook.gitlab.c ### GA features Generally Available features that [meet the review criteria](https://handbook.gitlab.com/handbook/engineering/infrastructure/production/readiness/#criteria-for-starting-a-production-readiness-review) -must complete the [Production Readiness Review](https://handbook.gitlab.com/handbook/engineering/infrastructure/production/readiness) +must complete the [Production Readiness Review](https://handbook.gitlab.com/handbook/engineering/infrastructure/production/readiness/) and complete all sections up to and including the [GA section in the readiness template](https://gitlab.com/gitlab-com/gl-infra/readiness/-/blob/master/.gitlab/issue_templates/production_readiness.md#general-availability). ### Provide earlier access -Our [mission is "everyone can contribute"](https://about.gitlab.com/company/mission/), +Our [mission is "everyone can contribute"](https://handbook.gitlab.com/handbook/company/mission/), and that is only possible if people outside the company can try a feature. We get higher quality (more diverse) feedback if people from different organizations try something, so give users the ability to opt in to Experimental features when there is enough value. 
diff --git a/doc/solutions/cloud/aws/gitlab_aws_integration.md b/doc/solutions/cloud/aws/gitlab_aws_integration.md index 58ef93d24f6..0dae96a6099 100644 --- a/doc/solutions/cloud/aws/gitlab_aws_integration.md +++ b/doc/solutions/cloud/aws/gitlab_aws_integration.md @@ -93,7 +93,7 @@ See [CD and Operations Integrations](#cd-and-operations-integrations) below for ### CD and Operations Integrations - **AWS CodeDeploy Integration** - through CodePipeline support discussed above in SCM integrations. This capability allows GitLab to interface with [this list of advanced deployment subsystems in AWS](https://docs.aws.amazon.com/codepipeline/latest/userguide/integrations-action-type.html#integrations-deploy). ([12/28/2023](https://aws.amazon.com/about-aws/whats-new/2023/12/codepipeline-gitlab-self-managed/)) `[AWS Built]` -- **AWS SAM Pipelines** - [pipelines support for GitLab](https://aws.amazon.com/about-aws/whats-new/2021/07/simplify-ci-cd-configuration-serverless-applications-your-favorite-ci-cd-system-public-preview). (7/31/2021) +- **AWS SAM Pipelines** - [pipelines support for GitLab](https://aws.amazon.com/about-aws/whats-new/2021/07/simplify-ci-cd-configuration-serverless-applications-your-favorite-ci-cd-system-public-preview/). (7/31/2021) - [Integrate EKS clusters for application deployment](../../../user/infrastructure/clusters/connect/new_eks_cluster.md). 
`[GitLab Built]` - [GitLab pushing a build Artifact to a CodePipeline monitored S3 location](https://docs.aws.amazon.com/codepipeline/latest/userguide/pipelines-about-starting.html#change-detection-methods) `[AWS Built]` - [GitLab Pushing a container to a CodePipeline monitored AWS ECR](https://docs.aws.amazon.com/codepipeline/latest/userguide/pipelines-about-starting.html#change-detection-methods) `[AWS Built]` diff --git a/doc/subscriptions/community_programs.md b/doc/subscriptions/community_programs.md index ca911685648..7d16e7b0d46 100644 --- a/doc/subscriptions/community_programs.md +++ b/doc/subscriptions/community_programs.md @@ -24,7 +24,7 @@ To meet GitLab for Open Source Program requirements, first add an OSI-approved o To add a license to a project: 1. On the left sidebar, select **Search or go to** and find your project. -1. On the overview page, select **Add LICENSE**. If the license you want is not available as a license template, manually copy the entire, unaltered [text of your chosen license](https://opensource.org/licenses/) into the `LICENSE` file. GitLab defaults to **All rights reserved** if users do not perform this action. +1. On the overview page, select **Add LICENSE**. If the license you want is not available as a license template, manually copy the entire, unaltered [text of your chosen license](https://opensource.org/license/) into the `LICENSE` file. GitLab defaults to **All rights reserved** if users do not perform this action. 
![Add license](img/add-license.png) diff --git a/doc/subscriptions/gitlab_dedicated/index.md b/doc/subscriptions/gitlab_dedicated/index.md index 25250accb35..9a162654599 100644 --- a/doc/subscriptions/gitlab_dedicated/index.md +++ b/doc/subscriptions/gitlab_dedicated/index.md @@ -24,7 +24,7 @@ GitLab Dedicated allows you to select the cloud region where your data will be s ### Availability and scalability -GitLab Dedicated leverages modified versions of the GitLab [Cloud Native Hybrid reference architectures](../../administration/reference_architectures/index.md#cloud-native-hybrid) with high availability enabled. When [onboarding](../../administration/dedicated/create_instance.md#step-2-create-your-gitlab-dedicated-instance), GitLab will match you to the closest reference architecture size based on your number of users. Learn about the [current Service Level Objective](https://about.gitlab.com/handbook/engineering/infrastructure/team/gitlab-dedicated/slas/#current-service-level-objective). +GitLab Dedicated leverages modified versions of the GitLab [Cloud Native Hybrid reference architectures](../../administration/reference_architectures/index.md#cloud-native-hybrid) with high availability enabled. When [onboarding](../../administration/dedicated/create_instance.md#step-2-create-your-gitlab-dedicated-instance), GitLab will match you to the closest reference architecture size based on your number of users. Learn about the [current Service Level Objective](https://handbook.gitlab.com/handbook/engineering/infrastructure/team/gitlab-dedicated/slas/#current-service-level-objective). NOTE: The published [reference architectures](../../administration/reference_architectures/index.md) act as a starting point in defining the cloud resources deployed inside GitLab Dedicated environments, but they are not comprehensive. 
GitLab Dedicated leverages additional Cloud Provider services beyond what's included in the standard reference architectures for enhanced security and stability of the environment. Therefore, GitLab Dedicated costs differ from standard reference architecture costs. diff --git a/doc/topics/autodevops/requirements.md b/doc/topics/autodevops/requirements.md index adabebcc457..b91f4ee0914 100644 --- a/doc/topics/autodevops/requirements.md +++ b/doc/topics/autodevops/requirements.md @@ -144,7 +144,7 @@ To make full use of Auto DevOps with Kubernetes, you need: - **cert-manager** (optional, for TLS/HTTPS) - To enable HTTPS endpoints for your application, you can [install cert-manager](https://cert-manager.io/docs/installation/supported-releases/), + To enable HTTPS endpoints for your application, you can [install cert-manager](https://cert-manager.io/docs/releases/), a native Kubernetes certificate management controller that helps with issuing certificates. Installing cert-manager on your cluster issues a [Let's Encrypt](https://letsencrypt.org/) certificate and ensures the diff --git a/doc/tutorials/build_application.md b/doc/tutorials/build_application.md index ea899ee107e..7e5f5e7288b 100644 --- a/doc/tutorials/build_application.md +++ b/doc/tutorials/build_application.md @@ -16,7 +16,7 @@ Use CI/CD pipelines to automatically build, test, and deploy your code. | [Create and run your first GitLab CI/CD pipeline](../ci/quick_start/index.md) | Create a `.gitlab-ci.yml` file and start a pipeline. | **{star}** | | [Create a complex pipeline](../ci/quick_start/tutorial.md) | Learn about the most commonly used GitLab CI/CD keywords by building an increasingly complex pipeline. | | | [Get started: Learn about CI/CD](https://www.youtube.com/watch?v=sIegJaLy2ug) (9m 02s) | Learn about the `.gitlab-ci.yml` file and how it's used. 
| **{star}** | -| [GitLab CI Fundamentals](https://levelup.gitlab.com/learn/learning-path/gitlab-ci-fundamentals) | Learn about GitLab CI/CD and build a pipeline in this self-paced course. | **{star}** | +| [GitLab CI Fundamentals](https://university.gitlab.com/learn/learning-path/gitlab-ci-fundamentals) | Learn about GitLab CI/CD and build a pipeline in this self-paced course. | **{star}** | | [CI deep dive](https://www.youtube.com/watch?v=ZVUbmVac-m8&list=PL05JrBw4t0KorkxIFgZGnzzxjZRCGROt_&index=27) (22m 51s) | Take a closer look at pipelines and continuous integration concepts. | | | [Set up CI/CD in the cloud](../ci/examples/index.md#cicd-in-the-cloud) | Learn how to set up CI/CD in different cloud-based environments. | | | [Find CI/CD examples and templates](../ci/examples/index.md#cicd-examples) | Use these examples and templates to set up CI/CD for your use case. | | diff --git a/doc/tutorials/container_scanning/index.md b/doc/tutorials/container_scanning/index.md index 4cff9d4c023..9a24829b322 100644 --- a/doc/tutorials/container_scanning/index.md +++ b/doc/tutorials/container_scanning/index.md @@ -106,7 +106,7 @@ reports vulnerabilities: 1. In the root directory of your project, select the existing `Dockerfile` file. 1. Select **Edit**. 1. Replace `FROM hello-world:latest` with a different Docker image for the - [`FROM`](https://docs.docker.com/engine/reference/builder/#from) instruction. The best Docker images to demonstrate + [`FROM`](https://docs.docker.com/reference/dockerfile/#from) instruction. The best Docker images to demonstrate container scanning have: - Operating system packages. For example, from Debian, Ubuntu, Alpine, or Red Hat. - Programming language packages. For example, NPM packages or Python packages. 
diff --git a/doc/tutorials/gitlab_navigation.md b/doc/tutorials/gitlab_navigation.md index ce108c6e73d..8daab63a9ce 100644 --- a/doc/tutorials/gitlab_navigation.md +++ b/doc/tutorials/gitlab_navigation.md @@ -12,7 +12,7 @@ and running quickly. | Topic | Description | Good for beginners | |-------|-------------|--------------------| -| [GitLab with Git Essentials](https://levelup.gitlab.com/courses/gitlab-with-git-essentials-s2) | Learn the basics of Git and GitLab in this self-paced course. | **{star}** | +| [GitLab with Git Essentials](https://university.gitlab.com/courses/gitlab-with-git-essentials-s2) | Learn the basics of Git and GitLab in this self-paced course. | **{star}** | | [Use GitLab for DevOps](https://www.youtube.com/watch?v=7q9Y1Cv-ib0) (12m 34s) | Use GitLab through the entire DevOps lifecycle, from planning to monitoring. | **{star}** | | [Use the left sidebar to navigate GitLab](left_sidebar/index.md) | Start navigating the GitLab UI. | **{star}** | | [Use Markdown at GitLab](../user/markdown.md) | GitLab Flavored Markdown (GLFM) is used in many areas of GitLab, for example, in merge requests. | **{star}** | diff --git a/doc/tutorials/install_gitlab_single_node/index.md b/doc/tutorials/install_gitlab_single_node/index.md index 6f6b47ce62a..37812b32025 100644 --- a/doc/tutorials/install_gitlab_single_node/index.md +++ b/doc/tutorials/install_gitlab_single_node/index.md @@ -291,7 +291,7 @@ you of GitLab activity. #### Configure SMTP In this tutorial, you'll set up an [SMTP](https://docs.gitlab.com/omnibus/settings/smtp.html) -server and use the [Mailgun](https://mailgun.com) SMTP provider. +server and use the [Mailgun](https://www.mailgun.com/) SMTP provider. 
First, start by creating an encrypted file that will contain the login credentials, and then configure SMTP for the Linux package: diff --git a/doc/tutorials/issue_triage/index.md b/doc/tutorials/issue_triage/index.md index 424c059f4cb..14ee6e94d2d 100644 --- a/doc/tutorials/issue_triage/index.md +++ b/doc/tutorials/issue_triage/index.md @@ -70,8 +70,8 @@ For this tutorial, suppose you've decided on the following: For inspiration, see how we define these at GitLab: - [Types and subtypes](https://handbook.gitlab.com/handbook/engineering/metrics/#work-type-classification) -- [Priority](https://handbook.gitlab.com/handbook/engineering/quality/issue-triage/#priority) -- [Severity](https://handbook.gitlab.com/handbook/engineering/quality/issue-triage/#severity) +- [Priority](https://handbook.gitlab.com/handbook/engineering/infrastructure/engineering-productivity/issue-triage/#priority) +- [Severity](https://handbook.gitlab.com/handbook/engineering/infrastructure/engineering-productivity/issue-triage/#severity) ## Document your criteria @@ -236,5 +236,5 @@ Next, you can: ![Example triage report heatmap](img/triage_report_v16_3.png) -To learn more about issue triage at GitLab, see [Issue Triage](https://handbook.gitlab.com/handbook/engineering/quality/issue-triage/) +To learn more about issue triage at GitLab, see [Issue Triage](https://handbook.gitlab.com/handbook/engineering/infrastructure/engineering-productivity/issue-triage/) and [Triage Operations](https://handbook.gitlab.com/handbook/engineering/infrastructure/engineering-productivity/triage-operations/). diff --git a/doc/tutorials/plan_and_track.md b/doc/tutorials/plan_and_track.md index 74b99f0bc75..1156c410fc7 100644 --- a/doc/tutorials/plan_and_track.md +++ b/doc/tutorials/plan_and_track.md @@ -12,7 +12,7 @@ issues, epics, and more. 
| Topic | Description | Good for beginners | |-------|-------------|--------------------| -| [GitLab Agile Project Management](https://levelup.gitlab.com/courses/gitlab-agile-project-management-s2) | Learn how to use planning features to manage your projects in this self-paced course. | **{star}** | +| [GitLab Agile Project Management](https://university.gitlab.com/courses/gitlab-agile-project-management-s2) | Learn how to use planning features to manage your projects in this self-paced course. | **{star}** | | [Build a protected workflow for your project](protected_workflow/index.md) | Set up a workflow for your teams, and enforce protections with approval rules. | | | [Run an agile iteration](agile_sprint/index.md) | Use group, projects, and iterations to run an agile development iteration. | | | [Use GitLab to facilitate Scrum](scrum_events/index.md) | Learn to run core Scrum ceremonies and workflows in GitLab. | | diff --git a/doc/tutorials/secure_application.md b/doc/tutorials/secure_application.md index 42a7eda2dff..d44673e3879 100644 --- a/doc/tutorials/secure_application.md +++ b/doc/tutorials/secure_application.md @@ -18,4 +18,4 @@ GitLab can check your application for security vulnerabilities and that it meets | [Set up a scan execution policy](scan_execution_policy/index.md) | Learn how to create a scan execution policy to enforce security scanning of your project. | **{star}** | | [Scan a Docker container for vulnerabilities](container_scanning/index.md) | Learn how to use container scanning templates to add container scanning to your projects. | **{star}** | | [Get started with GitLab application security](../user/application_security/get-started-security.md) | Follow recommended steps to set up security tools. | | -| [GitLab Security Essentials](https://levelup.gitlab.com/courses/security-essentials) | Learn about the essential security capabilities of GitLab in this self-paced course. 
| | +| [GitLab Security Essentials](https://university.gitlab.com/courses/security-essentials) | Learn about the essential security capabilities of GitLab in this self-paced course. | | diff --git a/doc/user/gitlab_com/index.md b/doc/user/gitlab_com/index.md index 0a807e0ebbd..81a68b0451f 100644 --- a/doc/user/gitlab_com/index.md +++ b/doc/user/gitlab_com/index.md @@ -254,7 +254,7 @@ the default value [is the same as for self-managed instances](../../administrati | [Maximum download file size when importing from source GitLab instances by direct transfer](../../administration/settings/import_and_export_settings.md#maximum-download-file-size-for-imports-by-direct-transfer) | 5 GiB | | Maximum attachment size | 100 MiB | | [Maximum decompressed file size for imported archives](../../administration/settings/import_and_export_settings.md#maximum-decompressed-file-size-for-imported-archives) | 25 GiB | -| [Maximum push size](../../administration/settings/account_and_limit_settings.md#max-push-size) | 5 GB | +| [Maximum push size](../../administration/settings/account_and_limit_settings.md#max-push-size) | 5 GiB | If you are near or over the repository size limit, you can either: diff --git a/keeps/helpers/postgres_ai.rb b/keeps/helpers/postgres_ai.rb index e4833e14038..266a416ad75 100644 --- a/keeps/helpers/postgres_ai.rb +++ b/keeps/helpers/postgres_ai.rb @@ -11,7 +11,8 @@ module Keeps def fetch_background_migration_status(job_class_name) query = <<~SQL - SELECT id, created_at, updated_at, finished_at, started_at, status, job_class_name, gitlab_schema + SELECT id, created_at, updated_at, finished_at, started_at, status, job_class_name, + gitlab_schema, total_tuple_count FROM batched_background_migrations WHERE job_class_name = $1::text SQL @@ -19,6 +20,17 @@ module Keeps pg_client.exec_params(query, [job_class_name]) end + def fetch_migrated_tuple_count(batched_background_migration_id) + query = <<~SQL + SELECT SUM("batched_background_migration_jobs"."batch_size") + 
FROM "batched_background_migration_jobs" + WHERE "batched_background_migration_jobs"."batched_background_migration_id" = #{batched_background_migration_id} + AND ("batched_background_migration_jobs"."status" IN (3)) + SQL + + pg_client.exec_params(query) + end + private def connection_string diff --git a/spec/keeps/helpers/postgres_ai_spec.rb b/spec/keeps/helpers/postgres_ai_spec.rb index f7f11915224..b6e460fd4c2 100644 --- a/spec/keeps/helpers/postgres_ai_spec.rb +++ b/spec/keeps/helpers/postgres_ai_spec.rb @@ -39,7 +39,8 @@ RSpec.describe Keeps::Helpers::PostgresAi, feature_category: :tooling do let(:job_class_name) { 'ExampleJob' } let(:query) do <<~SQL - SELECT id, created_at, updated_at, finished_at, started_at, status, job_class_name, gitlab_schema + SELECT id, created_at, updated_at, finished_at, started_at, status, job_class_name, + gitlab_schema, total_tuple_count FROM batched_background_migrations WHERE job_class_name = $1::text SQL @@ -54,4 +55,25 @@ RSpec.describe Keeps::Helpers::PostgresAi, feature_category: :tooling do expect(result).to eq(query_response) end end + + describe '#fetch_migrated_tuple_count' do + let(:batched_background_migration_id) { 100 } + let(:query) do + <<~SQL + SELECT SUM("batched_background_migration_jobs"."batch_size") + FROM "batched_background_migration_jobs" + WHERE "batched_background_migration_jobs"."batched_background_migration_id" = 100 + AND ("batched_background_migration_jobs"."status" IN (3)) + SQL + end + + let(:query_response) { double } + + subject(:result) { described_class.new.fetch_migrated_tuple_count(batched_background_migration_id) } + + it 'fetches data from Postgres AI' do + expect(pg_client).to receive(:exec_params).with(query).and_return(query_response) + expect(result).to eq(query_response) + end + end end diff --git a/spec/models/project_export_job_spec.rb b/spec/models/project_export_job_spec.rb index 6b4066febd8..565a2e88ee7 100644 --- a/spec/models/project_export_job_spec.rb +++ 
b/spec/models/project_export_job_spec.rb @@ -13,4 +13,45 @@ RSpec.describe ProjectExportJob, feature_category: :importers, type: :model do it { is_expected.to validate_presence_of(:jid) } it { is_expected.to validate_presence_of(:status) } end + + describe 'scopes' do + let_it_be(:current_time) { Time.current } + let_it_be(:eight_days_ago) { current_time - 8.days } + let_it_be(:seven_days_ago) { current_time - 7.days } + let_it_be(:five_days_ago) { current_time - 5.days } + + let_it_be(:recent_export_job) { create(:project_export_job, updated_at: five_days_ago) } + let_it_be(:week_old_export_job) { create(:project_export_job, updated_at: seven_days_ago) } + let_it_be(:prunable_export_job_1) { create(:project_export_job, updated_at: eight_days_ago) } + let_it_be(:prunable_export_job_2) { create(:project_export_job, updated_at: eight_days_ago) } + + around do |example| + travel_to(current_time) { example.run } + end + + describe '.prunable' do + it 'only includes records with updated_at older than 7 days ago' do + expect(described_class.prunable).to match_array([prunable_export_job_1, prunable_export_job_2]) + end + end + + describe '.order_by_updated_at' do + it 'sorts by updated_at' do + expect(described_class.order_by_updated_at).to eq( + [ + prunable_export_job_1, + prunable_export_job_2, + week_old_export_job, + recent_export_job + ] + ) + end + + it 'uses id as a tiebreaker' do + export_jobs_with_same_updated_at = described_class.where(updated_at: eight_days_ago).order_by_updated_at + + expect(export_jobs_with_same_updated_at[0].id).to be < export_jobs_with_same_updated_at[1].id + end + end + end end diff --git a/spec/services/projects/import_export/prune_expired_export_jobs_service_spec.rb b/spec/services/projects/import_export/prune_expired_export_jobs_service_spec.rb index d176816a78d..15b98dac4ad 100644 --- a/spec/services/projects/import_export/prune_expired_export_jobs_service_spec.rb +++ 
b/spec/services/projects/import_export/prune_expired_export_jobs_service_spec.rb @@ -3,67 +3,105 @@ require 'spec_helper' RSpec.describe Projects::ImportExport::PruneExpiredExportJobsService, feature_category: :importers do - describe '#execute' do - context 'when pruning expired jobs' do - let_it_be(:old_job_1) { create(:project_export_job, updated_at: 37.months.ago) } - let_it_be(:old_job_2) { create(:project_export_job, updated_at: 12.months.ago) } - let_it_be(:old_job_3) { create(:project_export_job, updated_at: 8.days.ago) } - let_it_be(:fresh_job_1) { create(:project_export_job, updated_at: 1.day.ago) } - let_it_be(:fresh_job_2) { create(:project_export_job, updated_at: 2.days.ago) } - let_it_be(:fresh_job_3) { create(:project_export_job, updated_at: 6.days.ago) } + describe '#execute', :freeze_time do + let_it_be(:project) { create(:project) } - let_it_be(:old_relation_export_1) { create(:project_relation_export, project_export_job_id: old_job_1.id) } - let_it_be(:old_relation_export_2) { create(:project_relation_export, project_export_job_id: old_job_2.id) } - let_it_be(:old_relation_export_3) { create(:project_relation_export, project_export_job_id: old_job_3.id) } - let_it_be(:fresh_relation_export_1) { create(:project_relation_export, project_export_job_id: fresh_job_1.id) } + let!(:old_job_1) { create(:project_export_job, updated_at: 37.months.ago, project: project) } + let!(:old_job_2) { create(:project_export_job, updated_at: 12.months.ago, project: project) } + let!(:old_job_3) { create(:project_export_job, updated_at: 8.days.ago, project: project) } + let!(:fresh_job_1) { create(:project_export_job, updated_at: 1.day.ago, project: project) } + let!(:fresh_job_2) { create(:project_export_job, updated_at: 2.days.ago, project: project) } + let!(:fresh_job_3) { create(:project_export_job, updated_at: 6.days.ago, project: project) } - let_it_be(:old_upload_1) { create(:relation_export_upload, project_relation_export_id: old_relation_export_1.id) } - 
let_it_be(:old_upload_2) { create(:relation_export_upload, project_relation_export_id: old_relation_export_2.id) } - let_it_be(:old_upload_3) { create(:relation_export_upload, project_relation_export_id: old_relation_export_3.id) } - let_it_be(:fresh_upload_1) do - create( - :relation_export_upload, - project_relation_export_id: fresh_relation_export_1.id - ) - end + it 'prunes ProjectExportJob records and associations older than 7 days' do + expect { described_class.execute }.to change { ProjectExportJob.count }.by(-3) - it 'prunes jobs and associations older than 7 days' do - old_uploads = Upload.for_model_type_and_id( - Projects::ImportExport::RelationExportUpload, - [old_upload_1, old_upload_2, old_upload_3].map(&:id) - ) - old_upload_file_paths = Uploads::Local.new.keys(old_uploads) + expect(ProjectExportJob.find_by(id: old_job_1.id)).to be_nil + expect(ProjectExportJob.find_by(id: old_job_2.id)).to be_nil + expect(ProjectExportJob.find_by(id: old_job_3.id)).to be_nil - expect(DeleteStoredFilesWorker).to receive(:perform_async).with(Uploads::Local, old_upload_file_paths) + expect(fresh_job_1.reload).to be_present + expect(fresh_job_2.reload).to be_present + expect(fresh_job_3.reload).to be_present + end - expect { described_class.execute }.to change { ProjectExportJob.count }.by(-3) + it 'prunes ProjectExportJob records in batches' do + stub_const("#{described_class.name}::BATCH_SIZE", 2) - expect(ProjectExportJob.find_by(id: old_job_1.id)).to be_nil - expect(ProjectExportJob.find_by(id: old_job_2.id)).to be_nil - expect(ProjectExportJob.find_by(id: old_job_3.id)).to be_nil + allow(described_class).to receive(:delete_uploads_for_expired_jobs).and_return(nil) + expect(ProjectExportJob).to receive(:prunable).and_call_original.exactly(3).times + + described_class.execute + end + + context 'with associated RelationExport records' do + let!(:old_relation_export_1) { create(:project_relation_export, project_export_job_id: old_job_1.id) } + 
let!(:old_relation_export_2) { create(:project_relation_export, project_export_job_id: old_job_2.id) } + let!(:old_relation_export_3) { create(:project_relation_export, project_export_job_id: old_job_3.id) } + let!(:fresh_relation_export_1) { create(:project_relation_export, project_export_job_id: fresh_job_1.id) } + + it 'prunes expired RelationExport records' do + expect { described_class.execute }.to change { Projects::ImportExport::RelationExport.count }.by(-3) expect(Projects::ImportExport::RelationExport.find_by(id: old_relation_export_1.id)).to be_nil expect(Projects::ImportExport::RelationExport.find_by(id: old_relation_export_2.id)).to be_nil expect(Projects::ImportExport::RelationExport.find_by(id: old_relation_export_3.id)).to be_nil - expect(Projects::ImportExport::RelationExportUpload.find_by(id: old_upload_1.id)).to be_nil - expect(Projects::ImportExport::RelationExportUpload.find_by(id: old_upload_2.id)).to be_nil - expect(Projects::ImportExport::RelationExportUpload.find_by(id: old_upload_3.id)).to be_nil - - expect(old_uploads.reload).to be_empty + expect(fresh_relation_export_1.reload).to be_present end - it 'does not delete associated records for jobs younger than 7 days' do - described_class.execute + context 'and RelationExportUploads' do + let!(:old_upload_1) { create(:relation_export_upload, project_relation_export_id: old_relation_export_1.id) } + let!(:old_upload_2) { create(:relation_export_upload, project_relation_export_id: old_relation_export_2.id) } + let!(:old_upload_3) { create(:relation_export_upload, project_relation_export_id: old_relation_export_3.id) } + let!(:fresh_upload_1) do + create( + :relation_export_upload, + project_relation_export_id: fresh_relation_export_1.id + ) + end - expect(fresh_job_1.reload).to be_present - expect(fresh_job_2.reload).to be_present - expect(fresh_job_3.reload).to be_present - expect(fresh_relation_export_1.reload).to be_present - expect(fresh_upload_1.reload).to be_present - expect( - 
Upload.for_model_type_and_id(Projects::ImportExport::RelationExportUpload, fresh_upload_1.id) - ).to be_present + let(:old_uploads) do + Upload.for_model_type_and_id( + Projects::ImportExport::RelationExportUpload, + [old_upload_1, old_upload_2, old_upload_3].map(&:id) + ) + end + + it 'prunes expired RelationExportUpload records' do + expect { described_class.execute }.to change { Projects::ImportExport::RelationExportUpload.count }.by(-3) + + expect(Projects::ImportExport::RelationExportUpload.find_by(id: old_upload_1.id)).to be_nil + expect(Projects::ImportExport::RelationExportUpload.find_by(id: old_upload_2.id)).to be_nil + expect(Projects::ImportExport::RelationExportUpload.find_by(id: old_upload_3.id)).to be_nil + end + + it 'deletes associated Upload records' do + described_class.execute + + expect(old_uploads).to be_empty + + expect(fresh_upload_1.reload).to be_present + expect( + Upload.for_model_type_and_id(Projects::ImportExport::RelationExportUpload, fresh_upload_1.id) + ).to be_present + end + + it 'deletes stored upload files' do + old_upload_file_paths = Uploads::Local.new.keys(old_uploads) + + expect(DeleteStoredFilesWorker).to receive(:perform_async).with(Uploads::Local, old_upload_file_paths) + + described_class.execute + end + + it 'deletes expired uploads in batches' do + stub_const("#{described_class.name}::BATCH_SIZE", 2) + + expect(Upload).to receive(:finalize_fast_destroy).and_call_original.twice + + described_class.execute + end end end end