Add latest changes from gitlab-org/gitlab@master

This commit is contained in:
GitLab Bot 2023-09-27 15:09:37 +00:00
parent 5471fef236
commit 57ae76cdb5
46 changed files with 1501 additions and 639 deletions

View File

@ -161,6 +161,7 @@ variables:
FLAKY_RSPEC_SUITE_REPORT_PATH: rspec/flaky/report-suite.json
FRONTEND_FIXTURES_MAPPING_PATH: crystalball/frontend_fixtures_mapping.json
GITLAB_WORKHORSE_FOLDER: "gitlab-workhorse"
JOB_METRICS_FILE_PATH: "${CI_PROJECT_DIR}/tmp/job-metrics.json"
KNAPSACK_RSPEC_SUITE_REPORT_PATH: knapsack/report-master.json
RSPEC_CHANGED_FILES_PATH: rspec/changed_files.txt
RSPEC_FAIL_FAST_THRESHOLD: 20

View File

@ -537,7 +537,6 @@ lib/gitlab/checks/**
/doc/administration/reporting/spamcheck.md @axil
/doc/administration/repository_checks.md @eread
/doc/administration/repository_storage_paths.md @eread
/doc/administration/repository_storage_types.md @eread
/doc/administration/restart_gitlab.md @axil
/doc/administration/review_abuse_reports.md @phillipwells
/doc/administration/server_hooks.md @eread
@ -549,10 +548,12 @@ lib/gitlab/checks/**
/doc/administration/settings/files_api_rate_limits.md @msedlakjakubowski
/doc/administration/settings/git_lfs_rate_limits.md @msedlakjakubowski
/doc/administration/settings/gitaly_timeouts.md @eread
/doc/administration/settings/import_and_export_settings.md @eread @ashrafkhamis
/doc/administration/settings/import_export_rate_limits.md @eread @ashrafkhamis
/doc/administration/settings/incident_management_rate_limits.md @msedlakjakubowski
/doc/administration/settings/index.md @msedlakjakubowski
/doc/administration/settings/instance_template_repository.md @msedlakjakubowski
/doc/administration/settings/jira_cloud_app.md @eread @ashrafkhamis
/doc/administration/settings/package_registry_rate_limits.md @phillipwells
/doc/administration/settings/project_integration_management.md @eread @ashrafkhamis
/doc/administration/settings/push_event_activities_limit.md @msedlakjakubowski
@ -590,7 +591,6 @@ lib/gitlab/checks/**
/doc/api/applications.md @jglassman1
/doc/api/audit_events.md @eread
/doc/api/avatar.md @jglassman1
/doc/api/award_emoji.md @msedlakjakubowski
/doc/api/boards.md @msedlakjakubowski
/doc/api/branches.md @msedlakjakubowski
/doc/api/bulk_imports.md @eread @ashrafkhamis
@ -600,6 +600,7 @@ lib/gitlab/checks/**
/doc/api/custom_attributes.md @msedlakjakubowski
/doc/api/database_migrations.md @aqualls
/doc/api/dependencies.md @rdickenson
/doc/api/dependency_list_export.md @rdickenson
/doc/api/dependency_proxy.md @marcel.amirault
/doc/api/deploy_keys.md @phillipwells
/doc/api/deploy_tokens.md @phillipwells
@ -607,6 +608,7 @@ lib/gitlab/checks/**
/doc/api/discussions.md @msedlakjakubowski
/doc/api/dora/ @lciutacu
/doc/api/draft_notes.md @aqualls
/doc/api/emoji_reactions.md @msedlakjakubowski
/doc/api/environments.md @phillipwells
/doc/api/epic_issues.md @msedlakjakubowski
/doc/api/epic_links.md @msedlakjakubowski
@ -643,6 +645,7 @@ lib/gitlab/checks/**
/doc/api/group_relations_export.md @eread @ashrafkhamis
/doc/api/group_releases.md @phillipwells
/doc/api/group_repository_storage_moves.md @ashrafkhamis
/doc/api/group_ssh_certificates.md @msedlakjakubowski
/doc/api/groups.md @lciutacu
/doc/api/import.md @eread @ashrafkhamis
/doc/api/index.md @eread @ashrafkhamis
@ -745,6 +748,8 @@ lib/gitlab/checks/**
/doc/architecture/blueprints/ci_builds_runner_fleet_metrics/ @fneill
/doc/architecture/blueprints/database/scalability/patterns/ @aqualls
/doc/architecture/blueprints/database_scaling/ @aqualls
/doc/architecture/blueprints/google_artifact_registry_integration/ @marcel.amirault
/doc/architecture/blueprints/organization/ @lciutacu
/doc/ci/ @marcel.amirault
/doc/ci/chatops/ @phillipwells
/doc/ci/cloud_deployment/ @phillipwells
@ -758,6 +763,7 @@ lib/gitlab/checks/**
/doc/ci/services/ @fneill
/doc/ci/test_cases/ @msedlakjakubowski
/doc/ci/testing/code_quality.md @rdickenson
/doc/development/activitypub/ @msedlakjakubowski
/doc/development/advanced_search.md @ashrafkhamis
/doc/development/ai_features/ @sselhorn
/doc/development/application_limits.md @axil
@ -896,11 +902,13 @@ lib/gitlab/checks/**
/doc/tutorials/convert_personal_namespace_to_group/ @lciutacu
/doc/tutorials/create_register_first_runner/ @fneill
/doc/tutorials/dependency_scanning.md @rdickenson
/doc/tutorials/export_sbom.md @rdickenson
/doc/tutorials/fuzz_testing/ @rdickenson
/doc/tutorials/install_gitlab_single_node/ @axil
/doc/tutorials/issue_triage/ @msedlakjakubowski
/doc/tutorials/move_personal_project_to_group/ @lciutacu
/doc/tutorials/protected_workflow/ @aqualls
/doc/tutorials/scan_execution_policy/ @rdickenson
/doc/tutorials/scan_result_policy/ @rdickenson
/doc/tutorials/update_commit_messages/ @msedlakjakubowski
/doc/tutorials/website_project_with_analytics/ @lciutacu
@ -910,23 +918,22 @@ lib/gitlab/checks/**
/doc/user/analytics/ @lciutacu
/doc/user/analytics/ci_cd_analytics.md @phillipwells
/doc/user/application_security/ @rdickenson
/doc/user/asciidoc.md @aqualls
/doc/user/award_emojis.md @msedlakjakubowski
/doc/user/asciidoc.md @msedlakjakubowski
/doc/user/clusters/ @phillipwells
/doc/user/compliance/ @rdickenson
/doc/user/compliance/compliance_center/ @eread
/doc/user/compliance/index.md @eread
/doc/user/crm/ @msedlakjakubowski
/doc/user/discussions/ @aqualls
/doc/user/emoji_reactions.md @msedlakjakubowski
/doc/user/enterprise_user/ @jglassman1
/doc/user/feature_flags.md @sselhorn
/doc/user/group/ @lciutacu
/doc/user/group/clusters/ @phillipwells
/doc/user/group/compliance_frameworks.md @eread
/doc/user/group/custom_project_templates.md @aqualls
/doc/user/group/custom_project_templates.md @msedlakjakubowski
/doc/user/group/epics/ @msedlakjakubowski
/doc/user/group/import/ @eread @ashrafkhamis
/doc/user/group/issues_analytics/ @msedlakjakubowski
/doc/user/group/iterations/ @msedlakjakubowski
/doc/user/group/moderate_users.md @phillipwells
/doc/user/group/planning_hierarchy/ @msedlakjakubowski
@ -958,55 +965,39 @@ lib/gitlab/checks/**
/doc/user/profile/personal_access_tokens.md @jglassman1
/doc/user/profile/service_accounts.md @jglassman1
/doc/user/profile/user_passwords.md @jglassman1
/doc/user/project/autocomplete_characters.md @aqualls
/doc/user/project/ @msedlakjakubowski
/doc/user/project/badges.md @lciutacu
/doc/user/project/changelogs.md @aqualls
/doc/user/project/clusters/ @phillipwells
/doc/user/project/code_intelligence.md @aqualls
/doc/user/project/codeowners/ @aqualls
/doc/user/project/deploy_boards.md @phillipwells
/doc/user/project/deploy_keys/ @phillipwells
/doc/user/project/deploy_tokens/ @phillipwells
/doc/user/project/description_templates.md @msedlakjakubowski
/doc/user/project/file_lock.md @aqualls
/doc/user/project/git_attributes.md @aqualls
/doc/user/project/highlighting.md @aqualls
/doc/user/project/import/ @eread @ashrafkhamis
/doc/user/project/import/jira.md @msedlakjakubowski
/doc/user/project/index.md @lciutacu
/doc/user/project/insights/ @lciutacu
/doc/user/project/integrations/ @eread @ashrafkhamis
/doc/user/project/issue_board.md @msedlakjakubowski
/doc/user/project/issues/ @msedlakjakubowski
/doc/user/project/issues/csv_import.md @eread @ashrafkhamis
/doc/user/project/labels.md @msedlakjakubowski
/doc/user/project/members/ @lciutacu
/doc/user/project/merge_requests/ @aqualls
/doc/user/project/merge_requests/approvals/ @msedlakjakubowski
/doc/user/project/merge_requests/cherry_pick_changes.md @msedlakjakubowski
/doc/user/project/merge_requests/csv_export.md @eread
/doc/user/project/merge_requests/methods/ @msedlakjakubowski
/doc/user/project/merge_requests/reviews/data_usage.md @sselhorn
/doc/user/project/merge_requests/squash_and_merge.md @msedlakjakubowski
/doc/user/project/merge_requests/status_checks.md @eread
/doc/user/project/milestones/ @msedlakjakubowski
/doc/user/project/organize_work_with_projects.md @lciutacu
/doc/user/project/protected_branches.md @aqualls
/doc/user/project/protected_tags.md @aqualls
/doc/user/project/push_options.md @aqualls
/doc/user/project/quick_actions.md @msedlakjakubowski
/doc/user/project/releases/ @phillipwells
/doc/user/project/releases/release_evidence.md @eread
/doc/user/project/remote_development/ @ashrafkhamis
/doc/user/project/repository/ @aqualls
/doc/user/project/repository/code_suggestions/ @sselhorn
/doc/user/project/repository/file_finder.md @ashrafkhamis
/doc/user/project/repository/managing_large_repositories.md @eread
/doc/user/project/repository/web_editor.md @ashrafkhamis
/doc/user/project/requirements/ @msedlakjakubowski
/doc/user/project/service_desk/ @msedlakjakubowski
/doc/user/project/settings/import_export.md @eread @ashrafkhamis
/doc/user/project/settings/import_export_troubleshooting.md @eread @ashrafkhamis
/doc/user/project/settings/index.md @lciutacu
/doc/user/project/settings/project_access_tokens.md @jglassman1
/doc/user/project/system_notes.md @aqualls
/doc/user/project/time_tracking.md @msedlakjakubowski
/doc/user/project/web_ide/ @ashrafkhamis
/doc/user/project/working_with_projects.md @lciutacu
/doc/user/public_access.md @lciutacu
@ -1015,7 +1006,7 @@ lib/gitlab/checks/**
/doc/user/search/ @ashrafkhamis
/doc/user/search/command_palette.md @sselhorn
/doc/user/shortcuts.md @ashrafkhamis
/doc/user/snippets.md @aqualls
/doc/user/snippets.md @msedlakjakubowski
/doc/user/ssh.md @jglassman1
/doc/user/storage_management_automation.md @fneill
/doc/user/tasks.md @msedlakjakubowski

View File

@ -27,6 +27,7 @@ include:
- section_start "gitaly-test-spawn" "Spawning Gitaly"; scripts/gitaly-test-spawn; section_end "gitaly-test-spawn" # Do not use 'bundle exec' here
- export RSPEC_SKIPPED_TESTS_REPORT_PATH="rspec/skipped_tests-${CI_JOB_ID}.txt"
- export RSPEC_RETRIED_TESTS_REPORT_PATH="rspec/retried_tests-${CI_JOB_ID}.txt"
- tooling/bin/create_job_metrics_file || true
.no-redis-cluster:
variables:
@ -92,6 +93,7 @@ include:
bundle exec slow-test-merge-request-report-note --input-files "rspec/rspec-*.json" --project "gitlab-org/gitlab" --merge_request_iid "$CI_MERGE_REQUEST_IID" --token "${TEST_SLOW_NOTE_PROJECT_TOKEN}";
fi
- echo -e "\e[0Ksection_end:`date +%s`:report_results_section\r\e[0K"
- tooling/bin/push_job_metrics || true
allow_failure:
exit_codes: !reference [.rspec-base, variables, SUCCESSFULLY_RETRIED_TEST_EXIT_CODE]

View File

@ -424,6 +424,9 @@ group :development, :test do
gem 'benchmark-ips', '~> 2.11.0', require: false
gem 'benchmark-memory', '~> 0.1', require: false
# Profiling data from CI/CD pipelines
gem 'influxdb-client', '~> 2.9', require: false
gem 'knapsack', '~> 1.21.1'
gem 'crystalball', '~> 0.7.0', require: false

View File

@ -303,6 +303,7 @@
{"name":"ice_cube","version":"0.16.4","platform":"ruby","checksum":"da117e5de24bdc33931be629f9b55048641924442c7e9b72fedc05e5592531b7"},
{"name":"ice_nine","version":"0.11.2","platform":"ruby","checksum":"5d506a7d2723d5592dc121b9928e4931742730131f22a1a37649df1c1e2e63db"},
{"name":"imagen","version":"0.1.8","platform":"ruby","checksum":"fde7b727d4fe79c6bb5ac46c1f7184bf87a6d54df54d712ad2be039d2f93a162"},
{"name":"influxdb-client","version":"2.9.0","platform":"ruby","checksum":"b51fadb69e521460bcb6626cd0a9e6c29cd1c8426369c7f0a2b67e93ff9b9f02"},
{"name":"invisible_captcha","version":"2.1.0","platform":"ruby","checksum":"02b452f3eb1b691d155ba3e8e97e1be0e6b6be62e8bc94957234b9cde0852b1e"},
{"name":"ipaddr","version":"1.2.5","platform":"ruby","checksum":"4e679c71d6d8ed99f925487082f70f9a958de155591caa0e7f6cef9aa160f17a"},
{"name":"ipaddress","version":"0.8.3","platform":"ruby","checksum":"85640c4f9194c26937afc8c78e3074a8e7c97d5d1210358d1440f01034d006f5"},

View File

@ -885,6 +885,7 @@ GEM
ice_nine (0.11.2)
imagen (0.1.8)
parser (>= 2.5, != 2.5.1.1)
influxdb-client (2.9.0)
invisible_captcha (2.1.0)
rails (>= 5.2)
ipaddr (1.2.5)
@ -1874,6 +1875,7 @@ DEPENDENCIES
html2text
httparty (~> 0.21.0)
icalendar
influxdb-client (~> 2.9)
invisible_captcha (~> 2.1.0)
ipaddr (~> 1.2.5)
ipaddress (~> 0.8.3)

View File

@ -241,7 +241,11 @@ export function removeParams(params, url = window.location.href, skipEncoding =
return `${root}${writableQuery}${writableFragment}`;
}
export const getLocationHash = (hash = window.location.hash) => hash.split('#')[1];
/**
* Returns value after the '#' in the location hash
* @returns Current value of the hash, undefined if not set
*/
export const getLocationHash = () => window.location.hash?.split('#')[1];
/**
* Returns a boolean indicating whether the URL hash contains the given string value

View File

@ -33,11 +33,9 @@ class BulkImports::Tracker < ApplicationRecord
entity_scope.where(stage: next_stage_scope).with_status(:created)
}
def self.stage_running?(entity_id, stage)
where(stage: stage, bulk_import_entity_id: entity_id)
.with_status(:created, :enqueued, :started)
.exists?
end
scope :running_trackers, -> (entity_id) {
where(bulk_import_entity_id: entity_id).with_status(:enqueued, :started)
}
def pipeline_class
unless entity.pipeline_exists?(pipeline_name)

View File

@ -1,97 +1,68 @@
# frozen_string_literal: true
module BulkImports
class EntityWorker # rubocop:disable Scalability/IdempotentWorker
class EntityWorker
include ApplicationWorker
idempotent!
deduplicate :until_executing
deduplicate :until_executed
data_consistency :always
feature_category :importers
sidekiq_options retry: false, dead: false
worker_has_external_dependencies!
def perform(entity_id, current_stage = nil)
PERFORM_DELAY = 5.seconds
# Keep `_current_stage` parameter for backwards compatibility.
# The parameter will be removed in https://gitlab.com/gitlab-org/gitlab/-/issues/426311
def perform(entity_id, _current_stage = nil)
@entity = ::BulkImports::Entity.find(entity_id)
if stage_running?(entity_id, current_stage)
logger.info(
structured_payload(
bulk_import_entity_id: entity_id,
bulk_import_id: entity.bulk_import_id,
bulk_import_entity_type: entity.source_type,
source_full_path: entity.source_full_path,
current_stage: current_stage,
message: 'Stage running',
source_version: source_version,
importer: 'gitlab_migration'
)
)
return unless @entity.started?
return
if running_tracker.present?
log_info(message: 'Stage running', entity_stage: running_tracker.stage)
else
start_next_stage
end
logger.info(
structured_payload(
bulk_import_entity_id: entity_id,
bulk_import_id: entity.bulk_import_id,
bulk_import_entity_type: entity.source_type,
source_full_path: entity.source_full_path,
current_stage: current_stage,
message: 'Stage starting',
source_version: source_version,
importer: 'gitlab_migration'
)
)
next_pipeline_trackers_for(entity_id).each do |pipeline_tracker|
BulkImports::PipelineWorker.perform_async(
pipeline_tracker.id,
pipeline_tracker.stage,
entity_id
)
end
re_enqueue
rescue StandardError => e
log_exception(e,
{
bulk_import_entity_id: entity_id,
bulk_import_id: entity.bulk_import_id,
bulk_import_entity_type: entity.source_type,
source_full_path: entity.source_full_path,
current_stage: current_stage,
message: 'Entity failed',
source_version: source_version,
importer: 'gitlab_migration'
}
)
Gitlab::ErrorTracking.track_exception(e, log_params(message: 'Entity failed'))
Gitlab::ErrorTracking.track_exception(
e,
bulk_import_entity_id: entity_id,
bulk_import_id: entity.bulk_import_id,
bulk_import_entity_type: entity.source_type,
source_full_path: entity.source_full_path,
source_version: source_version,
importer: 'gitlab_migration'
)
entity.fail_op!
@entity.fail_op!
end
private
attr_reader :entity
def stage_running?(entity_id, stage)
return unless stage
def re_enqueue
BulkImports::EntityWorker.perform_in(PERFORM_DELAY, entity.id)
end
BulkImports::Tracker.stage_running?(entity_id, stage)
def running_tracker
@running_tracker ||= BulkImports::Tracker.running_trackers(entity.id).first
end
def next_pipeline_trackers_for(entity_id)
BulkImports::Tracker.next_pipeline_trackers_for(entity_id).update(status_event: 'enqueue')
end
def start_next_stage
next_pipeline_trackers = next_pipeline_trackers_for(entity.id)
next_pipeline_trackers.each_with_index do |pipeline_tracker, index|
log_info(message: 'Stage starting', entity_stage: pipeline_tracker.stage) if index == 0
BulkImports::PipelineWorker.perform_async(
pipeline_tracker.id,
pipeline_tracker.stage,
entity.id
)
end
end
def source_version
entity.bulk_import.source_version_info.to_s
end
@ -105,5 +76,22 @@ module BulkImports
logger.error(structured_payload(payload))
end
def log_info(payload)
logger.info(structured_payload(log_params(payload)))
end
def log_params(extra)
defaults = {
bulk_import_entity_id: entity.id,
bulk_import_id: entity.bulk_import_id,
bulk_import_entity_type: entity.source_type,
source_full_path: entity.source_full_path,
source_version: source_version,
importer: 'gitlab_migration'
}
defaults.merge(extra)
end
end
end

View File

@ -12,6 +12,8 @@ module BulkImports
data_consistency :always # rubocop:disable SidekiqLoadBalancing/WorkerDataConsistency
feature_category :importers
version 2
def perform(pipeline_tracker_id)
@tracker = Tracker.find(pipeline_tracker_id)
@ -27,7 +29,9 @@ module BulkImports
end
ensure
::BulkImports::EntityWorker.perform_async(tracker.entity.id, tracker.stage)
# This is needed for in-flight migrations.
# It will be removed in https://gitlab.com/gitlab-org/gitlab/-/issues/426299
::BulkImports::EntityWorker.perform_async(tracker.entity.id) if job_version.nil?
end
private

View File

@ -14,7 +14,10 @@ module BulkImports
deduplicate :until_executing
worker_resource_boundary :memory
def perform(pipeline_tracker_id, stage, entity_id)
version 2
# Keep _stage parameter for backwards compatibility.
def perform(pipeline_tracker_id, _stage, entity_id)
@entity = ::BulkImports::Entity.find(entity_id)
@pipeline_tracker = ::BulkImports::Tracker.find(pipeline_tracker_id)
@ -32,7 +35,9 @@ module BulkImports
end
end
ensure
::BulkImports::EntityWorker.perform_async(entity_id, stage)
# This is needed for in-flight migrations.
# It will be removed in https://gitlab.com/gitlab-org/gitlab/-/issues/426299
::BulkImports::EntityWorker.perform_async(entity_id) if job_version.nil?
end
private

View File

@ -1,8 +0,0 @@
---
name: ai_related_settings
introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/118222
rollout_issue_url: https://gitlab.com/gitlab-org/gitlab/-/issues/408791
milestone: '16.0'
type: development
group: group::ai framework
default_enabled: false

View File

@ -1,8 +0,0 @@
---
name: ai_tool_info
introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/128501
rollout_issue_url:
milestone: '16.3'
type: development
group: group::ai framework
default_enabled: false

View File

@ -1,8 +0,0 @@
---
name: chat_epic_identifier
introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/128487
rollout_issue_url:
milestone: '16.3'
type: development
group: group::ai framework
default_enabled: false

View File

@ -1,8 +0,0 @@
---
name: explain_code_snippet
introduced_by_url:
rollout_issue_url: https://gitlab.com/gitlab-org/gitlab/-/issues/404631
milestone: '15.11'
type: development
group: group::source code
default_enabled: false

View File

@ -1,8 +0,0 @@
---
name: gitlab_duo
introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/122235
rollout_issue_url: https://gitlab.com/gitlab-org/gitlab/-/issues/413688
milestone: '16.1'
type: development
group: group::ai framework
default_enabled: false

View File

@ -6,7 +6,7 @@
stage: Package # (required) String value of the stage that the feature was created in. e.g., Growth
issue_url: https://gitlab.com/gitlab-org/container-registry/-/issues/854 # (required) Link to the deprecation issue in GitLab
body: | # (required) Do not modify this line, instead modify the lines below.
The Azure Storage Driver writes to `//` as the default root directory. This default root directory appears in some places within the Azure UI as `/<no-name>/`. We have maintained this legacy behavior to support older deployments using this storage driver. However, when moving to Azure from another storage driver, this behavior hides all your data until you configure the storage driver to build root paths without an extra leading slash by setting `trimlegacyrootprefix: true`.
The container registry's Azure Storage Driver writes to `//` as the default root directory. This default root directory appears in some places within the Azure UI as `/<no-name>/`. We have maintained this legacy behavior to support older deployments using this storage driver. However, when moving to Azure from another storage driver, this behavior hides all your data until you configure the storage driver to build root paths without an extra leading slash by setting `trimlegacyrootprefix: true`.
The new default configuration for the storage driver will set `trimlegacyrootprefix: true`, and `/` will be the default root directory. You can add `trimlegacyrootprefix: false` to your current configuration to avoid any disruptions.

View File

@ -52,18 +52,20 @@ To create an OAuth application:
> Introduced in GitLab 15.7.
You can link self-managed instances after installing the GitLab for Jira Cloud app from the marketplace.
You can link your self-managed instance after you install the GitLab for Jira Cloud app from the marketplace.
Jira apps can only link to one URL per marketplace listing. The official listing links to GitLab.com.
NOTE:
With this method, GitLab.com serves as a proxy for Jira traffic from your instance.
If your instance doesn't meet the [prerequisites](#prerequisites) or you don't want to use the official marketplace listing, you can
[install the app manually](#install-the-gitlab-for-jira-cloud-app-manually).
With this method, it's not possible to create branches from Jira Cloud for self-managed instances.
When you connect the app, it's not possible to create branches from Jira Cloud for self-managed instances.
For more information, see [issue 391432](https://gitlab.com/gitlab-org/gitlab/-/issues/391432).
To create branches from Jira Cloud, [install the app manually](#install-the-gitlab-for-jira-cloud-app-manually).
[Install the GitLab for Jira Cloud app manually](#install-the-gitlab-for-jira-cloud-app-manually) if:
- Your instance does not meet the [prerequisites](#prerequisites).
- You do not want to use the official marketplace listing.
- You want to create branches from Jira Cloud.
### Prerequisites
@ -101,17 +103,41 @@ To link your self-managed instance to the GitLab for Jira Cloud app:
1. Enter your GitLab instance URL.
1. Select **Save**.
### Check if Jira Cloud is linked
You can use the [Rails console](../../administration/operations/rails_console.md#starting-a-rails-console-session)
to check if Jira Cloud is linked to:
- A specific group:
```ruby
JiraConnectSubscription.where(namespace: Namespace.by_path('group/subgroup'))
```
- A specific project:
```ruby
Project.find_by_full_path('path/to/project').jira_subscription_exists?
```
- Any group:
```ruby
installation = JiraConnectInstallation.find_by_base_url("https://customer_name.atlassian.net")
installation.subscriptions
```
## Install the GitLab for Jira Cloud app manually
If your GitLab instance is self-managed and you don't want to use the official marketplace listing,
you can install the app manually.
If you do not want to use the official marketplace listing and want to create branches from Jira Cloud,
install the GitLab for Jira Cloud app manually.
Each Jira Cloud application must be installed from a single location. Jira fetches
a [manifest file](https://developer.atlassian.com/cloud/jira/platform/connect-app-descriptor/)
from the location you provide. The manifest file describes the application to the system. To support
self-managed GitLab instances with Jira Cloud, you can do one of the following:
You must install each Jira Cloud app from a single location. Jira fetches a
[manifest file](https://developer.atlassian.com/cloud/jira/platform/connect-app-descriptor/)
from the location you provide. The manifest file describes the app to the system.
To support your self-managed instance with Jira Cloud, do one of the following:
- [Install the application in development mode](#install-the-application-in-development-mode).
- [Install the app in development mode](#install-the-app-in-development-mode).
- [Create a marketplace listing](#create-a-marketplace-listing).
### Prerequisites
@ -119,41 +145,42 @@ self-managed GitLab instances with Jira Cloud, you can do one of the following:
- The instance must be publicly available.
- You must set up [OAuth authentication](#set-up-oauth-authentication).
### Install the application in development mode
### Install the app in development mode
[Prerequisites](#prerequisites-1)
To configure your Jira instance so you can install applications
from outside the marketplace:
To configure your Jira instance so you can install apps from outside the marketplace:
1. Sign in to your Jira instance as an administrator.
1. Place your Jira instance into
[development mode](https://developer.atlassian.com/cloud/jira/platform/getting-started-with-connect/#step-2--enable-development-mode).
1. Sign in to your GitLab application as a user with administrator access.
1. Install the GitLab application from your Jira instance as
described in the [Atlassian developer guide](https://developer.atlassian.com/cloud/jira/platform/getting-started-with-connect/#step-3--install-and-test-your-app):
1. In your Jira instance, go to **Apps > Manage Apps** and select **Upload app**:
1. For **App descriptor URL**, provide the full URL to your manifest file based
on your instance configuration. By default, your manifest file is located at `/-/jira_connect/app_descriptor.json`. For example, if your GitLab self-managed instance domain is `app.pet-store.cloud`, your manifest file is located at `https://app.pet-store.cloud/-/jira_connect/app_descriptor.json`.
1. Select **Upload**. Jira fetches the content of your `app_descriptor` file and installs
it.
1. To configure the integration, select **Get started**.
1. Disable [development mode](https://developer.atlassian.com/cloud/jira/platform/getting-started-with-connect/#step-2--enable-development-mode) on your Jira instance.
1. [Enable development mode](https://developer.atlassian.com/cloud/jira/platform/getting-started-with-connect/#step-3--enable-development-mode-in-your-site)
on your Jira instance.
1. Sign in to GitLab as an administrator.
1. [Install GitLab from your Jira instance](https://developer.atlassian.com/cloud/jira/platform/getting-started-with-connect/#step-3--install-and-test-your-app):
1. On your Jira instance, go to **Apps > Manage Apps** and select **Upload app**.
1. In **App descriptor URL**, provide the full URL to your manifest file based
on your instance configuration.
The **GitLab for Jira Cloud** app now displays under **Manage apps**. You can also
select **Get started** to open the configuration page rendered from your GitLab instance.
By default, your manifest file is located at `/-/jira_connect/app_descriptor.json`.
For example, if your GitLab self-managed instance domain is `app.pet-store.cloud`,
your manifest file is located at `https://app.pet-store.cloud/-/jira_connect/app_descriptor.json`.
1. Select **Upload**.
1. Select **Get started** to configure the integration.
1. [Disable development mode](https://developer.atlassian.com/cloud/jira/platform/getting-started-with-connect/#step-3--enable-development-mode-in-your-site)
on your Jira instance.
In **Apps > Manage Apps**, **GitLab for Jira Cloud** now appears.
You can also select **Get started** to open the configuration page from your GitLab instance.
NOTE:
If a GitLab update makes changes to the application descriptor, you must uninstall,
then reinstall the application.
If a GitLab upgrade makes changes to the app descriptor, you must reinstall the app.
### Create a marketplace listing
[Prerequisites](#prerequisites-1)
If you don't want to use development mode on your Jira instance, you can create
your own marketplace listing. This way, your application
can be installed from the Atlassian Marketplace.
If you do not want to use development mode, you can create your own marketplace listing
and install the GitLab for Jira Cloud app from the Atlassian Marketplace.
To create a marketplace listing:
@ -168,7 +195,8 @@ NOTE:
This method uses [automatic updates](../../integration/jira/connect-app.md#update-the-gitlab-for-jira-cloud-app)
like the GitLab.com marketplace listing.
For more information about creating a marketplace listing, see the [Atlassian documentation](https://developer.atlassian.com/platform/marketplace/installing-cloud-apps/#creating-the-marketplace-listing).
For more information about creating a marketplace listing, see the
[Atlassian documentation](https://developer.atlassian.com/platform/marketplace/installing-cloud-apps/#creating-the-marketplace-listing).
## Configure your GitLab instance to serve as a proxy
@ -327,26 +355,3 @@ Cross-Origin Request Blocked: The Same Origin Policy disallows reading the remot
- The authenticated Jira user does not have [site administrator](https://support.atlassian.com/user-management/docs/give-users-admin-permissions/#Make-someone-a-site-admin) access.
To resolve this issue, ensure the authenticated user is a Jira site administrator and try again.
### Check if Jira Cloud is linked
You can use the [Rails console](../../administration/operations/rails_console.md#starting-a-rails-console-session) to check if Jira Cloud is linked to:
- A specified group:
```ruby
JiraConnectSubscription.where(namespace: Namespace.by_path('group/subgroup'))
```
- A specified project:
```ruby
Project.find_by_full_path('path/to/project').jira_subscription_exists?
```
- Any group:
```ruby
installation = JiraConnectInstallation.find_by_base_url("https://customer_name.atlassian.net")
installation.subscriptions
```

View File

@ -23020,6 +23020,19 @@ four standard [pagination arguments](#connection-pagination-arguments):
| <a id="projectpipelinesupdatedbefore"></a>`updatedBefore` | [`Time`](#time) | Pipelines updated before this date. |
| <a id="projectpipelinesusername"></a>`username` | [`String`](#string) | Filter pipelines by the user that triggered the pipeline. |
##### `Project.productAnalyticsEventsStored`
Count of all events used, filtered optionally by month.
Returns [`Int`](#int).
###### Arguments
| Name | Type | Description |
| ---- | ---- | ----------- |
| <a id="projectproductanalyticseventsstoredmonth"></a>`month` | [`Int`](#int) | Month for the period to return. |
| <a id="projectproductanalyticseventsstoredyear"></a>`year` | [`Int`](#int) | Year for the period to return. |
##### `Project.projectMembers`
Members of the project.

View File

@ -14,14 +14,6 @@ Use [this snippet](https://gitlab.com/gitlab-org/gitlab/-/snippets/2554994) for
1. [Enable Anthropic API features](index.md#configure-anthropic-access).
1. [Enable OpenAI support](index.md#configure-openai-access).
1. [Ensure the embedding database is configured](index.md#set-up-the-embedding-database).
1. Enable feature specific feature flag.
```ruby
Feature.enable(:gitlab_duo)
Feature.enable(:tanuki_bot)
Feature.enable(:ai_redis_cache)
```
1. Ensure that your current branch is up-to-date with `master`.
1. To access the GitLab Duo Chat interface, in the lower-left corner of any page, select **Help** and **Ask GitLab Duo Chat**.

View File

@ -58,7 +58,6 @@ Use [this snippet](https://gitlab.com/gitlab-org/gitlab/-/snippets/2554994) for
1. Enable the required general feature flags:
```ruby
Feature.enable(:ai_related_settings)
Feature.enable(:openai_experimentation)
```

View File

@ -146,6 +146,40 @@ triggering the job.
The job token is secured by its short life-time and limited scope. It could possibly be leaked if multiple jobs run on the same machine ([like with the shell runner](https://docs.gitlab.com/runner/security/#usage-of-shell-executor)). On Docker Machine runners, configuring [`MaxBuilds=1`](https://docs.gitlab.com/runner/configuration/advanced-configuration.html#the-runnersmachine-section) is recommended to make sure runner machines only ever run one build and are destroyed afterwards. This may impact performance, as provisioning machines takes some time.
## GitLab cluster agent tokens
When [registering a GitLab Agent for Kubernetes](../user/clusters/agent/install/index.md#register-the-agent-with-gitlab), GitLab generates an access token to authenticate the cluster agent with GitLab.
To revoke this cluster agent token, you can use either the:
- [Agents API](../api/cluster_agents.md#revoke-an-agent-token) to revoke the token.
- [UI](../user/clusters/agent/work_with_agent.md#reset-the-agent-token) to reset the token.
For both methods, you must know the token, agent, and project IDs. To find this information, use the [Rails console](../administration/operations/rails_console.md).
```irb
# Find token ID
Clusters::AgentToken.find_by_token('glagent-xxx').id
# Find agent ID
Clusters::AgentToken.find_by_token('glagent-xxx').agent.id
=> 1234
# Find project ID
Clusters::AgentToken.find_by_token('glagent-xxx').agent.project_id
=> 12345
```
You can also revoke a token directly in the Rails console:
```irb
# Revoke token with RevokeService, including generating an audit event
Clusters::AgentTokens::RevokeService.new(token: Clusters::AgentToken.find_by_token('glagent-xxx'), current_user: User.find_by_username('admin-user')).execute
# Revoke token manually, which does not generate an audit event
Clusters::AgentToken.find_by_token('glagent-xxx').revoke!
```
## Other tokens
### Feed token

View File

@ -1303,7 +1303,7 @@ set the `POSTGRES_ENABLED` CI/CD variable to `true`.
- To discuss this change or learn more, see the [deprecation issue](https://gitlab.com/gitlab-org/container-registry/-/issues/854).
</div>
The Azure Storage Driver writes to `//` as the default root directory. This default root directory appears in some places within the Azure UI as `/<no-name>/`. We have maintained this legacy behavior to support older deployments using this storage driver. However, when moving to Azure from another storage driver, this behavior hides all your data until you configure the storage driver to build root paths without an extra leading slash by setting `trimlegacyrootprefix: true`.
The container registry's Azure Storage Driver writes to `//` as the default root directory. This default root directory appears in some places within the Azure UI as `/<no-name>/`. We have maintained this legacy behavior to support older deployments using this storage driver. However, when moving to Azure from another storage driver, this behavior hides all your data until you configure the storage driver to build root paths without an extra leading slash by setting `trimlegacyrootprefix: true`.
The new default configuration for the storage driver will set `trimlegacyrootprefix: true`, and `/` will be the default root directory. You can add `trimlegacyrootprefix: false` to your current configuration to avoid any disruptions.

View File

@ -464,7 +464,7 @@ To enable Code Suggestions for a group:
## Enable Experiment features **(ULTIMATE SAAS)**
> [Introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/404856) in GitLab 16.0.
> [Introduced](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/118222) in GitLab 16.0.
WARNING:
[Experiment features](../../policy/experiment-beta-support.md#experiment) may produce unexpected results
@ -485,7 +485,7 @@ To enable Experiment features for a top-level group:
## Enable third-party AI features **(ULTIMATE SAAS)**
> [Introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/404856) in GitLab 16.0.
> [Introduced](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/118222) in GitLab 16.0.
WARNING:
These AI features use [third-party services](../ai_features.md#data-usage)

View File

@ -78,7 +78,6 @@ module Gitlab
push_frontend_feature_flag(:server_side_frecent_namespaces, current_user)
# To be removed with https://gitlab.com/gitlab-org/gitlab/-/issues/399248
push_frontend_feature_flag(:remove_monitor_metrics)
push_frontend_feature_flag(:gitlab_duo, current_user)
push_frontend_feature_flag(:custom_emoji)
end

View File

@ -1,48 +0,0 @@
# frozen_string_literal: true

module ProductAnalytics
  # Resolves the product analytics configuration for a given project.
  # Every key is looked up on the project-level settings first; blank
  # project values fall back to the instance-wide application settings.
  class Settings
    BASE_CONFIG_KEYS = %w[product_analytics_data_collector_host cube_api_base_url cube_api_key].freeze

    SNOWPLOW_CONFIG_KEYS = (%w[product_analytics_configurator_connection_string] + BASE_CONFIG_KEYS).freeze

    # NOTE: SNOWPLOW_CONFIG_KEYS already contains BASE_CONFIG_KEYS, so this
    # concatenation keeps duplicate entries — preserved as-is for
    # compatibility with existing callers of the constant.
    ALL_CONFIG_KEYS = (BASE_CONFIG_KEYS + SNOWPLOW_CONFIG_KEYS).freeze

    class << self
      # Convenience factory mirroring `new(project:)`.
      def for_project(project)
        new(project: project)
      end
    end

    # Define one public reader per configuration key (e.g. `#cube_api_key`),
    # each delegating to the project-first lookup below.
    ALL_CONFIG_KEYS.each do |config_key|
      define_method(config_key) { get_setting_value(config_key) }
    end

    # @param project [Project] project whose settings take precedence
    def initialize(project:)
      @project = project
    end

    # True when the instance-level product analytics toggle is on and
    # every configuration key resolves to a value.
    def enabled?
      ::Gitlab::CurrentSettings.product_analytics_enabled? && configured?
    end

    # True when every configuration key resolves to a non-blank value.
    def configured?
      ALL_CONFIG_KEYS.all? { |config_key| get_setting_value(config_key).present? }
    end

    private

    # Project-level value wins; blank values fall back to instance settings.
    # rubocop:disable GitlabSecurity/PublicSend
    def get_setting_value(config_key)
      @project.project_setting.public_send(config_key).presence ||
        ::Gitlab::CurrentSettings.public_send(config_key)
    end
    # rubocop:enable GitlabSecurity/PublicSend
  end
end

View File

@ -73,7 +73,7 @@ namespace :tw do
CodeOwnerRule.new('Runner', '@fneill'),
CodeOwnerRule.new('Runner SaaS', '@fneill'),
CodeOwnerRule.new('Security Policies', '@rdickenson'),
CodeOwnerRule.new('Source Code', ->(path) { path.start_with?('/doc/user') ? '@aqualls' : '@msedlakjakubowski' }),
CodeOwnerRule.new('Source Code', '@msedlakjakubowski'),
CodeOwnerRule.new('Static Analysis', '@rdickenson'),
CodeOwnerRule.new('Style Guide', '@sselhorn'),
CodeOwnerRule.new('Tenant Scale', '@lciutacu'),

View File

@ -5394,6 +5394,12 @@ msgstr ""
msgid "Analytics|Analytics settings for '%{project_name}' were successfully updated."
msgstr ""
msgid "Analytics|Are you sure you want to cancel creating this dashboard?"
msgstr ""
msgid "Analytics|Are you sure you want to cancel editing this dashboard?"
msgstr ""
msgid "Analytics|Browser"
msgstr ""
@ -5424,6 +5430,12 @@ msgstr ""
msgid "Analytics|Configure Dashboard Project"
msgstr ""
msgid "Analytics|Continue creating"
msgstr ""
msgid "Analytics|Continue editing"
msgstr ""
msgid "Analytics|Create dashboard %{dashboardSlug}"
msgstr ""
@ -12348,19 +12360,28 @@ msgstr ""
msgid "ComplianceReport|Update result"
msgstr ""
msgid "ComplianceStandardsAdherence|A rule is configured to prevent author approved merge requests."
msgstr ""
msgid "ComplianceStandardsAdherence|A rule is configured to prevent merge requests approved by committers."
msgstr ""
msgid "ComplianceStandardsAdherence|A rule is configured to require two approvals."
msgstr ""
msgid "ComplianceStandardsAdherence|At least two approvals"
msgstr ""
msgid "ComplianceStandardsAdherence|Failure reason"
msgstr ""
msgid "ComplianceStandardsAdherence|Have a valid rule that prevents author approved merge requests"
msgid "ComplianceStandardsAdherence|Have a valid rule that prevents author-approved merge requests from being merged"
msgstr ""
msgid "ComplianceStandardsAdherence|Have a valid rule that prevents merge requests approved by committers"
msgid "ComplianceStandardsAdherence|Have a valid rule that prevents merge requests with less than two approvals from being merged"
msgstr ""
msgid "ComplianceStandardsAdherence|Have a valid rule that requires any merge request to have more than two approvals"
msgid "ComplianceStandardsAdherence|Have a valid rule that prevents users from approving merge requests where theyve added commits"
msgstr ""
msgid "ComplianceStandardsAdherence|How to fix"
@ -12372,10 +12393,10 @@ msgstr ""
msgid "ComplianceStandardsAdherence|No projects with standards adherence checks found"
msgstr ""
msgid "ComplianceStandardsAdherence|No rule configured to prevent merge requests approved by committers."
msgid "ComplianceStandardsAdherence|No rule is configured to prevent author approved merge requests."
msgstr ""
msgid "ComplianceStandardsAdherence|No rule is configured to prevent author approved merge requests."
msgid "ComplianceStandardsAdherence|No rule is configured to prevent merge requests approved by committers."
msgstr ""
msgid "ComplianceStandardsAdherence|No rule is configured to require two approvals."
@ -12390,6 +12411,9 @@ msgstr ""
msgid "ComplianceStandardsAdherence|Requirement"
msgstr ""
msgid "ComplianceStandardsAdherence|Success reason"
msgstr ""
msgid "ComplianceStandardsAdherence|The following features help satisfy this requirement."
msgstr ""

View File

@ -2,6 +2,7 @@
require_relative '../../migration_helpers'
require_relative '../../code_reuse_helpers'
require_relative '../../feature_categories'
module RuboCop
module Cop
@ -10,32 +11,20 @@ module RuboCop
class FeatureCategory < RuboCop::Cop::Base
include MigrationHelpers
FEATURE_CATEGORIES_FILE_PATH = File.expand_path("../../../config/feature_categories.yml", __dir__)
MSG = "'feature_category' should be defined to better assign the ownership for batched migration jobs. " \
"For more details refer: " \
"https://docs.gitlab.com/ee/development/feature_categorization/#batched-background-migrations"
INVALID_FEATURE_CATEGORY_MSG = "'feature_category' is invalid. " \
"List of valid ones can be found in #{FEATURE_CATEGORIES_FILE_PATH}".freeze
INVALID_FEATURE_CATEGORY_MSG =
"'feature_category' is invalid. " \
"List of valid ones can be found in #{FeatureCategories::CONFIG_PATH}".freeze
RESTRICT_ON_SEND = [:feature_category].freeze
class << self
attr_accessor :available_feature_categories
end
def_node_search :feature_category?, <<~PATTERN
(:send nil? :feature_category ...)
PATTERN
def on_new_investigation
super
# Defined only once per rubocop whole run instead of each file.
fetch_available_feature_categories unless self.class.available_feature_categories.present?
end
def on_class(node)
return unless in_background_migration?(node) && node.parent_class&.short_name == :BatchedMigrationJob
@ -48,15 +37,15 @@ module RuboCop
add_offense(node, message: INVALID_FEATURE_CATEGORY_MSG) unless valid_feature_category?(node)
end
def external_dependency_checksum
FeatureCategories.config_checksum
end
private
def valid_feature_category?(node)
feature_category = node.descendants.first.value
self.class.available_feature_categories.include?(feature_category.to_s)
end
def fetch_available_feature_categories
self.class.available_feature_categories = YAML.load_file(FEATURE_CATEGORIES_FILE_PATH).to_set
FeatureCategories.available.include?(feature_category.to_s)
end
end
end

View File

@ -4,6 +4,8 @@ require 'rubocop/cop/rspec/base'
require 'rubocop/cop/rspec/mixin/top_level_group'
require 'did_you_mean'
require_relative '../../feature_categories'
module RuboCop
module Cop
module RSpec
@ -34,16 +36,6 @@ module RuboCop
MSG_SYMBOL = 'Please use a symbol as value.'
FEATURE_CATEGORIES_PATH = File.expand_path('../../../config/feature_categories.yml', __dir__).freeze
# List of feature categories which are not defined in config/feature_categories.yml
CUSTOM_FEATURE_CATEGORIES = [
# https://docs.gitlab.com/ee/development/feature_categorization/#tooling-feature-category
:tooling,
# https://docs.gitlab.com/ee/development/feature_categorization/#shared-feature-category
:shared
].to_set.freeze
# @!method feature_category?(node)
def_node_matcher :feature_category_value, <<~PATTERN
(block
@ -69,17 +61,14 @@ module RuboCop
add_offense(value_node, message: message)
end
# Used by RuboCop to invalidate its cache if the contents of
# config/feature_categories.yml changes.
def external_dependency_checksum
@external_dependency_checksum ||=
Digest::SHA256.file(FEATURE_CATEGORIES_PATH).hexdigest
FeatureCategories.config_checksum
end
private
def suggestion_message(value_node)
spell = DidYouMean::SpellChecker.new(dictionary: self.class.feature_categories)
spell = DidYouMean::SpellChecker.new(dictionary: FeatureCategories.available_with_custom)
suggestions = spell.correct(value_node.value)
return if suggestions.none?
@ -88,15 +77,7 @@ module RuboCop
end
def valid_feature_category?(node)
self.class.feature_categories.include?(node.value)
end
def self.feature_categories
@feature_categories ||= YAML
.load_file(FEATURE_CATEGORIES_PATH)
.map(&:to_sym)
.to_set
.union(CUSTOM_FEATURE_CATEGORIES)
FeatureCategories.available_with_custom.include?(node.value.to_s)
end
end
end

View File

@ -0,0 +1,35 @@
# frozen_string_literal: true

require 'set'
require 'yaml'
require 'digest/sha2'

module RuboCop
  # Shared lookup helpers for the feature categories declared in
  # config/feature_categories.yml, used by several cops.
  module FeatureCategories
    CONFIG_PATH = File.expand_path("../config/feature_categories.yml", __dir__)

    # Categories that are valid but intentionally absent from
    # config/feature_categories.yml:
    # https://docs.gitlab.com/ee/development/feature_categorization/#tooling-feature-category
    # https://docs.gitlab.com/ee/development/feature_categorization/#shared-feature-category
    CUSTOM_CATEGORIES = Set.new(%w[
      tooling
      shared
    ]).freeze

    class << self
      # Set of category name strings read from the YAML config (memoized).
      def available
        @available ||= YAML.load_file(CONFIG_PATH).to_set
      end

      # All valid categories, including the custom ones above (memoized).
      def available_with_custom
        @available_with_custom ||= available.union(CUSTOM_CATEGORIES)
      end

      # SHA256 of the config file contents (memoized).
      #
      # Used by RuboCop to invalidate its cache if the contents of
      # config/feature_categories.yml changes: define a method called
      # `external_dependency_checksum` in the cop and return this value.
      def config_checksum
        @config_checksum ||= Digest::SHA256.file(CONFIG_PATH).hexdigest
      end
    end
  end
end

View File

@ -297,6 +297,9 @@ function retry_failed_rspec_examples() {
exit 1
fi
# Job metrics for influxDB/Grafana
tooling/bin/update_job_metrics_tag rspec_retried_in_new_process "true" || true
# Keep track of the tests that are retried, later consolidated in a single file by the `rspec:flaky-tests-report` job
local failed_examples=$(grep " failed" ${RSPEC_LAST_RUN_RESULTS_FILE})
local report_name=$(echo "${CI_JOB_NAME}" | sed -E 's|[/ ]|_|g') # e.g. 'rspec unit pg13 1/24' would become 'rspec_unit_pg13_1_24'

View File

@ -327,6 +327,26 @@ describe('URL utility', () => {
});
});
describe('getLocationHash', () => {
it('gets a default empty value', () => {
setWindowLocation(TEST_HOST);
expect(urlUtils.getLocationHash()).toBeUndefined();
});
it('gets a value', () => {
setWindowLocation('#hash-value');
expect(urlUtils.getLocationHash()).toBe('hash-value');
});
it('gets an empty value when only hash is set', () => {
setWindowLocation('#');
expect(urlUtils.getLocationHash()).toBeUndefined();
});
});
describe('doesHashExistInUrl', () => {
beforeEach(() => {
setWindowLocation('#note_1');

View File

@ -1,101 +0,0 @@
# frozen_string_literal: true

require 'spec_helper'

# Specs for ProductAnalytics::Settings: per-key resolution (project-level
# values override instance-level ones, blank project values fall back) and
# the configured?/enabled? predicates.
RSpec.describe ProductAnalytics::Settings, feature_category: :product_analytics_data_management do
  let_it_be(:project) { create(:project) }

  subject { described_class.for_project(project) }

  describe 'config settings' do
    context 'when configured' do
      before do
        mock_settings('test')
      end

      it 'will be configured' do
        expect(subject.configured?).to be_truthy
      end
    end

    context 'when not configured' do
      before do
        mock_settings('')
      end

      it 'will not be configured' do
        expect(subject.configured?).to be_falsey
      end
    end

    context 'when one configuration setting is missing' do
      before do
        # Stub every key except the last, which is stubbed blank: a single
        # missing key must make the whole configuration invalid.
        missing_key = ProductAnalytics::Settings::ALL_CONFIG_KEYS.last
        mock_settings('test', ProductAnalytics::Settings::ALL_CONFIG_KEYS - [missing_key])
        allow(::Gitlab::CurrentSettings).to receive(missing_key).and_return('')
      end

      it 'will not be configured' do
        expect(subject.configured?).to be_falsey
      end
    end

    # One example set per configuration key: readable from instance
    # settings, overridable per project, blank project values fall back.
    ProductAnalytics::Settings::ALL_CONFIG_KEYS.each do |key|
      it "can read #{key}" do
        expect(::Gitlab::CurrentSettings).to receive(key).and_return('test')

        expect(subject.send(key)).to eq('test')
      end

      context 'with project' do
        it "will override when provided a project #{key}" do
          expect(::Gitlab::CurrentSettings).not_to receive(key)
          expect(project.project_setting).to receive(key).and_return('test')

          expect(subject.send(key)).to eq('test')
        end

        it "will will not override when provided a blank project #{key}" do
          expect(::Gitlab::CurrentSettings).to receive(key).and_return('test')
          expect(project.project_setting).to receive(key).and_return('')

          expect(subject.send(key)).to eq('test')
        end
      end
    end
  end

  describe '.enabled?' do
    before do
      allow(subject).to receive(:configured?).and_return(true)
    end

    context 'when enabled' do
      before do
        allow(::Gitlab::CurrentSettings).to receive(:product_analytics_enabled?).and_return(true)
      end

      it 'will be enabled' do
        expect(subject.enabled?).to be_truthy
      end
    end

    context 'when disabled' do
      before do
        allow(::Gitlab::CurrentSettings).to receive(:product_analytics_enabled?).and_return(false)
      end

      it 'will be enabled' do
        expect(subject.enabled?).to be_falsey
      end
    end
  end

  private

  # Stubs the given instance-level settings keys to all return +setting+.
  def mock_settings(setting, keys = ProductAnalytics::Settings::ALL_CONFIG_KEYS)
    keys.each do |key|
      allow(::Gitlab::CurrentSettings).to receive(key).and_return(setting)
    end
  end
end

View File

@ -2,7 +2,7 @@
require 'spec_helper'
RSpec.describe BulkImports::Tracker, type: :model do
RSpec.describe BulkImports::Tracker, type: :model, feature_category: :importers do
describe 'associations' do
it do
is_expected.to belong_to(:entity).required.class_name('BulkImports::Entity')
@ -30,19 +30,14 @@ RSpec.describe BulkImports::Tracker, type: :model do
end
end
describe '.stage_running?' do
it 'returns true if there is any unfinished pipeline in the given stage' do
tracker = create(:bulk_import_tracker)
describe '.running_trackers' do
it 'returns trackers that are running for a given entity' do
entity = create(:bulk_import_entity)
BulkImports::Tracker.state_machines[:status].states.map(&:value).each do |status|
create(:bulk_import_tracker, status: status, entity: entity)
end
expect(described_class.stage_running?(tracker.entity.id, 0))
.to eq(true)
end
it 'returns false if there are no unfinished pipeline in the given stage' do
tracker = create(:bulk_import_tracker, :finished)
expect(described_class.stage_running?(tracker.entity.id, 0))
.to eq(false)
expect(described_class.running_trackers(entity.id).pluck(:status)).to include(1, 3)
end
end

View File

@ -66,4 +66,10 @@ RSpec.describe RuboCop::Cop::BackgroundMigration::FeatureCategory, feature_categ
RUBY
end
end
describe '#external_dependency_checksum' do
it 'returns a SHA256 digest used by RuboCop to invalid cache' do
expect(cop.external_dependency_checksum).to match(/^\h{64}$/)
end
end
end

View File

@ -0,0 +1,30 @@
# frozen_string_literal: true

require 'rubocop_spec_helper'
require_relative '../../rubocop/feature_categories'

# Specs for RuboCop::FeatureCategories: the category lists loaded from
# config/feature_categories.yml and the cache-busting config checksum.
RSpec.describe RuboCop::FeatureCategories, feature_category: :tooling do
  describe '.available' do
    it 'returns a list of available feature categories in a set of strings' do
      expect(described_class.available).to be_a(Set)
      expect(described_class.available).to all(be_a(String))
    end
  end

  describe '.available_with_custom' do
    it 'returns a list of available feature categories' do
      expect(described_class.available_with_custom).to include(described_class.available)
    end

    it 'returns a list containing the custom feature categories' do
      expect(described_class.available_with_custom).to include(described_class::CUSTOM_CATEGORIES)
    end
  end

  describe '.config_checksum' do
    # Fixed description typo: "invalid cache" -> "invalidate its cache".
    it 'returns a SHA256 digest used by RuboCop to invalidate its cache' do
      expect(described_class.config_checksum).to match(/^\h{64}$/)
    end
  end
end

View File

@ -0,0 +1,721 @@
# frozen_string_literal: true
require 'fast_spec_helper'
require 'tempfile'
require 'time'
require_relative '../../../../tooling/lib/tooling/job_metrics'
RSpec.describe Tooling::JobMetrics, feature_category: :tooling do
include StubENV
attr_accessor :job_metrics_file, :job_metrics_file_path
around do |example|
self.job_metrics_file = Tempfile.new('test-folder/job-metrics.json')
self.job_metrics_file_path = job_metrics_file.path
# See https://ruby-doc.org/stdlib-1.9.3/libdoc/tempfile/rdoc/
# Tempfile.html#class-Tempfile-label-Explicit+close
begin
example.run
ensure
job_metrics_file.close
job_metrics_file.unlink
end
end
let(:instance) { described_class.new(metrics_file_path: job_metrics_file_path) }
let(:pipeline_created_at) { '2023-05-03T12:35:39.932Z' }
before do
stub_env(
'CI_JOB_ID' => '1234',
'CI_JOB_NAME' => 'rspec unit pg13 1/24',
'CI_JOB_STAGE' => 'test',
'CI_JOB_STARTED_AT' => (Time.now - 3600).iso8601, # 1h ago
'CI_JOB_STATUS' => 'success',
'CI_MERGE_REQUEST_IID' => '23412',
'CI_PIPELINE_CREATED_AT' => pipeline_created_at,
'CI_PIPELINE_ID' => '3393923023',
'CI_PROJECT_ID' => '7489',
'CI_SERVER_HOST' => 'localhost:300',
'JOB_METRICS_FILE_PATH' => job_metrics_file_path
)
end
describe '#initialize' do
context 'when a path is given' do
subject { described_class.new(metrics_file_path: job_metrics_file_path) }
it 'instantiates the object' do
expect(subject).to be_a(described_class)
end
it 'sets the correct path for the metrics file' do
expect(subject.metrics_file_path).to eq(job_metrics_file_path)
end
end
context 'when a path is not given' do
subject { described_class.new }
context 'when the JOB_METRICS_FILE_PATH env variable is set' do
before do
stub_env(
'JOB_METRICS_FILE_PATH' => job_metrics_file_path
)
end
it 'instantiates the object' do
expect(subject).to be_a(described_class)
end
it 'sets the correct path for the metrics file' do
expect(subject.metrics_file_path).to eq(ENV['JOB_METRICS_FILE_PATH'])
end
end
context 'when the JOB_METRICS_FILE_PATH env variable is not set' do
before do
stub_env(
'JOB_METRICS_FILE_PATH' => nil
)
end
it 'raises an error' do
expect { subject }.to raise_error('Please specify a path for the job metrics file.')
end
end
end
end
describe '#create_metrics_file' do
subject { instance.create_metrics_file }
context 'when a valid metrics file exists' do
before do
allow(instance).to receive(:warn)
allow(instance).to receive(:valid_metrics_file?).and_return(true)
end
it 'prints a message to the user' do
allow(instance).to receive(:warn).and_call_original
expect { subject }.to output(
"A valid job metrics file already exists. We're not going to overwrite it.\n"
).to_stderr
end
it 'does not overwrite the existing metrics file' do
expect(instance).not_to receive(:persist_metrics_file)
subject
end
end
context 'when a valid metrics file does not exist' do
before do
allow(instance).to receive(:valid_metrics_file?).and_return(false)
end
it 'persists the metrics file' do
expect(instance).to receive(:persist_metrics_file).with(instance.default_metrics)
subject
end
end
end
describe '#update_field' do
subject { instance.update_field(field_name, field_value) }
let(:field_name) { instance.default_fields.each_key.first }
let(:field_value) { 'test_value' }
context 'when the field to update is not in the default fields list' do
let(:field_name) { 'not-in-default-list' }
before do
allow(instance).to receive(:warn)
end
it 'returns a warning to the user' do
allow(instance).to receive(:warn).and_call_original
expect { subject }.to output(
"[job-metrics] ERROR: Could not update field #{field_name}, as it is not part of the allowed fields.\n"
).to_stderr
end
it 'does not write to the metrics file' do
expect(instance).not_to receive(:persist_metrics_file)
subject
end
end
context 'when the field to update is in the default fields list' do
it 'calls the update_file method with the correct arguments' do
expect(instance).to receive(:update_file).with(field_name, field_value, type: :field)
subject
end
end
end
describe '#update_tag' do
subject { instance.update_tag(tag_name, tag_value) }
let(:tag_name) { instance.default_tags.each_key.first }
let(:tag_value) { 'test_value' }
context 'when the tag to update is not in the default tags list' do
let(:tag_name) { 'not-in-default-list' }
before do
allow(instance).to receive(:warn)
end
it 'returns a warning to the user' do
allow(instance).to receive(:warn).and_call_original
expect { subject }.to output(
"[job-metrics] ERROR: Could not update tag #{tag_name}, as it is not part of the allowed tags.\n"
).to_stderr
end
it 'does not write to the metrics file' do
expect(instance).not_to receive(:persist_metrics_file)
subject
end
end
context 'when the tag to update is in the default tags list' do
it 'calls the update_file method with the correct arguments' do
expect(instance).to receive(:update_file).with(tag_name, tag_value, type: :tag)
subject
end
end
end
describe '#update_file' do
subject { instance.update_file(tag_name, tag_value, type: type) }
let(:type) { :tag }
let(:tag_name) { instance.default_tags.each_key.first }
let(:tag_value) { 'test_value' }
context 'when the metrics file is not valid' do
before do
allow(instance).to receive(:valid_metrics_file?).and_return(false)
allow(instance).to receive(:warn)
end
it 'returns a warning to the user' do
allow(instance).to receive(:warn).and_call_original
expect { subject }.to output(
"[job-metrics] ERROR: Invalid job metrics file.\n"
).to_stderr
end
it 'does not write to the metrics file' do
expect(instance).not_to receive(:persist_metrics_file)
subject
end
end
context 'when the metrics file is valid' do
let(:metrics_hash) do
{
name: 'job-metrics',
time: ENV['CI_PIPELINE_CREATED_AT'].to_time,
tags: tags_hash,
fields: fields_hash
}
end
let(:tags_hash) { instance.default_tags }
let(:fields_hash) { instance.default_fields }
before do
allow(instance).to receive(:valid_metrics_file?).and_return(true)
allow(instance).to receive(:load_metrics_file).and_return(metrics_hash)
end
context 'when updating a tag' do
let(:type) { :tag }
it 'updates the tag value' do
expect(instance).to receive(:persist_metrics_file).with(
hash_including(
tags: hash_including(tag_name)
)
)
subject
end
end
context 'when updating a field' do
let(:type) { :field }
let(:field_name) { instance.default_fields.each_key.first }
let(:field_value) { 'test_value' }
it 'updates the field value' do
expect(instance).to receive(:persist_metrics_file).with(
hash_including(
fields: hash_including(field_name)
)
)
subject
end
end
end
end
describe '#push_metrics' do
subject { instance.push_metrics }
context 'when the metrics file is not valid' do
before do
allow(instance).to receive(:valid_metrics_file?).and_return(false)
allow(instance).to receive(:warn)
end
it 'returns a warning to the user' do
allow(instance).to receive(:warn).and_call_original
expect { subject }.to output(
"[job-metrics] ERROR: Invalid job metrics file. We will not push the metrics to InfluxDB\n"
).to_stderr
end
it 'does not write to the metrics file' do
expect(instance).not_to receive(:persist_metrics_file)
subject
end
end
context 'when the metrics file is valid' do
let(:metrics_hash) do
{
name: 'job-metrics',
time: ENV['CI_PIPELINE_CREATED_AT'].to_time,
tags: tags_hash,
fields: fields_hash
}
end
let(:tags_hash) { instance.default_tags }
let(:fields_hash) { instance.default_fields }
let(:influx_write_api) { double('influx_write_api') } # rubocop:disable RSpec:VerifiedDoubles
before do
allow(instance).to receive(:influx_write_api).and_return(influx_write_api)
allow(instance).to receive(:valid_metrics_file?).and_return(true)
allow(instance).to receive(:load_metrics_file).and_return(metrics_hash)
allow(instance).to receive(:warn)
allow(instance).to receive(:puts)
end
context 'when we are missing ENV variables to push to influxDB' do
before do
stub_env(
'QA_INFLUXDB_URL' => 'https://test.com',
'EP_CI_JOB_METRICS_TOKEN' => nil
)
end
it 'displays an error to the user' do
allow(instance).to receive(:influx_write_api).and_call_original
allow(instance).to receive(:warn).and_call_original
expect { subject }.to output(
"[job-metrics] Failed to push CI job metrics to InfluxDB, " \
"error: Missing EP_CI_JOB_METRICS_TOKEN env variable\n"
).to_stderr
end
end
context 'when pushing the data to InfluxDB raises an exception' do
it 'displays an error to the user' do
allow(instance).to receive(:warn).and_call_original
expect(influx_write_api).to receive(:write).and_raise("connectivity issues")
expect { subject }.to output(
"[job-metrics] Failed to push CI job metrics to InfluxDB, error: connectivity issues\n"
).to_stderr
end
end
context 'when some tags/fields are empty/nil' do
before do
allow(instance).to receive(:load_metrics_file).and_return({
name: 'job-metrics',
time: ENV['CI_PIPELINE_CREATED_AT'].to_time,
tags: {
first_tag: '',
third_tag: 'hello'
},
fields: {
second_tag: nil
}
})
end
it 'removes the metrics with empty/nil values from the metrics list' do
expect(influx_write_api).to receive(:write).with(data: {
name: 'job-metrics',
time: anything,
tags: { third_tag: 'hello' },
fields: {
job_duration_seconds: anything # Added right before pushing to influxDB
}
})
subject
end
end
it 'pushes the data to InfluxDB' do
expect(influx_write_api).to receive(:write).with(data: metrics_hash)
subject
end
it 'sets the job_duration_seconds field' do
# We want the job to last for 10 minutes (600 seconds)
allow(Time).to receive(:now).and_return(Time.parse(ENV.fetch('CI_JOB_STARTED_AT')) + 600)
expect(influx_write_api).to receive(:write).with(
data: hash_including(
fields: hash_including(
job_duration_seconds: 600
)
)
)
subject
end
end
end
describe '#load_metrics_file' do
subject { instance.load_metrics_file }
context 'when the metrics file does not exist on disk' do
before do
allow(File).to receive(:exist?).with(job_metrics_file_path).and_return(false)
end
it 'returns nil' do
expect(subject).to be_nil
end
end
context 'when the metrics file exists on disk' do
context 'when the metrics file does not contain valid JSON' do
before do
File.write(job_metrics_file_path, 'THIS IS NOT JSON CONTENT!')
end
it 'returns nil' do
expect(subject).to be_nil
end
end
context 'when the metrics file contains valid JSON' do
before do
File.write(job_metrics_file_path, { 'key' => 'value' }.to_json)
end
it 'returns the content of the file as a hash with symbolized keys' do
expect(subject).to eq({ key: 'value' })
end
end
end
end
describe '#valid_metrics_file?' do
subject { instance.valid_metrics_file? }
context 'when the metrics file cannot be loaded in memory' do
before do
allow(instance).to receive(:load_metrics_file).and_return(nil)
end
it 'returns false' do
expect(subject).to be_falsey
end
end
context 'when the metrics file can be loaded in memory' do
let(:metrics_file_content) do
{ key: 'value' }
end
before do
allow(instance).to receive(:load_metrics_file).and_return(metrics_file_content)
end
context 'when the metrics file validation succeeds' do
before do
allow(instance).to receive(:valid_metrics?).with(metrics_file_content).and_return(true)
end
it 'returns true' do
expect(subject).to be_truthy
end
end
context 'when the metrics file validation fails' do
before do
allow(instance).to receive(:valid_metrics?).with(metrics_file_content).and_return(false)
end
it 'returns false' do
expect(subject).to be_falsey
end
end
end
end
describe '#valid_metrics?' do
subject { instance.valid_metrics?(metrics_hash) }
let(:metrics_hash) do
{
name: 'job-metrics',
time: ENV['CI_PIPELINE_CREATED_AT'].to_time,
tags: tags_hash,
fields: fields_hash
}
end
let(:tags_hash) { instance.default_tags }
let(:fields_hash) { instance.default_fields }
describe 'metrics hash keys' do
context 'when it is missing a key' do
before do
metrics_hash.delete(:time)
end
it 'returns false' do
expect(subject).to be_falsey
end
end
context 'when it has an extra key' do
before do
metrics_hash[:extra_key] = ''
end
it 'returns false' do
expect(subject).to be_falsey
end
end
end
describe 'metrics hash tags keys' do
context 'when it is missing a key' do
before do
tags_hash.delete(tags_hash.each_key.first)
end
it 'returns false' do
expect(subject).to be_falsey
end
end
context 'when it has an extra key' do
before do
tags_hash[:extra_key] = ''
end
it 'returns false' do
expect(subject).to be_falsey
end
end
end
describe 'metrics hash fields keys' do
context 'when it is missing a key' do
before do
fields_hash.delete(fields_hash.each_key.first)
end
it 'returns false' do
expect(subject).to be_falsey
end
end
context 'when it has an extra key' do
before do
fields_hash[:extra_key] = ''
end
it 'returns false' do
expect(subject).to be_falsey
end
end
end
context 'when the metrics hash is valid' do
it 'returns true' do
expect(subject).to be_truthy
end
end
end
describe '#persist_metrics_file' do
let(:metrics_hash) do
{ key: 'value' }.to_json
end
subject { instance.persist_metrics_file(metrics_hash) }
context 'when the metrics hash is not valid' do
before do
allow(instance).to receive(:valid_metrics?).and_return(false)
allow(instance).to receive(:warn)
end
it 'returns a warning to the user' do
allow(instance).to receive(:warn).and_call_original
expect { subject }.to output(
"cannot persist the metrics, as it doesn't have the correct data structure.\n"
).to_stderr
end
it 'does not write to the metrics file' do
expect(File).not_to receive(:write).with(job_metrics_file_path, any_args)
subject
end
end
context 'when the metrics hash is valid' do
before do
allow(instance).to receive(:valid_metrics?).and_return(true)
end
it 'persists the metrics file' do
expect { subject }.to change { File.read(job_metrics_file_path) }.from('').to(metrics_hash.to_json)
end
end
end
describe '#default_metrics' do
subject { instance.default_metrics }
let(:returned_time) { ENV['CI_PIPELINE_CREATED_AT'].to_time }
let(:default_tags) { instance.default_tags }
let(:default_fields) { instance.default_fields }
it 'returns the expected metrics keys' do
expect(subject).to eq(
name: 'job-metrics',
time: returned_time,
tags: default_tags,
fields: default_fields
)
end
end
describe '#default_tags' do
subject { instance.default_tags }
it 'returns the expected tags keys' do
expect(subject).to eq(
job_name: ENV['CI_JOB_NAME'],
job_stage: ENV['CI_JOB_STAGE'],
job_status: ENV['CI_JOB_STATUS'],
project_id: ENV['CI_PROJECT_ID'],
rspec_retried_in_new_process: 'false',
server_host: ENV['CI_SERVER_HOST']
)
end
context 'when an ENV variable is not set' do
before do
stub_env('CI_JOB_NAME' => nil)
end
it 'replaces the value with nil' do
expect(subject).to eq(
job_name: nil,
job_stage: ENV['CI_JOB_STAGE'],
job_status: ENV['CI_JOB_STATUS'],
project_id: ENV['CI_PROJECT_ID'],
rspec_retried_in_new_process: 'false',
server_host: ENV['CI_SERVER_HOST']
)
end
end
end
describe '#default_fields' do
subject { instance.default_fields }
it 'returns the expected fields keys' do
expect(subject).to eq(
job_id: ENV['CI_JOB_ID'],
job_duration_seconds: nil,
merge_request_iid: ENV['CI_MERGE_REQUEST_IID'],
pipeline_id: ENV['CI_PIPELINE_ID']
)
end
context 'when an ENV variable is not set' do
before do
stub_env('CI_JOB_ID' => nil)
end
it 'replaces the value with nil' do
expect(subject).to eq(
job_id: nil,
job_duration_seconds: nil,
merge_request_iid: ENV['CI_MERGE_REQUEST_IID'],
pipeline_id: ENV['CI_PIPELINE_ID']
)
end
end
end
describe '#time' do
subject { instance.time }
let(:current_time) { '2011-01-01' }
before do
stub_env('CI_PIPELINE_CREATED_AT' => pipeline_created_at)
allow(DateTime).to receive(:now).and_return(current_time)
end
context 'when the CI_PIPELINE_CREATED_AT env variable is set' do
let(:pipeline_created_at) { '2000-01-01T00:00:00Z' }
it 'returns the correct time' do
expect(subject).to eq(pipeline_created_at)
end
end
context 'when the CI_PIPELINE_CREATED_AT env variable is not set' do
let(:pipeline_created_at) { nil }
it 'returns the current time' do
expect(subject).to eq(current_time)
end
end
end
end

View File

@ -3,9 +3,11 @@
require 'spec_helper'
RSpec.describe BulkImports::EntityWorker, feature_category: :importers do
let_it_be(:entity) { create(:bulk_import_entity) }
subject(:worker) { described_class.new }
let_it_be(:pipeline_tracker) do
let_it_be(:entity) { create(:bulk_import_entity, :started) }
let_it_be_with_reload(:pipeline_tracker) do
create(
:bulk_import_tracker,
entity: entity,
@ -14,97 +16,116 @@ RSpec.describe BulkImports::EntityWorker, feature_category: :importers do
)
end
let(:job_args) { entity.id }
it 'updates pipeline trackers to enqueued state when selected' do
worker = described_class.new
next_tracker = worker.send(:next_pipeline_trackers_for, entity.id).first
next_tracker.reload
expect(next_tracker.enqueued?).to be_truthy
expect(worker.send(:next_pipeline_trackers_for, entity.id))
.not_to include(next_tracker)
let_it_be_with_reload(:pipeline_tracker_2) do
create(
:bulk_import_tracker,
entity: entity,
pipeline_name: 'Stage1::Pipeline',
stage: 1
)
end
include_examples 'an idempotent worker' do
it 'enqueues the first stage pipelines work' do
expect_next_instance_of(Gitlab::Import::Logger) do |logger|
# the worker runs twice but only executes once
expect(logger)
.to receive(:info).twice
.with(
hash_including(
'bulk_import_entity_id' => entity.id,
'bulk_import_id' => entity.bulk_import_id,
'bulk_import_entity_type' => entity.source_type,
'source_full_path' => entity.source_full_path,
'current_stage' => nil,
'message' => 'Stage starting',
'source_version' => entity.bulk_import.source_version_info.to_s,
'importer' => 'gitlab_migration'
)
)
end
let(:job_args) { entity.id }
expect(BulkImports::PipelineWorker)
.to receive(:perform_async)
.with(
pipeline_tracker.id,
pipeline_tracker.stage,
entity.id
)
subject
before do
allow(described_class).to receive(:perform_in)
allow(BulkImports::PipelineWorker).to receive(:perform_async)
end
it 'logs and tracks the raised exceptions' do
exception = StandardError.new('Error!')
it 'enqueues the pipeline workers of the first stage and then re-enqueues itself' do
expect_next_instance_of(Gitlab::Import::Logger) do |logger|
expect(logger).to receive(:info).with(hash_including('message' => 'Stage starting', 'entity_stage' => 0))
expect(logger).to receive(:info).with(hash_including('message' => 'Stage running', 'entity_stage' => 0))
end
expect(BulkImports::PipelineWorker)
.to receive(:perform_async)
.and_raise(exception)
.with(pipeline_tracker.id, pipeline_tracker.stage, entity.id)
expect(described_class).to receive(:perform_in).twice.with(described_class::PERFORM_DELAY, entity.id)
expect { subject }.to change { pipeline_tracker.reload.status_name }.from(:created).to(:enqueued)
end
end
context 'when pipeline workers from a stage are running' do
before do
pipeline_tracker.enqueue!
end
it 'does not enqueue the pipeline workers from the next stage and re-enqueues itself' do
expect_next_instance_of(Gitlab::Import::Logger) do |logger|
expect(logger)
.to receive(:info).twice
.with(
hash_including(
'bulk_import_entity_id' => entity.id,
'bulk_import_id' => entity.bulk_import_id,
'bulk_import_entity_type' => entity.source_type,
'source_full_path' => entity.source_full_path,
'current_stage' => nil,
'source_version' => entity.bulk_import.source_version_info.to_s,
'importer' => 'gitlab_migration'
)
)
expect(logger)
.to receive(:error)
.with(
hash_including(
'bulk_import_entity_id' => entity.id,
'bulk_import_id' => entity.bulk_import_id,
'bulk_import_entity_type' => entity.source_type,
'source_full_path' => entity.source_full_path,
'current_stage' => nil,
'message' => 'Entity failed',
'exception.backtrace' => anything,
'exception.class' => 'StandardError',
'exception.message' => 'Error!',
'importer' => 'gitlab_migration',
'source_version' => entity.bulk_import.source_version_info.to_s
)
)
expect(logger).to receive(:info).with(hash_including('message' => 'Stage running', 'entity_stage' => 0))
end
expect(Gitlab::ErrorTracking)
.to receive(:track_exception)
.with(
exception,
expect(BulkImports::PipelineWorker).not_to receive(:perform_async)
expect(described_class).to receive(:perform_in).with(described_class::PERFORM_DELAY, entity.id)
worker.perform(entity.id)
end
end
context 'when there are no pipeline workers from the previous stage running' do
before do
pipeline_tracker.fail_op!
end
it 'enqueues the pipeline workers from the next stage and re-enqueues itself' do
expect_next_instance_of(Gitlab::Import::Logger) do |logger|
expect(logger).to receive(:info).with(hash_including('message' => 'Stage starting', 'entity_stage' => 1))
end
expect(BulkImports::PipelineWorker)
.to receive(:perform_async)
.with(
pipeline_tracker_2.id,
pipeline_tracker_2.stage,
entity.id
)
expect(described_class).to receive(:perform_in).with(described_class::PERFORM_DELAY, entity.id)
worker.perform(entity.id)
end
end
context 'when there are no next stage to run' do
before do
pipeline_tracker.fail_op!
pipeline_tracker_2.fail_op!
end
it 'does not enqueue any pipeline worker and re-enqueues itself' do
expect(BulkImports::PipelineWorker).not_to receive(:perform_async)
expect(described_class).to receive(:perform_in).with(described_class::PERFORM_DELAY, entity.id)
worker.perform(entity.id)
end
end
context 'when entity status is not started' do
let(:entity) { create(:bulk_import_entity, :finished) }
it 'does not re-enqueues itself' do
expect(described_class).not_to receive(:perform_in)
worker.perform(entity.id)
end
end
it 'logs and tracks the raised exceptions' do
exception = StandardError.new('Error!')
expect(BulkImports::PipelineWorker)
.to receive(:perform_async)
.and_raise(exception)
expect(Gitlab::ErrorTracking)
.to receive(:track_exception)
.with(
exception,
hash_including(
bulk_import_entity_id: entity.id,
bulk_import_id: entity.bulk_import_id,
bulk_import_entity_type: entity.source_type,
@ -112,75 +133,10 @@ RSpec.describe BulkImports::EntityWorker, feature_category: :importers do
source_version: entity.bulk_import.source_version_info.to_s,
importer: 'gitlab_migration'
)
subject
expect(entity.reload.failed?).to eq(true)
end
context 'in first stage' do
let(:job_args) { [entity.id, 0] }
it 'do not enqueue a new pipeline job if the current stage still running' do
expect_next_instance_of(Gitlab::Import::Logger) do |logger|
expect(logger)
.to receive(:info).twice
.with(
hash_including(
'bulk_import_entity_id' => entity.id,
'bulk_import_id' => entity.bulk_import_id,
'bulk_import_entity_type' => entity.source_type,
'source_full_path' => entity.source_full_path,
'current_stage' => 0,
'message' => 'Stage running',
'source_version' => entity.bulk_import.source_version_info.to_s,
'importer' => 'gitlab_migration'
)
)
end
expect(BulkImports::PipelineWorker)
.not_to receive(:perform_async)
subject
end
it 'enqueues the next stage pipelines when the current stage is finished' do
next_stage_pipeline_tracker = create(
:bulk_import_tracker,
entity: entity,
pipeline_name: 'Stage1::Pipeline',
stage: 1
)
pipeline_tracker.fail_op!
expect_next_instance_of(Gitlab::Import::Logger) do |logger|
expect(logger)
.to receive(:info).twice
.with(
hash_including(
'bulk_import_entity_id' => entity.id,
'bulk_import_id' => entity.bulk_import_id,
'bulk_import_entity_type' => entity.source_type,
'source_full_path' => entity.source_full_path,
'current_stage' => 0,
'source_version' => entity.bulk_import.source_version_info.to_s,
'importer' => 'gitlab_migration'
)
)
end
expect(BulkImports::PipelineWorker)
.to receive(:perform_async)
.with(
next_stage_pipeline_tracker.id,
next_stage_pipeline_tracker.stage,
entity.id
)
subject
end
end
worker.perform(entity.id)
expect(entity.reload.failed?).to eq(true)
end
end

View File

@ -13,14 +13,29 @@ RSpec.describe BulkImports::FinishBatchedPipelineWorker, feature_category: :impo
subject(:worker) { described_class.new }
describe '#perform' do
it 'finishes pipeline and enqueues entity worker' do
expect(BulkImports::EntityWorker)
.to receive(:perform_async)
.with(entity.id, pipeline_tracker.stage)
context 'when job version is nil' do
before do
allow(subject).to receive(:job_version).and_return(nil)
end
subject.perform(pipeline_tracker.id)
it 'finishes pipeline and enqueues entity worker' do
expect(BulkImports::EntityWorker).to receive(:perform_async)
.with(entity.id)
expect(pipeline_tracker.reload.finished?).to eq(true)
subject.perform(pipeline_tracker.id)
expect(pipeline_tracker.reload.finished?).to eq(true)
end
end
context 'when job version is present' do
it 'finishes pipeline and does not enqueues entity worker' do
expect(BulkImports::EntityWorker).not_to receive(:perform_async)
subject.perform(pipeline_tracker.id)
expect(pipeline_tracker.reload.finished?).to eq(true)
end
end
context 'when import is in progress' do

View File

@ -38,39 +38,61 @@ RSpec.describe BulkImports::PipelineWorker, feature_category: :importers do
end
end
shared_examples 'successfully runs the pipeline' do
it 'runs the given pipeline successfully' do
expect_next_instance_of(Gitlab::Import::Logger) do |logger|
expect(logger)
.to receive(:info)
.with(
hash_including(
'pipeline_name' => 'FakePipeline',
'bulk_import_id' => entity.bulk_import_id,
'bulk_import_entity_id' => entity.id,
'bulk_import_entity_type' => entity.source_type,
'source_full_path' => entity.source_full_path
)
it 'runs the given pipeline successfully' do
expect_next_instance_of(Gitlab::Import::Logger) do |logger|
expect(logger)
.to receive(:info)
.with(
hash_including(
'pipeline_name' => 'FakePipeline',
'bulk_import_id' => entity.bulk_import_id,
'bulk_import_entity_id' => entity.id,
'bulk_import_entity_type' => entity.source_type,
'source_full_path' => entity.source_full_path
)
end
)
end
expect(BulkImports::EntityWorker)
.to receive(:perform_async)
.with(entity.id, pipeline_tracker.stage)
allow(subject).to receive(:jid).and_return('jid')
allow(subject).to receive(:jid).and_return('jid')
subject.perform(pipeline_tracker.id, pipeline_tracker.stage, entity.id)
pipeline_tracker.reload
expect(pipeline_tracker.status_name).to eq(:finished)
expect(pipeline_tracker.jid).to eq('jid')
end
context 'when job version is nil' do
before do
allow(subject).to receive(:job_version).and_return(nil)
end
it 'runs the given pipeline successfully and enqueues entity worker' do
expect(BulkImports::EntityWorker).to receive(:perform_async).with(entity.id)
subject.perform(pipeline_tracker.id, pipeline_tracker.stage, entity.id)
pipeline_tracker.reload
expect(pipeline_tracker.status_name).to eq(:finished)
expect(pipeline_tracker.jid).to eq('jid')
end
context 'when an error occurs' do
it 'enqueues entity worker' do
expect_next_instance_of(pipeline_class) do |pipeline|
expect(pipeline)
.to receive(:run)
.and_raise(StandardError, 'Error!')
end
expect(BulkImports::EntityWorker).to receive(:perform_async).with(entity.id)
subject.perform(pipeline_tracker.id, pipeline_tracker.stage, entity.id)
end
end
end
it_behaves_like 'successfully runs the pipeline'
context 'when exclusive lease cannot be obtained' do
it 'does not run the pipeline' do
expect(subject).to receive(:try_obtain_lease).and_return(false)
@ -132,10 +154,6 @@ RSpec.describe BulkImports::PipelineWorker, feature_category: :importers do
)
)
expect(BulkImports::EntityWorker)
.to receive(:perform_async)
.with(entity.id, pipeline_tracker.stage)
expect(BulkImports::Failure)
.to receive(:create)
.with(
@ -157,37 +175,6 @@ RSpec.describe BulkImports::PipelineWorker, feature_category: :importers do
expect(pipeline_tracker.jid).to eq('jid')
end
shared_examples 'successfully runs the pipeline' do
it 'runs the given pipeline successfully' do
expect_next_instance_of(Gitlab::Import::Logger) do |logger|
expect(logger)
.to receive(:info)
.with(
hash_including(
'pipeline_name' => 'FakePipeline',
'bulk_import_id' => entity.bulk_import_id,
'bulk_import_entity_id' => entity.id,
'bulk_import_entity_type' => entity.source_type,
'source_full_path' => entity.source_full_path
)
)
end
expect(BulkImports::EntityWorker)
.to receive(:perform_async)
.with(entity.id, pipeline_tracker.stage)
allow(subject).to receive(:jid).and_return('jid')
subject.perform(pipeline_tracker.id, pipeline_tracker.stage, entity.id)
pipeline_tracker.reload
expect(pipeline_tracker.status_name).to eq(:finished)
expect(pipeline_tracker.jid).to eq('jid')
end
end
context 'when enqueued pipeline cannot be found' do
shared_examples 'logs the error' do
it 'logs the error' do
@ -212,10 +199,6 @@ RSpec.describe BulkImports::PipelineWorker, feature_category: :importers do
)
end
expect(BulkImports::EntityWorker)
.to receive(:perform_async)
.with(entity.id, pipeline_tracker.stage)
subject.perform(pipeline_tracker.id, pipeline_tracker.stage, entity.id)
end
end

View File

@ -0,0 +1,12 @@
#!/usr/bin/env ruby
# frozen_string_literal: true

# Creates the job metrics file used to collect CI/CD job metrics.
# No-op unless CI_JOB_METRICS_ENABLED is set to "true".
require_relative '../lib/tooling/job_metrics'

if ENV['CI_JOB_METRICS_ENABLED'] != 'true'
  puts "[job-metrics] Feature disabled because CI_JOB_METRICS_ENABLED is not set to true."
  exit 0
end

puts "[job-metrics] Creating the job metrics file for the CI/CD job."
Tooling::JobMetrics.new.create_metrics_file

12
tooling/bin/push_job_metrics Executable file
View File

@ -0,0 +1,12 @@
#!/usr/bin/env ruby
# frozen_string_literal: true

# Pushes the collected job metrics file to InfluxDB.
# No-op unless CI_JOB_METRICS_ENABLED is set to "true".
require_relative '../lib/tooling/job_metrics'

if ENV['CI_JOB_METRICS_ENABLED'] != 'true'
  puts "[job-metrics] Feature disabled because CI_JOB_METRICS_ENABLED is not set to true."
  exit 0
end

puts "[job-metrics] Pushing job metrics file for the CI/CD job."
Tooling::JobMetrics.new.push_metrics

View File

@ -0,0 +1,20 @@
#!/usr/bin/env ruby
# frozen_string_literal: true

# Updates a single metric field in the job metrics file.
#
# usage: update_job_metric_field <field_name> <field_value>
require_relative '../lib/tooling/job_metrics'

if ENV['CI_JOB_METRICS_ENABLED'] != 'true'
  puts "[job-metrics] Feature disabled because CI_JOB_METRICS_ENABLED is not set to true."
  exit 0
end

field_name, field_value = ARGV.shift(2)

if field_name.nil? || field_value.nil?
  puts 'usage: update_job_metric_field <field_name> <field_value>'
  exit 1
end

puts "[job-metrics] Updating job metrics field for the CI/CD job."
Tooling::JobMetrics.new.update_field(field_name, field_value)

View File

@ -0,0 +1,20 @@
#!/usr/bin/env ruby
# frozen_string_literal: true

# Updates a single metric tag in the job metrics file.
#
# usage: update_job_metric_tag <tag_name> <tag_value>
require_relative '../lib/tooling/job_metrics'

if ENV['CI_JOB_METRICS_ENABLED'] != 'true'
  puts "[job-metrics] Feature disabled because CI_JOB_METRICS_ENABLED is not set to true."
  exit 0
end

tag_name, tag_value = ARGV.shift(2)

if tag_name.nil? || tag_value.nil?
  puts 'usage: update_job_metric_tag <tag_name> <tag_value>'
  exit 1
end

puts "[job-metrics] Updating job metrics tag for the CI/CD job."
Tooling::JobMetrics.new.update_tag(tag_name, tag_value)

View File

@ -0,0 +1,188 @@
# frozen_string_literal: true
require 'bundler/setup'

require 'date'
require 'json'
require 'time'

require 'influxdb-client'
module Tooling
  # Collects, persists, and pushes CI/CD job metrics to InfluxDB.
  #
  # Metrics are accumulated in a JSON file (one per job) so that several
  # processes within the same CI job can record tags and fields; the file is
  # pushed to InfluxDB once, at the end of the job.
  class JobMetrics
    attr_reader :metrics_file_path

    # @return [String] bucket for storing all CI job metrics
    INFLUX_CI_JOB_METRICS_BUCKET = "ci-job-metrics"

    # The two kinds of metric entries the file can contain.
    ALLOWED_TYPES = %i[tag field].freeze

    # @param metrics_file_path [String, nil] path to the metrics JSON file;
    #   defaults to the JOB_METRICS_FILE_PATH environment variable.
    # @raise [RuntimeError] when no path is given and the env variable is unset
    def initialize(metrics_file_path: nil)
      metrics_file_path ||= ENV['JOB_METRICS_FILE_PATH']
      raise "Please specify a path for the job metrics file." unless metrics_file_path

      @metrics_file_path = metrics_file_path
    end

    # Creates the metrics file with default tags/fields, unless a valid file
    # already exists (in which case it is left untouched).
    def create_metrics_file
      if valid_metrics_file?
        warn "A valid job metrics file already exists. We're not going to overwrite it."
        return
      end

      # We always first create tag metrics file with the default values
      persist_metrics_file(default_metrics)
    end

    # Updates a single field in the metrics file.
    #
    # @param name [String, Symbol] field name; must be one of default_fields' keys
    # @param value [Object] new value for the field
    def update_field(name, value)
      name = name&.to_sym

      unless default_fields.key?(name)
        warn "[job-metrics] ERROR: Could not update field #{name}, as it is not part of the allowed fields."
        return
      end

      update_file(name, value, type: :field)
    end

    # Updates a single tag in the metrics file.
    #
    # @param name [String, Symbol] tag name; must be one of default_tags' keys
    # @param value [Object] new value for the tag
    def update_tag(name, value)
      name = name&.to_sym

      unless default_tags.key?(name)
        warn "[job-metrics] ERROR: Could not update tag #{name}, as it is not part of the allowed tags."
        return
      end

      update_file(name, value, type: :tag)
    end

    # Writes a tag or field value to the metrics file.
    #
    # @param name [Symbol] tag/field name (already validated by the callers above)
    # @param value [Object] new value
    # @param type [Symbol] :tag or :field (see ALLOWED_TYPES)
    def update_file(name, value, type:)
      unless valid_metrics_file?
        warn "[job-metrics] ERROR: Invalid job metrics file."
        return
      end

      metrics = load_metrics_file
      metrics[:"#{type}s"][name] = value
      persist_metrics_file(metrics)
    end

    # Pushes the metrics from the file to InfluxDB, after recording the job
    # duration and dropping tags/fields that were never set.
    #
    # Never raises: any error is logged so the CI job itself does not fail.
    def push_metrics
      unless valid_metrics_file?
        warn "[job-metrics] ERROR: Invalid job metrics file. We will not push the metrics to InfluxDB"
        return
      end

      update_field(:job_duration_seconds, (Time.now - job_start_time).to_i)

      metrics = load_metrics_file

      # Remove the entries that were never assigned a value, so we don't push
      # empty tags/fields to InfluxDB.
      ALLOWED_TYPES.each do |type|
        metrics[:"#{type}s"] = metrics[:"#{type}s"].delete_if { |_, v| v.nil? || v.to_s.empty? }
      end

      influx_write_api.write(data: metrics)

      # Count the actual tag/field entries pushed, not the number of top-level
      # keys of the metrics hash (which is a constant 4).
      entries_count = metrics[:tags].size + metrics[:fields].size
      puts "[job-metrics] Pushed #{entries_count} CI job metric entries to InfluxDB."
    rescue StandardError => e
      warn "[job-metrics] Failed to push CI job metrics to InfluxDB, error: #{e}"
    end

    # Loads the metrics file and symbolizes its keys.
    #
    # @return [Hash, nil] the metrics hash, or nil when the file is missing or unparsable
    def load_metrics_file
      return unless File.exist?(metrics_file_path)

      metrics_hash = JSON.parse(File.read(metrics_file_path), symbolize_names: true) # rubocop:disable Gitlab/Json

      # Inflate the timestamp from string to Time object
      metrics_hash[:time] = Time.parse(metrics_hash[:time]) if metrics_hash[:time]

      metrics_hash
    rescue JSON::ParserError, TypeError
      nil
    end

    # @return [Boolean] whether the metrics file exists and has the expected structure
    def valid_metrics_file?
      metrics = load_metrics_file
      return false unless metrics

      valid_metrics?(metrics)
    end

    # Checks that the hash has exactly the expected top-level/tags/fields keys.
    #
    # NOTE: Hash#keys comparison is order-sensitive; ordering is preserved by
    # the JSON round-trip, so this also rejects files with reordered keys.
    def valid_metrics?(metrics_hash)
      default_metrics.keys == metrics_hash.keys &&
        default_tags.keys == metrics_hash[:tags].keys &&
        default_fields.keys == metrics_hash[:fields].keys
    end

    # Serializes the metrics hash to the metrics file, refusing structurally
    # invalid hashes.
    def persist_metrics_file(metrics_hash)
      unless valid_metrics?(metrics_hash)
        warn "cannot persist the metrics, as it doesn't have the correct data structure."
        return
      end

      File.write(metrics_file_path, metrics_hash.to_json)
    end

    # @return [Hash] the default metrics data point (name, time, tags, fields)
    def default_metrics
      {
        name: 'job-metrics',
        time: time,
        tags: default_tags,
        fields: default_fields
      }
    end

    # @return [Hash] the allowed tags and their default (CI environment) values
    def default_tags
      {
        job_name: ENV.fetch('CI_JOB_NAME', nil),
        job_stage: ENV.fetch('CI_JOB_STAGE', nil),
        job_status: ENV.fetch('CI_JOB_STATUS', nil),
        project_id: ENV.fetch('CI_PROJECT_ID', nil),
        rspec_retried_in_new_process: 'false',
        server_host: ENV.fetch('CI_SERVER_HOST', nil)
      }
    end

    # @return [Hash] the allowed fields and their default (CI environment) values
    def default_fields
      {
        merge_request_iid: ENV.fetch('CI_MERGE_REQUEST_IID', nil),
        pipeline_id: ENV.fetch('CI_PIPELINE_ID', nil),
        job_id: ENV.fetch('CI_JOB_ID', nil),
        job_duration_seconds: nil
      }
    end

    # Single common timestamp for all exported example metrics to keep data points consistently grouped
    #
    # Memoized in both branches: the previous implementation returned early on
    # the DateTime.now path without assigning @time, so repeated calls produced
    # different timestamps, defeating the "single common timestamp" intent.
    #
    # @return [DateTime]
    def time
      @time ||= if ENV['CI_PIPELINE_CREATED_AT']
                  DateTime.parse(ENV['CI_PIPELINE_CREATED_AT'])
                else
                  DateTime.now
                end
    end

    private

    # Write client
    #
    # @return [WriteApi]
    def influx_write_api
      @write_api ||= influx_client.create_write_api
    end

    # InfluxDb client
    #
    # @return [InfluxDB2::Client]
    def influx_client
      @influx_client ||= InfluxDB2::Client.new(
        ENV["QA_INFLUXDB_URL"] || raise("Missing QA_INFLUXDB_URL env variable"),
        ENV["EP_CI_JOB_METRICS_TOKEN"] || raise("Missing EP_CI_JOB_METRICS_TOKEN env variable"),
        bucket: INFLUX_CI_JOB_METRICS_BUCKET,
        org: "gitlab-qa",
        precision: InfluxDB2::WritePrecision::NANOSECOND
      )
    end

    # @return [Time] when the CI job started, from the CI_JOB_STARTED_AT env variable
    # @raise [KeyError] when CI_JOB_STARTED_AT is not set
    def job_start_time
      Time.parse(ENV.fetch('CI_JOB_STARTED_AT'))
    end
  end
end