Add latest changes from gitlab-org/gitlab@master

This commit is contained in:
GitLab Bot 2022-02-11 18:18:58 +00:00
parent ec377e4162
commit e57809ded8
76 changed files with 1347 additions and 238 deletions

View File

@ -127,6 +127,7 @@
.ci-patterns: &ci-patterns
- ".gitlab-ci.yml"
- ".gitlab/ci/**/*"
- "scripts/rspec_helpers.sh"
.ci-build-images-patterns: &ci-build-images-patterns
- ".gitlab-ci.yml"
@ -245,6 +246,7 @@
- ".gitlab-ci.yml"
- ".gitlab/ci/**/*"
- "*_VERSION"
- "scripts/rspec_helpers.sh"
# DB patterns + .ci-patterns
.db-patterns: &db-patterns

View File

@ -39,10 +39,10 @@ Which tier is this feature available in?
- [ ] @mention your stage's stable counterparts on this issue. For example, Customer Support, Customer Success (Technical Account Manager), Product Marketing Manager.
- To see who the stable counterparts are for a product team visit [product categories](https://about.gitlab.com/handbook/product/categories/)
- If there is no stable counterpart listed for Sales/CS please mention `@timtams`
- If there is no stable counterpart listed for Support please @mention `@gitlab-com/support/managers`
- If there is no stable counterpart listed for Support please mention `@gitlab-com/support/managers`
- If there is no stable counterpart listed for Marketing please mention `@cfoster3`
- [ ] @mention your GPM so that they are aware of planned deprecations. The goal is to have reviews happen at least two releases before the final removal of the feature or introduction of a breaking change.
- [ ] `@mention` your GPM so that they are aware of planned deprecations. The goal is to have reviews happen at least two releases before the final removal of the feature or introduction of a breaking change.
### Deprecation Milestone

View File

@ -1 +1 @@
6130522b84e1ea354467c2a51989c66fb0566d9d
d3ab199f7923a9d75516b8d1f1ea2f84b03190b1

View File

@ -42,7 +42,7 @@ export const BLOB_RENDER_ERRORS = {
id: 'load',
text: __('load it anyway'),
conjunction: __('or'),
href: '#',
href: '?expanded=true&viewer=simple',
target: '',
event: BLOB_RENDER_EVENT_LOAD,
},

View File

@ -2,6 +2,9 @@
import { GlButton, GlTable } from '@gitlab/ui';
import { __ } from '~/locale';
const cloudRun = 'cloudRun';
const cloudStorage = 'cloudStorage';
const i18n = {
cloudRun: __('Cloud Run'),
cloudRunDescription: __('Deploy container based web apps on Google managed clusters'),
@ -28,6 +31,13 @@ export default {
required: true,
},
},
methods: {
actionUrl(key) {
if (key === cloudRun) return this.cloudRunUrl;
else if (key === cloudStorage) return this.cloudStorageUrl;
return '#';
},
},
fields: [
{ key: 'title', label: i18n.service },
{ key: 'description', label: i18n.description },
@ -37,12 +47,19 @@ export default {
{
title: i18n.cloudRun,
description: i18n.cloudRunDescription,
action: { title: i18n.configureViaMergeRequest, disabled: true },
action: {
key: cloudRun,
title: i18n.configureViaMergeRequest,
},
},
{
title: i18n.cloudStorage,
description: i18n.cloudStorageDescription,
action: { title: i18n.configureViaMergeRequest, disabled: true },
action: {
key: cloudStorage,
title: i18n.configureViaMergeRequest,
disabled: true,
},
},
],
i18n,
@ -54,7 +71,9 @@ export default {
<p>{{ $options.i18n.deploymentsDescription }}</p>
<gl-table :fields="$options.fields" :items="$options.items">
<template #cell(action)="{ value }">
<gl-button :disabled="value.disabled">{{ value.title }}</gl-button>
<gl-button :disabled="value.disabled" :href="actionUrl(value.key)">
{{ value.title }}
</gl-button>
</template>
</gl-table>
</div>

View File

@ -23,11 +23,11 @@ export default {
type: String,
required: true,
},
deploymentsCloudRunUrl: {
enableCloudRunUrl: {
type: String,
required: true,
},
deploymentsCloudStorageUrl: {
enableCloudStorageUrl: {
type: String,
required: true,
},
@ -47,8 +47,8 @@ export default {
</gl-tab>
<gl-tab :title="__('Deployments')">
<deployments-service-table
:cloud-run-url="deploymentsCloudRunUrl"
:cloud-storage-url="deploymentsCloudStorageUrl"
:cloud-run-url="enableCloudRunUrl"
:cloud-storage-url="enableCloudStorageUrl"
/>
</gl-tab>
<gl-tab :title="__('Services')" disabled />

View File

@ -41,6 +41,7 @@ export default {
},
data() {
return {
isModalVisible: false,
isLoading: true,
isSearchEmpty: false,
searchEmptyMessage: '',
@ -101,6 +102,12 @@ export default {
eventHub.$off(`${this.action}updateGroups`, this.updateGroups);
},
methods: {
hideModal() {
this.isModalVisible = false;
},
showModal() {
this.isModalVisible = true;
},
fetchGroups({ parentId, page, filterGroupsBy, sortBy, archived, updatePagination }) {
return this.service
.getGroups(parentId, page, filterGroupsBy, sortBy, archived)
@ -185,6 +192,7 @@ export default {
showLeaveGroupModal(group, parentGroup) {
this.targetGroup = group;
this.targetParentGroup = parentGroup;
this.showModal();
},
leaveGroup() {
this.targetGroup.isBeingRemoved = true;
@ -256,10 +264,12 @@ export default {
/>
<gl-modal
modal-id="leave-group-modal"
:visible="isModalVisible"
:title="__('Are you sure?')"
:action-primary="primaryProps"
:action-cancel="cancelProps"
@primary="leaveGroup"
@hide="hideModal"
>
{{ groupLeaveConfirmationMessage }}
</gl-modal>

View File

@ -34,8 +34,8 @@ export default {
),
itemCaret,
itemTypeIcon,
itemStats,
itemActions,
itemStats,
},
props: {
parentGroup: {
@ -92,6 +92,9 @@ export default {
complianceFramework() {
return this.group.complianceFramework;
},
showActionsMenu() {
return this.isGroup && (this.group.canEdit || this.group.canRemove || this.group.canLeave);
},
},
methods: {
onClickRowGroup(e) {
@ -197,17 +200,19 @@ export default {
<div v-if="isGroupPendingRemoval">
<gl-badge variant="warning">{{ __('pending deletion') }}</gl-badge>
</div>
<div class="metadata d-flex flex-grow-1 flex-shrink-0 flex-wrap justify-content-md-between">
<div
class="metadata gl-display-flex gl-flex-grow-1 gl-flex-shrink-0 gl-flex-wrap justify-content-md-between"
>
<item-stats
:item="group"
class="group-stats gl-mt-2 gl-display-none gl-md-display-flex gl-align-items-center"
/>
<item-actions
v-if="isGroup"
v-if="showActionsMenu"
:group="group"
:parent-group="parentGroup"
:action="action"
/>
<item-stats
:item="group"
class="group-stats gl-mt-2 d-none d-md-flex gl-align-items-center"
/>
</div>
</div>
</div>

View File

@ -1,15 +1,17 @@
<script>
import { GlTooltipDirective, GlButton, GlModalDirective } from '@gitlab/ui';
import { GlTooltipDirective, GlDropdown, GlDropdownItem } from '@gitlab/ui';
import { COMMON_STR } from '../constants';
import eventHub from '../event_hub';
const { LEAVE_BTN_TITLE, EDIT_BTN_TITLE, REMOVE_BTN_TITLE, OPTIONS_DROPDOWN_TITLE } = COMMON_STR;
export default {
components: {
GlButton,
GlDropdown,
GlDropdownItem,
},
directives: {
GlTooltip: GlTooltipDirective,
GlModal: GlModalDirective,
},
props: {
parentGroup: {
@ -28,11 +30,8 @@ export default {
},
},
computed: {
leaveBtnTitle() {
return COMMON_STR.LEAVE_BTN_TITLE;
},
editBtnTitle() {
return COMMON_STR.EDIT_BTN_TITLE;
removeButtonHref() {
return `${this.group.editPath}#js-remove-group-form`;
},
},
methods: {
@ -40,33 +39,51 @@ export default {
eventHub.$emit(`${this.action}showLeaveGroupModal`, this.group, this.parentGroup);
},
},
i18n: {
leaveBtnTitle: LEAVE_BTN_TITLE,
editBtnTitle: EDIT_BTN_TITLE,
removeBtnTitle: REMOVE_BTN_TITLE,
optionsDropdownTitle: OPTIONS_DROPDOWN_TITLE,
},
};
</script>
<template>
<div class="controls d-flex justify-content-end">
<gl-button
v-if="group.canLeave"
v-gl-tooltip.top
v-gl-modal.leave-group-modal
:title="leaveBtnTitle"
:aria-label="leaveBtnTitle"
data-testid="leave-group-btn"
size="small"
icon="leave"
class="leave-group gl-ml-3"
@click.stop="onLeaveGroup"
/>
<gl-button
v-if="group.canEdit"
v-gl-tooltip.top
:href="group.editPath"
:title="editBtnTitle"
:aria-label="editBtnTitle"
data-testid="edit-group-btn"
size="small"
icon="pencil"
class="edit-group gl-ml-3"
/>
<div class="gl-display-flex gl-justify-content-end gl-ml-5">
<gl-dropdown
v-gl-tooltip.hover.focus="$options.i18n.optionsDropdownTitle"
right
category="tertiary"
icon="ellipsis_v"
no-caret
:data-testid="`group-${group.id}-dropdown-button`"
data-qa-selector="group_dropdown_button"
:data-qa-group-id="group.id"
>
<gl-dropdown-item
v-if="group.canEdit"
:data-testid="`edit-group-${group.id}-btn`"
:href="group.editPath"
@click.stop
>
{{ $options.i18n.editBtnTitle }}
</gl-dropdown-item>
<gl-dropdown-item
v-if="group.canLeave"
:data-testid="`leave-group-${group.id}-btn`"
@click.stop="onLeaveGroup"
>
{{ $options.i18n.leaveBtnTitle }}
</gl-dropdown-item>
<gl-dropdown-item
v-if="group.canRemove"
:href="removeButtonHref"
:data-testid="`remove-group-${group.id}-btn`"
variant="danger"
@click.stop
>
{{ $options.i18n.removeBtnTitle }}
</gl-dropdown-item>
</gl-dropdown>
</div>
</template>

View File

@ -15,8 +15,10 @@ export const COMMON_STR = {
LEAVE_FORBIDDEN: s__(
'GroupsTree|Failed to leave the group. Please make sure you are not the only owner.',
),
LEAVE_BTN_TITLE: s__('GroupsTree|Leave this group'),
EDIT_BTN_TITLE: s__('GroupsTree|Edit group'),
LEAVE_BTN_TITLE: s__('GroupsTree|Leave group'),
EDIT_BTN_TITLE: s__('GroupsTree|Edit'),
REMOVE_BTN_TITLE: s__('GroupsTree|Delete'),
OPTIONS_DROPDOWN_TITLE: s__('GroupsTree|Options'),
GROUP_SEARCH_EMPTY: s__('GroupsTree|No groups matched your search'),
GROUP_PROJECT_SEARCH_EMPTY: s__('GroupsTree|No groups or projects matched your search'),
};

View File

@ -83,6 +83,7 @@ export default class GroupsStore {
leavePath: rawGroupItem.leave_path,
canEdit: rawGroupItem.can_edit,
canLeave: rawGroupItem.can_leave,
canRemove: rawGroupItem.can_remove,
type: rawGroupItem.type,
permission: rawGroupItem.permission,
children: groupChildren,

View File

@ -45,6 +45,7 @@ export default {
projectPath: this.projectPath,
filePath: this.path,
ref: this.originalBranch || this.ref,
shouldFetchRawText: Boolean(this.glFeatures.highlightJs),
};
},
result() {
@ -203,7 +204,7 @@ export default {
<template>
<div>
<gl-loading-icon v-if="isLoading" size="sm" />
<div v-if="blobInfo && !isLoading" class="file-holder gl-overflow-hidden">
<div v-if="blobInfo && !isLoading" class="file-holder">
<blob-header
:blob="blobInfo"
:hide-viewer-switcher="!hasRichViewer || isBinaryFileType || isUsingLfs"

View File

@ -195,6 +195,7 @@ export default {
projectPath: this.projectPath,
filePath: this.path,
ref: this.ref,
shouldFetchRawText: Boolean(this.glFeatures.highlightJs),
});
},
apolloQuery(query, variables) {

View File

@ -1,6 +1,11 @@
#import "ee_else_ce/repository/queries/path_locks.fragment.graphql"
query getBlobInfo($projectPath: ID!, $filePath: String!, $ref: String!) {
query getBlobInfo(
$projectPath: ID!
$filePath: String!
$ref: String!
$shouldFetchRawText: Boolean!
) {
project(fullPath: $projectPath) {
userPermissions {
pushCode
@ -18,7 +23,7 @@ query getBlobInfo($projectPath: ID!, $filePath: String!, $ref: String!) {
name
size
rawSize
rawTextBlob
rawTextBlob @include(if: $shouldFetchRawText)
fileType
language
path

View File

@ -4,10 +4,63 @@ class Projects::GoogleCloud::DeploymentsController < Projects::GoogleCloud::Base
before_action :validate_gcp_token!
def cloud_run
render json: "Placeholder"
params = { token_in_session: token_in_session }
enable_cloud_run_response = GoogleCloud::EnableCloudRunService
.new(project, current_user, params).execute
if enable_cloud_run_response[:status] == :error
flash[:error] = enable_cloud_run_response[:message]
redirect_to project_google_cloud_index_path(project)
else
params = { action: GoogleCloud::GeneratePipelineService::ACTION_DEPLOY_TO_CLOUD_RUN }
generate_pipeline_response = GoogleCloud::GeneratePipelineService
.new(project, current_user, params).execute
if generate_pipeline_response[:status] == :error
flash[:error] = 'Failed to generate pipeline'
redirect_to project_google_cloud_index_path(project)
else
cloud_run_mr_params = cloud_run_mr_params(generate_pipeline_response[:branch_name])
redirect_to project_new_merge_request_path(project, merge_request: cloud_run_mr_params)
end
end
rescue Google::Apis::ClientError => error
handle_gcp_error(error, project)
end
def cloud_storage
render json: "Placeholder"
end
private
def cloud_run_mr_params(branch_name)
{
title: cloud_run_mr_title,
description: cloud_run_mr_description(branch_name),
source_project_id: project.id,
target_project_id: project.id,
source_branch: branch_name,
target_branch: project.default_branch
}
end
def cloud_run_mr_title
'Enable deployments to Cloud Run'
end
def cloud_run_mr_description(branch_name)
<<-TEXT
This merge request includes a Cloud Run deployment job in the pipeline definition (.gitlab-ci.yml).
The `deploy-to-cloud-run` job:
* Requires the following environment variables
* `GCP_PROJECT_ID`
* `GCP_SERVICE_ACCOUNT_KEY`
* Job definition can be found at: https://gitlab.com/gitlab-org/incubation-engineering/five-minute-production/library
This pipeline definition has been committed to the branch `#{branch_name}`.
You may modify the pipeline definition further or accept the changes as-is if suitable.
TEXT
end
end

View File

@ -6,6 +6,8 @@ class Projects::GoogleCloudController < Projects::GoogleCloud::BaseController
screen: 'home',
serviceAccounts: GoogleCloud::ServiceAccountsService.new(project).find_for_project,
createServiceAccountUrl: project_google_cloud_service_accounts_path(project),
enableCloudRunUrl: project_google_cloud_deployments_cloud_run_path(project),
enableCloudStorageUrl: project_google_cloud_deployments_cloud_storage_path(project),
emptyIllustrationUrl: ActionController::Base.helpers.image_path('illustrations/pipelines_empty.svg')
}.to_json
end

View File

@ -5,14 +5,17 @@ class ContainerRepository < ApplicationRecord
include Gitlab::SQL::Pattern
include EachBatch
include Sortable
include AfterCommitQueue
WAITING_CLEANUP_STATUSES = %i[cleanup_scheduled cleanup_unfinished].freeze
REQUIRING_CLEANUP_STATUSES = %i[cleanup_unscheduled cleanup_scheduled].freeze
IDLE_MIGRATION_STATES = %w[default pre_import_done import_done import_aborted import_skipped].freeze
ACTIVE_MIGRATION_STATES = %w[pre_importing importing].freeze
ABORTABLE_MIGRATION_STATES = (ACTIVE_MIGRATION_STATES + ['pre_import_done']).freeze
ABORTABLE_MIGRATION_STATES = (ACTIVE_MIGRATION_STATES + %w[pre_import_done default]).freeze
MIGRATION_STATES = (IDLE_MIGRATION_STATES + ACTIVE_MIGRATION_STATES).freeze
TooManyImportsError = Class.new(StandardError)
belongs_to :project
validates :name, length: { minimum: 0, allow_nil: false }
@ -48,6 +51,32 @@ class ContainerRepository < ApplicationRecord
scope :with_migration_pre_import_started_at_nil_or_before, ->(timestamp) { where("COALESCE(migration_pre_import_started_at, '01-01-1970') < ?", timestamp) }
scope :with_migration_pre_import_done_at_nil_or_before, ->(timestamp) { where("COALESCE(migration_pre_import_done_at, '01-01-1970') < ?", timestamp) }
scope :with_stale_ongoing_cleanup, ->(threshold) { cleanup_ongoing.where('expiration_policy_started_at < ?', threshold) }
scope :import_in_process, -> { where(migration_state: %w[pre_importing pre_import_done importing]) }
scope :recently_done_migration_step, -> do
where(migration_state: %w[import_done pre_import_done import_aborted])
.order(Arel.sql('GREATEST(migration_pre_import_done_at, migration_import_done_at, migration_aborted_at) DESC'))
end
scope :ready_for_import, -> do
# There is no yaml file for the container_registry_phase_2_deny_list
# feature flag since it is only accessed in this query.
# https://gitlab.com/gitlab-org/gitlab/-/issues/350543 tracks the rollout and
# removal of this feature flag.
joins(:project).where(
migration_state: [:default],
created_at: ...ContainerRegistry::Migration.created_before
).with_target_import_tier
.where(
"NOT EXISTS (
SELECT 1
FROM feature_gates
WHERE feature_gates.feature_key = 'container_registry_phase_2_deny_list'
AND feature_gates.key = 'actors'
AND feature_gates.value = concat('Group:', projects.namespace_id)
)"
)
end
state_machine :migration_state, initial: :default do
state :pre_importing do
@ -104,7 +133,7 @@ class ContainerRepository < ApplicationRecord
end
event :skip_import do
transition %i[default pre_importing importing] => :import_skipped
transition ABORTABLE_MIGRATION_STATES.map(&:to_sym) => :import_skipped
end
event :retry_pre_import do
@ -121,7 +150,9 @@ class ContainerRepository < ApplicationRecord
end
after_transition any => :pre_importing do |container_repository|
container_repository.abort_import unless container_repository.migration_pre_import == :ok
container_repository.try_import do
container_repository.migration_pre_import
end
end
before_transition pre_importing: :pre_import_done do |container_repository|
@ -134,7 +165,9 @@ class ContainerRepository < ApplicationRecord
end
after_transition any => :importing do |container_repository|
container_repository.abort_import unless container_repository.migration_import == :ok
container_repository.try_import do
container_repository.migration_import
end
end
before_transition importing: :import_done do |container_repository|
@ -156,9 +189,10 @@ class ContainerRepository < ApplicationRecord
container_repository.migration_skipped_at = Time.zone.now
end
before_transition any => %i[import_done import_aborted] do
# EnqueuerJob.enqueue perform_async or perform_in depending on the speed FF
# To be implemented in https://gitlab.com/gitlab-org/gitlab/-/issues/349744
before_transition any => %i[import_done import_aborted] do |container_repository|
container_repository.run_after_commit do
::ContainerRegistry::Migration::EnqueuerWorker.perform_async
end
end
end
@ -201,6 +235,14 @@ class ContainerRepository < ApplicationRecord
from("(#{union.to_sql}) #{ContainerRepository.table_name}")
end
def self.with_target_import_tier
# overridden in ee
#
# Repositories are being migrated by tier on Saas, so we need to
# filter by plan/subscription which is not available in FOSS
all
end
def skip_import(reason:)
self.migration_skipped_reason = reason
@ -230,6 +272,41 @@ class ContainerRepository < ApplicationRecord
finish_pre_import && start_import
end
def retry_migration
return if migration_import_done_at
if migration_pre_import_done_at
retry_import
else
retry_pre_import
end
end
def try_import
raise ArgumentError, 'block not given' unless block_given?
try_count = 0
begin
try_count += 1
return true if yield == :ok
abort_import
false
rescue TooManyImportsError
if try_count <= ::ContainerRegistry::Migration.start_max_retries
sleep 0.1 * try_count
retry
else
abort_import
false
end
end
end
def last_import_step_done_at
[migration_pre_import_done_at, migration_import_done_at, migration_aborted_at].compact.max
end
# rubocop: disable CodeReuse/ServiceClass
def registry
@registry ||= begin
@ -327,13 +404,19 @@ class ContainerRepository < ApplicationRecord
def migration_pre_import
return :error unless gitlab_api_client.supports_gitlab_api?
gitlab_api_client.pre_import_repository(self.path)
response = gitlab_api_client.pre_import_repository(self.path)
raise TooManyImportsError if response == :too_many_imports
response
end
def migration_import
return :error unless gitlab_api_client.supports_gitlab_api?
gitlab_api_client.import_repository(self.path)
response = gitlab_api_client.import_repository(self.path)
raise TooManyImportsError if response == :too_many_imports
response
end
def self.build_from_path(path)

View File

@ -58,6 +58,10 @@ class GroupChildEntity < Grape::Entity
end
end
expose :can_remove, unless: lambda { |_instance, _options| project? } do |group|
can?(request.current_user, :admin_group, group)
end
expose :number_users_with_delimiter, unless: lambda { |_instance, _options| project? } do |instance|
number_with_delimiter(instance.member_count)
end

View File

@ -0,0 +1,34 @@
# frozen_string_literal: true
module GoogleCloud
class EnableCloudRunService < :: BaseService
def execute
gcp_project_ids = unique_gcp_project_ids
if gcp_project_ids.empty?
error("No GCP projects found. Configure a service account or GCP_PROJECT_ID ci variable.")
else
google_api_client = GoogleApi::CloudPlatform::Client.new(token_in_session, nil)
gcp_project_ids.each do |gcp_project_id|
google_api_client.enable_cloud_run(gcp_project_id)
google_api_client.enable_artifacts_registry(gcp_project_id)
google_api_client.enable_cloud_build(gcp_project_id)
end
success({ gcp_project_ids: gcp_project_ids })
end
end
private
def unique_gcp_project_ids
all_gcp_project_ids = project.variables.filter { |var| var.key == 'GCP_PROJECT_ID' }.map { |var| var.value }
all_gcp_project_ids.uniq
end
def token_in_session
@params[:token_in_session]
end
end
end

View File

@ -1,6 +1,7 @@
= render "projects/blob/breadcrumb", blob: blob
- project = @project.present(current_user: current_user)
- ref = local_assigns[:ref] || @ref
- expanded = params[:expanded].present?
.info-well.d-none.d-sm-block
.well-segment
@ -13,7 +14,7 @@
#blob-content-holder.blob-content-holder
- if @code_navigation_path
#js-code-navigation{ data: { code_navigation_path: @code_navigation_path, blob_path: blob.path, definition_path_prefix: project_blob_path(@project, @ref) } }
- if Feature.enabled?(:refactor_blob_viewer, @project, default_enabled: :yaml)
- if Feature.enabled?(:refactor_blob_viewer, @project, default_enabled: :yaml) && !expanded
-# Data info will be removed once we migrate this to use GraphQL
-# Follow-up issue: https://gitlab.com/gitlab-org/gitlab/-/issues/330406
#js-view-blob-app{ data: { blob_path: blob.path,

View File

@ -273,6 +273,15 @@
:weight: 1
:idempotent:
:tags: []
- :name: cronjob:container_registry_migration_enqueuer
:worker_name: ContainerRegistry::Migration::EnqueuerWorker
:feature_category: :container_registry
:has_external_dependencies:
:urgency: :low
:resource_boundary: :unknown
:weight: 1
:idempotent: true
:tags: []
- :name: cronjob:container_registry_migration_guard
:worker_name: ContainerRegistry::Migration::GuardWorker
:feature_category: :container_registry

View File

@ -0,0 +1,116 @@
# frozen_string_literal: true
module ContainerRegistry
module Migration
class EnqueuerWorker
include ApplicationWorker
include CronjobQueue # rubocop:disable Scalability/CronWorkerContext
include Gitlab::Utils::StrongMemoize
data_consistency :always
feature_category :container_registry
urgency :low
deduplicate :until_executing, including_scheduled: true
idempotent!
def perform
return unless migration.enabled?
return unless below_capacity?
return unless waiting_time_passed?
re_enqueue_if_capacity if handle_aborted_migration || handle_next_migration
rescue StandardError => e
Gitlab::ErrorTracking.log_exception(
e,
next_repository_id: next_repository&.id,
next_aborted_repository_id: next_aborted_repository&.id
)
next_repository&.abort_import
end
private
def handle_aborted_migration
return unless next_aborted_repository&.retry_migration
log_extra_metadata_on_done(:container_repository_id, next_aborted_repository.id)
log_extra_metadata_on_done(:import_type, 'retry')
true
end
def handle_next_migration
return unless next_repository
# We return true because the repository was successfully processed (migration_state is changed)
return true if tag_count_too_high?
return unless next_repository.start_pre_import
log_extra_metadata_on_done(:container_repository_id, next_repository.id)
log_extra_metadata_on_done(:import_type, 'next')
true
end
def tag_count_too_high?
return false unless next_repository.tags_count > migration.max_tags_count
next_repository.skip_import(reason: :too_many_tags)
true
end
def below_capacity?
current_capacity <= maximum_capacity
end
def waiting_time_passed?
delay = migration.enqueue_waiting_time
return true if delay == 0
return true unless last_step_completed_repository
last_step_completed_repository.last_import_step_done_at < Time.zone.now - delay
end
def current_capacity
strong_memoize(:current_capacity) do
ContainerRepository.with_migration_states(
%w[pre_importing pre_import_done importing]
).count
end
end
def maximum_capacity
migration.capacity
end
def next_repository
strong_memoize(:next_repository) do
ContainerRepository.ready_for_import.take # rubocop:disable CodeReuse/ActiveRecord
end
end
def next_aborted_repository
strong_memoize(:next_aborted_repository) do
ContainerRepository.with_migration_state('import_aborted').take # rubocop:disable CodeReuse/ActiveRecord
end
end
def last_step_completed_repository
strong_memoize(:last_step_completed_repository) do
ContainerRepository.recently_done_migration_step.first
end
end
def migration
::ContainerRegistry::Migration
end
def re_enqueue_if_capacity
return unless current_capacity < maximum_capacity
self.class.perform_async
end
end
end
end

View File

@ -4,5 +4,5 @@ introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/16654
rollout_issue_url: https://gitlab.com/gitlab-org/gitlab/-/issues/254938
milestone: '12.4'
type: development
group: group::pipeline execution
group: group::pipeline insights
default_enabled: false

View File

@ -0,0 +1,8 @@
---
name: container_registry_migration_limit_gitlab_org
introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/78613
rollout_issue_url: https://gitlab.com/gitlab-org/gitlab/-/issues/350543
milestone: '14.8'
type: development
group: group::package
default_enabled: false

View File

@ -545,6 +545,9 @@ Settings.cron_jobs['container_registry_migration_guard_worker']['job_class'] = '
Settings.cron_jobs['container_registry_migration_observer_worker'] ||= Settingslogic.new({})
Settings.cron_jobs['container_registry_migration_observer_worker']['cron'] ||= '*/30 * * * *'
Settings.cron_jobs['container_registry_migration_observer_worker']['job_class'] = 'ContainerRegistry::Migration::ObserverWorker'
Settings.cron_jobs['container_registry_migration_enqueuer_worker'] ||= Settingslogic.new({})
Settings.cron_jobs['container_registry_migration_enqueuer_worker']['cron'] ||= '45 */1 * * *'
Settings.cron_jobs['container_registry_migration_enqueuer_worker']['job_class'] = 'ContainerRegistry::Migration::EnqueuerWorker'
Settings.cron_jobs['image_ttl_group_policy_worker'] ||= Settingslogic.new({})
Settings.cron_jobs['image_ttl_group_policy_worker']['cron'] ||= '40 0 * * *'
Settings.cron_jobs['image_ttl_group_policy_worker']['job_class'] = 'DependencyProxy::ImageTtlGroupPolicyWorker'

View File

@ -0,0 +1,19 @@
- name: "Request profiling"
announcement_milestone: "14.8"
announcement_date: "2021-02-22"
removal_milestone: "15.0"
removal_date: "2022-05-22"
breaking_change: true
reporter: iroussos
body: | # Do not modify this line, instead modify the lines below.
[Request profiling](https://docs.gitlab.com/ee/administration/monitoring/performance/request_profiling.html) is deprecated in GitLab 14.8 and scheduled for removal in GitLab 15.0.
We're working on [consolidating our profiling tools](https://gitlab.com/groups/gitlab-org/-/epics/7327) and making them more easily accessible.
We [evaluated](https://gitlab.com/gitlab-org/gitlab/-/issues/350152) the use of this feature and we found that it is not widely used.
It also depends on a few third-party gems that are not actively maintained anymore, have not been updated for the latest version of Ruby, or crash frequently when profiling heavy page loads.
For more information, check the [summary section of the deprecation issue](https://gitlab.com/gitlab-org/gitlab/-/issues/352488#deprecation-summary).
stage: Monitor
tiers: [Free, Premium, Ultimate]
issue_url: https://gitlab.com/gitlab-org/gitlab/-/issues/352488
documentation_url: https://docs.gitlab.com/ee/administration/monitoring/performance/request_profiling.html

View File

@ -0,0 +1,14 @@
# frozen_string_literal: true
class AddIndexOnMigrationStateAndImportDoneAtToContainerRepositories < Gitlab::Database::Migration[1.0]
INDEX_NAME = 'index_container_repositories_on_migration_state_import_done_at'
disable_ddl_transaction!
def up
add_concurrent_index :container_repositories, [:migration_state, :migration_import_done_at], name: INDEX_NAME
end
def down
remove_concurrent_index_by_name :container_repositories, INDEX_NAME
end
end

View File

@ -0,0 +1,17 @@
# frozen_string_literal: true
class AddIndexOnGreatestDoneAtToContainerRepositories < Gitlab::Database::Migration[1.0]
INDEX_NAME = 'index_container_repositories_on_greatest_done_at'
disable_ddl_transaction!
def up
add_concurrent_index :container_repositories,
'GREATEST(migration_pre_import_done_at, migration_import_done_at, migration_aborted_at)',
where: "migration_state IN ('import_done', 'pre_import_done', 'import_aborted')",
name: INDEX_NAME
end
def down
remove_concurrent_index_by_name :container_repositories, INDEX_NAME
end
end

View File

@ -0,0 +1 @@
087338f0b438d2aa33bc22bd3973d818c5d1f40948525d95181751722158605b

View File

@ -0,0 +1 @@
efecc3c6468d8a5036352f5b62e8d70de835d1beb4e45ba6d3906906d0317848

View File

@ -26066,6 +26066,10 @@ CREATE INDEX index_composer_cache_files_where_namespace_id_is_null ON packages_c
CREATE INDEX index_container_expiration_policies_on_next_run_at_and_enabled ON container_expiration_policies USING btree (next_run_at, enabled);
CREATE INDEX index_container_repositories_on_greatest_done_at ON container_repositories USING btree (GREATEST(migration_pre_import_done_at, migration_import_done_at, migration_aborted_at)) WHERE (migration_state = ANY (ARRAY['import_done'::text, 'pre_import_done'::text, 'import_aborted'::text]));
CREATE INDEX index_container_repositories_on_migration_state_import_done_at ON container_repositories USING btree (migration_state, migration_import_done_at);
CREATE INDEX index_container_repositories_on_project_id ON container_repositories USING btree (project_id);
CREATE INDEX index_container_repositories_on_project_id_and_id ON container_repositories USING btree (project_id, id);

View File

@ -0,0 +1,14 @@
---
# Suggestion: gitlab.BadPlurals
#
# Don't write plural words with the '(s)' construction. "HTTP(S)" is acceptable.
#
# For a list of all options, see https://docs.errata.ai/vale/styles
extends: existence
message: 'Rewrite "%s" to be plural, without parentheses.'
link: https://docs.gitlab.com/ee/development/documentation/styleguide/word_list.html
level: warning
scope: raw
ignorecase: true
raw:
- '\w*\(s\)(?<!http\(s\))'

View File

@ -25,8 +25,8 @@ these definitions yet.
| Site | One or a collection of nodes running a single GitLab application. A site can be single-node or multi-node. | GitLab | deployment, installation instance |
| Single-node site | A specific configuration of GitLab that uses exactly one node. | GitLab | single-server, single-instance
| Multi-node site | A specific configuration of GitLab that uses more than one node. | GitLab | multi-server, multi-instance, high availability |
| Primary site | A GitLab site that is configured to be read and writable. There can only be a single primary site. | Geo-specific | Geo deployment, Primary node |
| Secondary site(s) | GitLab site that is configured to be read-only. There can be one or more secondary sites. | Geo-specific | Geo deployment, Secondary node |
| Primary site | A GitLab site whose data is being replicated by at least one secondary site. There can only be a single primary site. | Geo-specific | Geo deployment, Primary node |
| Secondary site(s) | A GitLab site that is configured to replicate the data of a primary site. There can be one or more secondary sites. | Geo-specific | Geo deployment, Secondary node |
| Geo deployment | A collection of two or more GitLab sites with exactly one primary site being replicated by one or more secondary sites. | Geo-specific | |
| Reference architecture(s) | A [specified configuration of GitLab for a number of users](../reference_architectures/index.md), possibly including multiple nodes and multiple sites. | GitLab | |
| Promoting | Changing the role of a site from secondary to primary. | Geo-specific | |

View File

@ -7,6 +7,12 @@ type: howto
# Location-aware Git remote URL with AWS Route53 **(PREMIUM SELF)**
NOTE:
Since GitLab 14.6,
[GitLab Geo supports a location-aware URL including web UI and API traffic.](../secondary_proxy/location_aware_external_url.md)
This configuration is recommended over the location-aware Git remote URL
described in this document.
You can provide GitLab users with a single remote URL that automatically uses
the Geo site closest to them. This means users don't need to update their Git
configuration to take advantage of closer Geo sites as they move.
@ -18,12 +24,6 @@ Though these instructions use [AWS Route53](https://aws.amazon.com/route53/),
other services such as [Cloudflare](https://www.cloudflare.com/) could be used
as well.
NOTE:
You can also use a load balancer to distribute web UI or API traffic to
[multiple Geo **secondary** sites](../../../user/admin_area/geo_nodes.md#multiple-secondary-sites-behind-a-load-balancer).
Importantly, the **primary** site cannot yet be included. See the feature request
[Support putting the **primary** behind a Geo node load balancer](https://gitlab.com/gitlab-org/gitlab/-/issues/10888).
## Prerequisites
In this example, we have already set up:

View File

@ -8,7 +8,7 @@ info: To determine the technical writer assigned to the Stage/Group associated w
Review this page for update instructions for your version. These steps
accompany the [general steps](updating_the_geo_sites.md#general-update-steps)
for updating Geo nodes.
for updating Geo sites.
## Updating to 14.2 through 14.7
@ -33,7 +33,7 @@ There is [an issue in GitLab 14.4.0 through 14.4.2](../../../update/index.md#144
### Multi-arch images
We found an [issue](https://gitlab.com/gitlab-org/gitlab/-/issues/336013) where the Container Registry replication wasn't fully working if you used multi-arch images. In case of a multi-arch image, only the primary architecture (for example `amd64`) would be replicated to the secondary node. This has been [fixed in GitLab 14.3](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/67624) and was backported to 14.2 and 14.1, but manual steps are required to force a re-sync.
We found an [issue](https://gitlab.com/gitlab-org/gitlab/-/issues/336013) where the Container Registry replication wasn't fully working if you used multi-arch images. In case of a multi-arch image, only the primary architecture (for example `amd64`) would be replicated to the secondary site. This has been [fixed in GitLab 14.3](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/67624) and was backported to 14.2 and 14.1, but manual steps are required to force a re-sync.
You can check if you are affected by running:
@ -41,12 +41,12 @@ You can check if you are affected by running:
docker manifest inspect <SECONDARY_IMAGE_LOCATION> | jq '.mediaType'
```
Where `<SECONDARY_IMAGE_LOCATION>` is a container image on your secondary node.
Where `<SECONDARY_IMAGE_LOCATION>` is a container image on your secondary site.
If the output matches `application/vnd.docker.distribution.manifest.list.v2+json`
(there can be a `mediaType` entry at several levels, we only care about the top level entry),
then you don't need to do anything.
Otherwise, on all your **secondary** nodes, in a [Rails console](../../operations/rails_console.md), run the following:
Otherwise, for each **secondary** site, on a Rails application node, open a [Rails console](../../operations/rails_console.md), and run the following:
```ruby
list_type = 'application/vnd.docker.distribution.manifest.list.v2+json'
@ -78,7 +78,7 @@ We found an issue where [Primary sites can not be removed from the UI](https://g
This bug only exists in the UI and does not block the removal of Primary sites using any other method.
If you are running an affected version and need to remove your Primary site, you can manually remove the Primary site by using the [Geo Nodes API](../../../api/geo_nodes.md#delete-a-geo-node).
If you are running an affected version and need to remove your Primary site, you can manually remove the Primary site by using the [Geo Sites API](../../../api/geo_nodes.md#delete-a-geo-node).
### Geo Admin Area shows 'Unhealthy' after enabling Maintenance Mode
@ -86,9 +86,9 @@ GitLab 13.9 through GitLab 14.3 are affected by a bug in which enabling [GitLab
## Updating to GitLab 13.12
### Secondary nodes re-download all LFS files upon update
### Secondary sites re-download all LFS files upon update
We found an issue where [secondary nodes re-download all LFS files](https://gitlab.com/gitlab-org/gitlab/-/issues/334550) upon update. This bug:
We found an issue where [secondary sites re-download all LFS files](https://gitlab.com/gitlab-org/gitlab/-/issues/334550) upon update. This bug:
- Only applies to Geo secondary sites that have replicated LFS objects.
- Is _not_ a data loss risk.
@ -187,7 +187,7 @@ In GitLab 13.3, Geo removed the PostgreSQL [Foreign Data Wrapper](https://www.po
dependency for the tracking database.
The FDW server, user, and the extension is removed during the upgrade
process on each secondary node. The GitLab settings related to the FDW in the
process on each secondary site. The GitLab settings related to the FDW in the
`/etc/gitlab/gitlab.rb` have been deprecated and can be safely removed.
There are some scenarios like using an external PostgreSQL instance for the
@ -200,9 +200,9 @@ DROP EXTENSION IF EXISTS postgres_fdw;
```
WARNING:
In GitLab 13.3, promoting a secondary node to a primary while the secondary is
In GitLab 13.3, promoting a secondary site to a primary while the secondary is
paused fails. Do not pause replication before promoting a secondary. If the
node is paused, be sure to resume before promoting. To avoid this issue,
site is paused, be sure to resume before promoting. To avoid this issue,
upgrade to GitLab 13.4 or later.
WARNING:
@ -213,9 +213,9 @@ contain a workaround if you run into errors during the failover.
## Updating to GitLab 13.2
In GitLab 13.2, promoting a secondary node to a primary while the secondary is
In GitLab 13.2, promoting a secondary site to a primary while the secondary is
paused fails. Do not pause replication before promoting a secondary. If the
node is paused, be sure to resume before promoting. To avoid this issue,
site is paused, be sure to resume before promoting. To avoid this issue,
upgrade to GitLab 13.4 or later.
## Updating to GitLab 13.0
@ -390,5 +390,5 @@ For the recommended procedure, see the
WARNING:
This version is affected by a [bug that results in new LFS objects not being
replicated to Geo secondary nodes](https://gitlab.com/gitlab-org/gitlab/-/issues/32696).
replicated to Geo secondary sites](https://gitlab.com/gitlab-org/gitlab/-/issues/32696).
The issue is fixed in GitLab 12.1. Be sure to upgrade to GitLab 12.1 or later.

View File

@ -2,7 +2,6 @@
stage: Enablement
group: Distribution
info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://about.gitlab.com/handbook/engineering/ux/technical-writing/#assignments
type: reference
---
# Load Balancer for multi-node GitLab **(FREE SELF)**
@ -21,38 +20,38 @@ How do you want to handle SSL in your multi-node environment? There are several
options:
- Each application node terminates SSL
- The load balancer(s) terminate SSL and communication is not secure between
the load balancer(s) and the application nodes
- The load balancer(s) terminate SSL and communication is *secure* between the
load balancer(s) and the application nodes
- The load balancers terminate SSL and communication is not secure between
the load balancers and the application nodes
- The load balancers terminate SSL and communication is *secure* between the
load balancers and the application nodes
### Application nodes terminate SSL
Configure your load balancer(s) to pass connections on port 443 as 'TCP' rather
Configure your load balancers to pass connections on port 443 as 'TCP' rather
than 'HTTP(S)' protocol. This passes the connection to the application nodes
NGINX service untouched. NGINX has the SSL certificate and listens on port 443.
See [NGINX HTTPS documentation](https://docs.gitlab.com/omnibus/settings/nginx.html#enable-https)
for details on managing SSL certificates and configuring NGINX.
### Load Balancer(s) terminate SSL without backend SSL
### Load Balancers terminate SSL without backend SSL
Configure your load balancer(s) to use the 'HTTP(S)' protocol rather than 'TCP'.
The load balancer(s) is be responsible for managing SSL certificates and
Configure your load balancers to use the 'HTTP(S)' protocol rather than 'TCP'.
The load balancers are responsible for managing SSL certificates and
terminating SSL.
Since communication between the load balancer(s) and GitLab isn't secure,
Since communication between the load balancers and GitLab isn't secure,
there is some additional configuration needed. See
[NGINX Proxied SSL documentation](https://docs.gitlab.com/omnibus/settings/nginx.html#supporting-proxied-ssl)
for details.
### Load Balancer(s) terminate SSL with backend SSL
### Load Balancers terminate SSL with backend SSL
Configure your load balancer(s) to use the 'HTTP(S)' protocol rather than 'TCP'.
The load balancer(s) is responsible for managing SSL certificates that
Configure your load balancers to use the 'HTTP(S)' protocol rather than 'TCP'.
The load balancers are responsible for managing SSL certificates that
end users see.
Traffic is secure between the load balancer(s) and NGINX in this
Traffic is secure between the load balancers and NGINX in this
scenario. There is no need to add configuration for proxied SSL since the
connection is secure all the way. However, configuration must be
added to GitLab to configure SSL certificates. See

View File

@ -274,11 +274,11 @@ for details.
### Load balancer terminates SSL with backend SSL
Configure your load balancer(s) to use the 'HTTP(S)' protocol rather than 'TCP'.
The load balancer(s) will be responsible for managing SSL certificates that
Configure your load balancers to use the 'HTTP(S)' protocol rather than 'TCP'.
The load balancers will be responsible for managing SSL certificates that
end users will see.
Traffic will also be secure between the load balancer(s) and NGINX in this
Traffic will also be secure between the load balancers and NGINX in this
scenario. There is no need to add configuration for proxied SSL since the
connection will be secure all the way. However, configuration will need to be
added to GitLab to configure SSL certificates. See

View File

@ -277,11 +277,11 @@ for details.
### Load balancer terminates SSL with backend SSL
Configure your load balancer(s) to use the 'HTTP(S)' protocol rather than 'TCP'.
The load balancer(s) will be responsible for managing SSL certificates that
Configure your load balancers to use the 'HTTP(S)' protocol rather than 'TCP'.
The load balancers will be responsible for managing SSL certificates that
end users will see.
Traffic will also be secure between the load balancer(s) and NGINX in this
Traffic will also be secure between the load balancers and NGINX in this
scenario. There is no need to add configuration for proxied SSL since the
connection will be secure all the way. However, configuration will need to be
added to GitLab to configure SSL certificates. See

View File

@ -278,11 +278,11 @@ for details.
### Load balancer terminates SSL with backend SSL
Configure your load balancer(s) to use the 'HTTP(S)' protocol rather than 'TCP'.
The load balancer(s) will be responsible for managing SSL certificates that
Configure your load balancers to use the 'HTTP(S)' protocol rather than 'TCP'.
The load balancers will be responsible for managing SSL certificates that
end users will see.
Traffic will also be secure between the load balancer(s) and NGINX in this
Traffic will also be secure between the load balancers and NGINX in this
scenario. There is no need to add configuration for proxied SSL since the
connection will be secure all the way. However, configuration will need to be
added to GitLab to configure SSL certificates. See

View File

@ -283,11 +283,11 @@ for details.
### Load balancer terminates SSL with backend SSL
Configure your load balancer(s) to use the 'HTTP(S)' protocol rather than 'TCP'.
The load balancer(s) will be responsible for managing SSL certificates that
Configure your load balancers to use the 'HTTP(S)' protocol rather than 'TCP'.
The load balancers will be responsible for managing SSL certificates that
end users will see.
Traffic will also be secure between the load balancer(s) and NGINX in this
Traffic will also be secure between the load balancers and NGINX in this
scenario. There is no need to add configuration for proxied SSL since the
connection will be secure all the way. However, configuration will need to be
added to GitLab to configure SSL certificates. See

View File

@ -276,11 +276,11 @@ for details.
### Load balancer terminates SSL with backend SSL
Configure your load balancer(s) to use the 'HTTP(S)' protocol rather than 'TCP'.
The load balancer(s) will be responsible for managing SSL certificates that
Configure your load balancers to use the 'HTTP(S)' protocol rather than 'TCP'.
The load balancers will be responsible for managing SSL certificates that
end users will see.
Traffic will also be secure between the load balancer(s) and NGINX in this
Traffic will also be secure between the load balancers and NGINX in this
scenario. There is no need to add configuration for proxied SSL since the
connection will be secure all the way. However, configuration will need to be
added to GitLab to configure SSL certificates. See

View File

@ -7,7 +7,7 @@ info: To determine the technical writer assigned to the Stage/Group associated w
# How to restart GitLab **(FREE SELF)**
Depending on how you installed GitLab, there are different methods to restart
its service(s).
its services.
## Omnibus installations

View File

@ -105,7 +105,7 @@ To configure Sidekiq:
gitlab_rails['db_encoding'] = 'unicode'
gitlab_rails['auto_migrate'] = false
# Add the Sidekiq node(s) to PostgreSQL's trusted addresses.
# Add the Sidekiq nodes to PostgreSQL's trusted addresses.
# In the following example, 10.10.1.30/32 is the private IP
# of the Sidekiq server.
postgresql['trust_auth_cidr_addresses'] = %w(127.0.0.1/32 10.10.1.30/32)

View File

@ -38,7 +38,7 @@ end
### About namespace naming
A good guideline for naming a top-level namespace (bounded context) is to use the related
feature category. For example, `Continuous Integration` feature category maps to `Ci::` namespace.
[feature category](https://gitlab.com/gitlab-com/www-gitlab-com/-/blob/master/data/categories.yml). For example, `Continuous Integration` feature category maps to `Ci::` namespace.
Alternatively a new class could be added to `Projects::` or `Groups::` if it's either:

View File

@ -958,6 +958,24 @@ Gitaly Cluster offers tremendous benefits for our customers such as:
**Planned removal milestone: 15.0 (2022-05-22)**
### Request profiling
WARNING:
This feature will be changed or removed in 15.0
as a [breaking change](https://docs.gitlab.com/ee/development/contributing/#breaking-changes).
Before updating GitLab, review the details carefully to determine if you need to make any
changes to your code, settings, or workflow.
[Request profiling](https://docs.gitlab.com/ee/administration/monitoring/performance/request_profiling.html) is deprecated in GitLab 14.8 and scheduled for removal in GitLab 15.0.
We're working on [consolidating our profiling tools](https://gitlab.com/groups/gitlab-org/-/epics/7327) and making them more easily accessible.
We [evaluated](https://gitlab.com/gitlab-org/gitlab/-/issues/350152) the use of this feature and we found that it is not widely used.
It also depends on a few third-party gems that are not actively maintained anymore, have not been updated for the latest version of Ruby, or crash frequently when profiling heavy page loads.
For more information, check the [summary section of the deprecation issue](https://gitlab.com/gitlab-org/gitlab/-/issues/352488#deprecation-summary).
**Planned removal milestone: 15.0 (2022-05-22)**
### Test coverage project CI/CD setting
WARNING:

View File

@ -422,10 +422,22 @@ for the group's projects to meet your group's needs.
To remove a group and its contents:
1. Go to your group's **Settings > General** page.
1. Expand the **Path, transfer, remove** section.
1. On the top bar, select **Menu > Groups** and find your group.
1. On the left sidebar, select **Settings > General**.
1. Expand the **Advanced** section.
1. In the **Remove group** section, select **Remove group**.
1. Type the group name.
1. Select **Confirm**.
A group can also be removed from the groups dashboard:
1. On the top bar, select **Menu > Groups**.
1. Select **Your Groups**.
1. Select (**{ellipsis_v}**) for the group you want to delete.
1. Select **Delete**.
1. In the Remove group section, select **Remove group**.
1. Confirm the action.
1. Type the group name.
1. Select **Confirm**.
This action removes the group. It also adds a background job to delete all projects in the group.

View File

@ -22,6 +22,10 @@ module ContainerRegistry
Feature.enabled?(:container_registry_migration_phase2_enabled)
end
# Whether the container-registry migration should be limited in scope,
# controlled by the :container_registry_migration_limit_gitlab_org
# feature flag. NOTE(review): presumably restricts the migration to the
# gitlab-org namespace — confirm against the flag's rollout issue.
def self.limit_gitlab_org?
Feature.enabled?(:container_registry_migration_limit_gitlab_org)
end
def self.enqueue_waiting_time
return 0 if Feature.enabled?(:container_registry_migration_phase2_enqueue_speed_fast)
return 6.hours if Feature.enabled?(:container_registry_migration_phase2_enqueue_speed_slow)
@ -36,5 +40,9 @@ module ContainerRegistry
0
end
# Resolves the Plan record whose name matches +target_plan_name+.
# Uses the dynamic finder form, which returns nil when no plan with
# that name exists (TODO confirm Plan is an ActiveRecord model here).
def self.target_plan
Plan.find_by_name(target_plan_name)
end
end
end

View File

@ -7,11 +7,12 @@ require 'google/apis/container_v1beta1'
require 'google/apis/cloudbilling_v1'
require 'google/apis/cloudresourcemanager_v1'
require 'google/apis/iam_v1'
require 'google/apis/serviceusage_v1'
module GoogleApi
module CloudPlatform
class Client < GoogleApi::Auth
SCOPE = 'https://www.googleapis.com/auth/cloud-platform'
SCOPE = 'https://www.googleapis.com/auth/cloud-platform https://www.googleapis.com/auth/service.management'
LEAST_TOKEN_LIFE_TIME = 10.minutes
CLUSTER_MASTER_AUTH_USERNAME = 'admin'
CLUSTER_IPV4_CIDR_BLOCK = '/16'
@ -133,8 +134,27 @@ module GoogleApi
cloud_resource_manager_service.set_project_iam_policy(gcp_project_id, body)
end
# Enables the Cloud Run API ('run.googleapis.com') on the given GCP
# project by delegating to the private enable_service helper.
def enable_cloud_run(gcp_project_id)
enable_service(gcp_project_id, 'run.googleapis.com')
end
# Enables the Artifact Registry API ('artifactregistry.googleapis.com')
# on the given GCP project by delegating to the private enable_service helper.
def enable_artifacts_registry(gcp_project_id)
enable_service(gcp_project_id, 'artifactregistry.googleapis.com')
end
# Enables the Cloud Build API ('cloudbuild.googleapis.com') on the
# given GCP project by delegating to the private enable_service helper.
def enable_cloud_build(gcp_project_id)
enable_service(gcp_project_id, 'cloudbuild.googleapis.com')
end
private
# Enables a single Google Cloud service on a project via the Service
# Usage API.
#
# gcp_project_id - the GCP project identifier.
# service_name   - the API hostname to enable, e.g. 'run.googleapis.com'.
#
# The resource name follows the Service Usage convention
# "projects/{project}/services/{service}". Authorization reuses this
# client's OAuth access_token.
def enable_service(gcp_project_id, service_name)
name = "projects/#{gcp_project_id}/services/#{service_name}"
service = Google::Apis::ServiceusageV1::ServiceUsageService.new
service.authorization = access_token
service.enable_service(name)
end
def make_cluster_options(cluster_name, cluster_size, machine_type, legacy_abac, enable_addons)
{
cluster: {

View File

@ -17701,13 +17701,16 @@ msgstr ""
msgid "GroupsTree|Are you sure you want to leave the \"%{fullName}\" group?"
msgstr ""
msgid "GroupsTree|Edit group"
msgid "GroupsTree|Delete"
msgstr ""
msgid "GroupsTree|Edit"
msgstr ""
msgid "GroupsTree|Failed to leave the group. Please make sure you are not the only owner."
msgstr ""
msgid "GroupsTree|Leave this group"
msgid "GroupsTree|Leave group"
msgstr ""
msgid "GroupsTree|Loading groups"
@ -17719,6 +17722,9 @@ msgstr ""
msgid "GroupsTree|No groups or projects matched your search"
msgstr ""
msgid "GroupsTree|Options"
msgstr ""
msgid "GroupsTree|Search by name"
msgstr ""

View File

@ -1,7 +1,7 @@
# frozen_string_literal: true
module QA
RSpec.describe 'Create' do # convert back to a smoke test once proved to be stable
RSpec.describe 'Create', :smoke do
describe 'Personal snippet creation' do
let(:snippet) do
Resource::Snippet.fabricate_via_browser_ui! do |snippet|

View File

@ -296,7 +296,7 @@ function retry_failed_rspec_examples() {
echo "${CI_JOB_URL}" > "${RETRIED_TESTS_REPORT_PATH}"
echo $failed_examples >> "${RETRIED_TESTS_REPORT_PATH}"
echoinfo "Retrying the failing examples in a new RSpec proces..."
echoinfo "Retrying the failing examples in a new RSpec process..."
install_junit_merge_gem

View File

@ -15,6 +15,10 @@ RSpec.describe 'Dashboard Groups page', :js do
wait_for_requests
end
# Opens the actions dropdown for the given group on the dashboard.
#
# Fix: the attribute selector was missing its closing "]"
# ("[data-testid='…'"), which is not a valid CSS selector; sibling
# assertions in this file (e.g. remove-group-…-btn) close the bracket.
def click_options_menu(group)
  page.find("[data-testid='group-#{group.id}-dropdown-button']").click
end
it 'shows groups user is member of' do
group.add_owner(user)
nested_group.add_owner(user)
@ -112,6 +116,67 @@ RSpec.describe 'Dashboard Groups page', :js do
end
end
context 'group actions dropdown' do
let!(:subgroup) { create(:group, :public, parent: group) }
context 'user with subgroup ownership' do
before do
subgroup.add_owner(user)
sign_in(user)
visit dashboard_groups_path
end
it 'cannot remove parent group' do
  # A user who only owns the subgroup must not see the parent group's
  # actions dropdown at all. Fix: the selector was missing its closing
  # "]" ("[data-testid='…'"), making it an invalid CSS selector.
  expect(page).not_to have_selector("[data-testid='group-#{group.id}-dropdown-button']")
end
end
context 'user with parent group ownership' do
before do
group.add_owner(user)
sign_in(user)
visit dashboard_groups_path
end
it 'can remove parent group' do
click_options_menu(group)
expect(page).to have_selector("[data-testid='remove-group-#{group.id}-btn']")
end
it 'can remove subgroups' do
click_group_caret(group)
click_options_menu(subgroup)
expect(page).to have_selector("[data-testid='remove-group-#{subgroup.id}-btn']")
end
end
context 'user is a maintainer' do
before do
group.add_maintainer(user)
sign_in(user)
visit dashboard_groups_path
click_options_menu(group)
end
it 'cannot remove the group' do
expect(page).not_to have_selector("[data-testid='remove-group-#{group.id}-btn']")
end
it 'cannot edit the group' do
expect(page).not_to have_selector("[data-testid='edit-group-#{group.id}-btn']")
end
it 'can leave the group' do
expect(page).to have_selector("[data-testid='leave-group-#{group.id}-btn']")
end
end
end
context 'when using pagination' do
let(:group) { create(:group, created_at: 5.days.ago) }
let(:group2) { create(:group, created_at: 2.days.ago) }

View File

@ -24,8 +24,8 @@ const HOME_PROPS = {
serviceAccounts: [{}, {}],
createServiceAccountUrl: '#url-create-service-account',
emptyIllustrationUrl: '#url-empty-illustration',
deploymentsCloudRunUrl: '#url-deployments-cloud-run',
deploymentsCloudStorageUrl: '#deploymentsCloudStorageUrl',
enableCloudRunUrl: '#url-enable-cloud-run',
enableCloudStorageUrl: '#enableCloudStorageUrl',
};
describe('google_cloud App component', () => {

View File

@ -12,8 +12,8 @@ describe('google_cloud DeploymentsServiceTable component', () => {
beforeEach(() => {
const propsData = {
cloudRunUrl: '#url-deployments-cloud-run',
cloudStorageUrl: '#url-deployments-cloud-storage',
cloudRunUrl: '#url-enable-cloud-run',
cloudStorageUrl: '#url-enable-cloud-storage',
};
wrapper = mount(DeploymentsServiceTable, { propsData });
});
@ -29,12 +29,13 @@ describe('google_cloud DeploymentsServiceTable component', () => {
it('should contain configure cloud run button', () => {
const cloudRunButton = findCloudRunButton();
expect(cloudRunButton.exists()).toBe(true);
expect(cloudRunButton.props().disabled).toBe(true);
expect(cloudRunButton.attributes('href')).toBe('#url-enable-cloud-run');
});
it('should contain configure cloud storage button', () => {
const cloudStorageButton = findCloudStorageButton();
expect(cloudStorageButton.exists()).toBe(true);
expect(cloudStorageButton.props().disabled).toBe(true);
expect(cloudStorageButton.attributes('href')).toBe('#url-enable-cloud-storage');
});
});

View File

@ -20,8 +20,8 @@ describe('google_cloud Home component', () => {
serviceAccounts: [{}, {}],
createServiceAccountUrl: '#url-create-service-account',
emptyIllustrationUrl: '#url-empty-illustration',
deploymentsCloudRunUrl: '#url-deployments-cloud-run',
deploymentsCloudStorageUrl: '#deploymentsCloudStorageUrl',
enableCloudRunUrl: '#url-enable-cloud-run',
enableCloudStorageUrl: '#enableCloudStorageUrl',
};
beforeEach(() => {

View File

@ -280,6 +280,7 @@ describe('AppComponent', () => {
expect(vm.targetParentGroup).toBe(null);
vm.showLeaveGroupModal(group, mockParentGroupItem);
expect(vm.isModalVisible).toBe(true);
expect(vm.targetGroup).not.toBe(null);
expect(vm.targetParentGroup).not.toBe(null);
});
@ -290,6 +291,7 @@ describe('AppComponent', () => {
expect(vm.groupLeaveConfirmationMessage).toBe('');
vm.showLeaveGroupModal(group, mockParentGroupItem);
expect(vm.isModalVisible).toBe(true);
expect(vm.groupLeaveConfirmationMessage).toBe(
`Are you sure you want to leave the "${group.fullName}" group?`,
);

View File

@ -1,4 +1,4 @@
import { shallowMount } from '@vue/test-utils';
import { shallowMountExtended } from 'helpers/vue_test_utils_helper';
import ItemActions from '~/groups/components/item_actions.vue';
import eventHub from '~/groups/event_hub';
import { mockParentGroupItem, mockChildren } from '../mock_data';
@ -13,7 +13,7 @@ describe('ItemActions', () => {
};
const createComponent = (props = {}) => {
wrapper = shallowMount(ItemActions, {
wrapper = shallowMountExtended(ItemActions, {
propsData: { ...defaultProps, ...props },
});
};
@ -23,8 +23,10 @@ describe('ItemActions', () => {
wrapper = null;
});
const findEditGroupBtn = () => wrapper.find('[data-testid="edit-group-btn"]');
const findLeaveGroupBtn = () => wrapper.find('[data-testid="leave-group-btn"]');
const findEditGroupBtn = () => wrapper.findByTestId(`edit-group-${mockParentGroupItem.id}-btn`);
const findLeaveGroupBtn = () => wrapper.findByTestId(`leave-group-${mockParentGroupItem.id}-btn`);
const findRemoveGroupBtn = () =>
wrapper.findByTestId(`remove-group-${mockParentGroupItem.id}-btn`);
describe('template', () => {
let group;
@ -34,6 +36,7 @@ describe('ItemActions', () => {
...mockParentGroupItem,
canEdit: true,
canLeave: true,
canRemove: true,
};
createComponent({ group });
});
@ -41,21 +44,21 @@ describe('ItemActions', () => {
it('renders component template correctly', () => {
createComponent();
expect(wrapper.classes()).toContain('controls');
expect(wrapper.classes()).toContain('gl-display-flex', 'gl-justify-content-end', 'gl-ml-5');
});
it('renders "Edit group" button with correct attribute values', () => {
it('renders "Edit" group button with correct attribute values', () => {
const button = findEditGroupBtn();
expect(button.exists()).toBe(true);
expect(button.props('icon')).toBe('pencil');
expect(button.attributes('aria-label')).toBe('Edit group');
expect(button.attributes('href')).toBe(mockParentGroupItem.editPath);
});
it('renders "Leave this group" button with correct attribute values', () => {
const button = findLeaveGroupBtn();
it('renders "Delete" group button with correct attribute values', () => {
const button = findRemoveGroupBtn();
expect(button.exists()).toBe(true);
expect(button.props('icon')).toBe('leave');
expect(button.attributes('aria-label')).toBe('Leave this group');
expect(button.attributes('href')).toBe(
`${mockParentGroupItem.editPath}#js-remove-group-form`,
);
});
it('emits `showLeaveGroupModal` event in the event hub', () => {
@ -103,4 +106,15 @@ describe('ItemActions', () => {
expect(findEditGroupBtn().exists()).toBe(false);
});
it('does not render delete button if group can not be edited', () => {
createComponent({
group: {
...mockParentGroupItem,
canRemove: false,
},
});
expect(findRemoveGroupBtn().exists()).toBe(false);
});
});

View File

@ -1,4 +1,4 @@
import Vue from 'vue';
import Vue, { nextTick } from 'vue';
import { createComponentWithStore } from 'helpers/vue_mount_component_helper';
import newDropdown from '~/ide/components/new_dropdown/index.vue';
import { createStore } from '~/ide/stores';
@ -57,17 +57,15 @@ describe('new dropdown component', () => {
});
describe('isOpen', () => {
it('scrolls dropdown into view', (done) => {
it('scrolls dropdown into view', async () => {
jest.spyOn(vm.$refs.dropdownMenu, 'scrollIntoView').mockImplementation(() => {});
vm.isOpen = true;
setImmediate(() => {
expect(vm.$refs.dropdownMenu.scrollIntoView).toHaveBeenCalledWith({
block: 'nearest',
});
await nextTick();
done();
expect(vm.$refs.dropdownMenu.scrollIntoView).toHaveBeenCalledWith({
block: 'nearest',
});
});
});

View File

@ -4,6 +4,7 @@ import Vue, { nextTick } from 'vue';
import { dispatch } from 'codesandbox-api';
import smooshpack from 'smooshpack';
import Vuex from 'vuex';
import waitForPromises from 'helpers/wait_for_promises';
import Clientside from '~/ide/components/preview/clientside.vue';
import { PING_USAGE_PREVIEW_KEY, PING_USAGE_PREVIEW_SUCCESS_KEY } from '~/ide/constants';
import eventHub from '~/ide/eventhub';
@ -43,8 +44,6 @@ describe('IDE clientside preview', () => {
};
const dispatchCodesandboxReady = () => dispatch({ type: 'done' });
const waitForCalls = () => new Promise(setImmediate);
const createComponent = ({ state, getters } = {}) => {
store = new Vuex.Store({
state: {
@ -100,7 +99,7 @@ describe('IDE clientside preview', () => {
beforeEach(() => {
createComponent({ getters: { packageJson: dummyPackageJson } });
return waitForCalls();
return waitForPromises();
});
it('creates sandpack manager', () => {
@ -139,7 +138,7 @@ describe('IDE clientside preview', () => {
state: { codesandboxBundlerUrl: TEST_BUNDLER_URL },
});
return waitForCalls();
return waitForPromises();
});
it('creates sandpack manager with bundlerURL', () => {
@ -154,7 +153,7 @@ describe('IDE clientside preview', () => {
beforeEach(() => {
createComponent({ getters: { packageJson: dummyPackageJson } });
return waitForCalls();
return waitForPromises();
});
it('creates sandpack manager', () => {
@ -340,7 +339,7 @@ describe('IDE clientside preview', () => {
wrapper.setData({ sandpackReady: true });
wrapper.vm.update();
return waitForCalls().then(() => {
return waitForPromises().then(() => {
expect(smooshpack.Manager).toHaveBeenCalled();
});
});

View File

@ -81,16 +81,13 @@ describe('Multi-file editor library model', () => {
});
describe('onChange', () => {
it('calls callback on change', (done) => {
it('calls callback on change', () => {
const spy = jest.fn();
model.onChange(spy);
model.getModel().setValue('123');
setImmediate(() => {
expect(spy).toHaveBeenCalledWith(model, expect.anything());
done();
});
expect(spy).toHaveBeenCalledWith(model, expect.anything());
});
});

View File

@ -274,24 +274,17 @@ describe('Multi-file store actions', () => {
});
describe('scrollToTab', () => {
it('focuses the current active element', (done) => {
it('focuses the current active element', () => {
document.body.innerHTML +=
'<div id="tabs"><div class="active"><div class="repo-tab"></div></div></div>';
const el = document.querySelector('.repo-tab');
jest.spyOn(el, 'focus').mockImplementation();
store
.dispatch('scrollToTab')
.then(() => {
setImmediate(() => {
expect(el.focus).toHaveBeenCalled();
return store.dispatch('scrollToTab').then(() => {
expect(el.focus).toHaveBeenCalled();
document.getElementById('tabs').remove();
done();
});
})
.catch(done.fail);
document.getElementById('tabs').remove();
});
});
});

View File

@ -12,7 +12,7 @@ describe('RelatedMergeRequests', () => {
let wrapper;
let mock;
beforeEach((done) => {
beforeEach(() => {
// put the fixture in DOM as the component expects
document.body.innerHTML = `<div id="js-issuable-app"></div>`;
document.getElementById('js-issuable-app').dataset.initial = JSON.stringify(mockData);
@ -29,7 +29,7 @@ describe('RelatedMergeRequests', () => {
},
});
setImmediate(done);
return axios.waitForAll();
});
afterEach(() => {

View File

@ -394,8 +394,7 @@ describe('common_utils', () => {
describe('backOff', () => {
beforeEach(() => {
// shortcut our timeouts otherwise these tests will take a long time to finish
jest.spyOn(window, 'setTimeout').mockImplementation((cb) => setImmediate(cb, 0));
jest.spyOn(window, 'setTimeout');
});
it('solves the promise from the callback', (done) => {
@ -446,6 +445,7 @@ describe('common_utils', () => {
if (numberOfCalls < 3) {
numberOfCalls += 1;
next();
jest.runOnlyPendingTimers();
} else {
stop(resp);
}
@ -464,7 +464,10 @@ describe('common_utils', () => {
it('rejects the backOff promise after timing out', (done) => {
commonUtils
.backOff((next) => next(), 64000)
.backOff((next) => {
next();
jest.runOnlyPendingTimers();
}, 64000)
.catch((errBackoffResp) => {
const timeouts = window.setTimeout.mock.calls.map(([, timeout]) => timeout);

View File

@ -1,6 +1,7 @@
import MockAdapter from 'axios-mock-adapter';
import $ from 'jquery';
import { TEST_HOST } from 'spec/test_constants';
import waitForPromises from 'helpers/wait_for_promises';
import axios from '~/lib/utils/axios_utils';
import MergeRequest from '~/merge_request';
@ -27,31 +28,31 @@ describe('MergeRequest', () => {
mock.restore();
});
it('modifies the Markdown field', (done) => {
it('modifies the Markdown field', async () => {
jest.spyOn($, 'ajax').mockImplementation();
const changeEvent = document.createEvent('HTMLEvents');
changeEvent.initEvent('change', true, true);
$('input[type=checkbox]').first().attr('checked', true)[0].dispatchEvent(changeEvent);
setImmediate(() => {
expect($('.js-task-list-field').val()).toBe(
'- [x] Task List Item\n- [ ]\n- [ ] Task List Item 2\n',
);
done();
});
await waitForPromises();
expect($('.js-task-list-field').val()).toBe(
'- [x] Task List Item\n- [ ]\n- [ ] Task List Item 2\n',
);
});
it('ensure that task with only spaces does not get checked incorrectly', (done) => {
it('ensure that task with only spaces does not get checked incorrectly', async () => {
// fixed in 'deckar01-task_list', '2.2.1' gem
jest.spyOn($, 'ajax').mockImplementation();
const changeEvent = document.createEvent('HTMLEvents');
changeEvent.initEvent('change', true, true);
$('input[type=checkbox]').last().attr('checked', true)[0].dispatchEvent(changeEvent);
setImmediate(() => {
expect($('.js-task-list-field').val()).toBe(
'- [ ] Task List Item\n- [ ]\n- [x] Task List Item 2\n',
);
done();
});
await waitForPromises();
expect($('.js-task-list-field').val()).toBe(
'- [ ] Task List Item\n- [ ]\n- [x] Task List Item 2\n',
);
});
describe('tasklist', () => {
@ -60,29 +61,27 @@ describe('MergeRequest', () => {
const index = 3;
const checked = true;
it('submits an ajax request on tasklist:changed', (done) => {
it('submits an ajax request on tasklist:changed', async () => {
$('.js-task-list-field').trigger({
type: 'tasklist:changed',
detail: { lineNumber, lineSource, index, checked },
});
setImmediate(() => {
expect(axios.patch).toHaveBeenCalledWith(
`${TEST_HOST}/frontend-fixtures/merge-requests-project/-/merge_requests/1.json`,
{
merge_request: {
description: '- [ ] Task List Item\n- [ ]\n- [ ] Task List Item 2\n',
lock_version: 0,
update_task: { line_number: lineNumber, line_source: lineSource, index, checked },
},
},
);
await waitForPromises();
done();
});
expect(axios.patch).toHaveBeenCalledWith(
`${TEST_HOST}/frontend-fixtures/merge-requests-project/-/merge_requests/1.json`,
{
merge_request: {
description: '- [ ] Task List Item\n- [ ]\n- [ ] Task List Item 2\n',
lock_version: 0,
update_task: { line_number: lineNumber, line_source: lineSource, index, checked },
},
},
);
});
it('shows an error notification when tasklist update failed', (done) => {
it('shows an error notification when tasklist update failed', async () => {
mock
.onPatch(`${TEST_HOST}/frontend-fixtures/merge-requests-project/-/merge_requests/1.json`)
.reply(409, {});
@ -92,13 +91,11 @@ describe('MergeRequest', () => {
detail: { lineNumber, lineSource, index, checked },
});
setImmediate(() => {
expect(document.querySelector('.flash-container .flash-text').innerText.trim()).toBe(
'Someone edited this merge request at the same time you did. Please refresh the page to see changes.',
);
await waitForPromises();
done();
});
expect(document.querySelector('.flash-container .flash-text').innerText.trim()).toBe(
'Someone edited this merge request at the same time you did. Please refresh the page to see changes.',
);
});
});
});

View File

@ -361,6 +361,19 @@ describe('Blob content viewer component', () => {
});
describe('blob info query', () => {
it.each`
highlightJs | shouldFetchRawText
${true} | ${true}
${false} | ${false}
`(
'calls blob info query with shouldFetchRawText: $shouldFetchRawText when highlightJs (feature flag): $highlightJs',
async ({ highlightJs, shouldFetchRawText }) => {
await createComponent({ highlightJs });
expect(mockResolver).toHaveBeenCalledWith(expect.objectContaining({ shouldFetchRawText }));
},
);
it('is called with originalBranch value if the prop has a value', async () => {
await createComponent({ inject: { originalBranch: 'some-branch' } });

View File

@ -19,6 +19,20 @@ RSpec.describe ContainerRegistry::Migration do
end
end
describe '.limit_gitlab_org?' do
subject { described_class.limit_gitlab_org? }
it { is_expected.to eq(true) }
context 'feature flag disabled' do
before do
stub_feature_flags(container_registry_migration_limit_gitlab_org: false)
end
it { is_expected.to eq(false) }
end
end
describe '.enqueue_waiting_time' do
subject { described_class.enqueue_waiting_time }
@ -139,4 +153,16 @@ RSpec.describe ContainerRegistry::Migration do
expect(described_class.created_before).to eq(value)
end
end
describe '.target_plan' do
let_it_be(:plan) { create(:plan) }
before do
stub_application_setting(container_registry_import_target_plan: plan.name)
end
it 'returns the matching application_setting' do
expect(described_class.target_plan).to eq(plan)
end
end
end

View File

@ -6,6 +6,8 @@ RSpec.describe GoogleApi::CloudPlatform::Client do
let(:token) { 'token' }
let(:client) { described_class.new(token, nil) }
let(:user_agent_options) { client.instance_eval { user_agent_header } }
let(:gcp_project_id) { String('gcp_proj_id') }
let(:operation) { true }
describe '.session_key_for_redirect_uri' do
let(:state) { 'random_string' }
@ -296,4 +298,40 @@ RSpec.describe GoogleApi::CloudPlatform::Client do
client.grant_service_account_roles(mock_gcp_id, mock_email)
end
end
describe '#enable_cloud_run' do
subject { client.enable_cloud_run(gcp_project_id) }
it 'calls Google Api IamService#create_service_account_key' do
expect_any_instance_of(Google::Apis::ServiceusageV1::ServiceUsageService)
.to receive(:enable_service)
.with("projects/#{gcp_project_id}/services/run.googleapis.com")
.and_return(operation)
is_expected.to eq(operation)
end
end
describe '#enable_artifacts_registry' do
subject { client.enable_artifacts_registry(gcp_project_id) }
it 'calls Google Api IamService#create_service_account_key' do
expect_any_instance_of(Google::Apis::ServiceusageV1::ServiceUsageService)
.to receive(:enable_service)
.with("projects/#{gcp_project_id}/services/artifactregistry.googleapis.com")
.and_return(operation)
is_expected.to eq(operation)
end
end
describe '#enable_cloud_build' do
subject { client.enable_cloud_build(gcp_project_id) }
it 'calls Google Api IamService#create_service_account_key' do
expect_any_instance_of(Google::Apis::ServiceusageV1::ServiceUsageService)
.to receive(:enable_service)
.with("projects/#{gcp_project_id}/services/cloudbuild.googleapis.com")
.and_return(operation)
is_expected.to eq(operation)
end
end
end

View File

@ -174,6 +174,14 @@ RSpec.describe ContainerRepository, :aggregate_failures do
end
end
shared_examples 'queueing the next import' do
it 'starts the worker' do
expect(::ContainerRegistry::Migration::EnqueuerWorker).to receive(:perform_async)
subject
end
end
describe '#start_pre_import' do
let_it_be_with_reload(:repository) { create(:container_repository) }
@ -256,8 +264,9 @@ RSpec.describe ContainerRepository, :aggregate_failures do
subject { repository.finish_import }
it_behaves_like 'transitioning from allowed states', %w[importing]
it_behaves_like 'queueing the next import'
it 'sets migration_import_done_at' do
it 'sets migration_import_done_at and queues the next import' do
expect { subject }.to change { repository.reload.migration_import_done_at }
expect(repository).to be_import_done
@ -283,9 +292,10 @@ RSpec.describe ContainerRepository, :aggregate_failures do
subject { repository.abort_import }
it_behaves_like 'transitioning from allowed states', %w[pre_importing pre_import_done importing]
it_behaves_like 'transitioning from allowed states', ContainerRepository::ABORTABLE_MIGRATION_STATES
it_behaves_like 'queueing the next import'
it 'sets migration_aborted_at and migration_aborted_at and increments the retry count' do
it 'sets migration_aborted_at and migration_aborted_at, increments the retry count, and queues the next import' do
expect { subject }.to change { repository.migration_aborted_at }
.and change { repository.reload.migration_retries_count }.by(1)
@ -299,7 +309,7 @@ RSpec.describe ContainerRepository, :aggregate_failures do
subject { repository.skip_import(reason: :too_many_retries) }
it_behaves_like 'transitioning from allowed states', %w[default pre_importing importing]
it_behaves_like 'transitioning from allowed states', ContainerRepository::ABORTABLE_MIGRATION_STATES
it 'sets migration_skipped_at and migration_skipped_reason' do
expect { subject }.to change { repository.reload.migration_skipped_at }
@ -329,6 +339,43 @@ RSpec.describe ContainerRepository, :aggregate_failures do
end
end
describe '#retry_migration' do
subject { repository.retry_migration }
it 'retries the pre_import' do
expect(repository).to receive(:retry_pre_import).and_return(true)
expect(repository).not_to receive(:retry_import)
expect(subject).to eq(true)
end
context 'when migration is done pre-importing' do
before do
repository.update_columns(migration_pre_import_done_at: Time.zone.now)
end
it 'returns' do
expect(repository).to receive(:retry_import).and_return(true)
expect(repository).not_to receive(:retry_pre_import)
expect(subject).to eq(true)
end
end
context 'when migration is already complete' do
before do
repository.update_columns(migration_import_done_at: Time.zone.now)
end
it 'returns' do
expect(repository).not_to receive(:retry_pre_import)
expect(repository).not_to receive(:retry_import)
expect(subject).to eq(nil)
end
end
end
describe '#tag' do
it 'has a test tag' do
expect(repository.tag('test')).not_to be_nil
@ -524,6 +571,14 @@ RSpec.describe ContainerRepository, :aggregate_failures do
expect(subject).to eq(:error)
end
end
context 'too many imports' do
it 'raises an error when it receives too_many_imports as a response' do
expect(repository.gitlab_api_client)
.to receive(step).with(repository.path).and_return(:too_many_imports)
expect { subject }.to raise_error(described_class::TooManyImportsError)
end
end
end
describe '#migration_pre_import' do
@ -900,6 +955,61 @@ RSpec.describe ContainerRepository, :aggregate_failures do
end
end
describe '#try_import' do
let_it_be_with_reload(:container_repository) { create(:container_repository) }
let(:response) { nil }
subject do
container_repository.try_import do
container_repository.foo
end
end
before do
allow(container_repository).to receive(:foo).and_return(response)
end
context 'successful request' do
let(:response) { :ok }
it { is_expected.to eq(true) }
end
context 'TooManyImportsError' do
before do
stub_application_setting(container_registry_import_start_max_retries: 3)
allow(container_repository).to receive(:foo).and_raise(described_class::TooManyImportsError)
end
it 'tries again exponentially and aborts the migration' do
expect(container_repository).to receive(:sleep).with(a_value_within(0.01).of(0.1))
expect(container_repository).to receive(:sleep).with(a_value_within(0.01).of(0.2))
expect(container_repository).to receive(:sleep).with(a_value_within(0.01).of(0.3))
expect(subject).to eq(false)
expect(container_repository).to be_import_aborted
end
end
context 'other response' do
let(:response) { :error }
it 'aborts the migration' do
expect(subject).to eq(false)
expect(container_repository).to be_import_aborted
end
end
context 'with no block given' do
it 'raises an error' do
expect { container_repository.try_import }.to raise_error(ArgumentError)
end
end
end
context 'with repositories' do
let_it_be_with_reload(:repository) { create(:container_repository, :cleanup_unscheduled) }
let_it_be(:other_repository) { create(:container_repository, :cleanup_unscheduled) }
@ -951,6 +1061,48 @@ RSpec.describe ContainerRepository, :aggregate_failures do
it { is_expected.to eq([repository]) }
end
end
describe '.recently_done_migration_step' do
let_it_be(:import_done_repository) { create(:container_repository, :import_done, migration_pre_import_done_at: 3.days.ago, migration_import_done_at: 2.days.ago) }
let_it_be(:import_aborted_repository) { create(:container_repository, :import_aborted, migration_pre_import_done_at: 5.days.ago, migration_aborted_at: 1.day.ago) }
let_it_be(:pre_import_done_repository) { create(:container_repository, :pre_import_done, migration_pre_import_done_at: 1.hour.ago) }
subject { described_class.recently_done_migration_step }
it 'returns completed imports by done_at date' do
expect(subject.to_a).to eq([pre_import_done_repository, import_aborted_repository, import_done_repository])
end
end
describe '.ready_for_import' do
include_context 'importable repositories'
subject { described_class.ready_for_import }
before do
stub_application_setting(container_registry_import_target_plan: project.namespace.actual_plan_name)
end
it 'works' do
expect(subject).to contain_exactly(valid_container_repository, valid_container_repository2)
end
end
describe '#last_import_step_done_at' do
let_it_be(:aborted_at) { Time.zone.now - 1.hour }
let_it_be(:pre_import_done_at) { Time.zone.now - 2.hours }
subject { repository.last_import_step_done_at }
before do
repository.update_columns(
migration_pre_import_done_at: pre_import_done_at,
migration_aborted_at: aborted_at
)
end
it { is_expected.to eq(aborted_at) }
end
end
describe '.with_stale_migration' do

View File

@ -3,7 +3,8 @@
require 'spec_helper'
RSpec.describe Projects::GoogleCloud::DeploymentsController do
let_it_be(:project) { create(:project, :public) }
let_it_be(:project) { create(:project, :public, :repository) }
let_it_be(:repository) { project.repository }
let_it_be(:user_guest) { create(:user) }
let_it_be(:user_developer) { create(:user) }
@ -36,8 +37,6 @@ RSpec.describe Projects::GoogleCloud::DeploymentsController do
it 'returns not found on GET request' do
urls_list.each do |url|
unauthorized_members.each do |unauthorized_member|
sign_in(unauthorized_member)
get url
expect(response).to have_gitlab_http_status(:not_found)
@ -65,18 +64,63 @@ RSpec.describe Projects::GoogleCloud::DeploymentsController do
let_it_be(:url) { "#{project_google_cloud_deployments_cloud_run_path(project)}" }
before do
sign_in(user_maintainer)
allow_next_instance_of(GoogleApi::CloudPlatform::Client) do |client|
allow(client).to receive(:validate_token).and_return(true)
end
end
it 'renders placeholder' do
authorized_members.each do |authorized_member|
sign_in(authorized_member)
it 'redirects to google_cloud home on enable service error' do
# since GPC_PROJECT_ID is not set, enable cloud run service should return an error
get url
expect(response).to redirect_to(project_google_cloud_index_path(project))
end
it 'tracks error and redirects to gcp_error' do
mock_google_error = Google::Apis::ClientError.new('some_error')
allow_next_instance_of(GoogleCloud::EnableCloudRunService) do |service|
allow(service).to receive(:execute).and_raise(mock_google_error)
end
expect(Gitlab::ErrorTracking).to receive(:track_exception).with(mock_google_error, { project_id: project.id })
get url
expect(response).to render_template(:gcp_error)
end
context 'GCP_PROJECT_IDs are defined' do
it 'redirects to google_cloud home on generate pipeline error' do
allow_next_instance_of(GoogleCloud::EnableCloudRunService) do |enable_cloud_run_service|
allow(enable_cloud_run_service).to receive(:execute).and_return({ status: :success })
end
allow_next_instance_of(GoogleCloud::GeneratePipelineService) do |generate_pipeline_service|
allow(generate_pipeline_service).to receive(:execute).and_return({ status: :error })
end
get url
expect(response).to have_gitlab_http_status(:ok)
expect(response).to redirect_to(project_google_cloud_index_path(project))
end
it 'redirects to create merge request form' do
allow_next_instance_of(GoogleCloud::EnableCloudRunService) do |service|
allow(service).to receive(:execute).and_return({ status: :success })
end
allow_next_instance_of(GoogleCloud::GeneratePipelineService) do |service|
allow(service).to receive(:execute).and_return({ status: :success })
end
get url
expect(response).to have_gitlab_http_status(:found)
expect(response.location).to include(project_new_merge_request_path(project))
end
end
end

View File

@ -6,7 +6,8 @@ RSpec.describe GroupChildEntity do
include ExternalAuthorizationServiceHelpers
include Gitlab::Routing.url_helpers
let(:user) { create(:user) }
let_it_be(:user) { create(:user) }
let(:request) { double('request') }
let(:entity) { described_class.new(object, request: request) }
@ -103,6 +104,22 @@ RSpec.describe GroupChildEntity do
expect(json[:can_leave]).to be_truthy
end
it 'allows an owner to delete the group' do
expect(json[:can_remove]).to be_truthy
end
it 'allows admin to delete the group', :enable_admin_mode do
allow(request).to receive(:current_user).and_return(create(:admin))
expect(json[:can_remove]).to be_truthy
end
it 'disallows a maintainer to delete the group' do
object.add_maintainer(user)
expect(json[:can_remove]).to be_falsy
end
it 'has the correct edit path' do
expect(json[:edit_path]).to eq(edit_group_path(object))
end

View File

@ -0,0 +1,41 @@
# frozen_string_literal: true
require 'spec_helper'
RSpec.describe GoogleCloud::EnableCloudRunService do
describe 'when a project does not have any gcp projects' do
let_it_be(:project) { create(:project) }
it 'returns error' do
result = described_class.new(project).execute
expect(result[:status]).to eq(:error)
expect(result[:message]).to eq('No GCP projects found. Configure a service account or GCP_PROJECT_ID ci variable.')
end
end
describe 'when a project has 3 gcp projects' do
let_it_be(:project) { create(:project) }
before do
project.variables.build(environment_scope: 'production', key: 'GCP_PROJECT_ID', value: 'prj-prod')
project.variables.build(environment_scope: 'staging', key: 'GCP_PROJECT_ID', value: 'prj-staging')
project.save!
end
it 'enables cloud run, artifacts registry and cloud build', :aggregate_failures do
expect_next_instance_of(GoogleApi::CloudPlatform::Client) do |instance|
expect(instance).to receive(:enable_cloud_run).with('prj-prod')
expect(instance).to receive(:enable_artifacts_registry).with('prj-prod')
expect(instance).to receive(:enable_cloud_build).with('prj-prod')
expect(instance).to receive(:enable_cloud_run).with('prj-staging')
expect(instance).to receive(:enable_artifacts_registry).with('prj-staging')
expect(instance).to receive(:enable_cloud_build).with('prj-staging')
end
result = described_class.new(project).execute
expect(result[:status]).to eq(:success)
end
end
end

View File

@ -0,0 +1,27 @@
# frozen_string_literal: true
RSpec.shared_context 'importable repositories' do
let_it_be(:project) { create(:project) }
let_it_be(:valid_container_repository) { create(:container_repository, project: project, created_at: 2.days.ago) }
let_it_be(:valid_container_repository2) { create(:container_repository, project: project, created_at: 1.year.ago) }
let_it_be(:importing_container_repository) { create(:container_repository, :importing, project: project, created_at: 2.days.ago) }
let_it_be(:new_container_repository) { create(:container_repository, project: project) }
let_it_be(:denied_group) { create(:group) }
let_it_be(:denied_project) { create(:project, group: denied_group) }
let_it_be(:denied_container_repository) { create(:container_repository, project: denied_project, created_at: 2.days.ago) }
before do
stub_application_setting(container_registry_import_created_before: 1.day.ago)
stub_feature_flags(
container_registry_phase_2_deny_list: false,
container_registry_migration_limit_gitlab_org: false
)
Feature::FlipperGate.create!(
feature_key: 'container_registry_phase_2_deny_list',
key: 'actors',
value: "Group:#{denied_group.id}"
)
end
end

View File

@ -18,9 +18,7 @@ RSpec.describe Tooling::ParallelRSpecRunner do # rubocop:disable RSpec/FilePath
allow(File).to receive(:exist?).with(filter_tests_file).and_return(true)
allow(File).to receive(:read).and_call_original
allow(File).to receive(:read).with(filter_tests_file).and_return(filter_tests)
allow(Process).to receive(:spawn)
allow(Process).to receive(:wait)
allow(Process).to receive(:last_status).and_return(double(exitstatus: 0))
allow(subject).to receive(:exec)
end
subject { described_class.new(allocator: allocator, filter_tests_file: filter_tests_file, rspec_args: rspec_args) }
@ -88,7 +86,7 @@ RSpec.describe Tooling::ParallelRSpecRunner do # rubocop:disable RSpec/FilePath
end
def expect_command(cmd)
expect(Process).to receive(:spawn).with(*cmd)
expect(subject).to receive(:exec).with(*cmd)
end
end
end

View File

@ -0,0 +1,177 @@
# frozen_string_literal: true
require 'spec_helper'
RSpec.describe ContainerRegistry::Migration::EnqueuerWorker, :aggregate_failures do
let_it_be_with_reload(:container_repository) { create(:container_repository, created_at: 2.days.ago) }
let(:worker) { described_class.new }
before do
stub_container_registry_config(enabled: true)
stub_application_setting(container_registry_import_created_before: 1.day.ago)
stub_container_registry_tags(repository: container_repository.path, tags: %w(tag1 tag2 tag3), with_manifest: true)
end
describe '#perform' do
subject { worker.perform }
shared_examples 'no action' do
it 'does not queue or change any repositories' do
subject
expect(container_repository.reload).to be_default
end
end
shared_examples 're-enqueuing based on capacity' do
context 'below capacity' do
before do
allow(ContainerRegistry::Migration).to receive(:capacity).and_return(9999)
end
it 're-enqueues the worker' do
expect(ContainerRegistry::Migration::EnqueuerWorker).to receive(:perform_async)
subject
end
end
context 'above capacity' do
before do
allow(ContainerRegistry::Migration).to receive(:capacity).and_return(-1)
end
it 'does not re-enqueue the worker' do
expect(ContainerRegistry::Migration::EnqueuerWorker).not_to receive(:perform_async)
subject
end
end
end
context 'with qualified repository' do
it 'starts the pre-import for the next qualified repository' do
method = worker.method(:next_repository)
allow(worker).to receive(:next_repository) do
next_qualified_repository = method.call
allow(next_qualified_repository).to receive(:migration_pre_import).and_return(:ok)
next_qualified_repository
end
expect(worker).to receive(:log_extra_metadata_on_done)
.with(:container_repository_id, container_repository.id)
expect(worker).to receive(:log_extra_metadata_on_done)
.with(:import_type, 'next')
subject
expect(container_repository.reload).to be_pre_importing
end
it_behaves_like 're-enqueuing based on capacity'
end
context 'migrations are disabled' do
before do
allow(ContainerRegistry::Migration).to receive(:enabled?).and_return(false)
end
it_behaves_like 'no action'
end
context 'above capacity' do
before do
create(:container_repository, :importing)
create(:container_repository, :importing)
allow(ContainerRegistry::Migration).to receive(:capacity).and_return(1)
end
it_behaves_like 'no action'
it 'does not re-enqueue the worker' do
expect(ContainerRegistry::Migration::EnqueuerWorker).not_to receive(:perform_async)
subject
end
end
context 'too soon before previous completed import step' do
before do
create(:container_repository, :import_done, migration_import_done_at: 1.minute.ago)
allow(ContainerRegistry::Migration).to receive(:enqueue_waiting_time).and_return(1.hour)
end
it_behaves_like 'no action'
end
context 'when an aborted import is available' do
let_it_be(:aborted_repository) { create(:container_repository, :import_aborted) }
it 'retries the import for the aborted repository' do
method = worker.method(:next_aborted_repository)
allow(worker).to receive(:next_aborted_repository) do
next_aborted_repository = method.call
allow(next_aborted_repository).to receive(:migration_import).and_return(:ok)
next_aborted_repository
end
expect(worker).to receive(:log_extra_metadata_on_done)
.with(:container_repository_id, aborted_repository.id)
expect(worker).to receive(:log_extra_metadata_on_done)
.with(:import_type, 'retry')
subject
expect(aborted_repository.reload).to be_importing
expect(container_repository.reload).to be_default
end
it_behaves_like 're-enqueuing based on capacity'
end
context 'when no repository qualifies' do
include_examples 'an idempotent worker' do
before do
allow(ContainerRepository).to receive(:ready_for_import).and_return(ContainerRepository.none)
end
it_behaves_like 'no action'
end
end
context 'over max tag count' do
before do
stub_application_setting(container_registry_import_max_tags_count: 2)
end
it 'skips the repository' do
subject
expect(container_repository.reload).to be_import_skipped
expect(container_repository.migration_skipped_reason).to eq('too_many_tags')
expect(container_repository.migration_skipped_at).not_to be_nil
end
it_behaves_like 're-enqueuing based on capacity'
end
context 'when an error occurs' do
before do
allow(ContainerRegistry::Migration).to receive(:max_tags_count).and_raise(StandardError)
end
it 'aborts the import' do
expect(Gitlab::ErrorTracking).to receive(:log_exception).with(
instance_of(StandardError),
next_repository_id: container_repository.id,
next_aborted_repository_id: nil
)
subject
expect(container_repository.reload).to be_import_aborted
end
end
end
end

View File

@ -16,4 +16,4 @@ OptionParser.new do |opts|
end
end.parse!
exit Tooling::ParallelRSpecRunner.run(**options)
Tooling::ParallelRSpecRunner.run(**options)

View File

@ -38,14 +38,12 @@ module Tooling
Knapsack.logger.info tests_to_run
Knapsack.logger.info
# Without this guard clause, we're run all the specs instead of none!
if tests_to_run.empty?
Knapsack.logger.info 'No tests to run on this node, exiting.'
return 0
return
end
Process.wait Process.spawn(*rspec_command)
Process.last_status.exitstatus
exec(*rspec_command)
end
private