Add latest changes from gitlab-org/gitlab@master

GitLab Bot 2020-12-10 21:10:15 +00:00
parent cba8ff6440
commit e109a7799e
119 changed files with 1115 additions and 648 deletions

View File

@ -55,7 +55,6 @@ Rails/SaveBang:
- 'ee/spec/lib/gitlab/auth/ldap/access_spec.rb'
- 'ee/spec/lib/gitlab/auth/o_auth/user_spec.rb'
- 'ee/spec/lib/gitlab/auth/saml/user_spec.rb'
- 'ee/spec/lib/gitlab/background_migration/fix_orphan_promoted_issues_spec.rb'
- 'ee/spec/lib/gitlab/elastic/search_results_spec.rb'
- 'ee/spec/lib/gitlab/email/handler/ee/service_desk_handler_spec.rb'
- 'ee/spec/lib/gitlab/geo_spec.rb'
@ -63,11 +62,6 @@ Rails/SaveBang:
- 'ee/spec/lib/gitlab/import_export/group/relation_factory_spec.rb'
- 'ee/spec/lib/gitlab/mirror_spec.rb'
- 'ee/spec/mailers/notify_spec.rb'
- 'ee/spec/migrations/fix_any_approver_rule_for_projects_spec.rb'
- 'ee/spec/migrations/geo/migrate_ci_job_artifacts_to_separate_registry_spec.rb'
- 'ee/spec/migrations/geo/migrate_lfs_objects_to_separate_registry_spec.rb'
- 'ee/spec/migrations/schedule_merge_request_any_approval_rule_migration_spec.rb'
- 'ee/spec/migrations/schedule_project_any_approval_rule_migration_spec.rb'
- 'ee/spec/models/application_setting_spec.rb'
- 'ee/spec/models/approval_merge_request_rule_spec.rb'
- 'ee/spec/models/approval_project_rule_spec.rb'
@ -287,32 +281,6 @@ Rails/SaveBang:
- 'spec/lib/gitlab/auth/saml/user_spec.rb'
- 'spec/lib/gitlab/auth_spec.rb'
- 'spec/lib/gitlab/authorized_keys_spec.rb'
- 'spec/lib/gitlab/background_migration/backfill_deployment_clusters_from_deployments_spec.rb'
- 'spec/lib/gitlab/background_migration/backfill_project_repositories_spec.rb'
- 'spec/lib/gitlab/background_migration/backfill_project_settings_spec.rb'
- 'spec/lib/gitlab/background_migration/backfill_push_rules_id_in_projects_spec.rb'
- 'spec/lib/gitlab/background_migration/backfill_snippet_repositories_spec.rb'
- 'spec/lib/gitlab/background_migration/digest_column_spec.rb'
- 'spec/lib/gitlab/background_migration/encrypt_columns_spec.rb'
- 'spec/lib/gitlab/background_migration/fix_cross_project_label_links_spec.rb'
- 'spec/lib/gitlab/background_migration/fix_projects_without_project_feature_spec.rb'
- 'spec/lib/gitlab/background_migration/fix_projects_without_prometheus_service_spec.rb'
- 'spec/lib/gitlab/background_migration/fix_user_namespace_names_spec.rb'
- 'spec/lib/gitlab/background_migration/fix_user_project_route_names_spec.rb'
- 'spec/lib/gitlab/background_migration/legacy_upload_mover_spec.rb'
- 'spec/lib/gitlab/background_migration/legacy_uploads_migrator_spec.rb'
- 'spec/lib/gitlab/background_migration/link_lfs_objects_projects_spec.rb'
- 'spec/lib/gitlab/background_migration/migrate_issue_trackers_sensitive_data_spec.rb'
- 'spec/lib/gitlab/background_migration/migrate_stage_index_spec.rb'
- 'spec/lib/gitlab/background_migration/migrate_users_bio_to_user_details_spec.rb'
- 'spec/lib/gitlab/background_migration/populate_canonical_emails_spec.rb'
- 'spec/lib/gitlab/background_migration/populate_merge_request_assignees_table_spec.rb'
- 'spec/lib/gitlab/background_migration/populate_user_highest_roles_table_spec.rb'
- 'spec/lib/gitlab/background_migration/recalculate_project_authorizations_spec.rb'
- 'spec/lib/gitlab/background_migration/remove_restricted_todos_spec.rb'
- 'spec/lib/gitlab/background_migration/reset_merge_status_spec.rb'
- 'spec/lib/gitlab/background_migration/set_confidential_note_events_on_services_spec.rb'
- 'spec/lib/gitlab/background_migration/set_confidential_note_events_on_webhooks_spec.rb'
- 'spec/lib/gitlab/bitbucket_server_import/importer_spec.rb'
- 'spec/lib/gitlab/ci/ansi2json/style_spec.rb'
- 'spec/lib/gitlab/ci/status/build/common_spec.rb'
@ -363,38 +331,6 @@ Rails/SaveBang:
- 'spec/lib/mattermost/session_spec.rb'
- 'spec/lib/mattermost/team_spec.rb'
- 'spec/mailers/notify_spec.rb'
- 'spec/migrations/20190924152703_migrate_issue_trackers_data_spec.rb'
- 'spec/migrations/20200122123016_backfill_project_settings_spec.rb'
- 'spec/migrations/20200123155929_remove_invalid_jira_data_spec.rb'
- 'spec/migrations/20200127090233_remove_invalid_issue_tracker_data_spec.rb'
- 'spec/migrations/20200130145430_reschedule_migrate_issue_trackers_data_spec.rb'
- 'spec/migrations/20200313203550_remove_orphaned_chat_names_spec.rb'
- 'spec/migrations/20200406102120_backfill_deployment_clusters_from_deployments_spec.rb'
- 'spec/migrations/20200526115436_dedup_mr_metrics_spec.rb'
- 'spec/migrations/add_deploy_token_type_to_deploy_tokens_spec.rb'
- 'spec/migrations/add_incident_settings_to_all_existing_projects_spec.rb'
- 'spec/migrations/add_unique_constraint_to_approvals_user_id_and_merge_request_id_spec.rb'
- 'spec/migrations/backfill_and_add_not_null_constraint_to_released_at_column_on_releases_table_spec.rb'
- 'spec/migrations/backfill_imported_snippet_repositories_spec.rb'
- 'spec/migrations/backfill_releases_table_updated_at_and_add_not_null_constraints_to_timestamps_spec.rb'
- 'spec/migrations/backfill_snippet_repositories_spec.rb'
- 'spec/migrations/encrypt_plaintext_attributes_on_application_settings_spec.rb'
- 'spec/migrations/enqueue_reset_merge_status_second_run_spec.rb'
- 'spec/migrations/enqueue_reset_merge_status_spec.rb'
- 'spec/migrations/fill_file_store_lfs_objects_spec.rb'
- 'spec/migrations/fill_store_uploads_spec.rb'
- 'spec/migrations/fix_null_type_labels_spec.rb'
- 'spec/migrations/fix_pool_repository_source_project_id_spec.rb'
- 'spec/migrations/fix_projects_without_project_feature_spec.rb'
- 'spec/migrations/fix_projects_without_prometheus_services_spec.rb'
- 'spec/migrations/fix_wrong_pages_access_level_spec.rb'
- 'spec/migrations/insert_project_hooks_plan_limits_spec.rb'
- 'spec/migrations/migrate_auto_dev_ops_domain_to_cluster_domain_spec.rb'
- 'spec/migrations/move_limits_from_plans_spec.rb'
- 'spec/migrations/populate_project_statistics_packages_size_spec.rb'
- 'spec/migrations/schedule_link_lfs_objects_projects_spec.rb'
- 'spec/migrations/schedule_populate_merge_request_assignees_table_spec.rb'
- 'spec/migrations/seed_repository_storages_weighted_spec.rb'
- 'spec/models/appearance_spec.rb'
- 'spec/models/application_record_spec.rb'
- 'spec/models/application_setting_spec.rb'

View File

@ -31,7 +31,7 @@ const SUBMIT_ACTION_TEXT = {
const SUBMIT_BUTTON_CLASS = {
create: 'btn-success',
update: 'btn-success',
delete: 'btn-remove',
delete: 'btn-danger',
};
export default {

View File

@ -394,10 +394,10 @@ export default {
data-qa-selector="prometheus_graph_widgets"
>
<div data-testid="dropdown-wrapper" class="d-flex align-items-center">
<!--
<!--
This component should be replaced with a variant developed
as part of https://gitlab.com/gitlab-org/gitlab-ui/-/issues/936
The variant will create a dropdown with an icon, no text and no caret
The variant will create a dropdown with an icon, no text and no caret
-->
<gl-dropdown
v-gl-tooltip

View File

@ -138,8 +138,8 @@ export default {
? __('merge request')
: __('issue');
},
isMergeRequest() {
return this.noteableType === constants.MERGE_REQUEST_NOTEABLE_TYPE;
isIssue() {
return this.noteableDisplayName === constants.ISSUE_NOTEABLE_TYPE;
},
trackingLabel() {
return slugifyWithUnderscore(`${this.commentButtonTitle} button`);
@ -167,8 +167,8 @@ export default {
'stopPolling',
'restartPolling',
'removePlaceholderNotes',
'closeMergeRequest',
'reopenMergeRequest',
'closeIssuable',
'reopenIssuable',
'toggleIssueLocalState',
]),
setIsSubmitButtonDisabled(note, isSubmitting) {
@ -229,22 +229,18 @@ export default {
}
},
toggleIssueState() {
if (!this.isMergeRequest) {
if (this.isIssue) {
// We want to invoke the close/reopen logic in the issue header
// since that is where the blocked-by issues modal logic is also defined
eventHub.$emit('toggle.issuable.state');
return;
}
const toggleMergeRequestState = this.isOpen
? this.closeMergeRequest
: this.reopenMergeRequest;
const toggleState = this.isOpen ? this.closeIssuable : this.reopenIssuable;
const errorMessage = this.isOpen
? __('Something went wrong while closing the merge request. Please try again later')
: __('Something went wrong while reopening the merge request. Please try again later');
toggleMergeRequestState()
toggleState()
.then(refreshUserMergeRequestCounts)
.catch(() => Flash(errorMessage));
.catch(() => Flash(constants.toggleStateErrorMessage[this.noteableType][this.openState]));
},
discard(shouldClear = true) {
// `blur` is needed to clear slash commands autocomplete cache if event fired.

View File

@ -422,7 +422,7 @@ export default {
</button>
<button
v-if="discussion.resolvable"
class="btn btn-nr btn-default gl-mr-3 js-comment-resolve-button"
class="btn btn-default gl-mr-3 js-comment-resolve-button"
@click.prevent="handleUpdate(true)"
>
{{ resolveButtonTitle }}

View File

@ -1,3 +1,5 @@
import { __ } from '~/locale';
export const DISCUSSION_NOTE = 'DiscussionNote';
export const DIFF_NOTE = 'DiffNote';
export const DISCUSSION = 'discussion';
@ -36,3 +38,16 @@ export const DISCUSSION_FILTER_TYPES = {
COMMENTS: 'comments',
HISTORY: 'history',
};
export const toggleStateErrorMessage = {
Epic: {
[CLOSED]: __('Something went wrong while reopening the epic. Please try again later.'),
[OPENED]: __('Something went wrong while closing the epic. Please try again later.'),
[REOPENED]: __('Something went wrong while closing the epic. Please try again later.'),
},
MergeRequest: {
[CLOSED]: __('Something went wrong while reopening the merge request. Please try again later.'),
[OPENED]: __('Something went wrong while closing the merge request. Please try again later.'),
[REOPENED]: __('Something went wrong while closing the merge request. Please try again later.'),
},
};

View File

@ -244,7 +244,7 @@ export const toggleResolveNote = ({ commit, dispatch }, { endpoint, isResolved,
});
};
export const closeMergeRequest = ({ commit, dispatch, state }) => {
export const closeIssuable = ({ commit, dispatch, state }) => {
dispatch('toggleStateButtonLoading', true);
return axios.put(state.notesData.closePath).then(({ data }) => {
commit(types.CLOSE_ISSUE);
@ -253,7 +253,7 @@ export const closeMergeRequest = ({ commit, dispatch, state }) => {
});
};
export const reopenMergeRequest = ({ commit, dispatch, state }) => {
export const reopenIssuable = ({ commit, dispatch, state }) => {
dispatch('toggleStateButtonLoading', true);
return axios.put(state.notesData.reopenPath).then(({ data }) => {
commit(types.REOPEN_ISSUE);

View File

@ -166,8 +166,7 @@
line-height: $gl-btn-xs-line-height;
}
&.btn-success,
&.btn-register {
&.btn-success {
@include btn-green;
}
@ -176,7 +175,6 @@
@include btn-outline($white, $green-600, $green-500, $green-100, $green-700, $green-500, $green-200, $green-600, $green-800);
}
&.btn-remove,
&.btn-danger {
@include btn-outline($white, $red-500, $red-500, $red-100, $red-700, $red-500, $red-200, $red-600, $red-800);
}
@ -204,13 +202,7 @@
@include btn-outline($white, $orange-500, $orange-500, $orange-50, $orange-600, $orange-600, $orange-100, $orange-700, $orange-700);
}
&.btn-spam {
@include btn-outline($white, $red-500, $red-500, $red-100, $red-700, $red-500, $red-200, $red-600, $red-800);
}
&.btn-danger,
&.btn-remove,
&.btn-red {
&.btn-danger {
@include btn-red;
}
@ -218,11 +210,6 @@
float: right;
}
&.btn-reopen,
.btn-reopen-color {
/* should be same as parent class for now */
}
&.btn-grouped {
@include btn-with-margin;
}

View File

@ -36,14 +36,6 @@
}
}
.btn-remove {
width: 100%;
@include media-breakpoint-up(sm) {
width: auto;
}
}
&.existing-title {
@include media-breakpoint-up(sm) {
float: left;

View File

@ -977,12 +977,12 @@ body.navless {
border-color: #e3e3e3;
color: #303030;
}
.btn.btn-success, .btn.btn-register {
.btn.btn-success {
background-color: #108548;
border-color: #217645;
color: #fff;
}
.btn.btn-success:active, .btn.btn-success.active, .btn.btn-register:active, .btn.btn-register.active {
.btn.btn-success:active, .btn.btn-success.active {
box-shadow: rgba(0, 0, 0, 0.16);
background-color: #24663b;
border-color: #0d532a;

View File

@ -32,14 +32,6 @@ module IssuesHelper
end
end
def issue_button_visibility(issue, closed)
return 'hidden' if issue_button_hidden?(issue, closed)
end
def issue_button_hidden?(issue, closed)
issue.closed? == closed || (!closed && issue.discussion_locked)
end
def confidential_icon(issue)
sprite_icon('eye-slash', css_class: 'gl-vertical-align-text-bottom') if issue.confidential?
end

View File

@ -9,6 +9,16 @@ module ApplicationSettings
MARKDOWN_CACHE_INVALIDATING_PARAMS = %w(asset_proxy_enabled asset_proxy_url asset_proxy_secret_key asset_proxy_whitelist).freeze
def execute
result = update_settings
auto_approve_blocked_users if result
result
end
private
def update_settings
validate_classification_label(application_setting, :external_authorization_service_default_label) unless bypass_external_auth?
if application_setting.errors.any?
@ -40,8 +50,6 @@ module ApplicationSettings
@application_setting.save
end
private
def usage_stats_updated?
params.key?(:usage_ping_enabled) || params.key?(:version_check_enabled)
end
@ -95,6 +103,20 @@ module ApplicationSettings
def bypass_external_auth?
params.key?(:external_authorization_service_enabled) && !Gitlab::Utils.to_boolean(params[:external_authorization_service_enabled])
end
def auto_approve_blocked_users
return unless should_auto_approve_blocked_users?
ApproveBlockedPendingApprovalUsersWorker.perform_async(current_user.id)
end
def should_auto_approve_blocked_users?
return false unless application_setting.previous_changes.key?(:require_admin_approval_after_user_signup)
enabled_previous, enabled_current = application_setting.previous_changes[:require_admin_approval_after_user_signup]
enabled_previous && !enabled_current
end
end
end
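For context on the `should_auto_approve_blocked_users?` check above: Rails' `previous_changes` returns `attribute => [old_value, new_value]` pairs for the most recent save, which is what the destructuring relies on. A minimal illustration (the console session is hypothetical):

```ruby
setting = ApplicationSetting.current
setting.update!(require_admin_approval_after_user_signup: false)

# previous_changes reflects the save that just happened:
setting.previous_changes[:require_admin_approval_after_user_signup]
# => [true, false]
# The worker is enqueued only for this true -> false transition.
```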

View File

@ -1,7 +1,7 @@
- page_title _("Labels")
%div
= link_to new_admin_label_path, class: "float-right btn gl-button btn-nr btn-success" do
= link_to new_admin_label_path, class: "float-right btn gl-button btn-success" do
= _('New label')
%h3.page-title
= _('Labels')

View File

@ -3,11 +3,6 @@
- @gfm_form = true
- content_for :note_actions do
- if can?(current_user, :update_issue, @issue)
= link_to 'Reopen issue', issue_path(@issue, issue: {state_event: :reopen}, format: 'json'), data: {original_text: "Reopen issue", alternative_text: "Comment & reopen issue"}, class: "gl-button btn btn-nr btn-reopen btn-comment js-note-target-reopen #{issue_button_visibility(@issue, false)}", title: 'Reopen issue'
= link_to 'Close issue', issue_path(@issue, issue: {state_event: :close}, format: 'json'), data: {original_text: "Close issue", alternative_text: "Comment & close issue"}, class: "gl-button btn btn-nr btn-close btn-comment js-note-target-close #{issue_button_visibility(@issue, true)}", title: 'Close issue'
%section.issuable-discussion.js-vue-notes-event
#js-vue-notes{ data: { notes_data: notes_data(@issue).to_json,
noteable_data: serialize_issuable(@issue, with_blocking_issues: true),

View File

@ -30,4 +30,4 @@
- if partial_exists? "registrations/welcome/button"
= render "registrations/welcome/button"
- else
= f.submit _('Get started!'), class: 'btn-register gl-button btn btn-block gl-mb-0 gl-p-3', data: { qa_selector: 'get_started_button' }
= f.submit _('Get started!'), class: 'btn-success gl-button btn btn-block gl-mb-0 gl-p-3', data: { qa_selector: 'get_started_button' }

View File

@ -28,7 +28,7 @@
- if milestone.active?
= link_to _('Close milestone'), update_milestone_path(milestone, { state_event: :close }), method: :put, class: 'btn gl-button btn-grouped btn-close'
- else
= link_to _('Reopen milestone'), update_milestone_path(milestone, { state_event: :activate }), method: :put, class: 'btn gl-button btn-grouped btn-reopen'
= link_to _('Reopen milestone'), update_milestone_path(milestone, { state_event: :activate }), method: :put, class: 'btn gl-button btn-grouped'
= render 'shared/milestones/delete_button'

View File

@ -59,6 +59,6 @@
- if can?(current_user, :admin_milestone, milestone)
- if milestone.closed?
= link_to s_('Milestones|Reopen Milestone'), milestone_path(milestone, milestone: { state_event: :activate }), method: :put, class: "btn gl-button btn-sm btn-grouped btn-reopen"
= link_to s_('Milestones|Reopen Milestone'), milestone_path(milestone, milestone: { state_event: :activate }), method: :put, class: "btn gl-button btn-sm btn-grouped"
- else
= link_to s_('Milestones|Close Milestone'), milestone_path(milestone, milestone: { state_event: :close }), method: :put, class: "btn gl-button btn-warning-secondary btn-sm btn-grouped btn-close"

View File

@ -1,10 +1,10 @@
- noteable_name = @note.noteable.human_class_name
.float-left.btn-group.gl-mr-3.droplab-dropdown.comment-type-dropdown.js-comment-type-dropdown
%input.btn.btn-nr.btn-success.js-comment-button.js-comment-submit-button{ type: 'submit', value: _('Comment'), data: { qa_selector: 'comment_button' } }
%input.btn.btn-success.js-comment-button.js-comment-submit-button{ type: 'submit', value: _('Comment'), data: { qa_selector: 'comment_button' } }
- if @note.can_be_discussion_note?
= button_tag type: 'button', class: 'btn btn-nr dropdown-toggle btn-success js-note-new-discussion js-disable-on-submit', data: { 'dropdown-trigger' => '#resolvable-comment-menu' }, 'aria-label' => _('Open comment type dropdown') do
= button_tag type: 'button', class: 'btn dropdown-toggle btn-success js-note-new-discussion js-disable-on-submit', data: { 'dropdown-trigger' => '#resolvable-comment-menu' }, 'aria-label' => _('Open comment type dropdown') do
= sprite_icon('chevron-down')
%ul#resolvable-comment-menu.dropdown-menu.dropdown-open-top{ data: { dropdown: true } }

View File

@ -9,6 +9,6 @@
.note-form-actions.clearfix
.settings-message.note-edit-warning.js-finish-edit-warning
= _("Finish editing this message first!")
= submit_tag _('Save comment'), class: 'btn btn-nr btn-success js-comment-save-button', data: { qa_selector: 'save_comment_button' }
%button.btn.btn-nr.btn-cancel.note-edit-cancel{ type: 'button' }
= submit_tag _('Save comment'), class: 'btn btn-success js-comment-save-button', data: { qa_selector: 'save_comment_button' }
%button.btn.btn-cancel.note-edit-cancel{ type: 'button' }
= _("Cancel")

View File

@ -38,7 +38,5 @@
.note-form-actions.clearfix
= render partial: 'shared/notes/comment_button'
= yield(:note_actions)
%a.btn.btn-cancel.js-close-discussion-note-form.hide{ role: "button", data: { cancel_text: _("Cancel") } }
= _('Cancel')

View File

@ -1384,6 +1384,14 @@
:weight: 1
:idempotent: true
:tags: []
- :name: approve_blocked_pending_approval_users
:feature_category: :users
:has_external_dependencies:
:urgency: :low
:resource_boundary: :unknown
:weight: 1
:idempotent: true
:tags: []
- :name: authorized_keys
:feature_category: :source_code_management
:has_external_dependencies:

View File

@ -0,0 +1,17 @@
# frozen_string_literal: true
class ApproveBlockedPendingApprovalUsersWorker
include ApplicationWorker
idempotent!
feature_category :users
def perform(current_user_id)
current_user = User.find(current_user_id)
User.blocked_pending_approval.find_each do |user|
Users::ApproveService.new(current_user).execute(user)
end
end
end

View File

@ -0,0 +1,6 @@
---
title: Improve CI for external repo with configurable maximum mirroring frequency on self-hosted
merge_request: 48955
author:
type: changed

View File

@ -0,0 +1,5 @@
---
title: Auto approve users if Admin approval after sign up setting is disabled
merge_request: 49068
author:
type: changed

View File

@ -0,0 +1,5 @@
---
title: Remove unnecessary queries in milestone page
merge_request: 49662
author:
type: performance

View File

@ -34,7 +34,7 @@
- 1
- - analytics_instance_statistics_counter_job
- 1
- - approve_blocked_users
- - approve_blocked_pending_approval_users
- 1
- - authorized_keys
- 2

View File

@ -0,0 +1,9 @@
# frozen_string_literal: true
class AddPullMirrorIntervalToPlanLimits < ActiveRecord::Migration[6.0]
DOWNTIME = false
def change
add_column :plan_limits, :pull_mirror_interval_seconds, :integer, default: 300, null: false
end
end

View File

@ -0,0 +1 @@
a3dd8cfe4a5d83ca370cac90acf127facf40c0fd63ae8d1d3f99418295bae148

View File

@ -14987,7 +14987,8 @@ CREATE TABLE plan_limits (
debian_max_file_size bigint DEFAULT '3221225472'::bigint NOT NULL,
project_feature_flags integer DEFAULT 200 NOT NULL,
ci_max_artifact_size_api_fuzzing integer DEFAULT 0 NOT NULL,
ci_pipeline_deployments integer DEFAULT 500 NOT NULL
ci_pipeline_deployments integer DEFAULT 500 NOT NULL,
pull_mirror_interval_seconds integer DEFAULT 300 NOT NULL
);
CREATE SEQUENCE plan_limits_id_seq

View File

@ -22,6 +22,10 @@ swap:
behaviour: behavior
busses: buses
calibre: caliber
categorise: categorize
categorised: categorized
categorises: categorizes
categorising: categorizing
centre: center
cheque: check
civilisation: civilization

View File

@ -210,6 +210,11 @@ values obtained during the LDAP client configuration earlier:
1. Save the file and [restart](../../restart_gitlab.md#installations-from-source) GitLab for the changes to take effect.
## Using encrypted credentials
You can optionally store the `bind_dn` and `password` in a separate encrypted configuration file using the
[same steps as the regular LDAP integration](index.md#using-encrypted-credentials).
<!-- ## Troubleshooting
Include any troubleshooting steps that you can foresee. If you know beforehand what issues

View File

@ -360,6 +360,93 @@ This does not disable [using LDAP credentials for Git access](#git-password-auth
1. [Restart GitLab](../../restart_gitlab.md#installations-from-source) for the changes to take effect.
### Using encrypted credentials **(CORE ONLY)**
Instead of having the LDAP integration credentials stored in plaintext in the configuration files, you can optionally
use an encrypted file for the LDAP credentials. To use this feature, you first need to enable
[GitLab encrypted configuration](../../encrypted_configuration.md).
The encrypted configuration for LDAP exists in an encrypted YAML file. By default, the file is created at
`shared/encrypted_configuration/ldap.yaml.enc`. This location is configurable in the GitLab configuration.
The unencrypted contents of the file should be a subset of the secret settings from your `servers` block in the LDAP
configuration.
The supported configuration items for the encrypted file are:
- `bind_dn`
- `password`
The encrypted contents can be configured with the [LDAP secret edit Rake command](../../raketasks/ldap.md#edit-secret).
**Omnibus configuration**
If initially your LDAP configuration looked like:
1. In `/etc/gitlab/gitlab.rb`:
```ruby
gitlab_rails['ldap_servers'] = {
'main' => {
# snip...
'bind_dn' => 'admin',
'password' => '123'
}
}
```
1. Edit the encrypted secret:
```shell
sudo gitlab-rake gitlab:ldap:secret:edit EDITOR=vim
```
1. The unencrypted contents of the LDAP secret should be entered like:
```yaml
main:
bind_dn: admin
password: '123'
```
1. Edit `/etc/gitlab/gitlab.rb` and remove the settings for `bind_dn` and `password`.
1. [Reconfigure GitLab](../../restart_gitlab.md#omnibus-gitlab-reconfigure) for the changes to take effect.
**Source configuration**
If initially your LDAP configuration looked like:
1. In `config/gitlab.yaml`:
```yaml
production:
ldap:
servers:
main:
# snip...
bind_dn: admin
password: '123'
```
1. Edit the encrypted secret:
```shell
bundle exec rake gitlab:ldap:secret:edit EDITOR=vim RAILS_ENVIRONMENT=production
```
1. The unencrypted contents of the LDAP secret should be entered like:
```yaml
main:
bind_dn: admin
password: '123'
```
1. Edit `config/gitlab.yaml` and remove the settings for `bind_dn` and `password`.
1. [Restart GitLab](../../restart_gitlab.md#installations-from-source) for the changes to take effect.
## Encryption **(CORE ONLY)**
### TLS Server Authentication

View File

@ -0,0 +1,37 @@
---
stage: Enablement
group: Distribution
info: "To determine the technical writer assigned to the Stage/Group associated with this page, see https://about.gitlab.com/handbook/engineering/ux/technical-writing/#designated-technical-writers"
type: reference
---
# Encrypted Configuration **(CORE ONLY)**
> [Introduced](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/45712) in GitLab 13.7.
GitLab can read settings for certain features from encrypted settings files. The supported features are:
- [LDAP `bind_dn` and `password`](auth/ldap/index.md#using-encrypted-credentials)
To enable the encrypted configuration settings, a new base key must be generated for
`encrypted_settings_key_base`. The secret can be generated in the following ways:
**Omnibus Installation**
Starting with GitLab 13.7, the new secret is automatically generated for you, but you need to ensure your
`/etc/gitlab/gitlab-secrets.json` contains the same values on all nodes.
**GitLab Cloud Native Helm Chart**
Starting with GitLab 13.7, the new secret is automatically generated if you have the `shared-secrets` chart enabled. Otherwise, you need
to follow the [secrets guide for adding the secret](https://docs.gitlab.com/charts/installation/secrets.html#gitlab-rails-secret).
**Source Installation**
The new secret can be generated by running:
```shell
bundle exec rake gitlab:env:info RAILS_ENV=production GITLAB_GENERATE_ENCRYPTED_SETTINGS_KEY_BASE=true
```
This prints general information about the GitLab instance and also generates the key in `<path-to-gitlab-rails>/config/secrets.yml`.
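The mechanism is modeled on Rails' encrypted configuration. As a rough sketch of how such a file behaves (this uses the underlying `ActiveSupport::EncryptedConfiguration` API directly; the paths are assumptions, not GitLab's internals):

```ruby
require "active_support/encrypted_configuration"

# Assumed paths for illustration; GitLab manages these via
# encrypted_settings_key_base and shared/encrypted_configuration/.
config = ActiveSupport::EncryptedConfiguration.new(
  config_path: "shared/encrypted_configuration/ldap.yaml.enc",
  key_path: "config/encrypted_settings_key",
  env_key: "ENCRYPTED_SETTINGS_KEY",
  raise_if_missing_key: true
)

# Writing encrypts the YAML payload; reading decrypts and parses it.
config.write("main:\n  bind_dn: admin\n  password: '123'\n")
config.config # => { main: { bind_dn: "admin", password: "123" } }
```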

View File

@ -181,6 +181,23 @@ Plan.default.actual_limits.update!(group_hooks: 100)
Set the limit to `0` to disable it.
## Pull Mirroring Interval
> [Introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/237891) in GitLab 13.7.
The [minimum time between pull refreshes](../user/project/repository/repository_mirroring.md)
defaults to 300 seconds (5 minutes).
To change this limit on a self-managed installation, run the following in the
[GitLab Rails console](operations/rails_console.md#starting-a-rails-console-session):
```ruby
# If limits don't exist for the default plan, you can create one with:
# Plan.default.create_limits!
Plan.default.actual_limits.update!(pull_mirror_interval_seconds: 200)
```
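To confirm the change took effect, the limit can be read back in the same console session:

```ruby
Plan.default.actual_limits.pull_mirror_interval_seconds
# => 200
```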
## Incoming emails from auto-responders
> [Introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/30327) in GitLab 12.4.

View File

@ -147,3 +147,96 @@ confirmation dialog:
```shell
sudo gitlab-rake gitlab:ldap:rename_provider[old_provider,new_provider] force=yes
```
## Secrets
GitLab can use [LDAP configuration secrets](../auth/ldap/index.md#using-encrypted-credentials) to read from an encrypted file. The following Rake tasks are provided for updating the contents of the encrypted file.
### Show secret
Show the contents of the current LDAP secrets.
**Omnibus Installation**
```shell
sudo gitlab-rake gitlab:ldap:secret:show
```
**Source Installation**
```shell
bundle exec rake gitlab:ldap:secret:show RAILS_ENV=production
```
**Example output:**
```plaintext
main:
password: '123'
bind_dn: 'gitlab-adm'
```
### Edit secret
Opens the secret contents in your editor, and writes the resulting content to the encrypted secret file when you exit.
**Omnibus Installation**
```shell
sudo gitlab-rake gitlab:ldap:secret:edit EDITOR=vim
```
**Source Installation**
```shell
bundle exec rake gitlab:ldap:secret:edit RAILS_ENV=production EDITOR=vim
```
### Write raw secret
Write new secret content by providing it on STDIN.
**Omnibus Installation**
```shell
echo -e "main:\n password: '123'" | sudo gitlab-rake gitlab:ldap:secret:write
```
**Source Installation**
```shell
echo -e "main:\n password: '123'" | bundle exec rake gitlab:ldap:secret:write RAILS_ENV=production
```
### Secrets examples
**Editor example**
The write task can be used in cases where the edit command does not work with your editor:
```shell
# Write the existing secret to a plaintext file
sudo gitlab-rake gitlab:ldap:secret:show > ldap.yaml
# Edit the ldap file in your editor
...
# Re-encrypt the file
cat ldap.yaml | sudo gitlab-rake gitlab:ldap:secret:write
# Remove the plaintext file
rm ldap.yaml
```
**KMS integration example**
It can also be used as a receiving application for content encrypted with a KMS:
```shell
gcloud kms decrypt --key my-key --keyring my-test-kms --plaintext-file=- --ciphertext-file=my-file --location=us-west1 | sudo gitlab-rake gitlab:ldap:secret:write
```
**gcloud secret integration example**
It can also be used as a receiving application for secrets out of gcloud:
```shell
gcloud secrets versions access latest --secret="my-test-secret" | sudo gitlab-rake gitlab:ldap:secret:write
```

View File

@ -52,8 +52,8 @@ below.
## Using object storage **(CORE ONLY)**
Instead of storing Terraform state files on disk, we recommend the use of an object
store that is S3-compatible instead. This configuration relies on valid credentials to
Instead of storing Terraform state files on disk, we recommend the use of [one of the supported object
storage options](object_storage.md#options). This configuration relies on valid credentials to
be configured already.
[Read more about using object storage with GitLab](object_storage.md).

View File

@ -15750,11 +15750,6 @@ type Pipeline {
"""
before: String
"""
Filter query for given domain
"""
domain: AlertManagementDomainFilter! = operations
"""
Returns the first _n_ elements from the list.
"""
@ -16107,6 +16102,11 @@ type Project {
"""
before: String
"""
Filter query for given domain
"""
domain: AlertManagementDomainFilter! = operations
"""
Returns the first _n_ elements from the list.
"""

View File

@ -56,7 +56,7 @@ agent uses this token to authenticate with GitLab. This token is a random string
and does not encode any information in it, but it is secret and must
be treated with care. Store it as a `Secret` in Kubernetes.
Each agent can have 0 or more tokens in GitLab's database. Having several valid
Each agent can have 0 or more tokens in a GitLab database. Having several valid
tokens helps you rotate tokens without needing to re-register an agent. Each token
record in the database has the following fields:

View File

@ -40,7 +40,7 @@ For approvals, we use the approval functionality found in the merge request
widget. Reviewers can add their approval by [approving additionally](../user/project/merge_requests/merge_request_approvals.md#adding-or-removing-an-approval).
Getting your merge request **merged** also requires a maintainer. If it requires
more than one approval, the last maintainer to review and approve it will also merge it.
more than one approval, the last maintainer to review and approve merges it.
### Domain experts
@ -69,7 +69,7 @@ It picks reviewers and maintainers from the list at the
[engineering projects](https://about.gitlab.com/handbook/engineering/projects/)
page, with these behaviors:
1. It will not pick people whose [GitLab status](../user/profile/index.md#current-status)
1. It doesn't pick people whose [GitLab status](../user/profile/index.md#current-status)
contains the string 'OOO', or the emoji is `:palm_tree:` or `:beach:`.
1. [Trainee maintainers](https://about.gitlab.com/handbook/engineering/workflow/code-review/#trainee-maintainer)
are three times as likely to be picked as other reviewers.
@ -156,9 +156,9 @@ up confusion or verify that the end result matches what they had in mind, to
database specialists to get input on the data model or specific queries, or to
any other developer to get an in-depth review of the solution.
If an author is unsure if a merge request needs a [domain expert's](#domain-experts) opinion, that's
usually a pretty good sign that it does, since without it the required level of
confidence in their solution will not have been reached.
If an author is unsure if a merge request needs a [domain expert's](#domain-experts) opinion,
that indicates it does. Without it, it's unlikely they have the required level of confidence in their
solution.
Before the review, the author is requested to submit comments on the merge
request diff alerting the reviewer to anything important as well as for anything
@ -197,18 +197,18 @@ however, if one isn't available or you think the merge request doesn't need a re
Maintainers are responsible for the overall health, quality, and consistency of
the GitLab codebase, across domains and product areas.
Consequently, their reviews will focus primarily on things like overall
Consequently, their reviews focus primarily on things like overall
architecture, code organization, separation of concerns, tests, DRYness,
consistency, and readability.
Since a maintainer's job only depends on their knowledge of the overall GitLab
Because a maintainer's job only depends on their knowledge of the overall GitLab
codebase, and not that of any specific domain, they can review, approve, and merge
merge requests from any team and in any product area.
Maintainers will do their best to also review the specifics of the chosen solution
Maintainers do their best to also review the specifics of the chosen solution
before merging, but as they are not necessarily [domain experts](#domain-experts), they may be poorly
placed to do so without an unreasonable investment of time. In those cases, they
will defer to the judgment of the author and earlier reviewers, in favor of focusing on their primary responsibilities.
defer to the judgment of the author and earlier reviewers, in favor of focusing on their primary responsibilities.
If a maintainer feels that an MR is substantial enough that it warrants a review from a [domain expert](#domain-experts),
and it is unclear whether a domain expert has been involved in the reviews to date,
@ -259,8 +259,8 @@ Instead these should be sent to the [Release Manager](https://about.gitlab.com/c
understand" or "Alternative solution:" comments. Post a follow-up comment
summarizing one-on-one discussion.
- If you ask a question to a specific person, always start the comment by
mentioning them; this will ensure they see it if their notification level is
set to "mentioned" and other people will understand they don't have to respond.
mentioning them; this ensures they see it if their notification level is
set to "mentioned" and other people understand they don't have to respond.
### Having your merge request reviewed
@ -272,8 +272,10 @@ first time.
of your shiny new branch, read through the entire diff. Does it make sense?
Did you include something unrelated to the overall purpose of the changes? Did
you forget to remove any debugging code?
<!-- vale gitlab.FutureTense = NO -->
- Be grateful for the reviewer's suggestions. ("Good call. I'll make that
change.")
<!-- vale gitlab.FutureTense = YES -->
- Don't take it personally. The review is of the code, not of you.
- Explain why the code exists. ("It's like that because of these reasons. Would
it be more clear if I rename this class/file/method/variable?")
@ -361,7 +363,7 @@ your own suggestions to the merge request. Note that:
- **Before applying suggestions**, edit the merge request to make sure
[squash and
merge](../user/project/merge_requests/squash_and_merge.md#squash-and-merge)
is enabled, otherwise, the pipeline's Danger job will fail.
is enabled, otherwise, the pipeline's Danger job fails.
- If a merge request does not have squash and merge enabled, and it
has more than one commit, then see the note below about rewriting
commit history.
@ -390,10 +392,10 @@ When ready to merge:
- When you set the MR to "Merge When Pipeline Succeeds", you should take over
subsequent revisions for anything that would be spotted after that.
Thanks to **Pipeline for Merged Results**, authors won't have to rebase their
branch as frequently anymore (only when there are conflicts) since the Merge
Results Pipeline will already incorporate the latest changes from `master`.
This results in faster review/merge cycles since maintainers don't have to ask
Thanks to **Pipeline for Merged Results**, authors no longer have to rebase their
branch as frequently (only when there are conflicts) because the Merge
Results Pipeline already incorporates the latest changes from `master`.
This results in faster review/merge cycles because maintainers don't have to ask
for a final rebase: instead, they only have to start a MR pipeline and set MWPS.
This step brings us very close to the actual Merge Trains feature by testing the
Merge Results against the latest `master` at the time of the pipeline creation.
@ -451,7 +453,7 @@ Enterprise Edition instance. This has some implications:
1. Reversible.
1. Performant at the scale of GitLab.com - ask a maintainer to test the
migration on the staging environment if you aren't sure.
1. Categorised correctly:
1. Categorized correctly:
- Regular migrations run before the new code is running on the instance.
- [Post-deployment migrations](post_deployment_migrations.md) run _after_
the new code is deployed, when the instance is configured to do that.
@ -459,12 +461,12 @@ Enterprise Edition instance. This has some implications:
should only be done for migrations that would take an extreme amount of
time at GitLab.com scale.
1. **Sidekiq workers** [cannot change in a backwards-incompatible way](sidekiq_style_guide.md#sidekiq-compatibility-across-updates):
1. Sidekiq queues are not drained before a deploy happens, so there will be
1. Sidekiq queues are not drained before a deploy happens, so there are
workers in the queue from the previous version of GitLab.
1. If you need to change a method signature, try to do so across two releases,
and accept both the old and new arguments in the first of those.
1. Similarly, if you need to remove a worker, stop it from being scheduled in
one release, then remove it in the next. This will allow existing jobs to
one release, then remove it in the next. This allows existing jobs to
execute.
1. Don't forget, not every instance will upgrade to every intermediate version
(some people may go from X.1.0 to X.10.0, or even try bigger upgrades!), so
@ -496,15 +498,15 @@ To ensure swift feedback to ready-to-review code, we maintain a `Review-response
> - review-response SLO = (time when first review response is provided) - (time MR is assigned to reviewer) < 2 business days
If you don't think you'll be able to review a merge request within the `Review-response` SLO
If you don't think you can review a merge request in the `Review-response` SLO
time frame, let the author know as soon as possible and try to help them find
another reviewer or maintainer who will be able to, so that they can be unblocked
another reviewer or maintainer who is able to, so that they can be unblocked
and get on with their work quickly.
If you think you are at capacity and are unable to accept any more reviews until
some have been completed, communicate this through your GitLab status by setting
the `:red_circle:` emoji and mentioning that you are at capacity in the status
text. This will guide contributors to pick a different reviewer, helping us to
text. This guides contributors to pick a different reviewer, helping us to
meet the SLO.
Of course, if you are out of office and have
@ -522,13 +524,13 @@ A merge request may benefit from being considered a customer critical priority b
Properties of customer critical merge requests:
- The [Senior Director of Development](https://about.gitlab.com/job-families/engineering/engineering-management/#senior-director-engineering) ([@clefelhocz1](https://gitlab.com/clefelhocz1)) is the DRI for deciding if a merge request will be customer critical.
- The DRI will assign the `customer-critical-merge-request` label to the merge request.
- The [Senior Director of Development](https://about.gitlab.com/job-families/engineering/engineering-management/#senior-director-engineering) ([@clefelhocz1](https://gitlab.com/clefelhocz1)) is the DRI for deciding if a merge request is customer critical.
- The DRI assigns the `customer-critical-merge-request` label to the merge request.
- It is required that the reviewer(s) and maintainer(s) involved with a customer critical merge request are engaged as soon as this decision is made.
- It is required to prioritize work for those involved on a customer critical merge request so that they have the time necessary to focus on it.
- It is required to adhere to GitLab [values](https://about.gitlab.com/handbook/values/) and processes when working on customer critical merge requests, taking particular note of family and friends first/work second, definition of done, iteration, and release when it's ready.
- Customer critical merge requests are required to not reduce security, introduce data-loss risk, reduce availability, nor break existing functionality per the process for [prioritizing technical decisions](https://about.gitlab.com/handbook/engineering/#prioritizing-technical-decisions.md).
- On customer critical requests, it is _recommended_ that those involved _consider_ coordinating synchronously (Zoom, Slack) in addition to asynchronously (merge requests comments) if they believe this will reduce elapsed time to merge even though this _may_ sacrifice [efficiency](https://about.gitlab.com/company/culture/all-remote/asynchronous/#evaluating-efficiency.md).
- On customer critical requests, it is _recommended_ that those involved _consider_ coordinating synchronously (Zoom, Slack) in addition to asynchronously (merge requests comments) if they believe this may reduce the elapsed time to merge even though this _may_ sacrifice [efficiency](https://about.gitlab.com/company/culture/all-remote/asynchronous/#evaluating-efficiency.md).
- After a customer critical merge request is merged, a retrospective must be completed with the intention of reducing the frequency of future customer critical merge requests.
## Examples

View File

@ -6,7 +6,7 @@ info: To determine the technical writer assigned to the Stage/Group associated w
# Kubernetes integration - development guidelines
This document provides various guidelines when developing for GitLab's
This document provides various guidelines when developing for the GitLab
[Kubernetes integration](../user/project/clusters/index.md).
## Development

View File

@ -104,7 +104,7 @@ In short:
- Never make claims based on just benchmarks, always measure in production to
confirm your findings.
- X being N times faster than Y is meaningless if you don't know what impact it
will actually have on your production environment.
has on your production environment.
- A production environment is the _only_ benchmark that always tells the truth
(unless your performance monitoring systems are not set up correctly).
- If you must write a benchmark use the benchmark-ips Gem instead of Ruby's
@ -119,7 +119,7 @@ allowing you to profile which code is running on CPU in detail.
It's important to note that profiling an application *alters its performance*.
Different profiling strategies have different overheads. Stackprof is a sampling
profiler. It will sample stack traces from running threads at a configurable
profiler. It samples stack traces from running threads at a configurable
frequency (e.g. 100hz, that is 100 stacks per second). This type of profiling
has quite a low (albeit non-zero) overhead and is generally considered to be
safe for production.
@ -241,7 +241,7 @@ BasePolicy#abilities (/Users/lupine/dev/gitlab.com/gitlab-org/gitlab-development
Since the profile includes the work done by the test suite as well as the
application code, these profiles can be used to investigate slow tests as well.
However, for smaller runs (like this example), this means that the cost of
setting up the test suite will tend to dominate.
setting up the test suite tends to dominate.
### Production
@ -256,7 +256,7 @@ The following configuration options can be configured:
- `STACKPROF_MODE`: See [sampling modes](https://github.com/tmm1/stackprof#sampling).
Defaults to `cpu`.
- `STACKPROF_INTERVAL`: Sampling interval. Unit semantics depend on `STACKPROF_MODE`.
For `object` mode this is a per-event interval (every `n`th event will be sampled)
For `object` mode this is a per-event interval (every `n`th event is sampled)
and defaults to `1000`.
For other modes such as `cpu` this is a frequency and defaults to `10000` μs (100hz).
- `STACKPROF_FILE_PREFIX`: File path prefix where profiles are stored. Defaults
@ -268,8 +268,8 @@ The following configuration options can be configured:
and disk overhead. Defaults to `true`.
Once enabled, profiling can be triggered by sending a `SIGUSR2` signal to the
Ruby process. The process will begin sampling stacks. Profiling can be stopped
by sending another `SIGUSR2`. Alternatively, it will automatically stop after
Ruby process. The process begins sampling stacks. Profiling can be stopped
by sending another `SIGUSR2`. Alternatively, it stops automatically after
the timeout.
Once profiling stops, the profile is written out to disk at
@ -284,7 +284,7 @@ Currently supported profiling targets are:
NOTE:
The Puma master process is not supported. Neither is Unicorn.
Sending SIGUSR2 to either of those will trigger restarts. In the case of Puma,
Sending SIGUSR2 to either of those triggers restarts. In the case of Puma,
take care to only send the signal to Puma workers.
This can be done via `pkill -USR2 puma:`. The `:` disambiguates between `puma
@ -292,7 +292,7 @@ This can be done via `pkill -USR2 puma:`. The `:` disambiguates between `puma
worker processes), selecting the latter.
For Sidekiq, the signal can be sent to the `sidekiq-cluster` process via `pkill
-USR2 bin/sidekiq-cluster`, which will forward the signal to all Sidekiq
-USR2 bin/sidekiq-cluster`, which forwards the signal to all Sidekiq
children. Alternatively, you can also select a specific pid of interest.
Production profiles can be especially noisy. It can be helpful to visualize them
@ -377,9 +377,9 @@ The report breaks down 2 key concepts:
- Retained: long lived memory use and object count retained due to the execution of the code block.
- Allocated: all object allocation and memory allocation during code block.
As a general rule, **retained** will always be smaller than or equal to allocated.
As a general rule, **retained** is always smaller than or equal to **allocated**.
The actual RSS cost will always be slightly higher as MRI heaps are not squashed to size and memory fragments.
The actual RSS cost is always slightly higher as MRI heaps are not squashed to size and memory fragments.
### Rbtrace
@ -444,11 +444,11 @@ Slow operations, like merging branches, or operations that are prone to errors
directly in a web request as much as possible. This has numerous benefits such
as:
1. An error won't prevent the request from completing.
1. The process being slow won't affect the loading time of a page.
1. In case of a failure it's easy to re-try the process (Sidekiq takes care of
1. An error doesn't prevent the request from completing.
1. The process being slow doesn't affect the loading time of a page.
1. In case of a failure you can retry the process (Sidekiq takes care of
this automatically).
1. By isolating the code from a web request it will hopefully be easier to test
1. By isolating the code from a web request it should be easier to test
and maintain.
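A minimal sketch of the pattern (the worker and service names here are illustrative, not existing GitLab classes):

```ruby
# Illustrative worker: moves a slow, error-prone operation out of the
# web request so Sidekiq can retry it on failure.
class SlowOperationWorker
  include ApplicationWorker

  idempotent!
  feature_category :source_code_management

  def perform(project_id)
    project = Project.find_by_id(project_id)
    return unless project

    SomeSlowService.new(project).execute # hypothetical service
  end
end

# In the controller, enqueue instead of doing the work inline:
# SlowOperationWorker.perform_async(project.id)
```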
It's especially important to use Sidekiq as much as possible when dealing with
@ -480,7 +480,7 @@ end
## Caching
Operations that will often return the same result should be cached using Redis,
Operations that often return the same result should be cached using Redis,
in particular Git operations. When caching data in Redis, make sure the cache is
flushed whenever needed. For example, a cache for the list of tags should be
flushed whenever a new tag is pushed or a tag is removed.
@ -494,7 +494,7 @@ the Repository class instead of leaking into other classes.
When caching data, make sure to also memoize the result in an instance variable.
While retrieving data from Redis is much faster than raw Git operations, it still
has overhead. By caching the result in an instance variable, repeated calls to
the same method won't end up retrieving data from Redis upon every call. When
the same method don't retrieve data from Redis upon every call. When
memoizing cached data in an instance variable, make sure to also reset the
instance variable when flushing the cache. An example:
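The example referenced here falls outside the hunk; a minimal sketch of the pattern (method and cache names assumed):

```ruby
def first_branch
  # Memoize so repeated calls in the same request skip Redis entirely.
  @first_branch ||= cache.fetch(:first_branch) { branches.first }
end

def expire_first_branch_cache
  cache.expire(:first_branch)
  # Also reset the memoized copy, or it outlives the flushed cache.
  @first_branch = nil
end
```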
@ -512,7 +512,7 @@ end
## String Freezing
In recent Ruby versions calling `freeze` on a String leads to it being allocated
only once and re-used. For example, on Ruby 2.3 or later this will only allocate the
only once and re-used. For example, on Ruby 2.3 or later this only allocates the
"foo" String once:
```ruby
@ -523,10 +523,10 @@ end
Depending on the size of the String and how frequently it would be allocated
(before the `.freeze` call was added), this _may_ make things faster, but
there's no guarantee it will.
this isn't guaranteed.
Strings will be frozen by default in Ruby 3.0. To prepare our code base for
this eventuality, we will be adding the following header to all Ruby files:
Strings are frozen by default in Ruby 3.0. To prepare our codebase for
this eventuality, we are adding the following header to all Ruby files:
```ruby
# frozen_string_literal: true
@ -549,8 +549,8 @@ Ruby offers several convenience functions that deal with file contents specifica
or I/O streams in general. Functions such as `IO.read` and `IO.readlines` make
it easy to read data into memory, but they can be inefficient when the
data grows large. Because these functions read the entire contents of a data
source into memory, memory use will grow by _at least_ the size of the data source.
In the case of `readlines`, it will grow even further, due to extra bookkeeping
source into memory, memory use grows by _at least_ the size of the data source.
In the case of `readlines`, it grows even further, due to extra bookkeeping
the Ruby VM has to perform to represent each line.
Consider the following program, which reads a text file that is 750MB on disk:
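The program itself is outside this hunk; a representative sketch of what the measurement describes (path assumed):

```ruby
# Loads all ~750MB into one array of per-line String objects, so memory
# grows by at least the file size plus per-line VM bookkeeping.
lines = IO.readlines('/tmp/large_file.txt')
puts lines.size
```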
@ -588,12 +588,12 @@ which is roughly two orders of magnitude more compared to reading the file line
line instead. It was not just the raw memory usage that increased, but also how the garbage collector (GC)
responded to this change in anticipation of future memory use. We can see that `malloc_increase_bytes` jumped
to ~30MB, which compares to just ~4kB for a "fresh" Ruby program. This figure specifies how
much additional heap space the Ruby GC will claim from the operating system next time it runs out of memory.
much additional heap space the Ruby GC claims from the operating system next time it runs out of memory.
Not only did we occupy more memory, we also changed the behavior of the application
to increase memory use at a faster rate.
The `IO.read` function exhibits similar behavior, with the difference that no extra memory will
be allocated for each line object.
The `IO.read` function exhibits similar behavior, with the difference that no extra memory is
allocated for each line object.
### Recommendations
@ -630,7 +630,7 @@ production environments.
### Moving Allocations to Constants
Storing an object as a constant so you only allocate it once _may_ improve
performance, but there's no guarantee this will. Looking up constants has an
performance, but this is not guaranteed. Looking up constants has an
impact on runtime performance, and as such, using a constant instead of
referencing an object directly may even slow code down. For example:
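The example is cut off by the file boundary; a sketch of the trade-off being described (names illustrative):

```ruby
SOME_CONSTANT = 'foo'.freeze

def with_constant
  SOME_CONSTANT # pays a constant lookup on every call
end

def with_literal
  'foo' # may allocate, but avoids the constant lookup
end
```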

View File

@ -8,10 +8,10 @@ info: To determine the technical writer assigned to the Stage/Group associated w
> This doc refers to <https://gitlab.com/gitlab-org/gitlab/blob/master/app/models/concerns/reactive_caching.rb>.
The `ReactiveCaching` concern is used for fetching some data in the background and store it
The `ReactiveCaching` concern is used for fetching some data in the background and storing it
in the Rails cache, keeping it up-to-date for as long as it is being requested. If the
data hasn't been requested for `reactive_cache_lifetime`, it will stop being refreshed,
and then be removed.
data hasn't been requested for `reactive_cache_lifetime`, it stops being refreshed,
and is removed.
## Examples
@ -35,38 +35,40 @@ class Foo < ApplicationRecord
end
```
In this example, the first time `#result` is called, it will return `nil`. However,
it will enqueue a background worker to call `#calculate_reactive_cache` and set an
initial cache lifetime of 10 min.
In this example, the first time `#result` is called, it returns `nil`. However,
it enqueues a background worker to call `#calculate_reactive_cache` and set an
initial cache lifetime of 10 minutes.
## How it works
The first time `#with_reactive_cache` is called, a background job is enqueued and
`with_reactive_cache` returns `nil`. The background job calls `#calculate_reactive_cache`
and stores its return value. It also re-enqueues the background job to run again after
`reactive_cache_refresh_interval`. Therefore, it will keep the stored value up to date.
`reactive_cache_refresh_interval`. Therefore, it keeps the stored value up to date.
Calculations never run concurrently.
Calling `#with_reactive_cache` while a value is cached will call the block given to
`#with_reactive_cache`, yielding the cached value. It will also extend the lifetime
Calling `#with_reactive_cache` while a value is cached calls the block given to
`#with_reactive_cache`, yielding the cached value. It also extends the lifetime
of the cache by the `reactive_cache_lifetime` value.
Once the lifetime has expired, no more background jobs will be enqueued and calling
`#with_reactive_cache` will again return `nil` - starting the process all over again.
After the lifetime has expired, no more background jobs are enqueued and calling
`#with_reactive_cache` again returns `nil`, starting the process all over again.
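Putting that lifecycle together, a minimal sketch of a model using the concern (the class and calculation are illustrative; the hook names come from this page):

```ruby
class Foo < ApplicationRecord
  include ReactiveCaching

  self.reactive_cache_refresh_interval = 1.minute
  self.reactive_cache_lifetime = 10.minutes

  # Runs in the background worker; its return value is what gets cached.
  def calculate_reactive_cache
    expensive_computation # illustrative
  end

  def result
    # nil on the first call (a job is enqueued); afterwards yields the
    # cached value and extends the cache lifetime.
    with_reactive_cache do |data|
      data
    end
  end
end
```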
### 1 MB hard limit
### Set a hard limit for ReactiveCaching
`ReactiveCaching` has a 1 megabyte default limit. [This value is configurable](#selfreactive_cache_worker_finder).
To preserve performance, you should set a hard caching limit in the class that includes
`ReactiveCaching`. See the example of [how to set it up](#selfreactive_cache_hard_limit).
If the data we're trying to cache is over 1 megabyte, it is not cached and a handled `ReactiveCaching::ExceededReactiveCacheLimit` error is reported to Sentry.
For more information, read the internal issue
[Redis (or ReactiveCache) soft and hard limits](https://gitlab.com/gitlab-org/gitlab/-/issues/14015).
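For example, the per-class limit mentioned above is set with a `class_attribute` (value illustrative):

```ruby
class Foo < ApplicationRecord
  include ReactiveCaching

  self.reactive_cache_hard_limit = 5.megabytes
end
```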
## When to use
- If we need to make a request to an external API (for example, requests to the k8s API).
It is not advisable to keep the application server worker blocked for the duration of
the external request.
It is not advisable to keep the application server worker blocked for the duration of
the external request.
- If a model needs to perform a lot of database calls or other time consuming
calculations.
calculations.
## How to use
@ -99,13 +101,13 @@ Controller endpoints that call a model or service method that uses `ReactiveCach
not wait until the background worker completes.
- An API that calls a model or service method that uses `ReactiveCaching` should return
`202 accepted` when the cache is being calculated (when `#with_reactive_cache` returns `nil`).
`202 accepted` when the cache is being calculated (when `#with_reactive_cache` returns `nil`).
- It should also
[set the polling interval header](fe_guide/performance.md#realtime-components) with
`Gitlab::PollingInterval.set_header`.
[set the polling interval header](fe_guide/performance.md#realtime-components) with
`Gitlab::PollingInterval.set_header`.
- The consumer of the API is expected to poll the API.
- You can also consider implementing [ETag caching](polling.md) to reduce the server
load caused by polling.
load caused by polling.
### Methods to implement in a model or service
@ -113,17 +115,17 @@ These are methods that should be implemented in the model/service that includes
#### `#calculate_reactive_cache` (required)
- This method must be implemented. Its return value will be cached.
- It will be called by `ReactiveCaching` when it needs to populate the cache.
- Any arguments passed to `with_reactive_cache` will also be passed to `calculate_reactive_cache`.
- This method must be implemented. Its return value is cached.
- It is called by `ReactiveCaching` when it needs to populate the cache.
- Any arguments passed to `with_reactive_cache` are also passed to `calculate_reactive_cache`.
#### `#reactive_cache_updated` (optional)
- This method can be implemented if needed.
- It is called by the `ReactiveCaching` concern whenever the cache is updated.
If the cache is being refreshed and the new cache value is the same as the old cache
value, this method will not be called. It is only called if a new value is stored in
the cache.
If the cache is being refreshed and the new cache value is the same as the old cache
value, this method is not called. It is only called if a new value is stored in
the cache.
- It can be used to perform an action whenever the cache is updated.
### Methods called by a model or service
@ -134,22 +136,22 @@ the model/service.
#### `#with_reactive_cache` (required)
- `with_reactive_cache` must be called where the result of `calculate_reactive_cache`
is required.
- A block can be given to `with_reactive_cache`. `with_reactive_cache` can also take
any number of arguments. Any arguments passed to `with_reactive_cache` will be
passed to `calculate_reactive_cache`. The arguments passed to `with_reactive_cache`
are appended to the cache key name.
- If `with_reactive_cache` is called when the result has already been cached, the
block is called, yielding the cached value, and the return value of the block
is returned by `with_reactive_cache`. It also resets the timeout of the
cache to the `reactive_cache_lifetime` value.
- If the result has not been cached yet, `with_reactive_cache` returns `nil`.
It also enqueues a background job, which calls `calculate_reactive_cache`
and caches the result.
- After the background job has completed and the result is cached, the next call
to `with_reactive_cache` picks up the cached value.
- In the example below, `data` is the cached value which is yielded to the block
given to `with_reactive_cache`.
```ruby
class Foo < ApplicationRecord
  include ReactiveCaching

  def calculate_reactive_cache(param1, param2)
    # Expensive operation here. Its return value is cached.
  end

  def result
    with_reactive_cache(param1, param2) do |data|
      # `data` is the cached return value of `calculate_reactive_cache`.
      data
    end
  end
end
```
@ -170,16 +172,16 @@ given to `with_reactive_cache`.
#### `#clear_reactive_cache!` (optional)
- This method can be called when the cache needs to be expired/cleared. For example,
it can be called in an `after_save` callback in a model so that the cache is
cleared after the model is modified.
- This method should be called with the same parameters that are passed to
`with_reactive_cache` because the parameters are part of the cache key. See the
sketch after this list.
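A minimal sketch, assuming the cache is populated via `with_reactive_cache(:arg)`
elsewhere in the class; the callback name and argument are illustrative:
```ruby
class Foo < ApplicationRecord
  include ReactiveCaching

  after_save :clear_cache

  private

  def clear_cache
    # Pass the same arguments used with `with_reactive_cache`, because
    # they form part of the cache key.
    clear_reactive_cache!(:arg)
  end
end
```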
#### `#without_reactive_cache` (optional)
- This is a convenience method that can be used for debugging purposes.
- This method calls `calculate_reactive_cache` in the current process instead of
in a background worker.
### Configurable options
@ -188,19 +190,19 @@ There are some `class_attribute` options which can be tweaked.
#### `self.reactive_cache_key`
- The value of this attribute is the prefix to the `data` and `alive` cache key names.
The parameters passed to `with_reactive_cache` form the rest of the cache key names.
- By default, this key uses the model's name and the ID of the record.
```ruby
self.reactive_cache_key = -> (record) { [model_name.singular, record.id] }
```
- The `data` and `alive` cache keys in this case are `"ExampleModel:1:arg1:arg2"`
and `"ExampleModel:1:arg1:arg2:alive"` respectively, where `ExampleModel` is the
name of the model, `1` is the ID of the record, `arg1` and `arg2` are parameters
passed to `with_reactive_cache`.
- If you're including this concern in a service instead, you must override
the default by adding the following to your service:
```ruby
self.reactive_cache_key = ->(service) { [service.class.model_name.singular, service.project_id] }
```
@ -212,7 +214,7 @@ the default by adding the following to your service:
#### `self.reactive_cache_lease_timeout`
- `ReactiveCaching` uses `Gitlab::ExclusiveLease` to ensure that the cache calculation
is never run concurrently by multiple workers.
- This attribute is the timeout for the `Gitlab::ExclusiveLease`.
- It defaults to 2 minutes, but can be overridden if a different timeout is required.
@ -231,11 +233,11 @@ self.reactive_cache_lease_timeout = 1.minute
#### `self.reactive_cache_lifetime`
- This is the duration after which the cache is cleared if there are no requests.
- The default is 10 minutes. If there are no requests for this cache value for 10 minutes,
the cache expires.
- If the cache value is requested before it expires, the timeout of the cache is
reset to `reactive_cache_lifetime`.
```ruby
self.reactive_cache_lifetime = 10.minutes
```
@ -244,8 +246,8 @@ self.reactive_cache_lifetime = 10.minutes
#### `self.reactive_cache_hard_limit`
- This is the maximum data size that `ReactiveCaching` allows to be cached.
- The default is 1 megabyte. Data that goes over this value is not cached
and silently raises `ReactiveCaching::ExceededReactiveCacheLimit` on Sentry.
```ruby
self.reactive_cache_hard_limit = 5.megabytes
```
@ -300,7 +302,7 @@ which `calculate_reactive_cache` can be called.
end
```
- In this example, the primary key ID is passed to `reactive_cache_worker_finder`
along with the parameters passed to `with_reactive_cache`.
- The custom `reactive_cache_worker_finder` calls `.from_cache` with the parameters
passed to `with_reactive_cache`. A fuller sketch follows this list.
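Because the diff truncates the original example, here is a reconstruction under
stated assumptions: a hypothetical `CalculateFooService` cached per project and
re-instantiated in the background worker via `from_cache`:
```ruby
class CalculateFooService
  include ReactiveCaching

  self.reactive_cache_key = ->(service) { [service.class.model_name.singular, service.project_id] }
  self.reactive_cache_worker_finder = ->(_id, *cache_args) { from_cache(*cache_args) }

  attr_reader :project_id

  def self.from_cache(project_id)
    # Called by the background worker with the arguments that were
    # passed to `with_reactive_cache`.
    new(project_id)
  end

  def initialize(project_id)
    @project_id = project_id
  end

  def execute
    with_reactive_cache(project_id) { |data| data }
  end

  def calculate_reactive_cache(project_id)
    # Expensive calculation here; its return value is cached.
  end
end
```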

View File

@ -27,12 +27,12 @@ The list displays the integration name, type, and status (enabled or disabled):
## Configuration
GitLab can receive alerts via an HTTP endpoint that you configure,
or the [Prometheus integration](#external-prometheus-integration).
### Single HTTP Endpoint **CORE**
Enabling the HTTP Endpoint in a GitLab project activates it to
receive alert payloads in JSON format. You can always
[customize the payload](#customize-the-alert-payload-outside-of-gitlab) to your liking.
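As a sketch only, an alert can be pushed to the endpoint with a small JSON payload;
the project URL and authorization key below are placeholders you obtain when
enabling the integration:
```ruby
require 'net/http'
require 'json'
require 'uri'

uri = URI('https://gitlab.example.com/root/example-project/alerts/notify.json')

request = Net::HTTP::Post.new(uri)
request['Authorization'] = 'Bearer <authorization-key>'
request['Content-Type'] = 'application/json'
request.body = { title: 'Database is down', severity: 'critical' }.to_json

# Send the alert payload over HTTPS.
Net::HTTP.start(uri.host, uri.port, use_ssl: true) do |http|
  http.request(request)
end
```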
@ -172,7 +172,7 @@ If the existing alert is already `resolved`, GitLab creates a new alert instead.
WARNING:
We are building deeper integration with Opsgenie and other alerting tools through
[HTTP endpoint integrations](#single-http-endpoint) so you can see alerts within
the GitLab interface. As a result, the previous direct link to Opsgenie Alerts from
the GitLab alerts list is scheduled for deprecation following the 13.7 release on December 22, 2020.
@ -181,7 +181,7 @@ the GitLab alerts list is scheduled for deprecation following the 13.7 release o
You can monitor alerts using a GitLab integration with [Opsgenie](https://www.atlassian.com/software/opsgenie).
If you enable the Opsgenie integration, you can't have other GitLab alert
services
active at the same time.
To enable Opsgenie integration:

Binary file not shown. (Removed image: 21 KiB)

View File

@ -46,10 +46,7 @@ Incident, you have two options to do this manually.
With Maintainer or higher [permissions](../../user/permissions.md), you can enable
GitLab to create an incident automatically whenever an alert is triggered:
1. Navigate to **Settings > Operations > Incidents** and expand **Incidents**.
1. Check the **Create an incident** checkbox.
1. To customize the incident, select an
[issue template](../../user/project/description_templates.md#creating-issue-templates).

View File

@ -76,7 +76,7 @@ innovative work done by [Heroku](https://www.heroku.com/) and goes beyond it
in multiple ways:
- Auto DevOps works with any Kubernetes cluster; you're not limited to running
on infrastructure managed by GitLab. (Note that many features also work without Kubernetes).
- There is no additional cost (no markup on the infrastructure costs), and you
can use a Kubernetes cluster you host or Containers as a Service on any
public cloud (for example, [Google Kubernetes Engine](https://cloud.google.com/kubernetes-engine/)).
@ -316,7 +316,7 @@ simplify configuration and prevent any unforeseen issues.
### Install applications behind a proxy
The GitLab integration with Helm does not support installing applications when
behind a proxy. Users who want to do so must inject their proxy settings
into the installation pods at runtime, such as by using a
[`PodPreset`](https://kubernetes.io/docs/concepts/workloads/pods/podpreset/):

View File

@ -9,7 +9,7 @@ info: To determine the technical writer assigned to the Stage/Group associated w
This step-by-step guide helps you use [Auto DevOps](index.md) to
deploy a project hosted on GitLab.com to Google Kubernetes Engine.
You are using the GitLab native Kubernetes integration, so you don't need
to create a Kubernetes cluster manually using the Google Cloud Platform console.
You are creating and deploying a simple application that you create from a GitLab template.
@ -32,13 +32,13 @@ or Google Drive, or create a new one.
NOTE:
Every new Google Cloud Platform (GCP) account receives [$300 in credit](https://console.cloud.google.com/freetrial),
and in partnership with Google, GitLab is able to offer an additional $200 for new
GCP accounts to get started with the GitLab integration with Google Kubernetes Engine.
[Follow this link](https://cloud.google.com/partners/partnercredit/?pcn_code=0014M00001h35gDQAQ#contact-form)
and apply for credit.
## Create a new project from a template
We are using a GitLab project template to get started. As the name suggests,
those projects provide a bare-bones application built on some well-known frameworks.
1. In GitLab, click the plus icon (**{plus-square}**) at the top of the navigation bar, and select

View File

@ -28,15 +28,15 @@ To make full use of Auto DevOps with Kubernetes, you need:
[Auto Deploy for Kubernetes 1.16+](stages.md#kubernetes-116).
1. NGINX Ingress. You can deploy it to your Kubernetes cluster by installing
the [GitLab-managed app for Ingress](../../user/clusters/applications.md#ingress),
after configuring the GitLab integration with Kubernetes in the previous step.
Alternatively, you can use the
[`nginx-ingress`](https://github.com/helm/charts/tree/master/stable/nginx-ingress)
Helm chart to install Ingress manually.
NOTE:
If you use your own Ingress instead of the one provided by GitLab Managed
Apps, ensure you're running at least version 0.9.0 of NGINX Ingress and
[enable Prometheus metrics](https://github.com/helm/charts/tree/master/stable/nginx-ingress#prometheus-metrics)
for the response metrics to appear. You must also
[annotate](https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/)
@ -64,7 +64,7 @@ To make full use of Auto DevOps with Kubernetes, you need:
You can configure Docker-based runners to autoscale as well, using
[Docker Machine](https://docs.gitlab.com/runner/install/autoscaling.html).
If you've configured the GitLab integration with Kubernetes in the first step, you
can deploy it to your cluster by installing the
[GitLab-managed app for GitLab Runner](../../user/clusters/applications.md#gitlab-runner).
@ -77,7 +77,7 @@ To make full use of Auto DevOps with Kubernetes, you need:
To enable Auto Monitoring, you need Prometheus installed either inside or
outside your cluster, and configured to scrape your Kubernetes cluster.
If you've configured the GitLab integration with Kubernetes, you can deploy it to
your cluster by installing the
[GitLab-managed app for Prometheus](../../user/clusters/applications.md#prometheus).
@ -95,8 +95,8 @@ To make full use of Auto DevOps with Kubernetes, you need:
a native Kubernetes certificate management controller that helps with issuing
certificates. Installing cert-manager on your cluster issues a
[Lets Encrypt](https://letsencrypt.org/) certificate and ensures the
certificates are valid and up-to-date. If you've configured the GitLab integration
with Kubernetes, you can deploy it to your cluster by installing the
[GitLab-managed app for cert-manager](../../user/clusters/applications.md#cert-manager).
If you don't have Kubernetes or Prometheus installed, then

View File

@ -618,7 +618,7 @@ may require commands to be wrapped as follows:
Some of the reasons you may need to wrap commands:
- Attaching using `kubectl exec`.
- Using the GitLab [Web Terminal](../../ci/environments/index.md#web-terminals).
For example, to start a Rails console from the application root directory, run:

View File

@ -77,10 +77,10 @@ in a pod within the `gitlab-managed-apps` namespace inside the cluster.
with a local [Tiller](https://v2.helm.sh/docs/glossary/#tiller) server. Prior
to [GitLab 13.2](https://gitlab.com/gitlab-org/gitlab/-/issues/209736), GitLab
used an in-cluster Tiller server in the `gitlab-managed-apps` namespace. You
can safely uninstall the server from the GitLab application page if you have
previously installed it. This doesn't affect your other applications.
The GitLab Helm integration does not support installing applications behind a proxy,
but a [workaround](../../topics/autodevops/index.md#install-applications-behind-a-proxy)
is available.
@ -286,7 +286,7 @@ of a WAF are:
By default, GitLab provides you with a WAF known as [`ModSecurity`](https://www.modsecurity.org/),
which is a toolkit for real-time web application monitoring, logging, and access
control. GitLab applies the [OWASP's Core Rule Set](https://www.modsecurity.org/CRS/Documentation/),
which provides generic attack detection capabilities.
This feature:
@ -301,7 +301,7 @@ This feature:
To enable WAF, switch its respective toggle to the enabled position when installing or updating [Ingress application](#ingress).
If this is your first time using the GitLab WAF, we recommend you follow the
[quick start guide](../../topics/web_application_firewall/quick_start_guide.md).
There is a small performance overhead when enabling ModSecurity. If this is

View File

@ -10,7 +10,7 @@ info: To determine the technical writer assigned to the Stage/Group associated w
Cluster cost management provides insights into cluster resource usage. GitLab provides an example
[`kubecost-cost-model`](https://gitlab.com/gitlab-examples/kubecost-cost-model/)
project that uses the GitLab Prometheus integration and
[Kubecost's `cost-model`](https://github.com/kubecost/cost-model) to provide cluster cost
insights within GitLab:
@ -28,7 +28,7 @@ permissions in a project or group.
need to change this value if you use a non-managed Prometheus.
- Adds the necessary annotations to the deployment manifest to be scraped by
GitLab-managed Prometheus.
- Changes the Google Pricing API access key to the GitLab access key.
- Contains definitions for a custom GitLab Metrics dashboard to show the cost insights.
1. Connect GitLab with Prometheus, depending on your configuration:
- *If Prometheus is already configured,* navigate to **Settings > Integrations > Prometheus**

View File

@ -9,7 +9,7 @@ info: To determine the technical writer assigned to the Stage/Group associated w
## Motivation
The Terraform integration features within GitLab enable your GitOps / Infrastructure-as-Code (IaC)
workflows to tie into GitLab authentication and authorization. These features focus on
lowering the barrier to entry for teams to adopt Terraform, collaborate effectively within
GitLab, and support Terraform best practices.

View File

@ -8,8 +8,8 @@ info: To determine the technical writer assigned to the Stage/Group associated w
> - [Introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/7934) in [GitLab Premium](https://about.gitlab.com/pricing/) 11.11.
> - [Moved](https://gitlab.com/gitlab-org/gitlab/-/issues/273655) to [GitLab Core](https://about.gitlab.com/pricing/) in GitLab 13.6.
> - [Support for private groups](https://gitlab.com/gitlab-org/gitlab/-/issues/11582) in [GitLab Core](https://about.gitlab.com/pricing/) 13.7.
> - Anonymous access to images in public groups is no longer available starting in [GitLab Core](https://about.gitlab.com/pricing/) 13.7.
The GitLab Dependency Proxy is a local proxy you can use for your frequently-accessed
upstream images.
@ -79,6 +79,8 @@ You can authenticate using:
#### Authenticate within CI/CD
> - [Introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/280582) in 13.7.
To work with the Dependency Proxy in [GitLab CI/CD](../../../ci/README.md), you can use:
- `CI_DEPENDENCY_PROXY_USER`: A CI user for logging in to the Dependency Proxy.

View File

@ -10,7 +10,7 @@ GitLab supports adding new and existing EKS clusters.
## EKS requirements
Before creating your first cluster on Amazon EKS with the GitLab integration, make sure the following
requirements are met:
- An [Amazon Web Services](https://aws.amazon.com/) account is set up and you are able to log in.

View File

@ -10,7 +10,7 @@ GitLab supports adding new and existing GKE clusters.
## GKE requirements
Before creating your first cluster on Google GKE with the GitLab integration, make sure the following
requirements are met:
- A [billing account](https://cloud.google.com/billing/docs/how-to/manage-billing-account)

View File

@ -22,7 +22,7 @@ NOTE:
Every new Google Cloud Platform (GCP) account receives
[$300 in credit upon sign up](https://console.cloud.google.com/freetrial).
In partnership with Google, GitLab is able to offer an additional $200 for new GCP
accounts to get started with the GitLab integration with Google Kubernetes Engine.
[Follow this link](https://cloud.google.com/partners/partnercredit/?pcn_code=0014M00001h35gDQAQ#contact-form)
to apply for credit.

View File

@ -25,7 +25,7 @@ pre-written code blocks or database queries against a given environment.
> [Introduced](https://gitlab.com/gitlab-org/gitlab-foss/-/issues/45912) in GitLab 11.4.
The JupyterHub app offered via the GitLab Kubernetes integration now ships
with Nurtch's Rubix library, providing a simple way to create DevOps
runbooks. A sample runbook is provided, showcasing common operations. While
Rubix makes it simple to create common Kubernetes and AWS workflows, you can
@ -41,7 +41,7 @@ To create an executable runbook, you need:
- **Kubernetes** - A Kubernetes cluster is required to deploy the rest of the
applications. The simplest way to get started is to add a cluster using one
of the [GitLab integrations](../add_remove_clusters.md#create-new-cluster).
- **Ingress** - Ingress can provide load balancing, SSL termination, and name-based
virtual hosting. It acts as a web proxy for your applications.
- **JupyterHub** - [JupyterHub](https://jupyterhub.readthedocs.io/) is a multi-user

View File

@ -49,7 +49,7 @@ To run Knative on GitLab, you need:
clone the sample [Knative Ruby App](https://gitlab.com/knative-examples/knative-ruby-app) to get
started.
1. **Kubernetes Cluster:** An RBAC-enabled Kubernetes cluster is required to deploy Knative.
The simplest way to get started is to add a cluster using the GitLab [GKE integration](../add_remove_clusters.md).
The set of minimum recommended cluster specifications to run Knative is 3 nodes, 6 vCPUs, and 22.50 GB memory.
1. **GitLab Runner:** A runner is required to run the CI jobs that deploy serverless
applications or functions onto your cluster. You can install GitLab Runner
@ -73,7 +73,7 @@ To run Knative on GitLab, you need:
1. **Logging** (optional): Configuring logging allows you to view and search request logs for your serverless function/application.
See [Configuring logging](#configuring-logging) for more information.
## Installing Knative via the GitLab Kubernetes integration
The minimum recommended cluster size to run Knative is 3 nodes, 6 vCPUs, and 22.50 GB
memory. **RBAC must be enabled.**
@ -123,7 +123,7 @@ which already has Knative installed. You must do the following:
- For a GitLab managed cluster, if you added the cluster in [GitLab 12.1 or later](https://gitlab.com/gitlab-org/gitlab-foss/-/merge_requests/30235),
then GitLab already has the required access and you can proceed to the next step.
Otherwise, you need to manually grant the GitLab service account the ability to manage
resources in the `serving.knative.dev` API group. Since every GitLab service account
has the `edit` cluster role, the simplest way to do this is with an
[aggregated ClusterRole](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#aggregated-clusterroles)

View File

@ -11,8 +11,7 @@ Repository mirroring allows for mirroring of repositories to and from external s
used to mirror branches, tags, and commits between repositories.
A repository mirror at GitLab will be updated automatically. You can also manually trigger an update
at most once every 5 minutes on GitLab.com with [the limit set by the administrator on self-managed instances](../../../administration/instance_limits.md#pull-mirroring-interval).
## Overview
@ -30,7 +29,7 @@ Users with at least [Developer access](../../permissions.md) to the project can
immediate update, unless:
- The mirror is already being updated.
- The [limit for pull mirroring interval seconds](../../../administration/instance_limits.md#pull-mirroring-interval) has not elapsed since its last update.
For security reasons, in [GitLab 12.10 and later](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/27166),
the URL to the original repository is only displayed to users with

View File

@ -9676,6 +9676,9 @@ msgstr ""
msgid "DevopsAdoption|MRs"
msgstr ""
msgid "DevopsAdoption|Maximum %{maxSegments} segments allowed"
msgstr ""
msgid "DevopsAdoption|My segment"
msgstr ""
@ -25685,7 +25688,10 @@ msgstr ""
msgid "Something went wrong while archiving a requirement."
msgstr ""
msgid "Something went wrong while closing the merge request. Please try again later"
msgid "Something went wrong while closing the epic. Please try again later."
msgstr ""
msgid "Something went wrong while closing the merge request. Please try again later."
msgstr ""
msgid "Something went wrong while creating a requirement."
@ -25772,7 +25778,10 @@ msgstr ""
msgid "Something went wrong while reopening a requirement."
msgstr ""
msgid "Something went wrong while reopening the merge request. Please try again later"
msgid "Something went wrong while reopening the epic. Please try again later."
msgstr ""
msgid "Something went wrong while reopening the merge request. Please try again later."
msgstr ""
msgid "Something went wrong while resolving this discussion. Please try again."

View File

@ -0,0 +1,81 @@
# frozen_string_literal: true
require 'spec_helper'
RSpec.describe 'issue state', :js do
let_it_be(:project) { create(:project) }
let_it_be(:user) { create(:user) }
before do
project.add_developer(user)
sign_in(user)
end
shared_examples 'issue closed' do |selector|
it 'can close an issue' do
expect(find('.status-box')).to have_content 'Open'
within selector do
click_button 'Close issue'
end
expect(find('.status-box')).to have_content 'Closed'
end
end
shared_examples 'issue reopened' do |selector|
it 'can reopen an issue' do
expect(find('.status-box')).to have_content 'Closed'
within selector do
click_button 'Reopen issue'
end
expect(find('.status-box')).to have_content 'Open'
end
end
describe 'when open' do
context 'when clicking the top `Close issue` button', :aggregate_failures do
let(:open_issue) { create(:issue, project: project) }
before do
visit project_issue_path(project, open_issue)
end
it_behaves_like 'issue closed', '.detail-page-header'
end
context 'when clicking the bottom `Close issue` button', :aggregate_failures do
let(:open_issue) { create(:issue, project: project) }
before do
visit project_issue_path(project, open_issue)
end
it_behaves_like 'issue closed', '.timeline-content-form'
end
end
describe 'when closed' do
context 'when clicking the top `Reopen issue` button', :aggregate_failures do
let(:closed_issue) { create(:issue, project: project, state: 'closed') }
before do
visit project_issue_path(project, closed_issue)
end
it_behaves_like 'issue reopened', '.detail-page-header'
end
context 'when clicking the bottom `Reopen issue` button', :aggregate_failures do
let(:closed_issue) { create(:issue, project: project, state: 'closed') }
before do
visit project_issue_path(project, closed_issue)
end
it_behaves_like 'issue reopened', '.timeline-content-form'
end
end
end

View File

@ -1,23 +0,0 @@
# frozen_string_literal: true
require 'spec_helper'
RSpec.describe 'User closes a merge requests', :js do
let(:project) { create(:project, :repository) }
let(:merge_request) { create(:merge_request, source_project: project, target_project: project) }
let(:user) { create(:user) }
before do
project.add_maintainer(user)
sign_in(user)
visit(merge_request_path(merge_request))
end
it 'closes a merge request' do
click_button('Close merge request', match: :first)
expect(page).to have_content(merge_request.title)
expect(page).to have_content('Closed by')
end
end

View File

@ -0,0 +1,101 @@
# frozen_string_literal: true
require 'spec_helper'
RSpec.describe 'User closes/reopens a merge request', :js do
let_it_be(:project) { create(:project, :repository) }
let_it_be(:user) { create(:user) }
before do
project.add_developer(user)
sign_in(user)
end
describe 'when open' do
context 'when clicking the top `Close merge request` link', :aggregate_failures do
let(:open_merge_request) { create(:merge_request, source_project: project, target_project: project) }
before do
visit merge_request_path(open_merge_request)
end
it 'can close a merge request' do
expect(find('.status-box')).to have_content 'Open'
within '.detail-page-header' do
click_button 'Toggle dropdown'
click_link 'Close merge request'
end
wait_for_requests
expect(find('.status-box')).to have_content 'Closed'
end
end
context 'when clicking the bottom `Close merge request` button', :aggregate_failures do
let(:open_merge_request) { create(:merge_request, source_project: project, target_project: project) }
before do
visit merge_request_path(open_merge_request)
end
it 'can close a merge request' do
expect(find('.status-box')).to have_content 'Open'
within '.timeline-content-form' do
click_button 'Close merge request'
# Clicking the bottom `Close merge request` button does not yet update
# the header status so for now we'll check that the button text changes
expect(page).not_to have_button 'Close merge request'
expect(page).to have_button 'Reopen merge request'
end
end
end
end
describe 'when closed' do
context 'when clicking the top `Reopen merge request` link', :aggregate_failures do
let(:closed_merge_request) { create(:merge_request, source_project: project, target_project: project, state: 'closed') }
before do
visit merge_request_path(closed_merge_request)
end
it 'can reopen a merge request' do
expect(find('.status-box')).to have_content 'Closed'
within '.detail-page-header' do
click_button 'Toggle dropdown'
click_link 'Reopen merge request'
end
wait_for_requests
expect(find('.status-box')).to have_content 'Open'
end
end
context 'when clicking the bottom `Reopen merge request` button', :aggregate_failures do
let(:closed_merge_request) { create(:merge_request, source_project: project, target_project: project, state: 'closed') }
before do
visit merge_request_path(closed_merge_request)
end
it 'can reopen a merge request' do
expect(find('.status-box')).to have_content 'Closed'
within '.timeline-content-form' do
click_button 'Reopen merge request'
# Clicking the bottom `Reopen merge request` button does not yet update
# the header status so for now we'll check that the button text changes
expect(page).not_to have_button 'Reopen merge request'
expect(page).to have_button 'Close merge request'
end
end
end
end
end

View File

@ -1,28 +0,0 @@
# frozen_string_literal: true
require 'spec_helper'
RSpec.describe 'User reopens a merge requests', :js do
let(:project) { create(:project, :public, :repository) }
let!(:merge_request) { create(:closed_merge_request, source_project: project, target_project: project) }
let(:user) { create(:user) }
before do
project.add_maintainer(user)
sign_in(user)
visit(merge_request_path(merge_request))
end
it 'reopens a merge request' do
find('.detail-page-header .dropdown-toggle').click
click_link('Reopen merge request', match: :first)
wait_for_requests
page.within('.status-box') do
expect(page).to have_content('Open')
end
end
end

View File

@ -2,6 +2,7 @@ import { nextTick } from 'vue';
import { mount, shallowMount } from '@vue/test-utils';
import MockAdapter from 'axios-mock-adapter';
import Autosize from 'autosize';
import { deprecatedCreateFlash as flash } from '~/flash';
import axios from '~/lib/utils/axios_utils';
import createStore from '~/notes/stores';
import CommentForm from '~/notes/components/comment_form.vue';
@ -13,6 +14,7 @@ import { loggedOutnoteableData, notesDataMock, userDataMock, noteableDataMock }
jest.mock('autosize');
jest.mock('~/commons/nav/user_merge_requests');
jest.mock('~/flash');
jest.mock('~/gl_form');
describe('issue_comment_form component', () => {
@ -28,7 +30,7 @@ describe('issue_comment_form component', () => {
const mountComponent = ({
initialData = {},
noteableType = 'Issue',
noteableData = noteableDataMock,
notesData = notesDataMock,
userData = userDataMock,
@ -278,52 +280,92 @@ describe('issue_comment_form component', () => {
});
});
describe.each`
type | noteableType
${'merge request'} | ${'MergeRequest'}
${'epic'} | ${'Epic'}
`('when $type', ({ type, noteableType }) => {
describe('when open', () => {
it(`makes an API call to open it`, () => {
mountComponent({
noteableType,
noteableData: { ...noteableDataMock, state: constants.OPENED },
mountFunction: mount,
});
jest.spyOn(wrapper.vm, 'closeIssuable').mockResolvedValue();
findCloseReopenButton().trigger('click');
expect(wrapper.vm.closeIssuable).toHaveBeenCalled();
});
it(`shows an error when the API call fails`, async () => {
mountComponent({
noteableType,
noteableData: { ...noteableDataMock, state: constants.OPENED },
mountFunction: mount,
});
jest.spyOn(wrapper.vm, 'closeIssuable').mockRejectedValue();
await findCloseReopenButton().trigger('click');
await wrapper.vm.$nextTick;
expect(flash).toHaveBeenCalledWith(
`Something went wrong while closing the ${type}. Please try again later.`,
);
});
});
describe('when closed', () => {
it('makes an API call to close it', () => {
mountComponent({
noteableType,
noteableData: { ...noteableDataMock, state: constants.CLOSED },
mountFunction: mount,
});
jest.spyOn(wrapper.vm, 'reopenIssuable').mockResolvedValue();
findCloseReopenButton().trigger('click');
expect(wrapper.vm.reopenIssuable).toHaveBeenCalled();
});
});
it(`shows an error when the API call fails`, async () => {
mountComponent({
noteableType,
noteableData: { ...noteableDataMock, state: constants.CLOSED },
mountFunction: mount,
});
jest.spyOn(wrapper.vm, 'reopenIssuable').mockRejectedValue();
await findCloseReopenButton().trigger('click');
await wrapper.vm.$nextTick;
expect(flash).toHaveBeenCalledWith(
`Something went wrong while reopening the ${type}. Please try again later.`,
);
});
});
it('when merge request, should update MR count', async () => {
mountComponent({
noteableType: constants.MERGE_REQUEST_NOTEABLE_TYPE,
mountFunction: mount,
});
jest.spyOn(wrapper.vm, 'closeIssuable').mockResolvedValue();
await findCloseReopenButton().trigger('click');
expect(refreshUserMergeRequestCounts).toHaveBeenCalled();
});
});
});

View File

@ -177,7 +177,7 @@ describe('Actions Notes Store', () => {
describe('closeMergeRequest', () => {
it('sets state as closed', done => {
store
.dispatch('closeIssuable', { notesData: { closeIssuePath: '' } })
.then(() => {
expect(store.state.noteableData.state).toEqual('closed');
expect(store.state.isToggleStateButtonLoading).toEqual(false);
@ -190,7 +190,7 @@ describe('Actions Notes Store', () => {
describe('reopenMergeRequest', () => {
it('sets state as reopened', done => {
store
.dispatch('reopenIssuable', { notesData: { reopenIssuePath: '' } })
.then(() => {
expect(store.state.noteableData.state).toEqual('reopened');
expect(store.state.isToggleStateButtonLoading).toEqual(false);

View File

@ -9,10 +9,10 @@ RSpec.describe Gitlab::BackgroundMigration::BackfillDeploymentClustersFromDeploy
it 'backfills deployment_cluster for all deployments in the given range with a non-null cluster_id' do
deployment_clusters = table(:deployment_clusters)
namespace = table(:namespaces).create!(name: 'the-namespace', path: 'the-path')
project = table(:projects).create!(name: 'the-project', namespace_id: namespace.id)
environment = table(:environments).create!(name: 'the-environment', project_id: project.id, slug: 'slug')
cluster = table(:clusters).create!(name: 'the-cluster')
deployment_data = { cluster_id: cluster.id, project_id: project.id, environment_id: environment.id, ref: 'abc', tag: false, sha: 'sha', status: 1 }
expected_deployment_1 = create_deployment(**deployment_data)
@ -21,7 +21,7 @@ RSpec.describe Gitlab::BackgroundMigration::BackfillDeploymentClustersFromDeploy
out_of_range_deployment = create_deployment(**deployment_data, cluster_id: cluster.id) # expected to be out of range
# to test "ON CONFLICT DO NOTHING"
existing_record_for_deployment_2 = deployment_clusters.create!(
deployment_id: expected_deployment_2.id,
cluster_id: expected_deployment_2.cluster_id,
kubernetes_namespace: 'production'
@ -38,7 +38,7 @@ RSpec.describe Gitlab::BackgroundMigration::BackfillDeploymentClustersFromDeploy
def create_deployment(**data)
@iid ||= 0
@iid += 1
table(:deployments).create!(iid: @iid, **data)
end
end
end

View File

@ -81,7 +81,7 @@ RSpec.describe Gitlab::BackgroundMigration::BackfillProjectRepositories do
end
it 'returns the correct disk_path using the route entry' do
project_legacy_storage_5.route.update!(path: 'zoo/new-test')
project = described_class.find(project_legacy_storage_5.id)
expect(project.disk_path).to eq('zoo/new-test')
@ -93,8 +93,8 @@ RSpec.describe Gitlab::BackgroundMigration::BackfillProjectRepositories do
subgroup.update_column(:parent_id, non_existing_record_id)
project = described_class.find(project_orphaned_namespace.id)
project.route.destroy!
subgroup.route.destroy!
expect { project.reload.disk_path }
.to raise_error(Gitlab::BackgroundMigration::BackfillProjectRepositories::OrphanedNamespaceError)

View File

@ -5,16 +5,16 @@ require 'spec_helper'
RSpec.describe Gitlab::BackgroundMigration::BackfillProjectSettings, schema: 20200114113341 do
let(:projects) { table(:projects) }
let(:project_settings) { table(:project_settings) }
let(:namespace) { table(:namespaces).create!(name: 'user', path: 'user') }
let(:project) { projects.create!(namespace_id: namespace.id) }
subject { described_class.new }
describe '#perform' do
it 'creates settings for all projects in range' do
projects.create!(id: 5, namespace_id: namespace.id)
projects.create!(id: 7, namespace_id: namespace.id)
projects.create!(id: 8, namespace_id: namespace.id)
subject.perform(5, 7)

View File

@ -6,21 +6,21 @@ RSpec.describe Gitlab::BackgroundMigration::BackfillPushRulesIdInProjects, :migr
let(:push_rules) { table(:push_rules) }
let(:projects) { table(:projects) }
let(:project_settings) { table(:project_settings) }
let(:namespace) { table(:namespaces).create!(name: 'user', path: 'user') }
subject { described_class.new }
describe '#perform' do
it 'creates new project push_rules for all push rules in the range' do
project_1 = projects.create!(id: 1, namespace_id: namespace.id)
project_2 = projects.create!(id: 2, namespace_id: namespace.id)
project_3 = projects.create!(id: 3, namespace_id: namespace.id)
project_settings_1 = project_settings.create!(project_id: project_1.id)
project_settings_2 = project_settings.create!(project_id: project_2.id)
project_settings_3 = project_settings.create!(project_id: project_3.id)
push_rule_1 = push_rules.create!(id: 5, is_sample: false, project_id: project_1.id)
push_rule_2 = push_rules.create!(id: 6, is_sample: false, project_id: project_2.id)
push_rules.create!(id: 8, is_sample: false, project_id: 3)
subject.perform(5, 7)

View File

@ -13,7 +13,7 @@ RSpec.describe Gitlab::BackgroundMigration::BackfillSnippetRepositories, :migrat
let(:user_name) { 'Test' }
let!(:user) do
users.create!(id: 1,
email: 'user@example.com',
projects_limit: 10,
username: 'test',
@ -25,7 +25,7 @@ RSpec.describe Gitlab::BackgroundMigration::BackfillSnippetRepositories, :migrat
end
let!(:migration_bot) do
users.create!(id: 100,
email: "noreply+gitlab-migration-bot%s@#{Settings.gitlab.host}",
user_type: HasUserType::USER_TYPES[:migration_bot],
name: 'GitLab Migration Bot',
@ -33,9 +33,9 @@ RSpec.describe Gitlab::BackgroundMigration::BackfillSnippetRepositories, :migrat
username: 'bot')
end
let!(:snippet_with_repo) { snippets.create!(id: 1, type: 'PersonalSnippet', author_id: user.id, file_name: file_name, content: content) }
let!(:snippet_with_empty_repo) { snippets.create!(id: 2, type: 'PersonalSnippet', author_id: user.id, file_name: file_name, content: content) }
let!(:snippet_without_repo) { snippets.create!(id: 3, type: 'PersonalSnippet', author_id: user.id, file_name: file_name, content: content) }
let(:file_name) { 'file_name.rb' }
let(:content) { 'content' }
@ -197,8 +197,8 @@ RSpec.describe Gitlab::BackgroundMigration::BackfillSnippetRepositories, :migrat
end
with_them do
let!(:snippet_with_invalid_path) { snippets.create!(id: 4, type: 'PersonalSnippet', author_id: user.id, file_name: invalid_file_name, content: content) }
let!(:snippet_with_valid_path) { snippets.create!(id: 5, type: 'PersonalSnippet', author_id: user.id, file_name: file_name, content: content) }
let(:ids) { [4, 5] }
after do
@ -241,7 +241,7 @@ RSpec.describe Gitlab::BackgroundMigration::BackfillSnippetRepositories, :migrat
context 'when user name is invalid' do
let(:user_name) { '.' }
let!(:snippet) { snippets.create!(id: 4, type: 'PersonalSnippet', author_id: user.id, file_name: file_name, content: content) }
let(:ids) { [4, 4] }
after do
@ -254,7 +254,7 @@ RSpec.describe Gitlab::BackgroundMigration::BackfillSnippetRepositories, :migrat
context 'when both user name and snippet file_name are invalid' do
let(:user_name) { '.' }
let!(:other_user) do
users.create!(id: 2,
email: 'user2@example.com',
projects_limit: 10,
username: 'test2',
@ -265,8 +265,8 @@ RSpec.describe Gitlab::BackgroundMigration::BackfillSnippetRepositories, :migrat
confirmed_at: 1.day.ago)
end
let!(:invalid_snippet) { snippets.create!(id: 4, type: 'PersonalSnippet', author_id: user.id, file_name: '.', content: content) }
let!(:snippet) { snippets.create!(id: 5, type: 'PersonalSnippet', author_id: other_user.id, file_name: file_name, content: content) }
let(:ids) { [4, 5] }
after do

View File

@ -7,7 +7,7 @@ RSpec.describe Gitlab::BackgroundMigration::FixProjectsWithoutProjectFeature, sc
let(:projects) { table(:projects) }
let(:project_features) { table(:project_features) }
let(:namespace) { namespaces.create!(name: 'foo', path: 'foo') }
let!(:project) { projects.create!(namespace_id: namespace.id) }
let(:private_project_without_feature) { projects.create!(namespace_id: namespace.id, visibility_level: 0) }
@ -15,7 +15,7 @@ RSpec.describe Gitlab::BackgroundMigration::FixProjectsWithoutProjectFeature, sc
let!(:projects_without_feature) { [private_project_without_feature, public_project_without_feature] }
before do
project_features.create!({ project_id: project.id, pages_access_level: 20 })
end
subject { described_class.new.perform(Project.minimum(:id), Project.maximum(:id)) }

View File

@ -33,8 +33,8 @@ RSpec.describe Gitlab::BackgroundMigration::FixProjectsWithoutPrometheusService,
let(:clusters) { table(:clusters) }
let(:cluster_groups) { table(:cluster_groups) }
let(:clusters_applications_prometheus) { table(:clusters_applications_prometheus) }
let(:namespace) { namespaces.create!(name: 'user', path: 'user') }
let(:project) { projects.create!(namespace_id: namespace.id) }
let(:application_statuses) do
{
@ -71,7 +71,7 @@ RSpec.describe Gitlab::BackgroundMigration::FixProjectsWithoutPrometheusService,
context 'non prometheus services' do
it 'does not change them' do
other_type = 'SomeOtherService'
services.create!(service_params_for(project.id, active: true, type: other_type))
expect { subject.perform(project.id, project.id + 1) }.not_to change { services.where(type: other_type).order(:id).map { |row| row.attributes } }
end
@ -85,7 +85,7 @@ RSpec.describe Gitlab::BackgroundMigration::FixProjectsWithoutPrometheusService,
context 'template is present for prometheus services' do
it 'creates missing services entries', :aggregate_failures do
services.create!(service_params_for(nil, template: true, properties: { 'from_template' => true }.to_json))
expect { subject.perform(project.id, project.id + 1) }.to change { services.count }.by(1)
updated_rows = services.where(template: false).order(:id).map { |row| row.attributes.slice(*columns).symbolize_keys }
@ -97,7 +97,7 @@ RSpec.describe Gitlab::BackgroundMigration::FixProjectsWithoutPrometheusService,
context 'prometheus integration services exist' do
context 'in active state' do
it 'does not change them' do
services.create!(service_params_for(project.id, active: true))
expect { subject.perform(project.id, project.id + 1) }.not_to change { services.order(:id).map { |row| row.attributes } }
end
@ -105,7 +105,7 @@ RSpec.describe Gitlab::BackgroundMigration::FixProjectsWithoutPrometheusService,
context 'not in active state' do
it 'sets active attribute to true' do
service = services.create!(service_params_for(project.id, active: false))
expect { subject.perform(project.id, project.id + 1) }.to change { service.reload.active? }.from(false).to(true)
end
@ -113,7 +113,7 @@ RSpec.describe Gitlab::BackgroundMigration::FixProjectsWithoutPrometheusService,
context 'prometheus services are configured manually ' do
it 'does not change them' do
properties = '{"api_url":"http://test.dev","manual_configuration":"1"}'
services.create!(service_params_for(project.id, properties: properties, active: false))
expect { subject.perform(project.id, project.id + 1) }.not_to change { services.order(:id).map { |row| row.attributes } }
end
@ -123,11 +123,11 @@ RSpec.describe Gitlab::BackgroundMigration::FixProjectsWithoutPrometheusService,
end
context 'k8s cluster shared on instance level' do
let(:cluster) { clusters.create!(name: 'cluster', cluster_type: cluster_types[:instance_type]) }
context 'with installed prometheus application' do
before do
clusters_applications_prometheus.create!(cluster_id: cluster.id, status: application_statuses[:installed], version: '123')
end
it_behaves_like 'fix services entries state'
@ -135,7 +135,7 @@ RSpec.describe Gitlab::BackgroundMigration::FixProjectsWithoutPrometheusService,
context 'with updated prometheus application' do
before do
clusters_applications_prometheus.create!(cluster_id: cluster.id, status: application_statuses[:updated], version: '123')
end
it_behaves_like 'fix services entries state'
@ -143,7 +143,7 @@ RSpec.describe Gitlab::BackgroundMigration::FixProjectsWithoutPrometheusService,
context 'with errored prometheus application' do
before do
clusters_applications_prometheus.create!(cluster_id: cluster.id, status: application_statuses[:errored], version: '123')
end
it 'does not change services entries' do
@ -153,26 +153,26 @@ RSpec.describe Gitlab::BackgroundMigration::FixProjectsWithoutPrometheusService,
end
context 'k8s cluster shared on group level' do
let(:cluster) { clusters.create!(name: 'cluster', cluster_type: cluster_types[:group_type]) }
before do
cluster_groups.create!(cluster_id: cluster.id, group_id: project.namespace_id)
end
context 'with installed prometheus application' do
before do
clusters_applications_prometheus.create!(cluster_id: cluster.id, status: application_statuses[:installed], version: '123')
end
it_behaves_like 'fix services entries state'
context 'second k8s cluster without application available' do
let(:namespace_2) { namespaces.create!(name: 'namespace2', path: 'namespace2') }
let(:project_2) { projects.create!(namespace_id: namespace_2.id) }
before do
cluster_2 = clusters.create!(name: 'cluster2', cluster_type: cluster_types[:group_type])
cluster_groups.create!(cluster_id: cluster_2.id, group_id: project_2.namespace_id)
end
it 'changed only affected services entries' do
@ -184,7 +184,7 @@ RSpec.describe Gitlab::BackgroundMigration::FixProjectsWithoutPrometheusService,
context 'with updated prometheus application' do
before do
clusters_applications_prometheus.create!(cluster_id: cluster.id, status: application_statuses[:updated], version: '123')
end
it_behaves_like 'fix services entries state'
@ -192,7 +192,7 @@ RSpec.describe Gitlab::BackgroundMigration::FixProjectsWithoutPrometheusService,
context 'with errored prometheus application' do
before do
clusters_applications_prometheus.create!(cluster_id: cluster.id, status: application_statuses[:errored], version: '123')
end
it 'does not change services entries' do
@ -207,7 +207,7 @@ RSpec.describe Gitlab::BackgroundMigration::FixProjectsWithoutPrometheusService,
context 'with inactive service' do
it 'does not change services entries' do
services.create!(service_params_for(project.id))
expect { subject.perform(project.id, project.id + 1) }.not_to change { services.order(:id).map { |row| row.attributes } }
end
@ -216,13 +216,13 @@ RSpec.describe Gitlab::BackgroundMigration::FixProjectsWithoutPrometheusService,
end
context 'k8s cluster for single project' do
let(:cluster) { clusters.create!(name: 'cluster', cluster_type: cluster_types[:project_type]) }
let(:cluster_projects) { table(:cluster_projects) }
context 'with installed prometheus application' do
before do
cluster_projects.create!(cluster_id: cluster.id, project_id: project.id)
clusters_applications_prometheus.create!(cluster_id: cluster.id, status: application_statuses[:installed], version: '123')
end
it 'does not change services entries' do

View File

@ -5,18 +5,18 @@ require 'spec_helper'
RSpec.describe Gitlab::BackgroundMigration::FixUserNamespaceNames, schema: 20190620112608 do
let(:namespaces) { table(:namespaces) }
let(:users) { table(:users) }
let(:user) { users.create!(name: "The user's full name", projects_limit: 10, username: 'not-null', email: '1') }
context 'updating the namespace names' do
it 'updates a user namespace within range' do
user2 = users.create!(name: "Other user's full name", projects_limit: 10, username: 'also-not-null', email: '2')
user_namespace1 = namespaces.create!(
id: 2,
owner_id: user.id,
name: "Should be the user's name",
path: user.username
)
user_namespace2 = namespaces.create!(
id: 3,
owner_id: user2.id,
name: "Should also be the user's name",
@ -30,7 +30,7 @@ RSpec.describe Gitlab::BackgroundMigration::FixUserNamespaceNames, schema: 20190
end
it 'does not update namespaces out of range' do
user_namespace = namespaces.create!(
id: 6,
owner_id: user.id,
name: "Should be the user's name",
@ -42,7 +42,7 @@ RSpec.describe Gitlab::BackgroundMigration::FixUserNamespaceNames, schema: 20190
end
it 'does not update groups owned by the users' do
user_group = namespaces.create(
user_group = namespaces.create!(
id: 2,
owner_id: user.id,
name: 'A group name',
@ -58,7 +58,7 @@ RSpec.describe Gitlab::BackgroundMigration::FixUserNamespaceNames, schema: 20190
context 'namespace route names' do
let(:routes) { table(:routes) }
let(:namespace) do
namespaces.create(
namespaces.create!(
id: 2,
owner_id: user.id,
name: "Will be updated to the user's name",
@ -67,7 +67,7 @@ RSpec.describe Gitlab::BackgroundMigration::FixUserNamespaceNames, schema: 20190
end
it "updates the route name if it didn't match the namespace" do
route = routes.create(path: namespace.path, name: 'Incorrect name', source_type: 'Namespace', source_id: namespace.id)
route = routes.create!(path: namespace.path, name: 'Incorrect name', source_type: 'Namespace', source_id: namespace.id)
described_class.new.perform(1, 5)
@ -75,7 +75,7 @@ RSpec.describe Gitlab::BackgroundMigration::FixUserNamespaceNames, schema: 20190
end
it 'updates the route name if it was nil' do
route = routes.create(path: namespace.path, name: nil, source_type: 'Namespace', source_id: namespace.id)
route = routes.create!(path: namespace.path, name: nil, source_type: 'Namespace', source_id: namespace.id)
described_class.new.perform(1, 5)
@ -83,14 +83,14 @@ RSpec.describe Gitlab::BackgroundMigration::FixUserNamespaceNames, schema: 20190
end
it "doesn't update group routes" do
route = routes.create(path: 'group-path', name: 'Group name', source_type: 'Group', source_id: namespace.id)
route = routes.create!(path: 'group-path', name: 'Group name', source_type: 'Group', source_id: namespace.id)
expect { described_class.new.perform(1, 5) }
.not_to change { route.reload.name }
end
it "doesn't touch routes for namespaces out of range" do
user_namespace = namespaces.create(
user_namespace = namespaces.create!(
id: 6,
owner_id: user.id,
name: "Should be the user's name",


@ -8,10 +8,10 @@ RSpec.describe Gitlab::BackgroundMigration::FixUserProjectRouteNames, schema: 20
let(:routes) { table(:routes) }
let(:projects) { table(:projects) }
let(:user) { users.create(name: "The user's full name", projects_limit: 10, username: 'not-null', email: '1') }
let(:user) { users.create!(name: "The user's full name", projects_limit: 10, username: 'not-null', email: '1') }
let(:namespace) do
namespaces.create(
namespaces.create!(
owner_id: user.id,
name: "Should eventually be the user's name",
path: user.username
@ -19,11 +19,11 @@ RSpec.describe Gitlab::BackgroundMigration::FixUserProjectRouteNames, schema: 20
end
let(:project) do
projects.create(namespace_id: namespace.id, name: 'Project Name')
projects.create!(namespace_id: namespace.id, name: 'Project Name')
end
it "updates the route for a project if it did not match the user's name" do
route = routes.create(
route = routes.create!(
id: 1,
path: "#{user.username}/#{project.path}",
source_id: project.id,
@ -37,7 +37,7 @@ RSpec.describe Gitlab::BackgroundMigration::FixUserProjectRouteNames, schema: 20
end
it 'updates the route for a project if the name was nil' do
route = routes.create(
route = routes.create!(
id: 1,
path: "#{user.username}/#{project.path}",
source_id: project.id,
@ -51,7 +51,7 @@ RSpec.describe Gitlab::BackgroundMigration::FixUserProjectRouteNames, schema: 20
end
it 'does not update routes that are out of the range' do
route = routes.create(
route = routes.create!(
id: 6,
path: "#{user.username}/#{project.path}",
source_id: project.id,
@ -64,14 +64,14 @@ RSpec.describe Gitlab::BackgroundMigration::FixUserProjectRouteNames, schema: 20
end
it 'does not update routes for projects in groups owned by the user' do
group = namespaces.create(
group = namespaces.create!(
owner_id: user.id,
name: 'A group',
path: 'a-path',
type: ''
)
project = projects.create(namespace_id: group.id, name: 'Project Name')
route = routes.create(
project = projects.create!(namespace_id: group.id, name: 'Project Name')
route = routes.create!(
id: 1,
path: "#{group.path}/#{project.path}",
source_id: project.id,
@ -84,7 +84,7 @@ RSpec.describe Gitlab::BackgroundMigration::FixUserProjectRouteNames, schema: 20
end
it 'does not update routes for namespaces' do
route = routes.create(
route = routes.create!(
id: 1,
path: namespace.path,
source_id: namespace.id,


@ -32,7 +32,7 @@ RSpec.describe Gitlab::BackgroundMigration::LegacyUploadMover, :aggregate_failur
if with_file
upload = create(:upload, :with_file, :attachment_upload, params)
model.update(attachment: upload.retrieve_uploader)
model.update!(attachment: upload.retrieve_uploader)
model.attachment.upload
else
create(:upload, :attachment_upload, params)
@ -245,7 +245,7 @@ RSpec.describe Gitlab::BackgroundMigration::LegacyUploadMover, :aggregate_failur
end
let(:connection) { ::Fog::Storage.new(FileUploader.object_store_credentials) }
let(:bucket) { connection.directories.create(key: 'uploads') }
let(:bucket) { connection.directories.create(key: 'uploads') } # rubocop:disable Rails/SaveBang
before do
stub_uploads_object_storage(FileUploader)
@ -257,7 +257,7 @@ RSpec.describe Gitlab::BackgroundMigration::LegacyUploadMover, :aggregate_failur
context 'when the file belongs to a legacy project' do
before do
bucket.files.create(remote_file)
bucket.files.create(remote_file) # rubocop:disable Rails/SaveBang
end
let(:project) { legacy_project }
@ -267,7 +267,7 @@ RSpec.describe Gitlab::BackgroundMigration::LegacyUploadMover, :aggregate_failur
context 'when the file belongs to a hashed project' do
before do
bucket.files.create(remote_file)
bucket.files.create(remote_file) # rubocop:disable Rails/SaveBang
end
let(:project) { hashed_project }

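The two rubocop:disable comments above are the one deliberate exception in this commit: Fog's storage API is plain Ruby rather than ActiveRecord, so directories.create and files.create have no bang variants to switch to, and the cop is silenced inline instead. A rough sketch of that API, with Fog.mock! and placeholder credentials standing in for the spec's real object-store configuration:

require 'fog/aws'

Fog.mock! # exercise the storage API without real AWS credentials

connection = Fog::Storage.new(
  provider: 'AWS',
  aws_access_key_id: 'key',       # placeholder
  aws_secret_access_key: 'secret' # placeholder
)

bucket = connection.directories.create(key: 'uploads') # rubocop:disable Rails/SaveBang
bucket.files.create(key: 'file.txt', body: 'hello')    # rubocop:disable Rails/SaveBang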

@ -24,7 +24,7 @@ RSpec.describe Gitlab::BackgroundMigration::LegacyUploadsMigrator do
if with_file
upload = create(:upload, :with_file, :attachment_upload, params)
model.update(attachment: upload.retrieve_uploader)
model.update!(attachment: upload.retrieve_uploader)
model.attachment.upload
else
create(:upload, :attachment_upload, params)


@ -10,44 +10,44 @@ RSpec.describe Gitlab::BackgroundMigration::LinkLfsObjectsProjects, :migration,
let(:lfs_objects) { table(:lfs_objects) }
let(:lfs_objects_projects) { table(:lfs_objects_projects) }
let(:namespace) { namespaces.create(name: 'GitLab', path: 'gitlab') }
let(:namespace) { namespaces.create!(name: 'GitLab', path: 'gitlab') }
let(:fork_network) { fork_networks.create(root_project_id: source_project.id) }
let(:another_fork_network) { fork_networks.create(root_project_id: another_source_project.id) }
let(:fork_network) { fork_networks.create!(root_project_id: source_project.id) }
let(:another_fork_network) { fork_networks.create!(root_project_id: another_source_project.id) }
let(:source_project) { projects.create(namespace_id: namespace.id) }
let(:another_source_project) { projects.create(namespace_id: namespace.id) }
let(:project) { projects.create(namespace_id: namespace.id) }
let(:another_project) { projects.create(namespace_id: namespace.id) }
let(:partially_linked_project) { projects.create(namespace_id: namespace.id) }
let(:fully_linked_project) { projects.create(namespace_id: namespace.id) }
let(:source_project) { projects.create!(namespace_id: namespace.id) }
let(:another_source_project) { projects.create!(namespace_id: namespace.id) }
let(:project) { projects.create!(namespace_id: namespace.id) }
let(:another_project) { projects.create!(namespace_id: namespace.id) }
let(:partially_linked_project) { projects.create!(namespace_id: namespace.id) }
let(:fully_linked_project) { projects.create!(namespace_id: namespace.id) }
let(:lfs_object) { lfs_objects.create(oid: 'abc123', size: 100) }
let(:another_lfs_object) { lfs_objects.create(oid: 'def456', size: 200) }
let(:lfs_object) { lfs_objects.create!(oid: 'abc123', size: 100) }
let(:another_lfs_object) { lfs_objects.create!(oid: 'def456', size: 200) }
let!(:source_project_lop_1) do
lfs_objects_projects.create(
lfs_objects_projects.create!(
lfs_object_id: lfs_object.id,
project_id: source_project.id
)
end
let!(:source_project_lop_2) do
lfs_objects_projects.create(
lfs_objects_projects.create!(
lfs_object_id: another_lfs_object.id,
project_id: source_project.id
)
end
let!(:another_source_project_lop_1) do
lfs_objects_projects.create(
lfs_objects_projects.create!(
lfs_object_id: lfs_object.id,
project_id: another_source_project.id
)
end
let!(:another_source_project_lop_2) do
lfs_objects_projects.create(
lfs_objects_projects.create!(
lfs_object_id: another_lfs_object.id,
project_id: another_source_project.id
)
@ -57,23 +57,23 @@ RSpec.describe Gitlab::BackgroundMigration::LinkLfsObjectsProjects, :migration,
stub_const("#{described_class}::BATCH_SIZE", 2)
# Create links between projects
fork_network_members.create(fork_network_id: fork_network.id, project_id: source_project.id, forked_from_project_id: nil)
fork_network_members.create!(fork_network_id: fork_network.id, project_id: source_project.id, forked_from_project_id: nil)
[project, partially_linked_project, fully_linked_project].each do |p|
fork_network_members.create(
fork_network_members.create!(
fork_network_id: fork_network.id,
project_id: p.id,
forked_from_project_id: fork_network.root_project_id
)
end
fork_network_members.create(fork_network_id: another_fork_network.id, project_id: another_source_project.id, forked_from_project_id: nil)
fork_network_members.create(fork_network_id: another_fork_network.id, project_id: another_project.id, forked_from_project_id: another_fork_network.root_project_id)
fork_network_members.create!(fork_network_id: another_fork_network.id, project_id: another_source_project.id, forked_from_project_id: nil)
fork_network_members.create!(fork_network_id: another_fork_network.id, project_id: another_project.id, forked_from_project_id: another_fork_network.root_project_id)
# Links LFS objects to some projects
lfs_objects_projects.create(lfs_object_id: lfs_object.id, project_id: fully_linked_project.id)
lfs_objects_projects.create(lfs_object_id: another_lfs_object.id, project_id: fully_linked_project.id)
lfs_objects_projects.create(lfs_object_id: lfs_object.id, project_id: partially_linked_project.id)
lfs_objects_projects.create!(lfs_object_id: lfs_object.id, project_id: fully_linked_project.id)
lfs_objects_projects.create!(lfs_object_id: another_lfs_object.id, project_id: fully_linked_project.id)
lfs_objects_projects.create!(lfs_object_id: lfs_object.id, project_id: partially_linked_project.id)
end
context 'when there are LFS objects to be linked' do
@ -96,8 +96,8 @@ RSpec.describe Gitlab::BackgroundMigration::LinkLfsObjectsProjects, :migration,
before do
# Links LFS objects to all projects
projects.all.each do |p|
lfs_objects_projects.create(lfs_object_id: lfs_object.id, project_id: p.id)
lfs_objects_projects.create(lfs_object_id: another_lfs_object.id, project_id: p.id)
lfs_objects_projects.create!(lfs_object_id: lfs_object.id, project_id: p.id)
lfs_objects_projects.create!(lfs_object_id: another_lfs_object.id, project_id: p.id)
end
end


@ -97,7 +97,7 @@ RSpec.describe Gitlab::BackgroundMigration::MigrateIssueTrackersSensitiveData, s
context 'with Jira service' do
let!(:service) do
services.create(id: 10, type: 'JiraService', title: nil, properties: jira_properties.to_json, category: 'issue_tracker')
services.create!(id: 10, type: 'JiraService', title: nil, properties: jira_properties.to_json, category: 'issue_tracker')
end
it_behaves_like 'handle properties'
@ -119,7 +119,7 @@ RSpec.describe Gitlab::BackgroundMigration::MigrateIssueTrackersSensitiveData, s
context 'with bugzilla service' do
let!(:service) do
services.create(id: 11, type: 'BugzillaService', title: nil, properties: tracker_properties.to_json, category: 'issue_tracker')
services.create!(id: 11, type: 'BugzillaService', title: nil, properties: tracker_properties.to_json, category: 'issue_tracker')
end
it_behaves_like 'handle properties'
@ -140,7 +140,7 @@ RSpec.describe Gitlab::BackgroundMigration::MigrateIssueTrackersSensitiveData, s
context 'with youtrack service' do
let!(:service) do
services.create(id: 12, type: 'YoutrackService', title: nil, properties: tracker_properties_no_url.to_json, category: 'issue_tracker')
services.create!(id: 12, type: 'YoutrackService', title: nil, properties: tracker_properties_no_url.to_json, category: 'issue_tracker')
end
it_behaves_like 'handle properties'
@ -161,7 +161,7 @@ RSpec.describe Gitlab::BackgroundMigration::MigrateIssueTrackersSensitiveData, s
context 'with gitlab service with no properties' do
let!(:service) do
services.create(id: 13, type: 'GitlabIssueTrackerService', title: nil, properties: {}, category: 'issue_tracker')
services.create!(id: 13, type: 'GitlabIssueTrackerService', title: nil, properties: {}, category: 'issue_tracker')
end
it_behaves_like 'handle properties'
@ -173,7 +173,7 @@ RSpec.describe Gitlab::BackgroundMigration::MigrateIssueTrackersSensitiveData, s
context 'with redmine service already with data fields' do
let!(:service) do
services.create(id: 14, type: 'RedmineService', title: nil, properties: tracker_properties_no_url.to_json, category: 'issue_tracker').tap do |service|
services.create!(id: 14, type: 'RedmineService', title: nil, properties: tracker_properties_no_url.to_json, category: 'issue_tracker').tap do |service|
IssueTrackerData.create!(service_id: service.id, project_url: url, new_issue_url: new_issue_url, issues_url: issues_url)
end
end
@ -187,7 +187,7 @@ RSpec.describe Gitlab::BackgroundMigration::MigrateIssueTrackersSensitiveData, s
context 'with custom issue tracker which has data fields record inconsistent with properties field' do
let!(:service) do
services.create(id: 15, type: 'CustomIssueTrackerService', title: 'Existing title', properties: jira_properties.to_json, category: 'issue_tracker').tap do |service|
services.create!(id: 15, type: 'CustomIssueTrackerService', title: 'Existing title', properties: jira_properties.to_json, category: 'issue_tracker').tap do |service|
IssueTrackerData.create!(service_id: service.id, project_url: 'http://other_url', new_issue_url: 'http://other_url/new_issue', issues_url: 'http://other_url/issues')
end
end
@ -209,7 +209,7 @@ RSpec.describe Gitlab::BackgroundMigration::MigrateIssueTrackersSensitiveData, s
context 'with Jira service which has data fields record inconsistent with properties field' do
let!(:service) do
services.create(id: 16, type: 'CustomIssueTrackerService', description: 'Existing description', properties: jira_properties.to_json, category: 'issue_tracker').tap do |service|
services.create!(id: 16, type: 'CustomIssueTrackerService', description: 'Existing description', properties: jira_properties.to_json, category: 'issue_tracker').tap do |service|
JiraTrackerData.create!(service_id: service.id, url: 'http://other_jira_url')
end
end
@ -232,7 +232,7 @@ RSpec.describe Gitlab::BackgroundMigration::MigrateIssueTrackersSensitiveData, s
context 'non issue tracker service' do
let!(:service) do
services.create(id: 17, title: nil, description: nil, type: 'OtherService', properties: tracker_properties.to_json)
services.create!(id: 17, title: nil, description: nil, type: 'OtherService', properties: tracker_properties.to_json)
end
it_behaves_like 'handle properties'
@ -248,7 +248,7 @@ RSpec.describe Gitlab::BackgroundMigration::MigrateIssueTrackersSensitiveData, s
context 'Jira service with empty properties' do
let!(:service) do
services.create(id: 18, type: 'JiraService', properties: '', category: 'issue_tracker')
services.create!(id: 18, type: 'JiraService', properties: '', category: 'issue_tracker')
end
it_behaves_like 'handle properties'
@ -260,7 +260,7 @@ RSpec.describe Gitlab::BackgroundMigration::MigrateIssueTrackersSensitiveData, s
context 'Jira service with nil properties' do
let!(:service) do
services.create(id: 18, type: 'JiraService', properties: nil, category: 'issue_tracker')
services.create!(id: 18, type: 'JiraService', properties: nil, category: 'issue_tracker')
end
it_behaves_like 'handle properties'
@ -272,7 +272,7 @@ RSpec.describe Gitlab::BackgroundMigration::MigrateIssueTrackersSensitiveData, s
context 'Jira service with invalid properties' do
let!(:service) do
services.create(id: 18, type: 'JiraService', properties: 'invalid data', category: 'issue_tracker')
services.create!(id: 18, type: 'JiraService', properties: 'invalid data', category: 'issue_tracker')
end
it_behaves_like 'handle properties'
@ -284,15 +284,15 @@ RSpec.describe Gitlab::BackgroundMigration::MigrateIssueTrackersSensitiveData, s
context 'with Jira service with invalid properties, valid Jira service and valid bugzilla service' do
let!(:jira_service_invalid) do
services.create(id: 19, title: 'invalid - title', description: 'invalid - description', type: 'JiraService', properties: 'invalid data', category: 'issue_tracker')
services.create!(id: 19, title: 'invalid - title', description: 'invalid - description', type: 'JiraService', properties: 'invalid data', category: 'issue_tracker')
end
let!(:jira_service_valid) do
services.create(id: 20, type: 'JiraService', properties: jira_properties.to_json, category: 'issue_tracker')
services.create!(id: 20, type: 'JiraService', properties: jira_properties.to_json, category: 'issue_tracker')
end
let!(:bugzilla_service_valid) do
services.create(id: 11, type: 'BugzillaService', title: nil, properties: tracker_properties.to_json, category: 'issue_tracker')
services.create!(id: 11, type: 'BugzillaService', title: nil, properties: tracker_properties.to_json, category: 'issue_tracker')
end
it 'migrates data for the valid service' do


@ -11,17 +11,17 @@ RSpec.describe Gitlab::BackgroundMigration::MigrateUsersBioToUserDetails, :migra
klass
end
let!(:user_needs_migration) { users.create(name: 'user1', email: 'test1@test.com', projects_limit: 1, bio: 'bio') }
let!(:user_needs_no_migration) { users.create(name: 'user2', email: 'test2@test.com', projects_limit: 1) }
let!(:user_also_needs_no_migration) { users.create(name: 'user3', email: 'test3@test.com', projects_limit: 1, bio: '') }
let!(:user_with_long_bio) { users.create(name: 'user4', email: 'test4@test.com', projects_limit: 1, bio: 'a' * 256) } # 255 is the max
let!(:user_needs_migration) { users.create!(name: 'user1', email: 'test1@test.com', projects_limit: 1, bio: 'bio') }
let!(:user_needs_no_migration) { users.create!(name: 'user2', email: 'test2@test.com', projects_limit: 1) }
let!(:user_also_needs_no_migration) { users.create!(name: 'user3', email: 'test3@test.com', projects_limit: 1, bio: '') }
let!(:user_with_long_bio) { users.create!(name: 'user4', email: 'test4@test.com', projects_limit: 1, bio: 'a' * 256) } # 255 is the max
let!(:user_already_has_details) { users.create(name: 'user5', email: 'test5@test.com', projects_limit: 1, bio: 'my bio') }
let!(:existing_user_details) { user_details.find_or_create_by(user_id: user_already_has_details.id).update(bio: 'my bio') }
let!(:user_already_has_details) { users.create!(name: 'user5', email: 'test5@test.com', projects_limit: 1, bio: 'my bio') }
let!(:existing_user_details) { user_details.find_or_create_by!(user_id: user_already_has_details.id).update!(bio: 'my bio') }
# unlikely scenario since we have triggers
let!(:user_has_different_details) { users.create(name: 'user6', email: 'test6@test.com', projects_limit: 1, bio: 'different') }
let!(:different_existing_user_details) { user_details.find_or_create_by(user_id: user_has_different_details.id).update(bio: 'bio') }
let!(:user_has_different_details) { users.create!(name: 'user6', email: 'test6@test.com', projects_limit: 1, bio: 'different') }
let!(:different_existing_user_details) { user_details.find_or_create_by!(user_id: user_has_different_details.id).update!(bio: 'bio') }
let(:user_ids) do
[

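On the find_or_create_by!(...).update!(...) lines above: both calls now raise on failure instead of returning false, so the fixture cannot be left half-built. The chained one-liner binds update!'s return value (true on success) to the let!, not the record, which is harmless here because the block exists only for its side effect. Unrolled, using the spec's own names:

details = user_details.find_or_create_by!(user_id: user.id) # raises if the row cannot be created
details.update!(bio: 'my bio')                              # raises if the save fails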

@ -63,7 +63,7 @@ RSpec.describe Gitlab::BackgroundMigration::PopulateCanonicalEmails, :migration,
describe 'gracefully handles existing records, some of which may have an already-existing identical canonical_email field' do
let_it_be(:user_one) { create_user(email: "example.user@gmail.com", id: 1) }
let_it_be(:user_two) { create_user(email: "exampleuser@gmail.com", id: 2) }
let_it_be(:user_email_one) { user_canonical_emails.create(canonical_email: "exampleuser@gmail.com", user_id: user_one.id) }
let_it_be(:user_email_one) { user_canonical_emails.create!(canonical_email: "exampleuser@gmail.com", user_id: user_one.id) }
subject { migration.perform(1, 2) }
@ -79,7 +79,7 @@ RSpec.describe Gitlab::BackgroundMigration::PopulateCanonicalEmails, :migration,
projects_limit: 0
}
users.create(default_attributes.merge!(attributes))
users.create!(default_attributes.merge!(attributes))
end
def canonical_emails(user_id: nil)


@ -11,8 +11,8 @@ RSpec.describe Gitlab::BackgroundMigration::PopulateMergeRequestAssigneesTable,
let(:user_2) { users.create!(email: 'test2@example.com', projects_limit: 100, username: 'test') }
let(:user_3) { users.create!(email: 'test3@example.com', projects_limit: 100, username: 'test') }
let(:namespace) { namespaces.create(name: 'gitlab', path: 'gitlab-org') }
let(:project) { projects.create(namespace_id: namespace.id, name: 'foo') }
let(:namespace) { namespaces.create!(name: 'gitlab', path: 'gitlab-org') }
let(:project) { projects.create!(namespace_id: namespace.id, name: 'foo') }
let(:merge_requests) { table(:merge_requests) }
let(:merge_request_assignees) { table(:merge_request_assignees) }
@ -24,7 +24,7 @@ RSpec.describe Gitlab::BackgroundMigration::PopulateMergeRequestAssigneesTable,
source_branch: 'mr name',
title: "mr name#{id}")
merge_requests.create(params)
merge_requests.create!(params)
end
before do


@ -18,7 +18,7 @@ RSpec.describe Gitlab::BackgroundMigration::PopulateUserHighestRolesTable, schem
projects_limit: 0
}.merge(params)
users.create(user_params)
users.create!(user_params)
end
def create_member(id, access_level, params = {})
@ -30,7 +30,7 @@ RSpec.describe Gitlab::BackgroundMigration::PopulateUserHighestRolesTable, schem
notification_level: 0
}.merge(params)
members.create(params)
members.create!(params)
end
before do
@ -47,7 +47,7 @@ RSpec.describe Gitlab::BackgroundMigration::PopulateUserHighestRolesTable, schem
create_member(7, 30)
create_member(8, 20, requested_at: Time.current)
user_highest_roles.create(user_id: 1, highest_access_level: 50)
user_highest_roles.create!(user_id: 1, highest_access_level: 50)
end
describe '#perform' do


@ -91,7 +91,7 @@ RSpec.describe Gitlab::BackgroundMigration::RecalculateProjectAuthorizations, sc
projects_table.create!(id: 1, name: 'project', path: 'project', visibility_level: 0,
namespace_id: shared_group.id)
group_group_links.create(shared_group_id: shared_group.id, shared_with_group_id: group.id,
group_group_links.create!(shared_group_id: shared_group.id, shared_with_group_id: group.id,
group_access: 20)
end
@ -111,7 +111,7 @@ RSpec.describe Gitlab::BackgroundMigration::RecalculateProjectAuthorizations, sc
shared_project = projects_table.create!(id: 1, name: 'shared project', path: 'shared-project',
visibility_level: 0, namespace_id: another_group.id)
project_group_links.create(project_id: shared_project.id, group_id: group.id, group_access: 20)
project_group_links.create!(project_id: shared_project.id, group_id: group.id, group_access: 20)
end
it 'creates correct authorization' do
@ -174,7 +174,7 @@ RSpec.describe Gitlab::BackgroundMigration::RecalculateProjectAuthorizations, sc
projects_table.create!(id: 1, name: 'project', path: 'project', visibility_level: 0,
namespace_id: shared_group.id)
group_group_links.create(shared_group_id: shared_group.id, shared_with_group_id: group.id,
group_group_links.create!(shared_group_id: shared_group.id, shared_with_group_id: group.id,
group_access: 20)
end
@ -192,7 +192,7 @@ RSpec.describe Gitlab::BackgroundMigration::RecalculateProjectAuthorizations, sc
shared_project = projects_table.create!(id: 1, name: 'shared project', path: 'shared-project',
visibility_level: 0, namespace_id: another_group.id)
project_group_links.create(project_id: shared_project.id, group_id: group.id, group_access: 20)
project_group_links.create!(project_id: shared_project.id, group_id: group.id, group_access: 20)
end
it 'does not create authorization' do


@ -5,8 +5,8 @@ require 'spec_helper'
RSpec.describe Gitlab::BackgroundMigration::ResetMergeStatus do
let(:namespaces) { table(:namespaces) }
let(:projects) { table(:projects) }
let(:namespace) { namespaces.create(name: 'gitlab', path: 'gitlab-org') }
let(:project) { projects.create(namespace_id: namespace.id, name: 'foo') }
let(:namespace) { namespaces.create!(name: 'gitlab', path: 'gitlab-org') }
let(:project) { projects.create!(namespace_id: namespace.id, name: 'foo') }
let(:merge_requests) { table(:merge_requests) }
def create_merge_request(id, extra_params = {})


@ -15,35 +15,35 @@ RSpec.describe MigrateIssueTrackersData do
end
let!(:jira_service) do
services.create(type: 'JiraService', properties: properties, category: 'issue_tracker')
services.create!(type: 'JiraService', properties: properties, category: 'issue_tracker')
end
let!(:jira_service_nil) do
services.create(type: 'JiraService', properties: nil, category: 'issue_tracker')
services.create!(type: 'JiraService', properties: nil, category: 'issue_tracker')
end
let!(:bugzilla_service) do
services.create(type: 'BugzillaService', properties: properties, category: 'issue_tracker')
services.create!(type: 'BugzillaService', properties: properties, category: 'issue_tracker')
end
let!(:youtrack_service) do
services.create(type: 'YoutrackService', properties: properties, category: 'issue_tracker')
services.create!(type: 'YoutrackService', properties: properties, category: 'issue_tracker')
end
let!(:youtrack_service_empty) do
services.create(type: 'YoutrackService', properties: '', category: 'issue_tracker')
services.create!(type: 'YoutrackService', properties: '', category: 'issue_tracker')
end
let!(:gitlab_service) do
services.create(type: 'GitlabIssueTrackerService', properties: properties, category: 'issue_tracker')
services.create!(type: 'GitlabIssueTrackerService', properties: properties, category: 'issue_tracker')
end
let!(:gitlab_service_empty) do
services.create(type: 'GitlabIssueTrackerService', properties: {}, category: 'issue_tracker')
services.create!(type: 'GitlabIssueTrackerService', properties: {}, category: 'issue_tracker')
end
let!(:other_service) do
services.create(type: 'OtherService', properties: properties, category: 'other_category')
services.create!(type: 'OtherService', properties: properties, category: 'other_category')
end
before do


@ -5,16 +5,16 @@ require Rails.root.join('db', 'post_migrate', '20200122123016_backfill_project_s
RSpec.describe BackfillProjectSettings, :sidekiq, schema: 20200114113341 do
let(:projects) { table(:projects) }
let(:namespace) { table(:namespaces).create(name: 'user', path: 'user') }
let(:project) { projects.create(namespace_id: namespace.id) }
let(:namespace) { table(:namespaces).create!(name: 'user', path: 'user') }
let(:project) { projects.create!(namespace_id: namespace.id) }
describe '#up' do
before do
stub_const("#{described_class}::BATCH_SIZE", 2)
projects.create(id: 1, namespace_id: namespace.id)
projects.create(id: 2, namespace_id: namespace.id)
projects.create(id: 3, namespace_id: namespace.id)
projects.create!(id: 1, namespace_id: namespace.id)
projects.create!(id: 2, namespace_id: namespace.id)
projects.create!(id: 3, namespace_id: namespace.id)
end
it 'schedules BackfillProjectSettings background jobs' do

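The setup above stubs BATCH_SIZE down to 2 and creates projects 1 through 3, forcing the migration to schedule two batches (ids 1..2 and 3..3). A hypothetical sketch of the assertion this drives, using GitLab's migration-spec helpers; the 2-minute interval and matcher arguments are assumptions, not taken from this diff:

it 'schedules BackfillProjectSettings background jobs' do
  Sidekiq::Testing.fake! do
    freeze_time do
      migrate!

      expect(BackgroundMigrationWorker.jobs.size).to eq(2)
      expect(described_class::MIGRATION).to be_scheduled_delayed_migration(2.minutes, 1, 2)
      expect(described_class::MIGRATION).to be_scheduled_delayed_migration(4.minutes, 3, 3)
    end
  end
end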

@ -7,7 +7,7 @@ RSpec.describe RemoveInvalidJiraData do
let(:jira_tracker_data) { table(:jira_tracker_data) }
let(:services) { table(:services) }
let(:service) { services.create(id: 1) }
let(:service) { services.create!(id: 1) }
let(:data) do
{
service_id: service.id,
@ -22,49 +22,49 @@ RSpec.describe RemoveInvalidJiraData do
}
end
let!(:valid_data) { jira_tracker_data.create(data) }
let!(:empty_data) { jira_tracker_data.create(service_id: service.id) }
let!(:valid_data) { jira_tracker_data.create!(data) }
let!(:empty_data) { jira_tracker_data.create!(service_id: service.id) }
let!(:invalid_api_url) do
data[:encrypted_api_url_iv] = nil
jira_tracker_data.create(data)
jira_tracker_data.create!(data)
end
let!(:missing_api_url) do
data[:encrypted_api_url] = ''
data[:encrypted_api_url_iv] = nil
jira_tracker_data.create(data)
jira_tracker_data.create!(data)
end
let!(:invalid_url) do
data[:encrypted_url_iv] = nil
jira_tracker_data.create(data)
jira_tracker_data.create!(data)
end
let!(:missing_url) do
data[:encrypted_url] = ''
jira_tracker_data.create(data)
jira_tracker_data.create!(data)
end
let!(:invalid_username) do
data[:encrypted_username_iv] = nil
jira_tracker_data.create(data)
jira_tracker_data.create!(data)
end
let!(:missing_username) do
data[:encrypted_username] = nil
data[:encrypted_username_iv] = nil
jira_tracker_data.create(data)
jira_tracker_data.create!(data)
end
let!(:invalid_password) do
data[:encrypted_password_iv] = nil
jira_tracker_data.create(data)
jira_tracker_data.create!(data)
end
let!(:missing_password) do
data[:encrypted_password] = nil
data[:encrypted_username_iv] = nil
jira_tracker_data.create(data)
jira_tracker_data.create!(data)
end
it 'removes the invalid data' do


@ -7,7 +7,7 @@ RSpec.describe RemoveInvalidIssueTrackerData do
let(:issue_tracker_data) { table(:issue_tracker_data) }
let(:services) { table(:services) }
let(:service) { services.create(id: 1) }
let(:service) { services.create!(id: 1) }
let(:data) do
{
service_id: service.id,
@ -20,38 +20,38 @@ RSpec.describe RemoveInvalidIssueTrackerData do
}
end
let!(:valid_data) { issue_tracker_data.create(data) }
let!(:empty_data) { issue_tracker_data.create(service_id: service.id) }
let!(:valid_data) { issue_tracker_data.create!(data) }
let!(:empty_data) { issue_tracker_data.create!(service_id: service.id) }
let!(:invalid_issues_url) do
data[:encrypted_issues_url_iv] = nil
issue_tracker_data.create(data)
issue_tracker_data.create!(data)
end
let!(:missing_issues_url) do
data[:encrypted_issues_url] = ''
data[:encrypted_issues_url_iv] = nil
issue_tracker_data.create(data)
issue_tracker_data.create!(data)
end
let!(:invalid_new_isue_url) do
data[:encrypted_new_issue_url_iv] = nil
issue_tracker_data.create(data)
issue_tracker_data.create!(data)
end
let!(:missing_new_issue_url) do
data[:encrypted_new_issue_url] = ''
issue_tracker_data.create(data)
issue_tracker_data.create!(data)
end
let!(:invalid_project_url) do
data[:encrypted_project_url_iv] = nil
issue_tracker_data.create(data)
issue_tracker_data.create!(data)
end
let!(:missing_project_url) do
data[:encrypted_project_url] = nil
data[:encrypted_project_url_iv] = nil
issue_tracker_data.create(data)
issue_tracker_data.create!(data)
end
it 'removes the invalid data' do


@ -15,35 +15,35 @@ RSpec.describe RescheduleMigrateIssueTrackersData do
end
let!(:jira_service) do
services.create(id: 10, type: 'JiraService', properties: properties, category: 'issue_tracker')
services.create!(id: 10, type: 'JiraService', properties: properties, category: 'issue_tracker')
end
let!(:jira_service_nil) do
services.create(id: 11, type: 'JiraService', properties: nil, category: 'issue_tracker')
services.create!(id: 11, type: 'JiraService', properties: nil, category: 'issue_tracker')
end
let!(:bugzilla_service) do
services.create(id: 12, type: 'BugzillaService', properties: properties, category: 'issue_tracker')
services.create!(id: 12, type: 'BugzillaService', properties: properties, category: 'issue_tracker')
end
let!(:youtrack_service) do
services.create(id: 13, type: 'YoutrackService', properties: properties, category: 'issue_tracker')
services.create!(id: 13, type: 'YoutrackService', properties: properties, category: 'issue_tracker')
end
let!(:youtrack_service_empty) do
services.create(id: 14, type: 'YoutrackService', properties: '', category: 'issue_tracker')
services.create!(id: 14, type: 'YoutrackService', properties: '', category: 'issue_tracker')
end
let!(:gitlab_service) do
services.create(id: 15, type: 'GitlabIssueTrackerService', properties: properties, category: 'issue_tracker')
services.create!(id: 15, type: 'GitlabIssueTrackerService', properties: properties, category: 'issue_tracker')
end
let!(:gitlab_service_empty) do
services.create(id: 16, type: 'GitlabIssueTrackerService', properties: {}, category: 'issue_tracker')
services.create!(id: 16, type: 'GitlabIssueTrackerService', properties: {}, category: 'issue_tracker')
end
let!(:other_service) do
services.create(id: 17, type: 'OtherService', properties: properties, category: 'other_category')
services.create!(id: 17, type: 'OtherService', properties: properties, category: 'other_category')
end
before do
@ -69,7 +69,7 @@ RSpec.describe RescheduleMigrateIssueTrackersData do
let(:jira_tracker_data) { table(:jira_tracker_data) }
let!(:valid_issue_tracker_data) do
issue_tracker_data.create(
issue_tracker_data.create!(
service_id: bugzilla_service.id,
encrypted_issues_url: 'http://url.com',
encrypted_issues_url_iv: 'somevalue'
@ -77,7 +77,7 @@ RSpec.describe RescheduleMigrateIssueTrackersData do
end
let!(:invalid_issue_tracker_data) do
issue_tracker_data.create(
issue_tracker_data.create!(
service_id: bugzilla_service.id,
encrypted_issues_url: 'http:url.com',
encrypted_issues_url_iv: nil
@ -85,7 +85,7 @@ RSpec.describe RescheduleMigrateIssueTrackersData do
end
let!(:valid_jira_tracker_data) do
jira_tracker_data.create(
jira_tracker_data.create!(
service_id: bugzilla_service.id,
encrypted_url: 'http://url.com',
encrypted_url_iv: 'somevalue'
@ -93,7 +93,7 @@ RSpec.describe RescheduleMigrateIssueTrackersData do
end
let!(:invalid_jira_tracker_data) do
jira_tracker_data.create(
jira_tracker_data.create!(
service_id: bugzilla_service.id,
encrypted_url: 'http://url.com',
encrypted_url_iv: nil


@ -9,9 +9,9 @@ RSpec.describe RemoveOrphanedChatNames, schema: 20200313202430 do
let(:services) { table(:services) }
let(:chat_names) { table(:chat_names) }
let(:namespace) { namespaces.create(name: 'foo', path: 'foo') }
let(:project) { projects.create(namespace_id: namespace.id) }
let(:service) { services.create(project_id: project.id, type: 'chat') }
let(:namespace) { namespaces.create!(name: 'foo', path: 'foo') }
let(:project) { projects.create!(namespace_id: namespace.id) }
let(:service) { services.create!(project_id: project.id, type: 'chat') }
let(:chat_name) { chat_names.create!(service_id: service.id, team_id: 'TEAM', user_id: 12345, chat_id: 12345) }
let(:orphaned_chat_name) { chat_names.create!(team_id: 'TEAM', service_id: 0, user_id: 12345, chat_id: 12345) }


@ -8,10 +8,10 @@ RSpec.describe BackfillDeploymentClustersFromDeployments, :migration, :sidekiq,
it 'schedules BackfillDeploymentClustersFromDeployments background jobs' do
stub_const("#{described_class}::BATCH_SIZE", 2)
namespace = table(:namespaces).create(name: 'the-namespace', path: 'the-path')
project = table(:projects).create(name: 'the-project', namespace_id: namespace.id)
environment = table(:environments).create(name: 'the-environment', project_id: project.id, slug: 'slug')
cluster = table(:clusters).create(name: 'the-cluster')
namespace = table(:namespaces).create!(name: 'the-namespace', path: 'the-path')
project = table(:projects).create!(name: 'the-project', namespace_id: namespace.id)
environment = table(:environments).create!(name: 'the-environment', project_id: project.id, slug: 'slug')
cluster = table(:clusters).create!(name: 'the-cluster')
deployment_data = { cluster_id: cluster.id, project_id: project.id, environment_id: environment.id, ref: 'abc', tag: false, sha: 'sha', status: 1 }
@ -44,7 +44,7 @@ RSpec.describe BackfillDeploymentClustersFromDeployments, :migration, :sidekiq,
def create_deployment(**data)
@iid ||= 0
@iid += 1
table(:deployments).create(iid: @iid, **data)
table(:deployments).create!(iid: @iid, **data)
end
end
end


@ -10,19 +10,19 @@ RSpec.describe DedupMrMetrics, :migration, schema: 20200526013844 do
let(:metrics) { table(:merge_request_metrics) }
let(:merge_request_params) { { source_branch: 'x', target_branch: 'y', target_project_id: project.id } }
let!(:namespace) { namespaces.create(name: 'foo', path: 'foo') }
let!(:namespace) { namespaces.create!(name: 'foo', path: 'foo') }
let!(:project) { projects.create!(namespace_id: namespace.id) }
let!(:merge_request_1) { merge_requests.create!(merge_request_params) }
let!(:merge_request_2) { merge_requests.create!(merge_request_params) }
let!(:merge_request_3) { merge_requests.create!(merge_request_params) }
let!(:duplicated_metrics_1) { metrics.create(merge_request_id: merge_request_1.id, latest_build_started_at: 1.day.ago, first_deployed_to_production_at: 5.days.ago, updated_at: 2.months.ago) }
let!(:duplicated_metrics_2) { metrics.create(merge_request_id: merge_request_1.id, latest_build_started_at: Time.now, merged_at: Time.now, updated_at: 1.month.ago) }
let!(:duplicated_metrics_1) { metrics.create!(merge_request_id: merge_request_1.id, latest_build_started_at: 1.day.ago, first_deployed_to_production_at: 5.days.ago, updated_at: 2.months.ago) }
let!(:duplicated_metrics_2) { metrics.create!(merge_request_id: merge_request_1.id, latest_build_started_at: Time.now, merged_at: Time.now, updated_at: 1.month.ago) }
let!(:duplicated_metrics_3) { metrics.create(merge_request_id: merge_request_3.id, diff_size: 30, commits_count: 20, updated_at: 2.months.ago) }
let!(:duplicated_metrics_4) { metrics.create(merge_request_id: merge_request_3.id, added_lines: 5, commits_count: nil, updated_at: 1.month.ago) }
let!(:duplicated_metrics_3) { metrics.create!(merge_request_id: merge_request_3.id, diff_size: 30, commits_count: 20, updated_at: 2.months.ago) }
let!(:duplicated_metrics_4) { metrics.create!(merge_request_id: merge_request_3.id, added_lines: 5, commits_count: nil, updated_at: 1.month.ago) }
let!(:non_duplicated_metrics) { metrics.create(merge_request_id: merge_request_2.id, latest_build_started_at: 2.days.ago) }
let!(:non_duplicated_metrics) { metrics.create!(merge_request_id: merge_request_2.id, latest_build_started_at: 2.days.ago) }
it 'deduplicates merge_request_metrics table' do
expect { migrate! }.to change { metrics.count }.from(5).to(3)

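For the from(5).to(3) expectation above, the arithmetic is all in the fixtures:

# merge_request_1 -> duplicated_metrics_1 and _2 (2 rows, merged into 1)
# merge_request_2 -> non_duplicated_metrics      (1 row, untouched)
# merge_request_3 -> duplicated_metrics_3 and _4 (2 rows, merged into 1)
# Deduplicating to one metrics row per merge request: 5 rows become 3.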

@ -6,7 +6,7 @@ require Rails.root.join('db', 'migrate', '20200122161638_add_deploy_token_type_t
RSpec.describe AddDeployTokenTypeToDeployTokens do
let(:deploy_tokens) { table(:deploy_tokens) }
let(:deploy_token) do
deploy_tokens.create(name: 'token_test',
deploy_tokens.create!(name: 'token_test',
username: 'gitlab+deploy-token-1',
token_encrypted: 'dr8rPXwM+Mbs2p3Bg1+gpnXqrnH/wu6vaHdcc7A3isPR67WB',
read_repository: true,


@ -38,7 +38,7 @@ RSpec.describe AddIncidentSettingsToAllExistingProjects, :migration do
RSpec.shared_context 'with incident settings' do
let(:existing_create_issue) { false }
before do
project_incident_management_settings.create(
project_incident_management_settings.create!(
project_id: project.id,
create_issue: existing_create_issue
)


@ -12,15 +12,15 @@ RSpec.describe AddUniqueConstraintToApprovalsUserIdAndMergeRequestId do
describe '#up' do
before do
namespaces.create(id: 1, name: 'ns', path: 'ns')
projects.create(id: 1, namespace_id: 1)
merge_requests.create(id: 1, target_branch: 'master', source_branch: 'feature-1', target_project_id: 1)
merge_requests.create(id: 2, target_branch: 'master', source_branch: 'feature-2', target_project_id: 1)
namespaces.create!(id: 1, name: 'ns', path: 'ns')
projects.create!(id: 1, namespace_id: 1)
merge_requests.create!(id: 1, target_branch: 'master', source_branch: 'feature-1', target_project_id: 1)
merge_requests.create!(id: 2, target_branch: 'master', source_branch: 'feature-2', target_project_id: 1)
end
it 'deletes duplicate records and keeps the first one' do
first_approval = approvals.create(id: 1, merge_request_id: 1, user_id: 1)
approvals.create(id: 2, merge_request_id: 1, user_id: 1)
first_approval = approvals.create!(id: 1, merge_request_id: 1, user_id: 1)
approvals.create!(id: 2, merge_request_id: 1, user_id: 1)
migration.up


@ -13,7 +13,7 @@ RSpec.describe BackfillAndAddNotNullConstraintToReleasedAtColumnOnReleasesTable
it 'fills released_at with the value of created_at' do
created_at_a = Time.zone.parse('2019-02-10T08:00:00Z')
created_at_b = Time.zone.parse('2019-03-10T18:00:00Z')
namespace = namespaces.create(name: 'foo', path: 'foo')
namespace = namespaces.create!(name: 'foo', path: 'foo')
project = projects.create!(namespace_id: namespace.id)
release_a = releases.create!(project_id: project.id, created_at: created_at_a)
release_b = releases.create!(project_id: project.id, created_at: created_at_b)


@ -6,7 +6,7 @@ require Rails.root.join('db', 'post_migrate', '20200608072931_backfill_imported_
RSpec.describe BackfillImportedSnippetRepositories do
let(:users) { table(:users) }
let(:snippets) { table(:snippets) }
let(:user) { users.create(id: 1, email: 'user@example.com', projects_limit: 10, username: 'test', name: 'Test', state: 'active') }
let(:user) { users.create!(id: 1, email: 'user@example.com', projects_limit: 10, username: 'test', name: 'Test', state: 'active') }
def create_snippet(id)
params = {


@ -13,7 +13,7 @@ RSpec.describe BackfillReleasesTableUpdatedAtAndAddNotNullConstraintsToTimestamp
it 'fills null updated_at rows with the value of created_at' do
created_at_a = Time.zone.parse('2014-03-11T04:30:00Z')
created_at_b = Time.zone.parse('2019-09-10T12:00:00Z')
namespace = namespaces.create(name: 'foo', path: 'foo')
namespace = namespaces.create!(name: 'foo', path: 'foo')
project = projects.create!(namespace_id: namespace.id)
release_a = releases.create!(project_id: project.id,
released_at: Time.zone.parse('2014-12-10T06:00:00Z'),
@ -36,7 +36,7 @@ RSpec.describe BackfillReleasesTableUpdatedAtAndAddNotNullConstraintsToTimestamp
created_at_a = Time.zone.parse('2014-03-11T04:30:00Z')
updated_at_a = Time.zone.parse('2015-01-16T10:00:00Z')
created_at_b = Time.zone.parse('2019-09-10T12:00:00Z')
namespace = namespaces.create(name: 'foo', path: 'foo')
namespace = namespaces.create!(name: 'foo', path: 'foo')
project = projects.create!(namespace_id: namespace.id)
release_a = releases.create!(project_id: project.id,
released_at: Time.zone.parse('2014-12-10T06:00:00Z'),


@ -6,7 +6,7 @@ require Rails.root.join('db', 'post_migrate', '20200420094444_backfill_snippet_r
RSpec.describe BackfillSnippetRepositories do
let(:users) { table(:users) }
let(:snippets) { table(:snippets) }
let(:user) { users.create(id: 1, email: 'user@example.com', projects_limit: 10, username: 'test', name: 'Test', state: 'active') }
let(:user) { users.create!(id: 1, email: 'user@example.com', projects_limit: 10, username: 'test', name: 'Test', state: 'active') }
def create_snippet(id)
params = {
