Add latest changes from gitlab-org/gitlab@master

This commit is contained in:
GitLab Bot 2022-12-22 03:09:39 +00:00
parent d61d19da54
commit f8edcff7e9
84 changed files with 587 additions and 2521 deletions

View File

@ -1,56 +1,57 @@
/* eslint-disable no-param-reassign, consistent-return */
import { parseBoolean } from '~/lib/utils/common_utils';
import AccessorUtilities from './lib/utils/accessor';
export default class Autosave {
constructor(field, key, fallbackKey, lockVersion) {
this.field = field;
this.type = this.field.prop('type');
this.type = this.field.getAttribute('type');
this.isLocalStorageAvailable = AccessorUtilities.canUseLocalStorage();
if (key.join != null) {
key = key.join('/');
}
this.key = `autosave/${key}`;
this.key = Array.isArray(key) ? `autosave/${key.join('/')}` : `autosave/${key}`;
this.fallbackKey = fallbackKey;
this.lockVersionKey = `${this.key}/lockVersion`;
this.lockVersion = lockVersion;
this.field.data('autosave', this);
this.restore();
this.field.on('input', () => this.save());
this.saveAction = this.save.bind(this);
// used by app/assets/javascripts/deprecated_notes.js
this.field.$autosave = this;
this.field.addEventListener('input', this.saveAction);
}
restore() {
if (!this.isLocalStorageAvailable) return;
if (!this.field.length) return;
const text = window.localStorage.getItem(this.key);
const fallbackText = window.localStorage.getItem(this.fallbackKey);
const newValue = text || fallbackText;
if (newValue == null) return;
let originalValue = this.field.value;
if (this.type === 'checkbox') {
this.field.prop('checked', text || fallbackText);
} else if (text) {
this.field.val(text);
} else if (fallbackText) {
this.field.val(fallbackText);
originalValue = this.field.checked;
this.field.checked = parseBoolean(newValue);
} else {
this.field.value = newValue;
}
this.field.trigger('input');
// v-model does not update with jQuery trigger
// https://github.com/vuejs/vue/issues/2804#issuecomment-216968137
const event = new Event('change', { bubbles: true, cancelable: false });
const field = this.field.get(0);
if (field) {
field.dispatchEvent(event);
}
if (originalValue === newValue) return;
this.triggerInputEvents();
}
triggerInputEvents() {
// trigger events so @input, @change and v-model trigger in Vue components
const inputEvent = new Event('input', { bubbles: true, cancelable: false });
const changeEvent = new Event('change', { bubbles: true, cancelable: false });
this.field.dispatchEvent(inputEvent);
this.field.dispatchEvent(changeEvent);
}
getSavedLockVersion() {
if (!this.isLocalStorageAvailable) return;
if (!this.isLocalStorageAvailable) return undefined;
return window.localStorage.getItem(this.lockVersionKey);
}
save() {
if (!this.field.length) return;
const value = this.type === 'checkbox' ? this.field.is(':checked') : this.field.val();
const value = this.type === 'checkbox' ? this.field.checked : this.field.value;
if (this.isLocalStorageAvailable && value) {
if (this.fallbackKey) {
@ -66,7 +67,7 @@ export default class Autosave {
}
reset() {
if (!this.isLocalStorageAvailable) return;
if (!this.isLocalStorageAvailable) return undefined;
window.localStorage.removeItem(this.lockVersionKey);
window.localStorage.removeItem(this.fallbackKey);
@ -74,7 +75,7 @@ export default class Autosave {
}
dispose() {
// eslint-disable-next-line @gitlab/no-global-event-off
this.field.off('input');
delete this.field.$autosave;
this.field.removeEventListener('input', this.saveAction);
}
}

View File

@ -1,5 +1,4 @@
<script>
import $ from 'jquery';
import {
GlDropdown,
GlButton,
@ -52,7 +51,7 @@ export default {
},
mounted() {
this.autosave = new Autosave(
$(this.$refs.textarea),
this.$refs.textarea,
`submit_review_dropdown/${this.getNoteableData.id}`,
);
this.noteData.noteable_type = this.noteableType;

View File

@ -575,7 +575,9 @@ export default class Notes {
// reset text and preview
form.find('.js-md-write-button').click();
form.find('.js-note-text').val('').trigger('input');
form.find('.js-note-text').data('autosave').reset();
form.find('.js-note-text').each(function reset() {
this.$autosave.reset();
});
const event = document.createEvent('Event');
event.initEvent('autosize:update', true, false);
@ -642,7 +644,9 @@ export default class Notes {
// DiffNote
form.find('#note_position').val(),
];
return new Autosave(textarea, key);
const textareaEl = textarea.get(0);
// eslint-disable-next-line no-new
if (textareaEl) new Autosave(textareaEl, key);
}
/**
@ -1086,7 +1090,9 @@ export default class Notes {
const row = form.closest('tr');
const glForm = form.data('glForm');
glForm.destroy();
form.find('.js-note-text').data('autosave').reset();
form.find('.js-note-text').each(function reset() {
this.$autosave.reset();
});
// show the reply button (will only work for replies)
form.prev('.discussion-reply-holder').show();
if (row.is('.js-temp-notes-holder')) {

View File

@ -1,6 +1,5 @@
<script>
import { GlButton } from '@gitlab/ui';
import $ from 'jquery';
import { helpPagePath } from '~/helpers/help_page_helper';
import { s__ } from '~/locale';
import Autosave from '~/autosave';
@ -118,7 +117,7 @@ export default {
},
initAutosaveComment() {
if (this.isLoggedIn) {
this.autosaveDiscussion = new Autosave($(this.$refs.textarea), [
this.autosaveDiscussion = new Autosave(this.$refs.textarea, [
s__('DesignManagement|Discussion'),
getIdFromGraphQLId(this.noteableId),
this.shortDiscussionId,

View File

@ -47,13 +47,12 @@ function getFallbackKey() {
}
export default class IssuableForm {
static addAutosave(map, id, $input, searchTerm, fallbackKey) {
if ($input.length) {
map.set(
id,
new Autosave($input, [document.location.pathname, searchTerm, id], `${fallbackKey}=${id}`),
);
}
static addAutosave(map, id, element, searchTerm, fallbackKey) {
if (!element) return;
map.set(
id,
new Autosave(element, [document.location.pathname, searchTerm, id], `${fallbackKey}=${id}`),
);
}
constructor(form) {
@ -122,28 +121,28 @@ export default class IssuableForm {
IssuableForm.addAutosave(
autosaveMap,
'title',
this.form.find('input[name*="[title]"]'),
this.form.find('input[name*="[title]"]').get(0),
this.searchTerm,
this.fallbackKey,
);
IssuableForm.addAutosave(
autosaveMap,
'description',
this.form.find('textarea[name*="[description]"]'),
this.form.find('textarea[name*="[description]"]').get(0),
this.searchTerm,
this.fallbackKey,
);
IssuableForm.addAutosave(
autosaveMap,
'confidential',
this.form.find('input:checkbox[name*="[confidential]"]'),
this.form.find('input:checkbox[name*="[confidential]"]').get(0),
this.searchTerm,
this.fallbackKey,
);
IssuableForm.addAutosave(
autosaveMap,
'due_date',
this.form.find('input[name*="[due_date]"]'),
this.form.find('input[name*="[due_date]"]').get(0),
this.searchTerm,
this.fallbackKey,
);

View File

@ -312,7 +312,7 @@ export default {
if (this.isLoggedIn) {
const noteableType = capitalizeFirstCharacter(convertToCamelCase(this.noteableType));
this.autosave = new Autosave($(this.$refs.textarea), [
this.autosave = new Autosave(this.$refs.textarea, [
this.$options.i18n.note,
noteableType,
this.getNoteableData.id,

View File

@ -1,4 +1,3 @@
import $ from 'jquery';
import { s__ } from '~/locale';
import Autosave from '~/autosave';
import { capitalizeFirstCharacter } from '~/lib/utils/text_utility';
@ -16,7 +15,7 @@ export default {
keys = keys.concat(extraKeys);
}
this.autosave = new Autosave($(this.$refs.noteForm.$refs.textarea), keys);
this.autosave = new Autosave(this.$refs.noteForm.$refs.textarea, keys);
},
resetAutoSave() {
this.autosave.reset();

View File

@ -1,25 +1,32 @@
<script>
import { GlListbox } from '@gitlab/ui';
import { GlFormGroup, GlListbox } from '@gitlab/ui';
import { __ } from '~/locale';
const MIN_ITEMS_COUNT_FOR_SEARCHING = 20;
const MIN_ITEMS_COUNT_FOR_SEARCHING = 10;
export default {
i18n: {
noResultsText: __('No results found'),
},
components: {
GlFormGroup,
GlListbox,
},
model: GlListbox.model,
props: {
label: {
type: String,
required: false,
default: '',
},
name: {
type: String,
required: true,
},
defaultToggleText: {
type: String,
required: true,
required: false,
default: '',
},
selected: {
type: String,
@ -95,7 +102,7 @@ export default {
</script>
<template>
<div>
<gl-form-group :label="label">
<gl-listbox
:selected="selected"
:toggle-text="toggleText"
@ -106,5 +113,5 @@ export default {
@select="$emit($options.model.event, $event)"
/>
<input ref="input" type="hidden" :name="name" :value="selected" />
</div>
</gl-form-group>
</template>

View File

@ -140,3 +140,7 @@ export const BIDI_CHARS_CLASS_LIST = 'unicode-bidi has-tooltip';
export const BIDI_CHAR_TOOLTIP = 'Potentially unwanted character detected: Unicode BiDi Control';
export const HLJS_ON_AFTER_HIGHLIGHT = 'after:highlight';
// We fallback to highlighting these languages with Rouge, see the following issue for more detail:
// https://gitlab.com/gitlab-org/gitlab/-/issues/384375#note_1212752013
export const LEGACY_FALLBACKS = ['python'];

View File

@ -11,6 +11,7 @@ import {
EVENT_LABEL_FALLBACK,
ROUGE_TO_HLJS_LANGUAGE_MAP,
LINES_PER_CHUNK,
LEGACY_FALLBACKS,
} from './constants';
import Chunk from './components/chunk.vue';
import { registerPlugins } from './plugins/index';
@ -57,10 +58,11 @@ export default {
},
unsupportedLanguage() {
const supportedLanguages = Object.keys(languageLoader);
return (
const unsupportedLanguage =
!supportedLanguages.includes(this.language) &&
!supportedLanguages.includes(this.blob.language?.toLowerCase())
);
!supportedLanguages.includes(this.blob.language?.toLowerCase());
return LEGACY_FALLBACKS.includes(this.language) || unsupportedLanguage;
},
totalChunks() {
return Object.keys(this.chunks).length;

View File

@ -1,6 +1,5 @@
<script>
import { GlForm, GlFormGroup, GlFormInput } from '@gitlab/ui';
import $ from 'jquery';
import Autosave from '~/autosave';
import MarkdownField from '~/vue_shared/components/markdown/field.vue';
@ -81,13 +80,13 @@ export default {
if (!titleInput || !descriptionInput) return;
this.autosaveTitle = new Autosave($(titleInput.$el), [
this.autosaveTitle = new Autosave(titleInput.$el, [
document.location.pathname,
document.location.search,
'title',
]);
this.autosaveDescription = new Autosave($(descriptionInput.$el), [
this.autosaveDescription = new Autosave(descriptionInput, [
document.location.pathname,
document.location.search,
'description',

View File

@ -8,10 +8,7 @@
# See lib/api/helpers/rate_limiter.rb for API version
module CheckRateLimit
def check_rate_limit!(key, scope:, redirect_back: false, **options)
return if bypass_header_set?
return unless rate_limiter.throttled?(key, scope: scope, **options)
rate_limiter.log_request(request, "#{key}_request_limit".to_sym, current_user)
return unless Gitlab::ApplicationRateLimiter.throttled_request?(request, current_user, key, scope: scope, **options)
return yield if block_given?
@ -23,14 +20,4 @@ module CheckRateLimit
render plain: message, status: :too_many_requests
end
end
private
def rate_limiter
::Gitlab::ApplicationRateLimiter
end
def bypass_header_set?
::Gitlab::Throttle.bypass_header.present? && request.get_header(Gitlab::Throttle.bypass_header) == '1'
end
end

View File

@ -3,6 +3,7 @@
module IssuableCollections
extend ActiveSupport::Concern
include PaginatedCollection
include SearchRateLimitable
include SortingHelper
include SortingPreference
include Gitlab::Utils::StrongMemoize

View File

@ -5,6 +5,12 @@ module IssuableCollectionsAction
include IssuableCollections
include IssuesCalendar
included do
before_action :check_search_rate_limit!, only: [:issues, :merge_requests], if: -> {
params[:search].present? && Feature.enabled?(:rate_limit_issuable_searches)
}
end
# rubocop:disable Gitlab/ModuleWithInstanceVariables
def issues
show_alert_if_search_is_disabled

View File

@ -27,6 +27,10 @@ class Projects::IssuesController < Projects::ApplicationController
before_action :set_issuables_index, if: ->(c) {
SET_ISSUABLES_INDEX_ONLY_ACTIONS.include?(c.action_name.to_sym) && !index_html_request?
}
before_action :check_search_rate_limit!, if: ->(c) {
SET_ISSUABLES_INDEX_ONLY_ACTIONS.include?(c.action_name.to_sym) && !index_html_request? &&
params[:search].present? && Feature.enabled?(:rate_limit_issuable_searches)
}
# Allow write(create) issue
before_action :authorize_create_issue!, only: [:new, :create]

View File

@ -28,6 +28,9 @@ class Projects::MergeRequestsController < Projects::MergeRequests::ApplicationCo
:codequality_mr_diff_reports
]
before_action :set_issuables_index, only: [:index]
before_action :check_search_rate_limit!, only: [:index], if: -> {
params[:search].present? && Feature.enabled?(:rate_limit_issuable_searches)
}
before_action :authenticate_user!, only: [:assign_related_issues]
before_action :check_user_can_push_to_source_branch!, only: [:rebase]

View File

@ -18,6 +18,7 @@ module SearchArguments
def ready?(**args)
validate_search_in_params!(args)
validate_anonymous_search_access!(args)
validate_search_rate_limit!(args)
super
end
@ -39,6 +40,28 @@ module SearchArguments
'`search` should be present when including the `in` argument'
end
def validate_search_rate_limit!(args)
return if args[:search].blank? || context[:request].nil? || Feature.disabled?(:rate_limit_issuable_searches)
if current_user.present?
rate_limiter_key = :search_rate_limit
rate_limiter_scope = [current_user]
else
rate_limiter_key = :search_rate_limit_unauthenticated
rate_limiter_scope = [context[:request].ip]
end
if ::Gitlab::ApplicationRateLimiter.throttled_request?(
context[:request],
current_user,
rate_limiter_key,
scope: rate_limiter_scope
)
raise Gitlab::Graphql::Errors::ResourceNotAvailable,
'This endpoint has been requested with the search argument too many times. Try again later.'
end
end
def prepare_finder_params(args)
prepare_search_params(args)
end

View File

@ -45,18 +45,6 @@ module Clusters
)
end
def api_client
strong_memoize(:api_client) do
::Aws::CloudFormation::Client.new(credentials: credentials, region: region)
end
end
def credentials
strong_memoize(:credentials) do
::Aws::Credentials.new(access_key_id, secret_access_key, session_token)
end
end
def has_rbac_enabled?
true
end

View File

@ -37,12 +37,6 @@ module Clusters
greater_than: 0
}
def api_client
return unless access_token
@api_client ||= GoogleApi::CloudPlatform::Client.new(access_token, nil)
end
def nullify_credentials
assign_attributes(
access_token: nil,

View File

@ -1,74 +0,0 @@
# frozen_string_literal: true
module Clusters
module Aws
class AuthorizeRoleService
attr_reader :user
Response = Struct.new(:status, :body)
ERRORS = [
ActiveRecord::RecordInvalid,
ActiveRecord::RecordNotFound,
Clusters::Aws::FetchCredentialsService::MissingRoleError,
::Aws::Errors::MissingCredentialsError,
::Aws::STS::Errors::ServiceError
].freeze
def initialize(user, params:)
@user = user
@role_arn = params[:role_arn]
@region = params[:region]
end
def execute
ensure_role_exists!
update_role_arn!
Response.new(:ok, credentials)
rescue *ERRORS => e
Gitlab::ErrorTracking.track_exception(e)
Response.new(:unprocessable_entity, response_details(e))
end
private
attr_reader :role, :role_arn, :region
def ensure_role_exists!
@role = ::Aws::Role.find_by_user_id!(user.id)
end
def update_role_arn!
role.update!(role_arn: role_arn, region: region)
end
def credentials
Clusters::Aws::FetchCredentialsService.new(role).execute
end
def response_details(exception)
message =
case exception
when ::Aws::STS::Errors::AccessDenied
_("Access denied: %{error}") % { error: exception.message }
when ::Aws::STS::Errors::ServiceError
_("AWS service error: %{error}") % { error: exception.message }
when ActiveRecord::RecordNotFound
_("Error: Unable to find AWS role for current user")
when ActiveRecord::RecordInvalid
exception.message
when Clusters::Aws::FetchCredentialsService::MissingRoleError
_("Error: No AWS provision role found for user")
when ::Aws::Errors::MissingCredentialsError
_("Error: No AWS credentials were supplied")
else
_('An error occurred while authorizing your role')
end
{ message: message }.compact
end
end
end
end

View File

@ -1,80 +0,0 @@
# frozen_string_literal: true
module Clusters
module Aws
class FetchCredentialsService
attr_reader :provision_role
MissingRoleError = Class.new(StandardError)
def initialize(provision_role, provider: nil)
@provision_role = provision_role
@provider = provider
@region = provider&.region || provision_role&.region || Clusters::Providers::Aws::DEFAULT_REGION
end
def execute
raise MissingRoleError, 'AWS provisioning role not configured' unless provision_role.present?
::Aws::AssumeRoleCredentials.new(
client: client,
role_arn: provision_role.role_arn,
role_session_name: session_name,
external_id: provision_role.role_external_id,
policy: session_policy
).credentials
end
private
attr_reader :provider, :region
def client
::Aws::STS::Client.new(**client_args)
end
def client_args
{ region: region, credentials: gitlab_credentials }.compact
end
def gitlab_credentials
# These are not needed for IAM instance profiles
return unless access_key_id.present? && secret_access_key.present?
::Aws::Credentials.new(access_key_id, secret_access_key)
end
def access_key_id
Gitlab::CurrentSettings.eks_access_key_id
end
def secret_access_key
Gitlab::CurrentSettings.eks_secret_access_key
end
##
# If we haven't created a provider record yet,
# we restrict ourselves to read-only access so
# that we can safely expose credentials to the
# frontend (to be used when populating the
# creation form).
def session_policy
if provider.nil?
File.read(read_only_policy)
end
end
def read_only_policy
Rails.root.join('vendor', 'aws', 'iam', "eks_cluster_read_only_policy.json")
end
def session_name
if provider.present?
"gitlab-eks-cluster-#{provider.cluster_id}-user-#{provision_role.user_id}"
else
"gitlab-eks-autofill-user-#{provision_role.user_id}"
end
end
end
end
end

View File

@ -1,139 +0,0 @@
# frozen_string_literal: true
module Clusters
module Aws
class FinalizeCreationService
include Gitlab::Utils::StrongMemoize
attr_reader :provider
delegate :cluster, to: :provider
def execute(provider)
@provider = provider
configure_provider
create_gitlab_service_account!
configure_platform_kubernetes
configure_node_authentication!
cluster.save!
rescue ::Aws::CloudFormation::Errors::ServiceError => e
log_service_error(e.class.name, provider.id, e.message)
provider.make_errored!(s_('ClusterIntegration|Failed to fetch CloudFormation stack: %{message}') % { message: e.message })
rescue Kubeclient::HttpError => e
log_service_error(e.class.name, provider.id, e.message)
provider.make_errored!(s_('ClusterIntegration|Failed to run Kubeclient: %{message}') % { message: e.message })
rescue ActiveRecord::RecordInvalid => e
log_service_error(e.class.name, provider.id, e.message)
provider.make_errored!(s_('ClusterIntegration|Failed to configure EKS provider: %{message}') % { message: e.message })
end
private
def create_gitlab_service_account!
Clusters::Kubernetes::CreateOrUpdateServiceAccountService.gitlab_creator(
kube_client,
rbac: true
).execute
end
def configure_provider
provider.status_event = :make_created
end
def configure_platform_kubernetes
cluster.build_platform_kubernetes(
api_url: cluster_endpoint,
ca_cert: cluster_certificate,
token: request_kubernetes_token)
end
def request_kubernetes_token
Clusters::Kubernetes::FetchKubernetesTokenService.new(
kube_client,
Clusters::Kubernetes::GITLAB_ADMIN_TOKEN_NAME,
Clusters::Kubernetes::GITLAB_SERVICE_ACCOUNT_NAMESPACE
).execute
end
def kube_client
@kube_client ||= build_kube_client!(
cluster_endpoint,
cluster_certificate
)
end
def build_kube_client!(api_url, ca_pem)
raise "Incomplete settings" unless api_url
Gitlab::Kubernetes::KubeClient.new(
api_url,
auth_options: kubeclient_auth_options,
ssl_options: kubeclient_ssl_options(ca_pem),
http_proxy_uri: ENV['http_proxy']
)
end
def kubeclient_auth_options
{ bearer_token: Kubeclient::AmazonEksCredentials.token(provider.credentials, cluster.name) }
end
def kubeclient_ssl_options(ca_pem)
opts = { verify_ssl: OpenSSL::SSL::VERIFY_PEER }
if ca_pem.present?
opts[:cert_store] = OpenSSL::X509::Store.new
opts[:cert_store].add_cert(OpenSSL::X509::Certificate.new(ca_pem))
end
opts
end
def cluster_stack
@cluster_stack ||= provider.api_client.describe_stacks(stack_name: provider.cluster.name).stacks.first
end
def stack_output_value(key)
cluster_stack.outputs.detect { |output| output.output_key == key }.output_value
end
def node_instance_role_arn
stack_output_value('NodeInstanceRole')
end
def cluster_endpoint
strong_memoize(:cluster_endpoint) do
stack_output_value('ClusterEndpoint')
end
end
def cluster_certificate
strong_memoize(:cluster_certificate) do
Base64.decode64(stack_output_value('ClusterCertificate'))
end
end
def configure_node_authentication!
kube_client.create_config_map(node_authentication_config)
end
def node_authentication_config
Gitlab::Kubernetes::ConfigMaps::AwsNodeAuth.new(node_instance_role_arn).generate
end
def logger
@logger ||= Gitlab::Kubernetes::Logger.build
end
def log_service_error(exception, provider_id, message)
logger.error(
exception: exception.class.name,
service: self.class.name,
provider_id: provider_id,
message: message
)
end
end
end
end

View File

@ -1,85 +0,0 @@
# frozen_string_literal: true
module Clusters
module Aws
class ProvisionService
attr_reader :provider
def execute(provider)
@provider = provider
configure_provider_credentials
provision_cluster
if provider.make_creating
WaitForClusterCreationWorker.perform_in(
Clusters::Aws::VerifyProvisionStatusService::INITIAL_INTERVAL,
provider.cluster_id
)
else
provider.make_errored!("Failed to update provider record; #{provider.errors.full_messages}")
end
rescue Clusters::Aws::FetchCredentialsService::MissingRoleError
provider.make_errored!('Amazon role is not configured')
rescue ::Aws::Errors::MissingCredentialsError
provider.make_errored!('Amazon credentials are not configured')
rescue ::Aws::STS::Errors::ServiceError => e
provider.make_errored!("Amazon authentication failed; #{e.message}")
rescue ::Aws::CloudFormation::Errors::ServiceError => e
provider.make_errored!("Amazon CloudFormation request failed; #{e.message}")
end
private
def provision_role
provider.created_by_user&.aws_role
end
def credentials
@credentials ||= Clusters::Aws::FetchCredentialsService.new(
provision_role,
provider: provider
).execute
end
def configure_provider_credentials
provider.update!(
access_key_id: credentials.access_key_id,
secret_access_key: credentials.secret_access_key,
session_token: credentials.session_token
)
end
def provision_cluster
provider.api_client.create_stack(
stack_name: provider.cluster.name,
template_body: stack_template,
parameters: parameters,
capabilities: ["CAPABILITY_IAM"]
)
end
def parameters
[
parameter('ClusterName', provider.cluster.name),
parameter('ClusterRole', provider.role_arn),
parameter('KubernetesVersion', provider.kubernetes_version),
parameter('ClusterControlPlaneSecurityGroup', provider.security_group_id),
parameter('VpcId', provider.vpc_id),
parameter('Subnets', provider.subnet_ids.join(',')),
parameter('NodeAutoScalingGroupDesiredCapacity', provider.num_nodes.to_s),
parameter('NodeInstanceType', provider.instance_type),
parameter('KeyName', provider.key_name)
]
end
def parameter(key, value)
{ parameter_key: key, parameter_value: value }
end
def stack_template
File.read(Rails.root.join('vendor', 'aws', 'cloudformation', 'eks_cluster.yaml'))
end
end
end
end

View File

@ -1,50 +0,0 @@
# frozen_string_literal: true
module Clusters
module Aws
class VerifyProvisionStatusService
attr_reader :provider
INITIAL_INTERVAL = 5.minutes
POLL_INTERVAL = 1.minute
TIMEOUT = 30.minutes
def execute(provider)
@provider = provider
case cluster_stack.stack_status
when 'CREATE_IN_PROGRESS'
continue_creation
when 'CREATE_COMPLETE'
finalize_creation
else
provider.make_errored!("Unexpected status; #{cluster_stack.stack_status}")
end
rescue ::Aws::CloudFormation::Errors::ServiceError => e
provider.make_errored!("Amazon CloudFormation request failed; #{e.message}")
end
private
def cluster_stack
@cluster_stack ||= provider.api_client.describe_stacks(stack_name: provider.cluster.name).stacks.first
end
def continue_creation
if timeout_threshold.future?
WaitForClusterCreationWorker.perform_in(POLL_INTERVAL, provider.cluster_id)
else
provider.make_errored!(_('Kubernetes cluster creation time exceeds timeout; %{timeout}') % { timeout: TIMEOUT })
end
end
def timeout_threshold
cluster_stack.creation_time + TIMEOUT
end
def finalize_creation
Clusters::Aws::FinalizeCreationService.new.execute(provider)
end
end
end
end

View File

@ -24,9 +24,7 @@ module Clusters
return cluster if cluster.errors.present?
cluster.tap do |cluster|
cluster.save && ClusterProvisionWorker.perform_async(cluster.id)
end
cluster.tap(&:save)
end
private

View File

@ -1,31 +0,0 @@
# frozen_string_literal: true
module Clusters
module Gcp
class FetchOperationService
def execute(provider)
operation = provider.api_client.projects_zones_operations(
provider.gcp_project_id,
provider.zone,
provider.operation_id)
yield(operation) if block_given?
rescue Google::Apis::ServerError, Google::Apis::ClientError, Google::Apis::AuthorizationError => e
logger.error(
exception: e.class.name,
service: self.class.name,
provider_id: provider.id,
message: e.message
)
provider.make_errored!("Failed to request to CloudPlatform; #{e.message}")
end
private
def logger
@logger ||= Gitlab::Kubernetes::Logger.build
end
end
end
end

View File

@ -1,127 +0,0 @@
# frozen_string_literal: true
module Clusters
module Gcp
class FinalizeCreationService
attr_reader :provider
def execute(provider)
@provider = provider
configure_provider
create_gitlab_service_account!
configure_kubernetes
configure_pre_installed_knative if provider.knative_pre_installed?
cluster.save!
rescue Google::Apis::ServerError, Google::Apis::ClientError, Google::Apis::AuthorizationError => e
log_service_error(e.class.name, provider.id, e.message)
provider.make_errored!(s_('ClusterIntegration|Failed to request to Google Cloud Platform: %{message}') % { message: e.message })
rescue Kubeclient::HttpError => e
log_service_error(e.class.name, provider.id, e.message)
provider.make_errored!(s_('ClusterIntegration|Failed to run Kubeclient: %{message}') % { message: e.message })
rescue ActiveRecord::RecordInvalid => e
log_service_error(e.class.name, provider.id, e.message)
provider.make_errored!(s_('ClusterIntegration|Failed to configure Google Kubernetes Engine Cluster: %{message}') % { message: e.message })
end
private
def create_gitlab_service_account!
Clusters::Kubernetes::CreateOrUpdateServiceAccountService.gitlab_creator(
kube_client,
rbac: create_rbac_cluster?
).execute
end
def configure_provider
provider.endpoint = gke_cluster.endpoint
provider.status_event = :make_created
end
def configure_kubernetes
cluster.platform_type = :kubernetes
cluster.build_platform_kubernetes(
api_url: 'https://' + gke_cluster.endpoint,
ca_cert: Base64.decode64(gke_cluster.master_auth.cluster_ca_certificate),
authorization_type: authorization_type,
token: request_kubernetes_token)
end
def configure_pre_installed_knative
knative = cluster.build_application_knative(
hostname: 'example.com'
)
knative.make_pre_installed!
end
def request_kubernetes_token
Clusters::Kubernetes::FetchKubernetesTokenService.new(
kube_client,
Clusters::Kubernetes::GITLAB_ADMIN_TOKEN_NAME,
Clusters::Kubernetes::GITLAB_SERVICE_ACCOUNT_NAMESPACE
).execute
end
def authorization_type
create_rbac_cluster? ? 'rbac' : 'abac'
end
def create_rbac_cluster?
!provider.legacy_abac?
end
def kube_client
@kube_client ||= build_kube_client!(
'https://' + gke_cluster.endpoint,
Base64.decode64(gke_cluster.master_auth.cluster_ca_certificate)
)
end
def build_kube_client!(api_url, ca_pem)
raise "Incomplete settings" unless api_url
Gitlab::Kubernetes::KubeClient.new(
api_url,
auth_options: { bearer_token: provider.access_token },
ssl_options: kubeclient_ssl_options(ca_pem),
http_proxy_uri: ENV['http_proxy']
)
end
def kubeclient_ssl_options(ca_pem)
opts = { verify_ssl: OpenSSL::SSL::VERIFY_PEER }
if ca_pem.present?
opts[:cert_store] = OpenSSL::X509::Store.new
opts[:cert_store].add_cert(OpenSSL::X509::Certificate.new(ca_pem))
end
opts
end
def gke_cluster
@gke_cluster ||= provider.api_client.projects_zones_clusters_get(
provider.gcp_project_id,
provider.zone,
cluster.name)
end
def cluster
@cluster ||= provider.cluster
end
def logger
@logger ||= Gitlab::Kubernetes::Logger.build
end
def log_service_error(exception, provider_id, message)
logger.error(
exception: exception.class.name,
service: self.class.name,
provider_id: provider_id,
message: message
)
end
end
end
end

View File

@ -1,56 +0,0 @@
# frozen_string_literal: true
module Clusters
module Gcp
class ProvisionService
CLOUD_RUN_ADDONS = %i[http_load_balancing istio_config cloud_run_config].freeze
attr_reader :provider
def execute(provider)
@provider = provider
get_operation_id do |operation_id|
if provider.make_creating(operation_id)
WaitForClusterCreationWorker.perform_in(
Clusters::Gcp::VerifyProvisionStatusService::INITIAL_INTERVAL,
provider.cluster_id)
else
provider.make_errored!("Failed to update provider record; #{provider.errors}")
end
end
end
private
def get_operation_id
enable_addons = provider.cloud_run? ? CLOUD_RUN_ADDONS : []
operation = provider.api_client.projects_zones_clusters_create(
provider.gcp_project_id,
provider.zone,
provider.cluster.name,
provider.num_nodes,
machine_type: provider.machine_type,
legacy_abac: provider.legacy_abac,
enable_addons: enable_addons
)
unless operation.status == 'PENDING' || operation.status == 'RUNNING'
return provider.make_errored!("Operation status is unexpected; #{operation.status_message}")
end
operation_id = provider.api_client.parse_operation_id(operation.self_link)
unless operation_id
return provider.make_errored!('Can not find operation_id from self_link')
end
yield(operation_id)
rescue Google::Apis::ServerError, Google::Apis::ClientError, Google::Apis::AuthorizationError => e
provider.make_errored!("Failed to request to CloudPlatform; #{e.message}")
end
end
end
end

View File

@ -1,50 +0,0 @@
# frozen_string_literal: true
module Clusters
module Gcp
class VerifyProvisionStatusService
attr_reader :provider
INITIAL_INTERVAL = 2.minutes
EAGER_INTERVAL = 10.seconds
TIMEOUT = 20.minutes
def execute(provider)
@provider = provider
request_operation do |operation|
case operation.status
when 'PENDING', 'RUNNING'
continue_creation(operation)
when 'DONE'
finalize_creation
else
provider.make_errored!("Unexpected operation status; #{operation.status} #{operation.status_message}")
end
end
end
private
def continue_creation(operation)
if elapsed_time_from_creation(operation) < TIMEOUT
WaitForClusterCreationWorker.perform_in(EAGER_INTERVAL, provider.cluster_id)
else
provider.make_errored!(_('Kubernetes cluster creation time exceeds timeout; %{timeout}') % { timeout: TIMEOUT })
end
end
def elapsed_time_from_creation(operation)
Time.current.utc - operation.start_time.to_time.utc
end
def finalize_creation
Clusters::Gcp::FinalizeCreationService.new.execute(provider)
end
def request_operation(&blk)
Clusters::Gcp::FetchOperationService.new.execute(provider, &blk)
end
end
end
end

View File

@ -951,11 +951,11 @@
- :name: gcp_cluster:cluster_provision
:worker_name: ClusterProvisionWorker
:feature_category: :kubernetes_management
:has_external_dependencies: true
:has_external_dependencies: false
:urgency: :low
:resource_boundary: :unknown
:weight: 1
:idempotent: false
:idempotent: true
:tags: []
- :name: gcp_cluster:cluster_update_app
:worker_name: ClusterUpdateAppWorker
@ -1059,11 +1059,11 @@
- :name: gcp_cluster:wait_for_cluster_creation
:worker_name: WaitForClusterCreationWorker
:feature_category: :kubernetes_management
:has_external_dependencies: true
:has_external_dependencies: false
:urgency: :low
:resource_boundary: :unknown
:weight: 1
:idempotent: false
:idempotent: true
:tags: []
- :name: github_gists_importer:github_gists_import_finish_import
:worker_name: Gitlab::GithubGistsImport::FinishImportWorker

View File

@ -1,6 +1,6 @@
# frozen_string_literal: true
class ClusterProvisionWorker # rubocop:disable Scalability/IdempotentWorker
class ClusterProvisionWorker
include ApplicationWorker
data_consistency :always
@ -8,17 +8,7 @@ class ClusterProvisionWorker # rubocop:disable Scalability/IdempotentWorker
sidekiq_options retry: 3
include ClusterQueue
worker_has_external_dependencies!
idempotent!
def perform(cluster_id)
Clusters::Cluster.find_by_id(cluster_id).try do |cluster|
cluster.provider.try do |provider|
if cluster.gcp?
Clusters::Gcp::ProvisionService.new.execute(provider)
elsif cluster.aws?
Clusters::Aws::ProvisionService.new.execute(provider)
end
end
end
end
def perform(_); end
end

View File

@ -1,6 +1,6 @@
# frozen_string_literal: true
class WaitForClusterCreationWorker # rubocop:disable Scalability/IdempotentWorker
class WaitForClusterCreationWorker
include ApplicationWorker
data_consistency :always
@ -8,17 +8,7 @@ class WaitForClusterCreationWorker # rubocop:disable Scalability/IdempotentWorke
sidekiq_options retry: 3
include ClusterQueue
worker_has_external_dependencies!
idempotent!
def perform(cluster_id)
Clusters::Cluster.find_by_id(cluster_id).try do |cluster|
cluster.provider.try do |provider|
if cluster.gcp?
Clusters::Gcp::VerifyProvisionStatusService.new.execute(provider)
elsif cluster.aws?
Clusters::Aws::VerifyProvisionStatusService.new.execute(provider)
end
end
end
end
def perform(_); end
end

View File

@ -0,0 +1,8 @@
---
name: rate_limit_issuable_searches
introduced_by_url: "https://gitlab.com/gitlab-org/gitlab/-/merge_requests/104208"
rollout_issue_url:
milestone: '15.8'
type: development
group: group::project management
default_enabled: false

View File

@ -48,3 +48,5 @@ For more information, read [Gitaly touch points](gitaly_touch_points.md).
Create: Source Code has over 100 REST endpoints, being a mixture of Grape API endpoints and Rails controller endpoints.
For a detailed list, refer to [Source Code REST Endpoints](rest_endpoints.md).
An alternative list of the [Source Code endpoints and other owned objects](https://gitlab-com.gitlab.io/gl-infra/platform/stage-groups-index/source-code.html) is available.

View File

@ -695,6 +695,16 @@ module API
unprocessable_entity!('User must be authenticated to use search')
end
# Applies the search rate limit for the current request, guarded by the
# :rate_limit_issuable_searches feature flag. Authenticated requests are
# throttled per user; anonymous requests per IP address.
def validate_search_rate_limit!
  return unless Feature.enabled?(:rate_limit_issuable_searches)

  key, scope =
    if current_user
      [:search_rate_limit, [current_user]]
    else
      [:search_rate_limit_unauthenticated, [ip_address]]
    end

  check_rate_limit!(key, scope: scope)
end
private
# rubocop:disable Gitlab/ModuleWithInstanceVariables

View File

@ -10,25 +10,14 @@ module API
# See app/controllers/concerns/check_rate_limit.rb for Rails controllers version
module RateLimiter
def check_rate_limit!(key, scope:, **options)
return if bypass_header_set?
return unless rate_limiter.throttled?(key, scope: scope, **options)
rate_limiter.log_request(request, "#{key}_request_limit".to_sym, current_user)
return unless Gitlab::ApplicationRateLimiter.throttled_request?(
request, current_user, key, scope: scope, **options
)
return yield if block_given?
render_api_error!({ error: _('This endpoint has been requested too many times. Try again later.') }, 429)
end
private
def rate_limiter
::Gitlab::ApplicationRateLimiter
end
def bypass_header_set?
::Gitlab::Throttle.bypass_header.present? && request.get_header(Gitlab::Throttle.bypass_header) == '1'
end
end
end
end

View File

@ -116,6 +116,7 @@ module API
get '/issues_statistics' do
authenticate! unless params[:scope] == 'all'
validate_anonymous_search_access! if params[:search].present?
validate_search_rate_limit! if declared_params[:search].present?
present issues_statistics, with: Grape::Presenters::Presenter
end
@ -134,6 +135,7 @@ module API
get do
authenticate! unless params[:scope] == 'all'
validate_anonymous_search_access! if params[:search].present?
validate_search_rate_limit! if declared_params[:search].present?
issues = paginate(find_issues)
options = {
@ -173,6 +175,7 @@ module API
end
get ":id/issues" do
validate_anonymous_search_access! if declared_params[:search].present?
validate_search_rate_limit! if declared_params[:search].present?
issues = paginate(find_issues(group_id: user_group.id, include_subgroups: true))
options = {
@ -192,6 +195,7 @@ module API
end
get ":id/issues_statistics" do
validate_anonymous_search_access! if declared_params[:search].present?
validate_search_rate_limit! if declared_params[:search].present?
present issues_statistics(group_id: user_group.id, include_subgroups: true), with: Grape::Presenters::Presenter
end
@ -211,6 +215,7 @@ module API
end
get ":id/issues" do
validate_anonymous_search_access! if declared_params[:search].present?
validate_search_rate_limit! if declared_params[:search].present?
issues = paginate(find_issues(project_id: user_project.id))
options = {
@ -230,6 +235,7 @@ module API
end
get ":id/issues_statistics" do
validate_anonymous_search_access! if declared_params[:search].present?
validate_search_rate_limit! if declared_params[:search].present?
present issues_statistics(project_id: user_project.id), with: Grape::Presenters::Presenter
end

View File

@ -149,6 +149,7 @@ module API
get feature_category: :code_review, urgency: :low do
authenticate! unless params[:scope] == 'all'
validate_anonymous_search_access! if params[:search].present?
validate_search_rate_limit! if declared_params[:search].present?
merge_requests = find_merge_requests
present merge_requests, serializer_options_for(merge_requests)
@ -177,6 +178,7 @@ module API
end
get ":id/merge_requests", feature_category: :code_review, urgency: :low do
validate_anonymous_search_access! if declared_params[:search].present?
validate_search_rate_limit! if declared_params[:search].present?
merge_requests = find_merge_requests(group_id: user_group.id, include_subgroups: true)
present merge_requests, serializer_options_for(merge_requests).merge(group: user_group)
@ -244,6 +246,7 @@ module API
get ":id/merge_requests", feature_category: :code_review, urgency: :low do
authorize! :read_merge_request, user_project
validate_anonymous_search_access! if declared_params[:search].present?
validate_search_rate_limit! if declared_params[:search].present?
merge_requests = find_merge_requests(project_id: user_project.id)

View File

@ -115,6 +115,38 @@ module Gitlab
value > threshold_value
end
# Similar to #throttled? above but checks for the bypass header in the
# request and logs the request when it is over the rate limit.
#
# @param request [Http::Request] - Web request used to check the header and log
# @param current_user [User] Current user of the request, it can be nil
# @param key [Symbol] Key attribute registered in `.rate_limits`
# @param scope [Array<ActiveRecord>] Array of ActiveRecord models, Strings
#     or Symbols to scope throttling to a specific request (e.g. per user
#     per project)
# @param options [Hash] Forwarded to #throttled? (resource, threshold,
#     interval, users_allowlist, peek — see that method's documentation)
#
# @return [Boolean] Whether or not a request should be throttled
def throttled_request?(request, current_user, key, scope:, **options)
  # A request carrying the bypass header with value '1' is never throttled.
  bypass_header = ::Gitlab::Throttle.bypass_header
  return false if bypass_header.present? && request.get_header(bypass_header) == '1'

  throttled = throttled?(key, scope: scope, **options)
  log_request(request, :"#{key}_request_limit", current_user) if throttled
  throttled
end
# Returns the current rate limited state without incrementing the count.
#
# @param key [Symbol] Key attribute registered in `.rate_limits`

View File

@ -1927,9 +1927,6 @@ msgstr ""
msgid "AWS secret access key (Optional)"
msgstr ""
msgid "AWS service error: %{error}"
msgstr ""
msgid "Abort"
msgstr ""
@ -1978,9 +1975,6 @@ msgstr ""
msgid "Access denied for your LDAP account."
msgstr ""
msgid "Access denied: %{error}"
msgstr ""
msgid "Access expires"
msgstr ""
@ -4261,9 +4255,6 @@ msgstr ""
msgid "An error occurred while approving, please try again."
msgstr ""
msgid "An error occurred while authorizing your role"
msgstr ""
msgid "An error occurred while checking group path. Please refresh and try again."
msgstr ""
@ -9580,21 +9571,6 @@ msgstr ""
msgid "ClusterIntegration|Every new Google Cloud Platform (GCP) account receives $300 in credit upon %{sign_up_link}. In partnership with Google, GitLab is able to offer an additional $200 for both new and existing GCP accounts to get started with GitLab's Google Kubernetes Engine Integration."
msgstr ""
msgid "ClusterIntegration|Failed to configure EKS provider: %{message}"
msgstr ""
msgid "ClusterIntegration|Failed to configure Google Kubernetes Engine Cluster: %{message}"
msgstr ""
msgid "ClusterIntegration|Failed to fetch CloudFormation stack: %{message}"
msgstr ""
msgid "ClusterIntegration|Failed to request to Google Cloud Platform: %{message}"
msgstr ""
msgid "ClusterIntegration|Failed to run Kubeclient: %{message}"
msgstr ""
msgid "ClusterIntegration|GitLab Integration"
msgstr ""
@ -16071,21 +16047,12 @@ msgstr ""
msgid "Error: Gitaly is unavailable. Contact your administrator."
msgstr ""
msgid "Error: No AWS credentials were supplied"
msgstr ""
msgid "Error: No AWS provision role found for user"
msgstr ""
msgid "Error: Unable to create deploy freeze"
msgstr ""
msgid "Error: Unable to delete deploy freeze"
msgstr ""
msgid "Error: Unable to find AWS role for current user"
msgstr ""
msgid "ErrorTracking|Access token is %{token_in_code_tag}"
msgstr ""
@ -24106,9 +24073,6 @@ msgstr ""
msgid "Kubernetes cluster"
msgstr ""
msgid "Kubernetes cluster creation time exceeds timeout; %{timeout}"
msgstr ""
msgid "Kubernetes cluster integration and resources are being removed."
msgstr ""

View File

@ -159,8 +159,6 @@ RSpec.describe Admin::ClustersController do
describe 'functionality' do
context 'when creates a cluster' do
it 'creates a new cluster' do
expect(ClusterProvisionWorker).to receive(:perform_async)
expect { post_create_user }.to change { Clusters::Cluster.count }
.and change { Clusters::Platforms::Kubernetes.count }
@ -187,8 +185,6 @@ RSpec.describe Admin::ClustersController do
end
it 'creates a new cluster' do
expect(ClusterProvisionWorker).to receive(:perform_async)
expect { post_create_user }.to change { Clusters::Cluster.count }
.and change { Clusters::Platforms::Kubernetes.count }

View File

@ -33,8 +33,8 @@ RSpec.describe CheckRateLimit do
end
describe '#check_rate_limit!' do
it 'calls ApplicationRateLimiter#throttled? with the right arguments' do
expect(::Gitlab::ApplicationRateLimiter).to receive(:throttled?).with(key, scope: scope).and_return(false)
it 'calls ApplicationRateLimiter#throttled_request? with the right arguments' do
expect(::Gitlab::ApplicationRateLimiter).to receive(:throttled_request?).with(request, user, key, scope: scope).and_return(false)
expect(subject).not_to receive(:render)
subject.check_rate_limit!(key, scope: scope)

View File

@ -180,8 +180,6 @@ RSpec.describe Groups::ClustersController do
describe 'functionality' do
context 'when creates a cluster' do
it 'creates a new cluster' do
expect(ClusterProvisionWorker).to receive(:perform_async)
expect { go }.to change { Clusters::Cluster.count }
.and change { Clusters::Platforms::Kubernetes.count }
@ -210,8 +208,6 @@ RSpec.describe Groups::ClustersController do
end
it 'creates a new cluster' do
expect(ClusterProvisionWorker).to receive(:perform_async)
expect { go }.to change { Clusters::Cluster.count }
.and change { Clusters::Platforms::Kubernetes.count }

View File

@ -181,8 +181,6 @@ RSpec.describe Projects::ClustersController do
describe 'functionality' do
context 'when creates a cluster' do
it 'creates a new cluster' do
expect(ClusterProvisionWorker).to receive(:perform_async)
expect { go }.to change { Clusters::Cluster.count }
.and change { Clusters::Platforms::Kubernetes.count }
@ -210,8 +208,6 @@ RSpec.describe Projects::ClustersController do
end
it 'creates a new cluster' do
expect(ClusterProvisionWorker).to receive(:perform_async)
expect { go }.to change { Clusters::Cluster.count }
.and change { Clusters::Platforms::Kubernetes.count }

View File

@ -1,4 +1,3 @@
import $ from 'jquery';
import { useLocalStorageSpy } from 'helpers/local_storage_helper';
import Autosave from '~/autosave';
import AccessorUtilities from '~/lib/utils/accessor';
@ -7,12 +6,19 @@ describe('Autosave', () => {
useLocalStorageSpy();
let autosave;
const field = $('<textarea></textarea>');
const checkbox = $('<input type="checkbox">');
const field = document.createElement('textarea');
const checkbox = document.createElement('input');
checkbox.type = 'checkbox';
const key = 'key';
const fallbackKey = 'fallbackKey';
const lockVersionKey = 'lockVersionKey';
const lockVersion = 1;
const getAutosaveKey = () => `autosave/${key}`;
const getAutosaveLockKey = () => `autosave/${key}/lockVersion`;
afterEach(() => {
autosave?.dispose?.();
});
describe('class constructor', () => {
beforeEach(() => {
@ -43,18 +49,10 @@ describe('Autosave', () => {
});
describe('restore', () => {
beforeEach(() => {
autosave = {
field,
key,
};
});
describe('if .isLocalStorageAvailable is `false`', () => {
beforeEach(() => {
autosave.isLocalStorageAvailable = false;
Autosave.prototype.restore.call(autosave);
jest.spyOn(AccessorUtilities, 'canUseLocalStorage').mockReturnValue(false);
autosave = new Autosave(field, key);
});
it('should not call .getItem', () => {
@ -63,97 +61,73 @@ describe('Autosave', () => {
});
describe('if .isLocalStorageAvailable is `true`', () => {
beforeEach(() => {
autosave.isLocalStorageAvailable = true;
});
it('should call .getItem', () => {
Autosave.prototype.restore.call(autosave);
expect(window.localStorage.getItem).toHaveBeenCalledWith(key);
autosave = new Autosave(field, key);
expect(window.localStorage.getItem.mock.calls).toEqual([[getAutosaveKey()], []]);
});
it('triggers jquery event', () => {
jest.spyOn(autosave.field, 'trigger').mockImplementation(() => {});
describe('if saved value is present', () => {
const storedValue = 'bar';
Autosave.prototype.restore.call(autosave);
expect(field.trigger).toHaveBeenCalled();
});
it('triggers native event', () => {
const fieldElement = autosave.field.get(0);
const eventHandler = jest.fn();
fieldElement.addEventListener('change', eventHandler);
Autosave.prototype.restore.call(autosave);
expect(eventHandler).toHaveBeenCalledTimes(1);
fieldElement.removeEventListener('change', eventHandler);
});
describe('if field type is checkbox', () => {
beforeEach(() => {
autosave = {
field: checkbox,
key,
isLocalStorageAvailable: true,
type: 'checkbox',
};
field.value = 'foo';
window.localStorage.setItem(getAutosaveKey(), storedValue);
});
it('should restore', () => {
window.localStorage.setItem(key, true);
expect(checkbox.is(':checked')).toBe(false);
Autosave.prototype.restore.call(autosave);
expect(checkbox.is(':checked')).toBe(true);
it('restores the value', () => {
autosave = new Autosave(field, key);
expect(field.value).toEqual(storedValue);
});
});
});
describe('if field gets deleted from DOM', () => {
beforeEach(() => {
autosave.field = $('.not-a-real-element');
});
it('triggers native event', () => {
const eventHandler = jest.fn();
field.addEventListener('change', eventHandler);
autosave = new Autosave(field, key);
it('does not trigger event', () => {
jest.spyOn(field, 'trigger');
expect(eventHandler).toHaveBeenCalledTimes(1);
field.removeEventListener('change', eventHandler);
});
expect(field.trigger).not.toHaveBeenCalled();
describe('if field type is checkbox', () => {
beforeEach(() => {
checkbox.checked = false;
window.localStorage.setItem(getAutosaveKey(), true);
autosave = new Autosave(checkbox, key);
});
it('should restore', () => {
expect(checkbox.checked).toBe(true);
});
});
});
});
});
describe('getSavedLockVersion', () => {
beforeEach(() => {
autosave = {
field,
key,
lockVersionKey,
};
});
describe('if .isLocalStorageAvailable is `false`', () => {
beforeEach(() => {
autosave.isLocalStorageAvailable = false;
Autosave.prototype.getSavedLockVersion.call(autosave);
jest.spyOn(AccessorUtilities, 'canUseLocalStorage').mockReturnValue(false);
autosave = new Autosave(field, key);
});
it('should not call .getItem', () => {
autosave.getSavedLockVersion();
expect(window.localStorage.getItem).not.toHaveBeenCalled();
});
});
describe('if .isLocalStorageAvailable is `true`', () => {
beforeEach(() => {
autosave.isLocalStorageAvailable = true;
autosave = new Autosave(field, key);
});
it('should call .getItem', () => {
Autosave.prototype.getSavedLockVersion.call(autosave);
expect(window.localStorage.getItem).toHaveBeenCalledWith(lockVersionKey);
autosave.getSavedLockVersion();
expect(window.localStorage.getItem.mock.calls).toEqual([
[getAutosaveKey()],
[],
[getAutosaveLockKey()],
]);
});
});
});
@ -162,7 +136,7 @@ describe('Autosave', () => {
beforeEach(() => {
autosave = { reset: jest.fn() };
autosave.field = field;
field.val('value');
field.value = 'value';
});
describe('if .isLocalStorageAvailable is `false`', () => {
@ -200,14 +174,14 @@ describe('Autosave', () => {
});
it('should save true when checkbox on', () => {
checkbox.prop('checked', true);
checkbox.checked = true;
Autosave.prototype.save.call(autosave);
expect(window.localStorage.setItem).toHaveBeenCalledWith(key, true);
});
it('should call reset when checkbox off', () => {
autosave.reset = jest.fn();
checkbox.prop('checked', false);
checkbox.checked = false;
Autosave.prototype.save.call(autosave);
expect(autosave.reset).toHaveBeenCalled();
expect(window.localStorage.setItem).not.toHaveBeenCalled();

View File

@ -3,6 +3,8 @@ import Vuex from 'vuex';
import { mountExtended } from 'helpers/vue_test_utils_helper';
import SubmitDropdown from '~/batch_comments/components/submit_dropdown.vue';
jest.mock('~/autosave');
Vue.use(Vuex);
let wrapper;

View File

@ -5,6 +5,7 @@ import { confirmAction } from '~/lib/utils/confirm_via_gl_modal/confirm_via_gl_m
import DesignReplyForm from '~/design_management/components/design_notes/design_reply_form.vue';
jest.mock('~/lib/utils/confirm_via_gl_modal/confirm_via_gl_modal');
jest.mock('~/autosave');
describe('Design reply form component', () => {
let wrapper;
@ -78,12 +79,11 @@ describe('Design reply form component', () => {
createComponent({ discussionId });
await nextTick();
// We discourage testing `wrapper.vm` properties but
// since `autosave` library instantiates on component
// there's no other way to test whether instantiation
// happened correctly or not.
expect(wrapper.vm.autosaveDiscussion).toBeInstanceOf(Autosave);
expect(wrapper.vm.autosaveDiscussion.key).toBe(`autosave/Discussion/6/${shortDiscussionId}`);
expect(Autosave).toHaveBeenCalledWith(expect.any(Element), [
'Discussion',
6,
shortDiscussionId,
]);
},
);
@ -141,7 +141,7 @@ describe('Design reply form component', () => {
});
it('emits submitForm event on Comment button click', async () => {
const autosaveResetSpy = jest.spyOn(wrapper.vm.autosaveDiscussion, 'reset');
const autosaveResetSpy = jest.spyOn(Autosave.prototype, 'reset');
findSubmitButton().vm.$emit('click');
@ -151,7 +151,7 @@ describe('Design reply form component', () => {
});
it('emits submitForm event on textarea ctrl+enter keydown', async () => {
const autosaveResetSpy = jest.spyOn(wrapper.vm.autosaveDiscussion, 'reset');
const autosaveResetSpy = jest.spyOn(Autosave.prototype, 'reset');
findTextarea().trigger('keydown.enter', {
ctrlKey: true,
@ -163,7 +163,7 @@ describe('Design reply form component', () => {
});
it('emits submitForm event on textarea meta+enter keydown', async () => {
const autosaveResetSpy = jest.spyOn(wrapper.vm.autosaveDiscussion, 'reset');
const autosaveResetSpy = jest.spyOn(Autosave.prototype, 'reset');
findTextarea().trigger('keydown.enter', {
metaKey: true,
@ -178,7 +178,7 @@ describe('Design reply form component', () => {
findTextarea().setValue('test2');
await nextTick();
expect(wrapper.emitted('input')).toEqual([['test'], ['test2']]);
expect(wrapper.emitted('input')).toEqual([['test2']]);
});
it('emits cancelForm event on Escape key if text was not changed', () => {
@ -211,7 +211,7 @@ describe('Design reply form component', () => {
it('emits cancelForm event when confirmed', async () => {
confirmAction.mockResolvedValueOnce(true);
const autosaveResetSpy = jest.spyOn(wrapper.vm.autosaveDiscussion, 'reset');
const autosaveResetSpy = jest.spyOn(Autosave.prototype, 'reset');
wrapper.setProps({ value: 'test3' });
await nextTick();
@ -228,7 +228,7 @@ describe('Design reply form component', () => {
it("doesn't emit cancelForm event when not confirmed", async () => {
confirmAction.mockResolvedValueOnce(false);
const autosaveResetSpy = jest.spyOn(wrapper.vm.autosaveDiscussion, 'reset');
const autosaveResetSpy = jest.spyOn(Autosave.prototype, 'reset');
wrapper.setProps({ value: 'test3' });
await nextTick();

View File

@ -101,7 +101,8 @@ describe('DiffLineNoteForm', () => {
});
it('should init autosave', () => {
expect(Autosave).toHaveBeenCalledWith({}, [
// we're using shallow mount here so there's no element to pass to Autosave
expect(Autosave).toHaveBeenCalledWith(undefined, [
'Note',
'Issue',
98,

View File

@ -35,8 +35,8 @@ describe('IssuableForm', () => {
let $description;
beforeEach(() => {
$title = $form.find('input[name*="[title]"]');
$description = $form.find('textarea[name*="[description]"]');
$title = $form.find('input[name*="[title]"]').get(0);
$description = $form.find('textarea[name*="[description]"]').get(0);
});
afterEach(() => {
@ -103,7 +103,11 @@ describe('IssuableForm', () => {
createIssuable($form);
expect(Autosave).toHaveBeenCalledTimes(totalAutosaveFormFields);
expect(Autosave).toHaveBeenLastCalledWith($input, ['/', '', id], `autosave///=${id}`);
expect(Autosave).toHaveBeenLastCalledWith(
$input.get(0),
['/', '', id],
`autosave///=${id}`,
);
});
});

View File

@ -5,6 +5,7 @@ import MockAdapter from 'axios-mock-adapter';
import Vue, { nextTick } from 'vue';
import Vuex from 'vuex';
import { extendedWrapper } from 'helpers/vue_test_utils_helper';
import Autosave from '~/autosave';
import batchComments from '~/batch_comments/stores/modules/batch_comments';
import { refreshUserMergeRequestCounts } from '~/commons/nav/user_merge_requests';
import { createAlert } from '~/flash';
@ -20,6 +21,7 @@ import { loggedOutnoteableData, notesDataMock, userDataMock, noteableDataMock }
jest.mock('autosize');
jest.mock('~/commons/nav/user_merge_requests');
jest.mock('~/flash');
jest.mock('~/autosave');
Vue.use(Vuex);
@ -336,8 +338,11 @@ describe('issue_comment_form component', () => {
});
it('inits autosave', () => {
expect(wrapper.vm.autosave).toBeDefined();
expect(wrapper.vm.autosave.key).toBe(`autosave/Note/Issue/${noteableDataMock.id}`);
expect(Autosave).toHaveBeenCalledWith(expect.any(Element), [
'Note',
'Issue',
noteableDataMock.id,
]);
});
});

View File

@ -7,11 +7,14 @@ import NoteAwardsList from '~/notes/components/note_awards_list.vue';
import NoteForm from '~/notes/components/note_form.vue';
import createStore from '~/notes/stores';
import notes from '~/notes/stores/modules/index';
import Autosave from '~/autosave';
import Suggestions from '~/vue_shared/components/markdown/suggestions.vue';
import { noteableDataMock, notesDataMock, note } from '../mock_data';
jest.mock('~/autosave');
const createComponent = ({
props = {},
noteableData = noteableDataMock,
@ -84,13 +87,8 @@ describe('issue_note_body component', () => {
});
it('adds autosave', () => {
const autosaveKey = `autosave/Note/${note.noteable_type}/${note.id}`;
// While we discourage testing wrapper props
// here we aren't testing a component prop
// but instead an instance object property
// which is defined in `app/assets/javascripts/notes/mixins/autosave.js`
expect(wrapper.vm.autosave.key).toEqual(autosaveKey);
// passing undefined instead of an element because of shallowMount
expect(Autosave).toHaveBeenCalledWith(undefined, ['Note', note.noteable_type, note.id]);
});
describe('isInternalNote', () => {

View File

@ -1,11 +1,12 @@
import { shallowMount } from '@vue/test-utils';
import { GlListbox } from '@gitlab/ui';
import { GlFormGroup, GlListbox } from '@gitlab/ui';
import ListboxInput from '~/vue_shared/components/listbox_input/listbox_input.vue';
describe('ListboxInput', () => {
let wrapper;
// Props
const label = 'label';
const name = 'name';
const defaultToggleText = 'defaultToggleText';
const items = [
@ -23,12 +24,14 @@ describe('ListboxInput', () => {
];
// Finders
const findGlFormGroup = () => wrapper.findComponent(GlFormGroup);
const findGlListbox = () => wrapper.findComponent(GlListbox);
const findInput = () => wrapper.find('input');
const createComponent = (propsData) => {
wrapper = shallowMount(ListboxInput, {
propsData: {
label,
name,
defaultToggleText,
items,
@ -37,14 +40,22 @@ describe('ListboxInput', () => {
});
};
describe('input attributes', () => {
describe('options', () => {
beforeEach(() => {
createComponent();
});
it('passes the label to the form group', () => {
expect(findGlFormGroup().attributes('label')).toBe(label);
});
it('sets the input name', () => {
expect(findInput().attributes('name')).toBe(name);
});
it('is not filterable with few items', () => {
expect(findGlListbox().props('searchable')).toBe(false);
});
});
describe('toggle text', () => {
@ -91,12 +102,29 @@ describe('ListboxInput', () => {
});
describe('search', () => {
beforeEach(() => {
createComponent();
it('is searchable when there are more than 10 items', () => {
createComponent({
items: [
{
text: 'Group 1',
options: [...Array(10).keys()].map((index) => ({
text: index + 1,
value: String(index + 1),
})),
},
{
text: 'Group 2',
options: [{ text: 'Item 11', value: '11' }],
},
],
});
expect(findGlListbox().props('searchable')).toBe(true);
});
it('passes all items to GlListbox by default', () => {
createComponent();
expect(findGlListbox().props('items')).toStrictEqual(items);
});

View File

@ -90,6 +90,17 @@ describe('Source Viewer component', () => {
});
});
describe('legacy fallbacks', () => {
it('tracks a fallback event and emits an error when viewing python files', () => {
const fallbackLanguage = 'python';
const eventData = { label: EVENT_LABEL_FALLBACK, property: fallbackLanguage };
createComponent({ language: fallbackLanguage });
expect(Tracking.event).toHaveBeenCalledWith(undefined, EVENT_ACTION, eventData);
expect(wrapper.emitted('error')).toHaveLength(1);
});
});
describe('highlight.js', () => {
beforeEach(() => createComponent({ language: mappedLanguage }));
@ -114,10 +125,10 @@ describe('Source Viewer component', () => {
});
it('correctly maps languages starting with uppercase', async () => {
await createComponent({ language: 'Python3' });
const languageDefinition = await import(`highlight.js/lib/languages/python`);
await createComponent({ language: 'Ruby' });
const languageDefinition = await import(`highlight.js/lib/languages/ruby`);
expect(hljs.registerLanguage).toHaveBeenCalledWith('python', languageDefinition.default);
expect(hljs.registerLanguage).toHaveBeenCalledWith('ruby', languageDefinition.default);
});
it('highlights the first chunk', () => {

View File

@ -5,9 +5,12 @@ import { nextTick } from 'vue';
import IssuableEditForm from '~/vue_shared/issuable/show/components/issuable_edit_form.vue';
import IssuableEventHub from '~/vue_shared/issuable/show/event_hub';
import MarkdownField from '~/vue_shared/components/markdown/field.vue';
import Autosave from '~/autosave';
import { mockIssuableShowProps, mockIssuable } from '../mock_data';
jest.mock('~/autosave');
const issuableEditFormProps = {
issuable: mockIssuable,
...mockIssuableShowProps,
@ -36,10 +39,12 @@ describe('IssuableEditForm', () => {
beforeEach(() => {
wrapper = createComponent();
jest.spyOn(Autosave.prototype, 'reset');
});
afterEach(() => {
wrapper.destroy();
jest.resetAllMocks();
});
describe('watch', () => {
@ -100,21 +105,18 @@ describe('IssuableEditForm', () => {
describe('methods', () => {
describe('initAutosave', () => {
it('initializes `autosaveTitle` and `autosaveDescription` props', () => {
expect(wrapper.vm.autosaveTitle).toBeDefined();
expect(wrapper.vm.autosaveDescription).toBeDefined();
it('initializes autosave', () => {
expect(Autosave.mock.calls).toEqual([
[expect.any(Element), ['/', '', 'title']],
[expect.any(Element), ['/', '', 'description']],
]);
});
});
describe('resetAutosave', () => {
it('calls `reset` on `autosaveTitle` and `autosaveDescription` props', () => {
jest.spyOn(wrapper.vm.autosaveTitle, 'reset').mockImplementation(jest.fn);
jest.spyOn(wrapper.vm.autosaveDescription, 'reset').mockImplementation(jest.fn);
wrapper.vm.resetAutosave();
expect(wrapper.vm.autosaveTitle.reset).toHaveBeenCalled();
expect(wrapper.vm.autosaveDescription.reset).toHaveBeenCalled();
it('resets title and description on "update.issuable event"', () => {
IssuableEventHub.$emit('update.issuable');
expect(Autosave.prototype.reset.mock.calls).toEqual([[], []]);
});
});
});

View File

@ -31,8 +31,8 @@ RSpec.describe API::Helpers::RateLimiter do
end
describe '#check_rate_limit!' do
it 'calls ApplicationRateLimiter#throttled? with the right arguments' do
expect(::Gitlab::ApplicationRateLimiter).to receive(:throttled?).with(key, scope: scope).and_return(false)
it 'calls ApplicationRateLimiter#throttled_request? with the right arguments' do
expect(::Gitlab::ApplicationRateLimiter).to receive(:throttled_request?).with(request, user, key, scope: scope).and_return(false)
expect(subject).not_to receive(:render_api_error!)
subject.check_rate_limit!(key, scope: scope)

View File

@ -214,6 +214,52 @@ RSpec.describe Gitlab::ApplicationRateLimiter, :clean_gitlab_redis_rate_limiting
end
end
describe '.throttled_request?', :freeze_time do
# Only #get_header is stubbed where needed; a verified double keeps the spec honest.
let(:request) { instance_double('Rack::Request') }
context 'when request is not over the limit' do
it 'returns false and does not log the request' do
expect(subject).not_to receive(:log_request)
expect(subject.throttled_request?(request, user, :test_action, scope: [user])).to eq(false)
end
end
context 'when request is over the limit' do
# Consume the allowance first so the next call is over the limit.
before do
subject.throttled?(:test_action, scope: [user])
end
it 'returns true and logs the request' do
expect(subject).to receive(:log_request).with(request, :test_action_request_limit, user)
expect(subject.throttled_request?(request, user, :test_action, scope: [user])).to eq(true)
end
context 'when the bypass header is set' do
before do
allow(Gitlab::Throttle).to receive(:bypass_header).and_return('SOME_HEADER')
end
# Only the exact value '1' bypasses throttling.
it 'skips rate limit if set to "1"' do
allow(request).to receive(:get_header).with(Gitlab::Throttle.bypass_header).and_return('1')
expect(subject).not_to receive(:log_request)
expect(subject.throttled_request?(request, user, :test_action, scope: [user])).to eq(false)
end
it 'does not skip rate limit if set to something else than "1"' do
allow(request).to receive(:get_header).with(Gitlab::Throttle.bypass_header).and_return('0')
expect(subject).to receive(:log_request).with(request, :test_action_request_limit, user)
expect(subject.throttled_request?(request, user, :test_action, scope: [user])).to eq(true)
end
end
end
end
describe '.peek' do
it 'peeks at the current state without changing its value' do
freeze_time do

View File

@ -75,39 +75,6 @@ RSpec.describe Clusters::Providers::Aws do
end
end
describe '#api_client' do
let(:provider) { create(:cluster_provider_aws) }
let(:credentials) { double }
let(:client) { double }
subject { provider.api_client }
before do
allow(provider).to receive(:credentials).and_return(credentials)
expect(Aws::CloudFormation::Client).to receive(:new)
.with(credentials: credentials, region: provider.region)
.and_return(client)
end
it { is_expected.to eq client }
end
describe '#credentials' do
let(:provider) { create(:cluster_provider_aws) }
let(:credentials) { double }
subject { provider.credentials }
before do
expect(Aws::Credentials).to receive(:new)
.with(provider.access_key_id, provider.secret_access_key, provider.session_token)
.and_return(credentials)
end
it { is_expected.to eq credentials }
end
describe '#created_by_user' do
let(:provider) { create(:cluster_provider_aws) }

View File

@ -111,31 +111,6 @@ RSpec.describe Clusters::Providers::Gcp do
end
end
describe '#api_client' do
subject { gcp.api_client }
context 'when status is creating' do
let(:gcp) { build(:cluster_provider_gcp, :creating) }
it 'returns Cloud Platform API clinet' do
expect(subject).to be_an_instance_of(GoogleApi::CloudPlatform::Client)
expect(subject.access_token).to eq(gcp.access_token)
end
end
context 'when status is created' do
let(:gcp) { build(:cluster_provider_gcp, :created) }
it { is_expected.to be_nil }
end
context 'when status is errored' do
let(:gcp) { build(:cluster_provider_gcp, :errored) }
it { is_expected.to be_nil }
end
end
describe '#nullify_credentials' do
let(:provider) { create(:cluster_provider_gcp, :creating) }

View File

@ -88,10 +88,10 @@ RSpec.describe 'Query.project.pipeline', feature_category: :continuous_integrati
build_stage = create(:ci_stage, position: 2, name: 'build', project: project, pipeline: pipeline)
test_stage = create(:ci_stage, position: 3, name: 'test', project: project, pipeline: pipeline)
create(:ci_build, pipeline: pipeline, name: 'docker 1 2', scheduling_type: :stage, stage: build_stage, stage_idx: build_stage.position)
create(:ci_build, pipeline: pipeline, name: 'docker 2 2', stage: build_stage, stage_idx: build_stage.position, scheduling_type: :dag)
create(:ci_build, pipeline: pipeline, name: 'rspec 1 2', scheduling_type: :stage, stage: test_stage, stage_idx: test_stage.position)
test_job = create(:ci_build, pipeline: pipeline, name: 'rspec 2 2', scheduling_type: :dag, stage: test_stage, stage_idx: test_stage.position)
create(:ci_build, pipeline: pipeline, name: 'docker 1 2', scheduling_type: :stage, ci_stage: build_stage, stage_idx: build_stage.position)
create(:ci_build, pipeline: pipeline, name: 'docker 2 2', ci_stage: build_stage, stage_idx: build_stage.position, scheduling_type: :dag)
create(:ci_build, pipeline: pipeline, name: 'rspec 1 2', scheduling_type: :stage, ci_stage: test_stage, stage_idx: test_stage.position)
test_job = create(:ci_build, pipeline: pipeline, name: 'rspec 2 2', scheduling_type: :dag, ci_stage: test_stage, stage_idx: test_stage.position)
create(:ci_build_need, build: test_job, name: 'my test job')
end

View File

@ -177,6 +177,32 @@ RSpec.describe 'getting an issue list at root level', feature_category: :team_pl
end
end
context 'with rate limiting' do
it_behaves_like 'rate limited endpoint', rate_limit_key: :search_rate_limit, graphql: true do
let_it_be(:current_user) { developer }
let(:error_message) do
'This endpoint has been requested with the search argument too many times. Try again later.'
end
def request
post_graphql(query({ search: 'test' }), current_user: developer)
end
end
it_behaves_like 'rate limited endpoint', rate_limit_key: :search_rate_limit_unauthenticated, graphql: true do
let_it_be(:current_user) { nil }
let(:error_message) do
'This endpoint has been requested with the search argument too many times. Try again later.'
end
def request
post_graphql(query({ search: 'test' }))
end
end
end
def execute_query
post_query
end

View File

@ -11,15 +11,24 @@ RSpec.describe API::Issues, feature_category: :team_planning do
let_it_be(:group) { create(:group, :public) }
let(:user2) { create(:user) }
let(:non_member) { create(:user) }
let_it_be(:user2) { create(:user) }
let_it_be(:non_member) { create(:user) }
let_it_be(:guest) { create(:user) }
let_it_be(:author) { create(:author) }
let_it_be(:assignee) { create(:assignee) }
let(:admin) { create(:user, :admin) }
let(:issue_title) { 'foo' }
let(:issue_description) { 'closed' }
let!(:closed_issue) do
let_it_be(:admin) { create(:user, :admin) }
let_it_be(:milestone) { create(:milestone, title: '1.0.0', project: project) }
let_it_be(:empty_milestone) do
create(:milestone, title: '2.0.0', project: project)
end
let(:no_milestone_title) { 'None' }
let(:any_milestone_title) { 'Any' }
let_it_be(:issue_title) { 'foo' }
let_it_be(:issue_description) { 'closed' }
let_it_be(:closed_issue) do
create :closed_issue,
author: user,
assignees: [user],
@ -31,7 +40,7 @@ RSpec.describe API::Issues, feature_category: :team_planning do
closed_at: 1.hour.ago
end
let!(:confidential_issue) do
let_it_be(:confidential_issue) do
create :issue,
:confidential,
project: project,
@ -41,7 +50,7 @@ RSpec.describe API::Issues, feature_category: :team_planning do
updated_at: 2.hours.ago
end
let!(:issue) do
let_it_be(:issue) do
create :issue,
author: user,
assignees: [user],
@ -53,22 +62,12 @@ RSpec.describe API::Issues, feature_category: :team_planning do
description: issue_description
end
let_it_be(:label) do
create(:label, title: 'label', color: '#FFAABB', project: project)
end
let_it_be(:label) { create(:label, title: 'label', color: '#FFAABB', project: project) }
let_it_be(:label_link) { create(:label_link, label: label, target: issue) }
let!(:label_link) { create(:label_link, label: label, target: issue) }
let(:milestone) { create(:milestone, title: '1.0.0', project: project) }
let_it_be(:empty_milestone) do
create(:milestone, title: '2.0.0', project: project)
end
let_it_be(:note) { create(:note_on_issue, author: user, project: project, noteable: issue) }
let!(:note) { create(:note_on_issue, author: user, project: project, noteable: issue) }
let(:no_milestone_title) { 'None' }
let(:any_milestone_title) { 'Any' }
let!(:merge_request1) do
let_it_be(:merge_request1) do
create(:merge_request,
:simple,
author: user,
@ -77,7 +76,7 @@ RSpec.describe API::Issues, feature_category: :team_planning do
description: "closes #{issue.to_reference}")
end
let!(:merge_request2) do
let_it_be(:merge_request2) do
create(:merge_request,
:simple,
author: user,
@ -101,7 +100,7 @@ RSpec.describe API::Issues, feature_category: :team_planning do
shared_examples 'project issues statistics' do
it 'returns project issues statistics' do
get api("/issues_statistics", user), params: params
get api("/projects/#{project.id}/issues_statistics", current_user), params: params
expect(response).to have_gitlab_http_status(:ok)
expect(json_response['statistics']).not_to be_nil
@ -138,6 +137,8 @@ RSpec.describe API::Issues, feature_category: :team_planning do
end
context 'issues_statistics' do
let(:current_user) { nil }
context 'no state is treated as all state' do
let(:params) { {} }
let(:counts) { { all: 2, closed: 1, opened: 1 } }
@ -534,30 +535,32 @@ RSpec.describe API::Issues, feature_category: :team_planning do
end
context 'issues_statistics' do
let(:current_user) { user }
context 'no state is treated as all state' do
let(:params) { {} }
let(:counts) { { all: 2, closed: 1, opened: 1 } }
let(:counts) { { all: 3, closed: 1, opened: 2 } }
it_behaves_like 'project issues statistics'
end
context 'statistics when all state is passed' do
let(:params) { { state: :all } }
let(:counts) { { all: 2, closed: 1, opened: 1 } }
let(:counts) { { all: 3, closed: 1, opened: 2 } }
it_behaves_like 'project issues statistics'
end
context 'closed state is treated as all state' do
let(:params) { { state: :closed } }
let(:counts) { { all: 2, closed: 1, opened: 1 } }
let(:counts) { { all: 3, closed: 1, opened: 2 } }
it_behaves_like 'project issues statistics'
end
context 'opened state is treated as all state' do
let(:params) { { state: :opened } }
let(:counts) { { all: 2, closed: 1, opened: 1 } }
let(:counts) { { all: 3, closed: 1, opened: 2 } }
it_behaves_like 'project issues statistics'
end
@ -592,7 +595,7 @@ RSpec.describe API::Issues, feature_category: :team_planning do
context 'sort does not affect statistics ' do
let(:params) { { state: :opened, order_by: 'updated_at' } }
let(:counts) { { all: 2, closed: 1, opened: 1 } }
let(:counts) { { all: 3, closed: 1, opened: 2 } }
it_behaves_like 'project issues statistics'
end

View File

@ -145,6 +145,11 @@ RSpec.describe API::Issues, feature_category: :team_planning do
let(:result) { issuable.id }
end
it_behaves_like 'issuable API rate-limited search' do
let(:url) { '/issues' }
let(:issuable) { issue }
end
it 'returns authentication error without any scope' do
get api('/issues')

View File

@ -55,6 +55,11 @@ RSpec.describe API::MergeRequests, feature_category: :source_code_management do
let(:issuable) { merge_request }
let(:result) { [merge_request_merged.id, merge_request_locked.id, merge_request_closed.id, merge_request.id] }
end
it_behaves_like 'issuable API rate-limited search' do
let(:url) { endpoint_path }
let(:issuable) { merge_request }
end
end
context 'when authenticated' do
@ -663,6 +668,11 @@ RSpec.describe API::MergeRequests, feature_category: :source_code_management do
let(:result) { [merge_request_merged.id, merge_request_locked.id, merge_request_closed.id, merge_request.id] }
end
it_behaves_like 'issuable API rate-limited search' do
let(:url) { '/merge_requests' }
let(:issuable) { merge_request }
end
it "returns authentication error without any scope" do
get api("/merge_requests")

View File

@ -12,4 +12,32 @@ RSpec.describe DashboardController, feature_category: :authentication_and_author
let(:url) { issues_dashboard_url(:ics, assignee_username: user.username) }
end
end
context 'issues dashboard' do
it_behaves_like 'rate limited endpoint', rate_limit_key: :search_rate_limit do
let_it_be(:current_user) { create(:user) }
before do
sign_in current_user
end
def request
get issues_dashboard_path, params: { scope: 'all', search: 'test' }
end
end
end
context 'merge requests dashboard' do
it_behaves_like 'rate limited endpoint', rate_limit_key: :search_rate_limit do
let_it_be(:current_user) { create(:user) }
before do
sign_in current_user
end
def request
get merge_requests_dashboard_path, params: { scope: 'all', search: 'test' }
end
end
end
end

View File

@ -32,6 +32,28 @@ RSpec.describe Projects::IssuesController, feature_category: :team_planning do
end
end
describe 'GET #index.json' do
let_it_be(:public_project) { create(:project, :public) }
it_behaves_like 'rate limited endpoint', rate_limit_key: :search_rate_limit do
let_it_be(:current_user) { create(:user) }
before do
sign_in current_user
end
def request
get project_issues_path(public_project, format: :json), params: { scope: 'all', search: 'test' }
end
end
it_behaves_like 'rate limited endpoint', rate_limit_key: :search_rate_limit_unauthenticated do
def request
get project_issues_path(public_project, format: :json), params: { scope: 'all', search: 'test' }
end
end
end
describe 'GET #discussions' do
before do
login_as(user)

View File

@ -19,6 +19,28 @@ RSpec.describe Projects::MergeRequestsController, feature_category: :source_code
end
end
describe 'GET #index' do
let_it_be(:public_project) { create(:project, :public) }
it_behaves_like 'rate limited endpoint', rate_limit_key: :search_rate_limit do
let_it_be(:current_user) { user }
before do
sign_in current_user
end
def request
get project_merge_requests_path(public_project), params: { scope: 'all', search: 'test' }
end
end
it_behaves_like 'rate limited endpoint', rate_limit_key: :search_rate_limit_unauthenticated do
def request
get project_merge_requests_path(public_project), params: { scope: 'all', search: 'test' }
end
end
end
describe 'GET #discussions' do
let_it_be(:discussion) { create(:discussion_note_on_merge_request, noteable: merge_request, project: project) }
let_it_be(:discussion_reply) { create(:discussion_note_on_merge_request, noteable: merge_request, project: project, in_reply_to: discussion) }

View File

@ -1,102 +0,0 @@
# frozen_string_literal: true
require 'spec_helper'
RSpec.describe Clusters::Aws::AuthorizeRoleService do
subject { described_class.new(user, params: params).execute }
let(:role) { create(:aws_role) }
let(:user) { role.user }
let(:credentials) { instance_double(Aws::Credentials) }
let(:credentials_service) { instance_double(Clusters::Aws::FetchCredentialsService, execute: credentials) }
let(:role_arn) { 'arn:my-role' }
let(:region) { 'region' }
let(:params) do
params = ActionController::Parameters.new({
cluster: {
role_arn: role_arn,
region: region
}
})
params.require(:cluster).permit(:role_arn, :region)
end
before do
allow(Clusters::Aws::FetchCredentialsService).to receive(:new)
.with(instance_of(Aws::Role)).and_return(credentials_service)
end
context 'role exists' do
it 'updates the existing Aws::Role record and returns a set of credentials' do
expect(subject.status).to eq(:ok)
expect(subject.body).to eq(credentials)
expect(role.reload.role_arn).to eq(role_arn)
end
end
context 'errors' do
shared_examples 'bad request' do
it 'returns an empty hash' do
expect(subject.status).to eq(:unprocessable_entity)
expect(subject.body).to eq({ message: message })
end
it 'logs the error' do
expect(::Gitlab::ErrorTracking).to receive(:track_exception)
subject
end
end
context 'role does not exist' do
let(:user) { create(:user) }
let(:message) { 'Error: Unable to find AWS role for current user' }
include_examples 'bad request'
end
context 'supplied ARN is invalid' do
let(:role_arn) { 'invalid' }
let(:message) { 'Validation failed: Role arn must be a valid Amazon Resource Name' }
include_examples 'bad request'
end
context 'client errors' do
before do
allow(credentials_service).to receive(:execute).and_raise(error)
end
context 'error fetching credentials' do
let(:error) { Aws::STS::Errors::ServiceError.new(nil, 'error message') }
let(:message) { 'AWS service error: error message' }
include_examples 'bad request'
end
context 'error in assuming role' do
let(:raw_message) { "User foo is not authorized to perform: sts:AssumeRole on resource bar" }
let(:error) { Aws::STS::Errors::AccessDenied.new(nil, raw_message) }
let(:message) { "Access denied: #{raw_message}" }
include_examples 'bad request'
end
context 'credentials not configured' do
let(:error) { Aws::Errors::MissingCredentialsError.new('error message') }
let(:message) { "Error: No AWS credentials were supplied" }
include_examples 'bad request'
end
context 'role not configured' do
let(:error) { Clusters::Aws::FetchCredentialsService::MissingRoleError.new('error message') }
let(:message) { "Error: No AWS provision role found for user" }
include_examples 'bad request'
end
end
end
end

View File

@ -1,139 +0,0 @@
# frozen_string_literal: true
require 'spec_helper'
RSpec.describe Clusters::Aws::FetchCredentialsService do
describe '#execute' do
let(:user) { create(:user) }
let(:provider) { create(:cluster_provider_aws, region: 'ap-southeast-2') }
let(:gitlab_access_key_id) { 'gitlab-access-key-id' }
let(:gitlab_secret_access_key) { 'gitlab-secret-access-key' }
let(:gitlab_credentials) { Aws::Credentials.new(gitlab_access_key_id, gitlab_secret_access_key) }
let(:sts_client) { Aws::STS::Client.new(credentials: gitlab_credentials, region: region) }
let(:assumed_role) { instance_double(Aws::AssumeRoleCredentials, credentials: assumed_role_credentials) }
let(:assumed_role_credentials) { double }
subject { described_class.new(provision_role, provider: provider).execute }
context 'provision role is configured' do
let(:provision_role) { create(:aws_role, user: user, region: 'custom-region') }
before do
stub_application_setting(eks_access_key_id: gitlab_access_key_id)
stub_application_setting(eks_secret_access_key: gitlab_secret_access_key)
expect(Aws::Credentials).to receive(:new)
.with(gitlab_access_key_id, gitlab_secret_access_key)
.and_return(gitlab_credentials)
expect(Aws::STS::Client).to receive(:new)
.with(credentials: gitlab_credentials, region: region)
.and_return(sts_client)
expect(Aws::AssumeRoleCredentials).to receive(:new)
.with(
client: sts_client,
role_arn: provision_role.role_arn,
role_session_name: session_name,
external_id: provision_role.role_external_id,
policy: session_policy
).and_return(assumed_role)
end
context 'provider is specified' do
let(:region) { provider.region }
let(:session_name) { "gitlab-eks-cluster-#{provider.cluster_id}-user-#{user.id}" }
let(:session_policy) { nil }
it { is_expected.to eq assumed_role_credentials }
end
context 'provider is not specifed' do
let(:provider) { nil }
let(:region) { provision_role.region }
let(:session_name) { "gitlab-eks-autofill-user-#{user.id}" }
let(:session_policy) { 'policy-document' }
subject { described_class.new(provision_role, provider: provider).execute }
before do
stub_file_read(Rails.root.join('vendor', 'aws', 'iam', 'eks_cluster_read_only_policy.json'), content: session_policy)
end
it { is_expected.to eq assumed_role_credentials }
context 'region is not specifed' do
let(:region) { Clusters::Providers::Aws::DEFAULT_REGION }
let(:provision_role) { create(:aws_role, user: user, region: nil) }
it { is_expected.to eq assumed_role_credentials }
end
end
end
context 'provision role is not configured' do
let(:provision_role) { nil }
it 'raises an error' do
expect { subject }.to raise_error(described_class::MissingRoleError, 'AWS provisioning role not configured')
end
end
context 'with an instance profile attached to an IAM role' do
let(:sts_client) { Aws::STS::Client.new(region: region, stub_responses: true) }
let(:provision_role) { create(:aws_role, user: user, region: 'custom-region') }
before do
stub_application_setting(eks_access_key_id: nil)
stub_application_setting(eks_secret_access_key: nil)
expect(Aws::STS::Client).to receive(:new)
.with(region: region)
.and_return(sts_client)
expect(Aws::AssumeRoleCredentials).to receive(:new)
.with(
client: sts_client,
role_arn: provision_role.role_arn,
role_session_name: session_name,
external_id: provision_role.role_external_id,
policy: session_policy
).and_call_original
end
context 'provider is specified' do
let(:region) { provider.region }
let(:session_name) { "gitlab-eks-cluster-#{provider.cluster_id}-user-#{user.id}" }
let(:session_policy) { nil }
it 'returns credentials', :aggregate_failures do
expect(subject.access_key_id).to be_present
expect(subject.secret_access_key).to be_present
expect(subject.session_token).to be_present
end
end
context 'provider is not specifed' do
let(:provider) { nil }
let(:region) { provision_role.region }
let(:session_name) { "gitlab-eks-autofill-user-#{user.id}" }
let(:session_policy) { 'policy-document' }
before do
stub_file_read(Rails.root.join('vendor', 'aws', 'iam', 'eks_cluster_read_only_policy.json'), content: session_policy)
end
subject { described_class.new(provision_role, provider: provider).execute }
it 'returns credentials', :aggregate_failures do
expect(subject.access_key_id).to be_present
expect(subject.secret_access_key).to be_present
expect(subject.session_token).to be_present
end
end
end
end
end

View File

@ -1,124 +0,0 @@
# frozen_string_literal: true
require 'spec_helper'
RSpec.describe Clusters::Aws::FinalizeCreationService do
describe '#execute' do
let(:provider) { create(:cluster_provider_aws, :creating) }
let(:platform) { provider.cluster.platform_kubernetes }
let(:create_service_account_service) { double(execute: true) }
let(:fetch_token_service) { double(execute: gitlab_token) }
let(:kube_client) { double(create_config_map: true) }
let(:cluster_stack) { double(outputs: [endpoint_output, cert_output, node_role_output]) }
let(:node_auth_config_map) { double }
let(:endpoint_output) { double(output_key: 'ClusterEndpoint', output_value: api_url) }
let(:cert_output) { double(output_key: 'ClusterCertificate', output_value: Base64.encode64(ca_pem)) }
let(:node_role_output) { double(output_key: 'NodeInstanceRole', output_value: node_role) }
let(:api_url) { 'https://kubernetes.example.com' }
let(:ca_pem) { File.read(Rails.root.join('spec/fixtures/clusters/sample_cert.pem')) }
let(:gitlab_token) { 'gitlab-token' }
let(:iam_token) { 'iam-token' }
let(:node_role) { 'arn::aws::iam::123456789012:role/node-role' }
subject { described_class.new.execute(provider) }
before do
allow(Clusters::Kubernetes::CreateOrUpdateServiceAccountService).to receive(:gitlab_creator)
.with(kube_client, rbac: true)
.and_return(create_service_account_service)
allow(Clusters::Kubernetes::FetchKubernetesTokenService).to receive(:new)
.with(
kube_client,
Clusters::Kubernetes::GITLAB_ADMIN_TOKEN_NAME,
Clusters::Kubernetes::GITLAB_SERVICE_ACCOUNT_NAMESPACE)
.and_return(fetch_token_service)
allow(Gitlab::Kubernetes::KubeClient).to receive(:new)
.with(
api_url,
auth_options: { bearer_token: iam_token },
ssl_options: {
verify_ssl: OpenSSL::SSL::VERIFY_PEER,
cert_store: instance_of(OpenSSL::X509::Store)
},
http_proxy_uri: nil
)
.and_return(kube_client)
allow(provider.api_client).to receive(:describe_stacks)
.with(stack_name: provider.cluster.name)
.and_return(double(stacks: [cluster_stack]))
allow(Kubeclient::AmazonEksCredentials).to receive(:token)
.with(provider.credentials, provider.cluster.name)
.and_return(iam_token)
allow(Gitlab::Kubernetes::ConfigMaps::AwsNodeAuth).to receive(:new)
.with(node_role).and_return(double(generate: node_auth_config_map))
end
it 'configures the provider and platform' do
subject
expect(provider).to be_created
expect(platform.api_url).to eq(api_url)
expect(platform.ca_pem).to eq(ca_pem)
expect(platform.token).to eq(gitlab_token)
expect(platform).to be_rbac
end
it 'calls the create_service_account_service' do
expect(create_service_account_service).to receive(:execute).once
subject
end
it 'configures cluster node authentication' do
expect(kube_client).to receive(:create_config_map).with(node_auth_config_map).once
subject
end
describe 'error handling' do
shared_examples 'provision error' do |message|
it "sets the status to :errored with an appropriate error message" do
subject
expect(provider).to be_errored
expect(provider.status_reason).to include(message)
end
end
context 'failed to request stack details from AWS' do
before do
allow(provider.api_client).to receive(:describe_stacks)
.and_raise(Aws::CloudFormation::Errors::ServiceError.new(double, "Error message"))
end
include_examples 'provision error', 'Failed to fetch CloudFormation stack'
end
context 'failed to create auth config map' do
before do
allow(kube_client).to receive(:create_config_map)
.and_raise(Kubeclient::HttpError.new(500, 'Error', nil))
end
include_examples 'provision error', 'Failed to run Kubeclient'
end
context 'failed to save records' do
before do
allow(provider.cluster).to receive(:save!)
.and_raise(ActiveRecord::RecordInvalid)
end
include_examples 'provision error', 'Failed to configure EKS provider'
end
end
end
end

View File

@ -1,130 +0,0 @@
# frozen_string_literal: true
require 'spec_helper'
RSpec.describe Clusters::Aws::ProvisionService do
describe '#execute' do
let(:provider) { create(:cluster_provider_aws) }
let(:provision_role) { create(:aws_role, user: provider.created_by_user) }
let(:client) { instance_double(Aws::CloudFormation::Client, create_stack: true) }
let(:cloudformation_template) { double }
let(:credentials) do
instance_double(
Aws::Credentials,
access_key_id: 'key',
secret_access_key: 'secret',
session_token: 'token'
)
end
let(:parameters) do
[
{ parameter_key: 'ClusterName', parameter_value: provider.cluster.name },
{ parameter_key: 'ClusterRole', parameter_value: provider.role_arn },
{ parameter_key: 'KubernetesVersion', parameter_value: provider.kubernetes_version },
{ parameter_key: 'ClusterControlPlaneSecurityGroup', parameter_value: provider.security_group_id },
{ parameter_key: 'VpcId', parameter_value: provider.vpc_id },
{ parameter_key: 'Subnets', parameter_value: provider.subnet_ids.join(',') },
{ parameter_key: 'NodeAutoScalingGroupDesiredCapacity', parameter_value: provider.num_nodes.to_s },
{ parameter_key: 'NodeInstanceType', parameter_value: provider.instance_type },
{ parameter_key: 'KeyName', parameter_value: provider.key_name }
]
end
subject { described_class.new.execute(provider) }
before do
allow(Clusters::Aws::FetchCredentialsService).to receive(:new)
.with(provision_role, provider: provider)
.and_return(double(execute: credentials))
allow(provider).to receive(:api_client)
.and_return(client)
stub_file_read(Rails.root.join('vendor', 'aws', 'cloudformation', 'eks_cluster.yaml'), content: cloudformation_template)
end
it 'updates the provider status to :creating and configures the provider with credentials' do
subject
expect(provider).to be_creating
expect(provider.access_key_id).to eq 'key'
expect(provider.secret_access_key).to eq 'secret'
expect(provider.session_token).to eq 'token'
end
it 'creates a CloudFormation stack' do
expect(client).to receive(:create_stack).with(
stack_name: provider.cluster.name,
template_body: cloudformation_template,
parameters: parameters,
capabilities: ["CAPABILITY_IAM"]
)
subject
end
it 'schedules a worker to monitor creation status' do
expect(WaitForClusterCreationWorker).to receive(:perform_in)
.with(Clusters::Aws::VerifyProvisionStatusService::INITIAL_INTERVAL, provider.cluster_id)
subject
end
describe 'error handling' do
shared_examples 'provision error' do |message|
it "sets the status to :errored with an appropriate error message" do
subject
expect(provider).to be_errored
expect(provider.status_reason).to include(message)
end
end
context 'invalid state transition' do
before do
allow(provider).to receive(:make_creating).and_return(false)
end
include_examples 'provision error', 'Failed to update provider record'
end
context 'AWS role is not configured' do
before do
allow(Clusters::Aws::FetchCredentialsService).to receive(:new)
.and_raise(Clusters::Aws::FetchCredentialsService::MissingRoleError)
end
include_examples 'provision error', 'Amazon role is not configured'
end
context 'AWS credentials are not configured' do
before do
allow(Clusters::Aws::FetchCredentialsService).to receive(:new)
.and_raise(Aws::Errors::MissingCredentialsError)
end
include_examples 'provision error', 'Amazon credentials are not configured'
end
context 'Authentication failure' do
before do
allow(Clusters::Aws::FetchCredentialsService).to receive(:new)
.and_raise(Aws::STS::Errors::ServiceError.new(double, 'Error message'))
end
include_examples 'provision error', 'Amazon authentication failed'
end
context 'CloudFormation failure' do
before do
allow(client).to receive(:create_stack)
.and_raise(Aws::CloudFormation::Errors::ServiceError.new(double, 'Error message'))
end
include_examples 'provision error', 'Amazon CloudFormation request failed'
end
end
end
end

View File

@ -1,76 +0,0 @@
# frozen_string_literal: true
require 'spec_helper'
RSpec.describe Clusters::Aws::VerifyProvisionStatusService do
describe '#execute' do
let(:provider) { create(:cluster_provider_aws) }
let(:stack) { double(stack_status: stack_status, creation_time: creation_time) }
let(:creation_time) { 1.minute.ago }
subject { described_class.new.execute(provider) }
before do
allow(provider.api_client).to receive(:describe_stacks)
.with(stack_name: provider.cluster.name)
.and_return(double(stacks: [stack]))
end
shared_examples 'provision error' do |message|
it "sets the status to :errored with an appropriate error message" do
subject
expect(provider).to be_errored
expect(provider.status_reason).to include(message)
end
end
context 'stack creation is still in progress' do
let(:stack_status) { 'CREATE_IN_PROGRESS' }
let(:verify_service) { double(execute: true) }
it 'schedules a worker to check again later' do
expect(WaitForClusterCreationWorker).to receive(:perform_in)
.with(described_class::POLL_INTERVAL, provider.cluster_id)
subject
end
context 'stack creation is taking too long' do
let(:creation_time) { 1.hour.ago }
include_examples 'provision error', 'Kubernetes cluster creation time exceeds timeout'
end
end
context 'stack creation is complete' do
let(:stack_status) { 'CREATE_COMPLETE' }
let(:finalize_service) { double(execute: true) }
it 'finalizes creation' do
expect(Clusters::Aws::FinalizeCreationService).to receive(:new).and_return(finalize_service)
expect(finalize_service).to receive(:execute).with(provider).once
subject
end
end
context 'stack creation failed' do
let(:stack_status) { 'CREATE_FAILED' }
include_examples 'provision error', 'Unexpected status'
end
context 'error communicating with CloudFormation API' do
let(:stack_status) { 'CREATE_IN_PROGRESS' }
before do
allow(provider.api_client).to receive(:describe_stacks)
.and_raise(Aws::CloudFormation::Errors::ServiceError.new(double, 'Error message'))
end
include_examples 'provision error', 'Amazon CloudFormation request failed'
end
end
end

View File

@ -54,7 +54,6 @@ RSpec.describe Clusters::CreateService do
let!(:cluster) { create(:cluster, :provided_by_gcp, :production_environment, projects: [project]) }
it 'creates another cluster' do
expect(ClusterProvisionWorker).to receive(:perform_async)
expect { subject }.to change { Clusters::Cluster.count }.by(1)
end
end

View File

@ -1,45 +0,0 @@
# frozen_string_literal: true
require 'spec_helper'
RSpec.describe Clusters::Gcp::FetchOperationService do
include GoogleApi::CloudPlatformHelpers
describe '#execute' do
let(:provider) { create(:cluster_provider_gcp, :creating) }
let(:gcp_project_id) { provider.gcp_project_id }
let(:zone) { provider.zone }
let(:operation_id) { provider.operation_id }
shared_examples 'success' do
it 'yields' do
expect { |b| described_class.new.execute(provider, &b) }
.to yield_with_args
end
end
shared_examples 'error' do
it 'sets an error to provider object' do
expect { |b| described_class.new.execute(provider, &b) }
.not_to yield_with_args
expect(provider.reload).to be_errored
end
end
context 'when succeeded to fetch operation' do
before do
stub_cloud_platform_get_zone_operation(gcp_project_id, zone, operation_id)
end
it_behaves_like 'success'
end
context 'when Internal Server Error happened' do
before do
stub_cloud_platform_get_zone_operation_error(gcp_project_id, zone, operation_id)
end
it_behaves_like 'error'
end
end
end

View File

@ -1,161 +0,0 @@
# frozen_string_literal: true
require 'spec_helper'
RSpec.describe Clusters::Gcp::FinalizeCreationService, '#execute' do
include GoogleApi::CloudPlatformHelpers
include KubernetesHelpers
let(:cluster) { create(:cluster, :project, :providing_by_gcp) }
let(:provider) { cluster.provider }
let(:platform) { cluster.platform }
let(:endpoint) { '111.111.111.111' }
let(:api_url) { 'https://' + endpoint }
let(:secret_name) { 'gitlab-token' }
let(:token) { 'sample-token' }
let(:namespace) { "#{cluster.project.path}-#{cluster.project.id}" }
subject { described_class.new.execute(provider) }
shared_examples 'success' do
it 'configures provider and kubernetes' do
subject
expect(provider).to be_created
end
it 'properly configures database models' do
subject
cluster.reload
expect(provider.endpoint).to eq(endpoint)
expect(platform.api_url).to eq(api_url)
expect(platform.ca_cert).to eq(Base64.decode64(load_sample_cert).strip)
expect(platform.token).to eq(token)
end
end
shared_examples 'error' do
it 'sets an error to provider object' do
subject
expect(provider.reload).to be_errored
end
end
shared_examples 'kubernetes information not successfully fetched' do
context 'when failed to fetch gke cluster info' do
before do
stub_cloud_platform_get_zone_cluster_error(provider.gcp_project_id, provider.zone, cluster.name)
end
it_behaves_like 'error'
end
context 'when token is empty' do
let(:token) { '' }
it_behaves_like 'error'
end
context 'when failed to fetch kubernetes token' do
before do
stub_kubeclient_get_secret_error(api_url, secret_name, namespace: 'default')
end
it_behaves_like 'error'
end
context 'when service account fails to create' do
before do
stub_kubeclient_create_service_account_error(api_url, namespace: 'default')
end
it_behaves_like 'error'
end
end
shared_context 'kubernetes information successfully fetched' do
before do
stub_cloud_platform_get_zone_cluster(
provider.gcp_project_id, provider.zone, cluster.name, { endpoint: endpoint }
)
stub_kubeclient_discover(api_url)
stub_kubeclient_get_namespace(api_url)
stub_kubeclient_create_namespace(api_url)
stub_kubeclient_get_service_account_error(api_url, 'gitlab')
stub_kubeclient_create_service_account(api_url)
stub_kubeclient_create_secret(api_url)
stub_kubeclient_put_secret(api_url, 'gitlab-token')
stub_kubeclient_get_secret(
api_url,
metadata_name: secret_name,
token: Base64.encode64(token),
namespace: 'default'
)
stub_kubeclient_put_cluster_role_binding(api_url, 'gitlab-admin')
end
end
context 'With a legacy ABAC cluster' do
before do
provider.legacy_abac = true
end
include_context 'kubernetes information successfully fetched'
it_behaves_like 'success'
it 'uses ABAC authorization type' do
subject
cluster.reload
expect(platform).to be_abac
expect(platform.authorization_type).to eq('abac')
end
it_behaves_like 'kubernetes information not successfully fetched'
end
context 'With an RBAC cluster' do
before do
provider.legacy_abac = false
end
include_context 'kubernetes information successfully fetched'
it_behaves_like 'success'
it 'uses RBAC authorization type' do
subject
cluster.reload
expect(platform).to be_rbac
expect(platform.authorization_type).to eq('rbac')
end
it_behaves_like 'kubernetes information not successfully fetched'
end
context 'With a Cloud Run cluster' do
before do
provider.cloud_run = true
end
include_context 'kubernetes information successfully fetched'
it_behaves_like 'success'
it 'has knative pre-installed' do
subject
cluster.reload
expect(cluster.application_knative).to be_present
expect(cluster.application_knative).to be_pre_installed
end
end
end

View File

@ -1,71 +0,0 @@
# frozen_string_literal: true
require 'spec_helper'
# Spec for the service that requests GKE cluster creation from the Google
# Cloud API and transitions the provider into the `creating` state.
RSpec.describe Clusters::Gcp::ProvisionService do
  include GoogleApi::CloudPlatformHelpers

  describe '#execute' do
    let(:provider) { create(:cluster_provider_gcp, :scheduled) }
    let(:gcp_project_id) { provider.gcp_project_id }
    let(:zone) { provider.zone }

    # Success: a polling worker is scheduled and the provider starts creating.
    shared_examples 'success' do
      it 'schedules a worker for status monitoring' do
        expect(WaitForClusterCreationWorker).to receive(:perform_in)

        described_class.new.execute(provider)

        expect(provider.reload).to be_creating
      end
    end

    # Failure: the provider record ends up in the errored state.
    shared_examples 'error' do
      it 'sets an error to provider object' do
        described_class.new.execute(provider)

        expect(provider.reload).to be_errored
      end
    end

    context 'when succeeded to request provision' do
      before do
        stub_cloud_platform_create_cluster(gcp_project_id, zone)
      end

      it_behaves_like 'success'
    end

    context 'when operation status is unexpected' do
      before do
        stub_cloud_platform_create_cluster(
          gcp_project_id, zone,
          {
            "status": 'unexpected'
          })
      end

      it_behaves_like 'error'
    end

    context 'when selfLink is unexpected' do
      before do
        stub_cloud_platform_create_cluster(
          gcp_project_id, zone,
          {
            "selfLink": 'unexpected'
          })
      end

      it_behaves_like 'error'
    end

    context 'when Internal Server Error happened' do
      before do
        stub_cloud_platform_create_cluster_error(gcp_project_id, zone)
      end

      it_behaves_like 'error'
    end
  end
end

View File

@ -1,111 +0,0 @@
# frozen_string_literal: true
require 'spec_helper'
# Spec for the service that polls the GCP zone operation for a cluster under
# creation and either keeps polling, finalizes creation, or errors out.
RSpec.describe Clusters::Gcp::VerifyProvisionStatusService do
  include GoogleApi::CloudPlatformHelpers

  describe '#execute' do
    let(:provider) { create(:cluster_provider_gcp, :creating) }
    let(:gcp_project_id) { provider.gcp_project_id }
    let(:zone) { provider.zone }
    let(:operation_id) { provider.operation_id }

    # Provisioning still in progress: another polling worker is scheduled.
    shared_examples 'continue_creation' do
      it 'schedules a worker for status monitoring' do
        expect(WaitForClusterCreationWorker).to receive(:perform_in)

        described_class.new.execute(provider)
      end
    end

    # Provisioning finished: the finalize service is invoked.
    shared_examples 'finalize_creation' do
      it 'finalizes cluster creation' do
        expect_next_instance_of(Clusters::Gcp::FinalizeCreationService) do |instance|
          expect(instance).to receive(:execute)
        end

        described_class.new.execute(provider)
      end
    end

    # Failure: the provider record ends up in the errored state.
    shared_examples 'error' do
      it 'sets an error to provider object' do
        described_class.new.execute(provider)

        expect(provider.reload).to be_errored
      end
    end

    context 'when operation status is RUNNING' do
      before do
        stub_cloud_platform_get_zone_operation(
          gcp_project_id, zone, operation_id,
          {
            "status": 'RUNNING',
            "startTime": 1.minute.ago.strftime("%FT%TZ")
          })
      end

      it_behaves_like 'continue_creation'

      context 'when cluster creation time exceeds timeout' do
        before do
          stub_cloud_platform_get_zone_operation(
            gcp_project_id, zone, operation_id,
            {
              "status": 'RUNNING',
              "startTime": 30.minutes.ago.strftime("%FT%TZ")
            })
        end

        it_behaves_like 'error'
      end
    end

    context 'when operation status is PENDING' do
      before do
        stub_cloud_platform_get_zone_operation(
          gcp_project_id, zone, operation_id,
          {
            "status": 'PENDING',
            "startTime": 1.minute.ago.strftime("%FT%TZ")
          })
      end

      it_behaves_like 'continue_creation'
    end

    context 'when operation status is DONE' do
      before do
        stub_cloud_platform_get_zone_operation(
          gcp_project_id, zone, operation_id,
          {
            "status": 'DONE'
          })
      end

      it_behaves_like 'finalize_creation'
    end

    context 'when operation status is unexpected' do
      before do
        stub_cloud_platform_get_zone_operation(
          gcp_project_id, zone, operation_id,
          {
            "status": 'unexpected'
          })
      end

      it_behaves_like 'error'
    end

    context 'when failed to get operation status' do
      before do
        stub_cloud_platform_get_zone_operation_error(gcp_project_id, zone, operation_id)
      end

      it_behaves_like 'error'
    end
  end
end

View File

@ -37,9 +37,7 @@ RSpec.shared_context 'invalid cluster create params' do
end
RSpec.shared_examples 'create cluster service success' do
it 'creates a cluster object and performs a worker' do
expect(ClusterProvisionWorker).to receive(:perform_async)
it 'creates a cluster object' do
expect { subject }
.to change { Clusters::Cluster.count }.by(1)
.and change { Clusters::Providers::Gcp.count }.by(1)
@ -60,7 +58,6 @@ end
RSpec.shared_examples 'create cluster service error' do
it 'returns an error' do
expect(ClusterProvisionWorker).not_to receive(:perform_async)
expect { subject }.to change { Clusters::Cluster.count }.by(0)
expect(subject.errors[:"provider_gcp.gcp_project_id"]).to be_present
end

View File

@ -42,6 +42,10 @@ RSpec.shared_examples 'issuables list meta-data' do |issuable_type, action = nil
let(:result_issuable) { issuables.first }
let(:search) { result_issuable.title }
before do
stub_application_setting(search_rate_limit: 0, search_rate_limit_unauthenticated: 0)
end
# .simple_sorts is the same across all Sortable classes
sorts = ::Issue.simple_sorts.keys + %w[popularity priority label_priority]
sorts.each do |sort|

View File

@ -5,7 +5,9 @@
# - current_user
# - error_message # optional
RSpec.shared_examples 'rate limited endpoint' do |rate_limit_key:|
RSpec.shared_examples 'rate limited endpoint' do |rate_limit_key:, graphql: false|
let(:error_message) { _('This endpoint has been requested too many times. Try again later.') }
context 'when rate limiter enabled', :freeze_time, :clean_gitlab_redis_rate_limiting do
let(:expected_logger_attributes) do
{
@ -25,8 +27,6 @@ RSpec.shared_examples 'rate limited endpoint' do |rate_limit_key:|
end
end
let(:error_message) { _('This endpoint has been requested too many times. Try again later.') }
before do
allow(Gitlab::ApplicationRateLimiter).to receive(:threshold).with(rate_limit_key).and_return(1)
end
@ -37,12 +37,16 @@ RSpec.shared_examples 'rate limited endpoint' do |rate_limit_key:|
request
request
expect(response).to have_gitlab_http_status(:too_many_requests)
if graphql
expect_graphql_errors_to_include(error_message)
else
expect(response).to have_gitlab_http_status(:too_many_requests)
if example.metadata[:type] == :controller
expect(response.body).to eq(error_message)
else # it is API spec
expect(response.body).to eq({ message: { error: error_message } }.to_json)
if response.content_type == 'application/json' # it is API spec
expect(response.body).to eq({ message: { error: error_message } }.to_json)
else
expect(response.body).to eq(error_message)
end
end
end
end
@ -57,7 +61,11 @@ RSpec.shared_examples 'rate limited endpoint' do |rate_limit_key:|
request
expect(response).not_to have_gitlab_http_status(:too_many_requests)
if graphql
expect_graphql_errors_to_be_empty
else
expect(response).not_to have_gitlab_http_status(:too_many_requests)
end
end
end
end

View File

@ -34,3 +34,35 @@ RSpec.shared_examples 'issuable anonymous search' do
end
end
end
# Shared examples for API endpoints that rate-limit issuable searches.
# Expects `user`, `url` and `issuable` to be defined by the including spec.
RSpec.shared_examples 'issuable API rate-limited search' do
  # Authenticated requests are limited by :search_rate_limit.
  it_behaves_like 'rate limited endpoint', rate_limit_key: :search_rate_limit do
    let(:current_user) { user }

    def request
      get api(url, current_user), params: { scope: 'all', search: issuable.title }
    end
  end

  # Anonymous requests are limited by :search_rate_limit_unauthenticated.
  it_behaves_like 'rate limited endpoint', rate_limit_key: :search_rate_limit_unauthenticated do
    def request
      get api(url), params: { scope: 'all', search: issuable.title }
    end
  end

  context 'when rate_limit_issuable_searches is disabled', :freeze_time, :clean_gitlab_redis_rate_limiting do
    before do
      stub_feature_flags(rate_limit_issuable_searches: false)

      # Threshold of 1 would trip on the second request if the flag were honored.
      allow(Gitlab::ApplicationRateLimiter).to receive(:threshold)
        .with(:search_rate_limit_unauthenticated).and_return(1)
    end

    it 'does not enforce the rate limit' do
      get api(url), params: { scope: 'all', search: issuable.title }
      get api(url), params: { scope: 'all', search: issuable.title }

      expect(response).to have_gitlab_http_status(:ok)
    end
  end
end

View File

@ -1,47 +0,0 @@
# frozen_string_literal: true
require 'spec_helper'
# Spec for the worker that dispatches cluster provisioning to the
# provider-specific service (GCP or AWS) and ignores user-provided clusters.
RSpec.describe ClusterProvisionWorker do
  describe '#perform' do
    context 'when provider type is gcp' do
      let(:cluster) { create(:cluster, provider_type: :gcp, provider_gcp: provider) }
      let(:provider) { create(:cluster_provider_gcp, :scheduled) }

      it 'provisions a cluster' do
        expect_any_instance_of(Clusters::Gcp::ProvisionService).to receive(:execute).with(provider)

        described_class.new.perform(cluster.id)
      end
    end

    context 'when provider type is aws' do
      let(:cluster) { create(:cluster, provider_type: :aws, provider_aws: provider) }
      let(:provider) { create(:cluster_provider_aws, :scheduled) }

      it 'provisions a cluster' do
        expect_any_instance_of(Clusters::Aws::ProvisionService).to receive(:execute).with(provider)

        described_class.new.perform(cluster.id)
      end
    end

    # User-provided clusters are not provisioned by GitLab.
    context 'when provider type is user' do
      let(:cluster) { create(:cluster, :provided_by_user) }

      it 'does not provision a cluster' do
        expect_any_instance_of(Clusters::Gcp::ProvisionService).not_to receive(:execute)

        described_class.new.perform(cluster.id)
      end
    end

    context 'when cluster does not exist' do
      it 'does not provision a cluster' do
        expect_any_instance_of(Clusters::Gcp::ProvisionService).not_to receive(:execute)

        described_class.new.perform(123)
      end
    end
  end
end

View File

@ -1,47 +0,0 @@
# frozen_string_literal: true
require 'spec_helper'
# Spec for the polling worker that verifies provisioning status via the
# provider-specific service (GCP or AWS) and ignores user-provided clusters.
RSpec.describe WaitForClusterCreationWorker do
  describe '#perform' do
    context 'when provider type is gcp' do
      let(:cluster) { create(:cluster, provider_type: :gcp, provider_gcp: provider) }
      let(:provider) { create(:cluster_provider_gcp, :creating) }

      it 'provisions a cluster' do
        expect_any_instance_of(Clusters::Gcp::VerifyProvisionStatusService).to receive(:execute).with(provider)

        described_class.new.perform(cluster.id)
      end
    end

    context 'when provider type is aws' do
      let(:cluster) { create(:cluster, provider_type: :aws, provider_aws: provider) }
      let(:provider) { create(:cluster_provider_aws, :creating) }

      it 'provisions a cluster' do
        expect_any_instance_of(Clusters::Aws::VerifyProvisionStatusService).to receive(:execute).with(provider)

        described_class.new.perform(cluster.id)
      end
    end

    # User-provided clusters have no provisioning to verify.
    context 'when provider type is user' do
      let(:cluster) { create(:cluster, provider_type: :user) }

      it 'does not provision a cluster' do
        expect_any_instance_of(Clusters::Gcp::VerifyProvisionStatusService).not_to receive(:execute)

        described_class.new.perform(cluster.id)
      end
    end

    context 'when cluster does not exist' do
      it 'does not provision a cluster' do
        expect_any_instance_of(Clusters::Gcp::VerifyProvisionStatusService).not_to receive(:execute)

        described_class.new.perform(123)
      end
    end
  end
end

View File

@ -1,342 +0,0 @@
---
AWSTemplateFormatVersion: "2010-09-09"
Description: GitLab EKS Cluster

Parameters:
  KubernetesVersion:
    Description: The Kubernetes version to install
    Type: String
    Default: "1.20"
    AllowedValues:
      - "1.16"
      - "1.17"
      - "1.18"
      - "1.19"
      - "1.20"
  KeyName:
    Description: The EC2 Key Pair to allow SSH access to the node instances
    Type: AWS::EC2::KeyPair::KeyName
  NodeImageIdSSMParam:
    Type: "AWS::SSM::Parameter::Value<AWS::EC2::Image::Id>"
    # NOTE(review): default is pinned to the 1.17 EKS-optimized AMI while
    # KubernetesVersion defaults to "1.20" — confirm whether this default
    # should track the selected Kubernetes version.
    Default: /aws/service/eks/optimized-ami/1.17/amazon-linux-2/recommended/image_id
    Description: AWS Systems Manager Parameter Store parameter of the AMI ID for the worker node instances.
  NodeInstanceType:
    Description: EC2 instance type for the node instances
    Type: String
    Default: t3.medium
    ConstraintDescription: Must be a valid EC2 instance type
    AllowedValues:
      - t2.small
      - t2.medium
      - t2.large
      - t2.xlarge
      - t2.2xlarge
      - t3.nano
      - t3.micro
      - t3.small
      - t3.medium
      - t3.large
      - t3.xlarge
      - t3.2xlarge
      - m3.medium
      - m3.large
      - m3.xlarge
      - m3.2xlarge
      - m4.large
      - m4.xlarge
      - m4.2xlarge
      - m4.4xlarge
      - m4.10xlarge
      - m5.large
      - m5.xlarge
      - m5.2xlarge
      - m5.4xlarge
      - m5.12xlarge
      - m5.24xlarge
      - c4.large
      - c4.xlarge
      - c4.2xlarge
      - c4.4xlarge
      - c4.8xlarge
      - c5.large
      - c5.xlarge
      - c5.2xlarge
      - c5.4xlarge
      - c5.9xlarge
      - c5.18xlarge
      - i3.large
      - i3.xlarge
      - i3.2xlarge
      - i3.4xlarge
      - i3.8xlarge
      - i3.16xlarge
      - r3.xlarge
      - r3.2xlarge
      - r3.4xlarge
      - r3.8xlarge
      - r4.large
      - r4.xlarge
      - r4.2xlarge
      - r4.4xlarge
      - r4.8xlarge
      - r4.16xlarge
      - x1.16xlarge
      - x1.32xlarge
      - p2.xlarge
      - p2.8xlarge
      - p2.16xlarge
      - p3.2xlarge
      - p3.8xlarge
      - p3.16xlarge
      - p3dn.24xlarge
      - r5.large
      - r5.xlarge
      - r5.2xlarge
      - r5.4xlarge
      - r5.12xlarge
      - r5.24xlarge
      - r5d.large
      - r5d.xlarge
      - r5d.2xlarge
      - r5d.4xlarge
      - r5d.12xlarge
      - r5d.24xlarge
      - z1d.large
      - z1d.xlarge
      - z1d.2xlarge
      - z1d.3xlarge
      - z1d.6xlarge
      - z1d.12xlarge
  NodeAutoScalingGroupDesiredCapacity:
    Description: Desired capacity of Node Group ASG.
    Type: Number
    Default: 3
  NodeVolumeSize:
    Description: Node volume size
    Type: Number
    Default: 20
  ClusterName:
    Description: Unique name for your Amazon EKS cluster.
    Type: String
  ClusterRole:
    Description: The IAM Role to allow Amazon EKS and the Kubernetes control plane to manage AWS resources on your behalf.
    Type: String
  ClusterControlPlaneSecurityGroup:
    Description: The security groups to apply to the EKS-managed Elastic Network Interfaces that are created in your worker node subnets.
    Type: AWS::EC2::SecurityGroup::Id
  VpcId:
    Description: The VPC to use for your EKS Cluster resources.
    Type: AWS::EC2::VPC::Id
  Subnets:
    Description: The subnets in your VPC where your worker nodes will run.
    Type: List<AWS::EC2::Subnet::Id>
# Groups parameters into labeled sections in the CloudFormation console UI.
Metadata:
  AWS::CloudFormation::Interface:
    ParameterGroups:
      - Label:
          default: EKS Cluster
        Parameters:
          - ClusterName
          - ClusterRole
          - KubernetesVersion
          - ClusterControlPlaneSecurityGroup
      - Label:
          default: Worker Node Configuration
        Parameters:
          - NodeAutoScalingGroupDesiredCapacity
          - NodeInstanceType
          - NodeImageIdSSMParam
          - NodeVolumeSize
          - KeyName
      - Label:
          default: Worker Network Configuration
        Parameters:
          - VpcId
          - Subnets
Resources:
  # The EKS control plane.
  Cluster:
    Type: AWS::EKS::Cluster
    Properties:
      Name: !Sub ${ClusterName}
      Version: !Sub ${KubernetesVersion}
      RoleArn: !Sub ${ClusterRole}
      ResourcesVpcConfig:
        SecurityGroupIds:
          - !Ref ClusterControlPlaneSecurityGroup
        SubnetIds: !Ref Subnets

  # Instance profile attaching the node role to worker EC2 instances.
  NodeInstanceProfile:
    Type: AWS::IAM::InstanceProfile
    Properties:
      Path: "/"
      Roles:
        - !Ref NodeInstanceRole

  # IAM role assumed by worker nodes (EKS worker, CNI, ECR read-only policies).
  NodeInstanceRole:
    Type: AWS::IAM::Role
    Properties:
      AssumeRolePolicyDocument:
        Version: "2012-10-17"
        Statement:
          - Effect: Allow
            Principal:
              Service: ec2.amazonaws.com
            Action: sts:AssumeRole
      Path: "/"
      ManagedPolicyArns:
        - arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy
        - arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy
        - arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly

  # Security group shared by all worker nodes; tagged so EKS recognizes it.
  NodeSecurityGroup:
    Type: AWS::EC2::SecurityGroup
    Properties:
      GroupDescription: Security group for all nodes in the cluster
      VpcId: !Ref VpcId
      Tags:
        - Key: !Sub kubernetes.io/cluster/${ClusterName}
          Value: owned

  # Node-to-node traffic on all protocols/ports.
  NodeSecurityGroupIngress:
    Type: AWS::EC2::SecurityGroupIngress
    DependsOn: NodeSecurityGroup
    Properties:
      Description: Allow nodes to communicate with each other
      GroupId: !Ref NodeSecurityGroup
      SourceSecurityGroupId: !Ref NodeSecurityGroup
      IpProtocol: -1
      FromPort: 0
      ToPort: 65535

  # Control plane -> kubelets/pods on ephemeral ports.
  NodeSecurityGroupFromControlPlaneIngress:
    Type: AWS::EC2::SecurityGroupIngress
    DependsOn: NodeSecurityGroup
    Properties:
      Description: Allow worker Kubelets and pods to receive communication from the cluster control plane
      GroupId: !Ref NodeSecurityGroup
      SourceSecurityGroupId: !Ref ClusterControlPlaneSecurityGroup
      IpProtocol: tcp
      FromPort: 1025
      ToPort: 65535

  ControlPlaneEgressToNodeSecurityGroup:
    Type: AWS::EC2::SecurityGroupEgress
    DependsOn: NodeSecurityGroup
    Properties:
      Description: Allow the cluster control plane to communicate with worker Kubelet and pods
      GroupId: !Ref ClusterControlPlaneSecurityGroup
      DestinationSecurityGroupId: !Ref NodeSecurityGroup
      IpProtocol: tcp
      FromPort: 1025
      ToPort: 65535

  # Port 443 pair for pods running extension API servers.
  NodeSecurityGroupFromControlPlaneOn443Ingress:
    Type: AWS::EC2::SecurityGroupIngress
    DependsOn: NodeSecurityGroup
    Properties:
      Description: Allow pods running extension API servers on port 443 to receive communication from cluster control plane
      GroupId: !Ref NodeSecurityGroup
      SourceSecurityGroupId: !Ref ClusterControlPlaneSecurityGroup
      IpProtocol: tcp
      FromPort: 443
      ToPort: 443

  ControlPlaneEgressToNodeSecurityGroupOn443:
    Type: AWS::EC2::SecurityGroupEgress
    DependsOn: NodeSecurityGroup
    Properties:
      Description: Allow the cluster control plane to communicate with pods running extension API servers on port 443
      GroupId: !Ref ClusterControlPlaneSecurityGroup
      DestinationSecurityGroupId: !Ref NodeSecurityGroup
      IpProtocol: tcp
      FromPort: 443
      ToPort: 443

  # Nodes -> cluster API server on 443.
  ClusterControlPlaneSecurityGroupIngress:
    Type: AWS::EC2::SecurityGroupIngress
    DependsOn: NodeSecurityGroup
    Properties:
      Description: Allow pods to communicate with the cluster API Server
      GroupId: !Ref ClusterControlPlaneSecurityGroup
      SourceSecurityGroupId: !Ref NodeSecurityGroup
      IpProtocol: tcp
      ToPort: 443
      FromPort: 443

  # Fixed-size worker ASG (min = max = desired); rolling updates keep the
  # desired capacity in service, replacing one instance at a time.
  NodeGroup:
    Type: AWS::AutoScaling::AutoScalingGroup
    DependsOn: Cluster
    Properties:
      DesiredCapacity: !Ref NodeAutoScalingGroupDesiredCapacity
      LaunchConfigurationName: !Ref NodeLaunchConfig
      MinSize: !Ref NodeAutoScalingGroupDesiredCapacity
      MaxSize: !Ref NodeAutoScalingGroupDesiredCapacity
      VPCZoneIdentifier: !Ref Subnets
      Tags:
        - Key: Name
          Value: !Sub ${ClusterName}-node
          PropagateAtLaunch: true
        - Key: !Sub kubernetes.io/cluster/${ClusterName}
          Value: owned
          PropagateAtLaunch: true
    UpdatePolicy:
      AutoScalingRollingUpdate:
        MaxBatchSize: 1
        MinInstancesInService: !Ref NodeAutoScalingGroupDesiredCapacity
        PauseTime: PT5M

  # Launch configuration bootstrapping each node into the cluster and
  # signaling CloudFormation when done.
  NodeLaunchConfig:
    Type: AWS::AutoScaling::LaunchConfiguration
    Properties:
      AssociatePublicIpAddress: true
      IamInstanceProfile: !Ref NodeInstanceProfile
      ImageId: !Ref NodeImageIdSSMParam
      InstanceType: !Ref NodeInstanceType
      KeyName: !Ref KeyName
      SecurityGroups:
        - !Ref NodeSecurityGroup
      BlockDeviceMappings:
        - DeviceName: /dev/xvda
          Ebs:
            VolumeSize: !Ref NodeVolumeSize
            VolumeType: gp2
            DeleteOnTermination: true
      UserData:
        Fn::Base64:
          !Sub |
            #!/bin/bash
            set -o xtrace
            /etc/eks/bootstrap.sh "${ClusterName}"
            /opt/aws/bin/cfn-signal --exit-code $? \
              --stack ${AWS::StackName} \
              --resource NodeGroup \
              --region ${AWS::Region}
# Values consumed by the caller after stack creation (e.g. for aws-auth
# ConfigMap and kubeconfig generation).
Outputs:
  NodeInstanceRole:
    Description: The node instance role
    Value: !GetAtt NodeInstanceRole.Arn
  ClusterCertificate:
    Description: The cluster certificate
    Value: !GetAtt Cluster.CertificateAuthorityData
  ClusterEndpoint:
    Description: The cluster endpoint
    Value: !GetAtt Cluster.Endpoint

View File

@ -1,17 +0,0 @@
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"iam:ListRoles",
"ec2:DescribeKeyPairs",
"ec2:DescribeRegions",
"ec2:DescribeSecurityGroups",
"ec2:DescribeSubnets",
"ec2:DescribeVpcs"
],
"Resource": "*"
}
]
}