Add latest changes from gitlab-org/gitlab@master

parent e14148b7ed
commit 708ee0bcb2

@@ -814,6 +814,7 @@ Rails/SaveBang:
    - 'ee/spec/workers/repository_import_worker_spec.rb'
    - 'ee/spec/workers/update_all_mirrors_worker_spec.rb'
    - 'qa/qa/specs/features/browser_ui/3_create/repository/push_mirroring_over_http_spec.rb'
    - 'qa/qa/specs/features/browser_ui/3_create/repository/push_mirroring_lfs_over_http_spec.rb'
    - 'qa/qa/specs/features/ee/browser_ui/3_create/repository/pull_mirroring_over_http_spec.rb'
    - 'qa/qa/specs/features/ee/browser_ui/3_create/repository/pull_mirroring_over_ssh_with_key_spec.rb'
    - 'spec/controllers/abuse_reports_controller_spec.rb'

@@ -0,0 +1,79 @@
<script>
import { GlButton, GlLoadingIcon } from '@gitlab/ui';

export default {
  components: { GlButton, GlLoadingIcon },
  props: {
    title: {
      type: String,
      required: false,
      default: '',
    },
    loading: {
      type: Boolean,
      required: false,
      default: false,
    },
  },
  inject: ['canUpdate'],
  data() {
    return {
      edit: false,
    };
  },
  destroyed() {
    window.removeEventListener('click', this.collapseWhenOffClick);
  },
  methods: {
    collapseWhenOffClick({ target }) {
      if (!this.$el.contains(target)) {
        this.collapse();
      }
    },
    expand() {
      if (this.edit) {
        return;
      }

      this.edit = true;
      this.$emit('changed', this.edit);
      window.addEventListener('click', this.collapseWhenOffClick);
    },
    collapse() {
      if (!this.edit) {
        return;
      }

      this.edit = false;
      this.$emit('changed', this.edit);
      window.removeEventListener('click', this.collapseWhenOffClick);
    },
  },
};
</script>

<template>
  <div>
    <div class="gl-display-flex gl-justify-content-space-between gl-mb-3">
      <span class="gl-vertical-align-middle">
        <span data-testid="title">{{ title }}</span>
        <gl-loading-icon v-if="loading" inline class="gl-ml-2" />
      </span>
      <gl-button
        v-if="canUpdate"
        variant="link"
        class="gl-text-gray-900!"
        data-testid="edit-button"
        @click="expand()"
      >
        {{ __('Edit') }}
      </gl-button>
    </div>
    <div v-show="!edit" class="gl-text-gray-400" data-testid="collapsed-content">
      <slot name="collapsed">{{ __('None') }}</slot>
    </div>
    <div v-show="edit" data-testid="expanded-content">
      <slot></slot>
    </div>
  </div>
</template>

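The new file above is a generic collapse/expand widget: clicking **Edit** expands the default slot and registers a window-level click listener that collapses the widget again when a click lands outside `this.$el`. A minimal usage sketch; the component's file name and registration name are not shown in this diff, so `widget-wrapper` and the slot contents here are assumptions:

```html
<!-- Hypothetical parent template using the widget above -->
<widget-wrapper :title="__('Assignees')" :loading="isSaving" @changed="onEditToggled">
  <template #collapsed>{{ assigneeNames }}</template>
  <!-- default slot: shown while editing -->
  <assignees-form />
</widget-wrapper>
```
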
@@ -23,9 +23,6 @@ const EMPTY_STAGE_TEXTS = {
   staging: __(
     'The staging stage shows the time between merging the MR and deploying code to the production environment. The data will be automatically added once you deploy to production for the first time.',
   ),
-  production: __(
-    'The total stage shows the time it takes between creating an issue and deploying the code to production. The data will be automatically added once you have completed the full idea to production cycle.',
-  ),
 };

 export default {

@@ -1,21 +1,6 @@
 <script>
 export default {
   name: 'GroupMembersApp',
-  props: {
-    groupId: {
-      type: Number,
-      required: true,
-    },
-    currentUserId: {
-      type: Number,
-      required: false,
-      default: null,
-    },
-    members: {
-      type: Array,
-      required: true,
-    },
-  },
 };
 </script>

@@ -1,5 +1,7 @@
 import Vue from 'vue';
+import Vuex from 'vuex';
 import App from './components/app.vue';
+import membersModule from '~/vuex_shared/modules/members';
 import { convertObjectPropsToCamelCase } from '~/lib/utils/common_utils';

 export default el => {
@@ -7,26 +9,22 @@ export default el => {
     return () => {};
   }

+  Vue.use(Vuex);
+
+  const { members, groupId } = el.dataset;
+
+  const store = new Vuex.Store({
+    ...membersModule({
+      members: convertObjectPropsToCamelCase(JSON.parse(members), { deep: true }),
+      sourceId: parseInt(groupId, 10),
+      currentUserId: gon.current_user_id || null,
+    }),
+  });
+
   return new Vue({
     el,
     components: { App },
-    data() {
-      const { members, groupId, currentUserId } = this.$options.el.dataset;
-
-      return {
-        members: convertObjectPropsToCamelCase(JSON.parse(members), { deep: true }),
-        groupId: parseInt(groupId, 10),
-        ...(currentUserId ? { currentUserId: parseInt(currentUserId, 10) } : {}),
-      };
-    },
-    render(createElement) {
-      return createElement('app', {
-        props: {
-          members: this.members,
-          groupId: this.groupId,
-          currentUserId: this.currentUserId,
-        },
-      });
-    },
+    store,
+    render: createElement => createElement('app'),
   });
 };

@@ -132,8 +132,9 @@ export default {
     v-gl-tooltip="{ boundary, placement: 'bottom' }"
     :href="status.details_path"
     :title="tooltipText"
-    :class="cssClassJobName"
+    :class="jobClasses"
     class="js-pipeline-graph-job-link qa-job-link menu-item"
+    data-testid="job-with-link"
   >
     <job-name-component :name="job.name" :status="job.status" />
   </gl-link>

@@ -0,0 +1,20 @@
<script>
import { mapGetters } from 'vuex';
import ReleasesPaginationGraphql from './releases_pagination_graphql.vue';
import ReleasesPaginationRest from './releases_pagination_rest.vue';

export default {
  name: 'ReleasesPagination',
  components: { ReleasesPaginationGraphql, ReleasesPaginationRest },
  computed: {
    ...mapGetters(['useGraphQLEndpoint']),
  },
};
</script>

<template>
  <div class="gl-display-flex gl-justify-content-center">
    <releases-pagination-graphql v-if="useGraphQLEndpoint" />
    <releases-pagination-rest v-else />
  </div>
</template>

@@ -0,0 +1,35 @@
<script>
import { mapActions, mapState } from 'vuex';
import { GlKeysetPagination } from '@gitlab/ui';
import { historyPushState, buildUrlWithCurrentLocation } from '~/lib/utils/common_utils';

export default {
  name: 'ReleasesPaginationGraphql',
  components: { GlKeysetPagination },
  computed: {
    ...mapState('list', ['projectPath', 'graphQlPageInfo']),
    showPagination() {
      return this.graphQlPageInfo.hasPreviousPage || this.graphQlPageInfo.hasNextPage;
    },
  },
  methods: {
    ...mapActions('list', ['fetchReleasesGraphQl']),
    onPrev(before) {
      historyPushState(buildUrlWithCurrentLocation(`?before=${before}`));
      this.fetchReleasesGraphQl({ projectPath: this.projectPath, before });
    },
    onNext(after) {
      historyPushState(buildUrlWithCurrentLocation(`?after=${after}`));
      this.fetchReleasesGraphQl({ projectPath: this.projectPath, after });
    },
  },
};
</script>
<template>
  <gl-keyset-pagination
    v-if="showPagination"
    v-bind="graphQlPageInfo"
    @prev="onPrev($event)"
    @next="onNext($event)"
  />
</template>

@@ -0,0 +1,24 @@
<script>
import { mapActions, mapState } from 'vuex';
import TablePagination from '~/vue_shared/components/pagination/table_pagination.vue';
import { historyPushState, buildUrlWithCurrentLocation } from '~/lib/utils/common_utils';

export default {
  name: 'ReleasesPaginationRest',
  components: { TablePagination },
  computed: {
    ...mapState('list', ['projectId', 'pageInfo']),
  },
  methods: {
    ...mapActions('list', ['fetchReleasesRest']),
    onChangePage(page) {
      historyPushState(buildUrlWithCurrentLocation(`?page=${page}`));
      this.fetchReleasesRest({ page, projectId: this.projectId });
    },
  },
};
</script>

<template>
  <table-pagination :change="onChangePage" :page-info="pageInfo" />
</template>

@@ -1,8 +1,11 @@
 import Vue from 'vue';
+import Vuex from 'vuex';
 import ReleaseEditNewApp from './components/app_edit_new.vue';
 import createStore from './stores';
 import createDetailModule from './stores/modules/detail';

+Vue.use(Vuex);
+
 export default () => {
   const el = document.getElementById('js-edit-release-page');

@@ -1,7 +1,10 @@
 import Vue from 'vue';
+import Vuex from 'vuex';
 import ReleaseListApp from './components/app_index.vue';
 import createStore from './stores';
-import listModule from './stores/modules/list';
+import createListModule from './stores/modules/list';

+Vue.use(Vuex);
+
 export default () => {
   const el = document.getElementById('js-releases-page');
@@ -10,7 +13,7 @@ export default () => {
     el,
     store: createStore({
       modules: {
-        list: listModule,
+        list: createListModule(el.dataset),
       },
       featureFlags: {
         graphqlReleaseData: Boolean(gon.features?.graphqlReleaseData),

@@ -1,8 +1,11 @@
 import Vue from 'vue';
+import Vuex from 'vuex';
 import ReleaseEditNewApp from './components/app_edit_new.vue';
 import createStore from './stores';
 import createDetailModule from './stores/modules/detail';

+Vue.use(Vuex);
+
 export default () => {
   const el = document.getElementById('js-new-release-page');

@@ -1,8 +1,11 @@
 import Vue from 'vue';
+import Vuex from 'vuex';
 import ReleaseShowApp from './components/app_show.vue';
 import createStore from './stores';
 import createDetailModule from './stores/modules/detail';

+Vue.use(Vuex);
+
 export default () => {
   const el = document.getElementById('js-show-release-page');

@@ -1,8 +1,5 @@
-import Vue from 'vue';
 import Vuex from 'vuex';

-Vue.use(Vuex);
-
 export default ({ modules, featureFlags }) =>
   new Vuex.Store({
     modules,

@@ -1,10 +1,10 @@
-import state from './state';
+import createState from './state';
 import * as actions from './actions';
 import mutations from './mutations';

-export default {
+export default initialState => ({
   namespaced: true,
   actions,
   mutations,
-  state,
-};
+  state: createState(initialState),
+});

@@ -1,4 +1,16 @@
-export default () => ({
+export default ({
+  projectId,
+  projectPath,
+  documentationPath,
+  illustrationPath,
+  newReleasePath = '',
+}) => ({
+  projectId,
+  projectPath,
+  documentationPath,
+  illustrationPath,
+  newReleasePath,
+
   isLoading: false,
   hasError: false,
   releases: [],

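Together with the module change above, the releases list state is now a factory seeded from the mount element's `data-*` attributes rather than a static object. A short sketch of the effect (the values below are made up):

```javascript
// Hypothetical: what createListModule(el.dataset) wires up.
const listModule = createListModule({
  projectId: '8',
  projectPath: 'gitlab-org/gitlab',
  documentationPath: '/help/user/project/releases/index',
  illustrationPath: '/assets/illustrations/releases.svg',
});

// listModule.state.newReleasePath === '' (the declared default)
// listModule.state.isLoading === false
```
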
@@ -0,0 +1,6 @@
import createState from './state';

export default initialState => ({
  namespaced: true,
  state: createState(initialState),
});

@@ -0,0 +1,5 @@
export default ({ members, sourceId, currentUserId }) => ({
  members,
  sourceId,
  currentUserId,
});

@@ -12,9 +12,13 @@ module Resolvers
              required: false,
              description: 'Search query for project name, path, or description'

+    argument :ids, [GraphQL::ID_TYPE],
+             required: false,
+             description: 'Filter projects by IDs'
+
     def resolve(**args)
       ProjectsFinder
-        .new(current_user: current_user, params: project_finder_params(args))
+        .new(current_user: current_user, params: project_finder_params(args), project_ids_relation: parse_gids(args[:ids]))
         .execute
     end

@@ -27,5 +31,9 @@ module Resolvers
         search: params[:search]
       }.compact
     end
+
+    def parse_gids(gids)
+      gids&.map { |gid| GitlabSchema.parse_gid(gid, expected_type: ::Project).model_id }
+    end
   end
 end

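With the new `ids` argument in place, callers can pass project global IDs and the resolver narrows `ProjectsFinder` to the parsed model IDs. A hedged example query; the global ID below is invented:

```graphql
query {
  projects(ids: ["gid://gitlab/Project/5"]) {
    nodes {
      id
      fullPath
    }
  }
}
```
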
@@ -2,7 +2,7 @@

 module CycleAnalytics
   module LevelBase
-    STAGES = %i[issue plan code test review staging production].freeze
+    STAGES = %i[issue plan code test review staging].freeze

     def all_medians_by_stage
       STAGES.each_with_object({}) do |stage_name, medians_per_stage|

@@ -1469,6 +1469,12 @@ class Project < ApplicationRecord
     forked_from_project || fork_network&.root_project
   end

+  def lfs_objects_for_repository_types(*types)
+    LfsObject
+      .joins(:lfs_objects_projects)
+      .where(lfs_objects_projects: { project: self, repository_type: types })
+  end
+
   def lfs_objects_oids(oids: [])
     oids(lfs_objects, oids: oids)
   end

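The new helper filters a project's LFS objects through the `lfs_objects_projects` join table by `repository_type`. A small hedged sketch of how it reads; the argument list matches the call made by `Lfs::PushService` further down:

```ruby
# LFS objects attached to the project whose join rows have either no
# repository_type (classic project objects) or the `project` type.
project.lfs_objects_for_repository_types(nil, :project)
```
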
@@ -210,6 +210,10 @@ class RemoteMirror < ApplicationRecord
     super(usernames_whitelist: %w[git])
   end

+  def bare_url
+    Gitlab::UrlSanitizer.new(read_attribute(:url)).full_url
+  end
+
   def ensure_remote!
     return unless project
     return unless remote_name && remote_url

@@ -0,0 +1,80 @@
# frozen_string_literal: true

module Lfs
  # Lfs::PushService pushes the LFS objects associated with a project to a
  # remote URL
  class PushService < BaseService
    include Gitlab::Utils::StrongMemoize

    # Match the canonical LFS client's batch size:
    # https://github.com/git-lfs/git-lfs/blob/master/tq/transfer_queue.go#L19
    BATCH_SIZE = 100

    def execute
      lfs_objects_relation.each_batch(of: BATCH_SIZE) do |objects|
        push_objects(objects)
      end

      success
    rescue => err
      error(err.message)
    end

    private

    # Currently we only set repository_type for design repository objects, so
    # push mirroring must send objects with a `nil` repository type - but if the
    # wiki repository uses LFS, its objects will also be sent. This will be
    # addressed by https://gitlab.com/gitlab-org/gitlab/-/issues/250346
    def lfs_objects_relation
      project.lfs_objects_for_repository_types(nil, :project)
    end

    def push_objects(objects)
      rsp = lfs_client.batch('upload', objects)
      objects = objects.index_by(&:oid)

      rsp.fetch('objects', []).each do |spec|
        actions = spec['actions']
        object = objects[spec['oid']]

        upload_object!(object, spec) if actions&.key?('upload')
        verify_object!(object, spec) if actions&.key?('verify')
      end
    end

    def upload_object!(object, spec)
      authenticated = spec['authenticated']
      upload = spec.dig('actions', 'upload')

      # The server wants us to upload the object but something is wrong
      unless object && object.size == spec['size'].to_i
        log_error("Couldn't match object #{spec['oid']}/#{spec['size']}")
        return
      end

      lfs_client.upload(object, upload, authenticated: authenticated)
    end

    def verify_object!(object, spec)
      # TODO: the remote has requested that we make another call to verify that
      # the object has been sent correctly.
      # https://gitlab.com/gitlab-org/gitlab/-/issues/250654
      log_error("LFS upload verification requested, but not supported for #{object.oid}")
    end

    def url
      params.fetch(:url)
    end

    def credentials
      params.fetch(:credentials)
    end

    def lfs_client
      strong_memoize(:lfs_client) do
        Gitlab::Lfs::Client.new(url, credentials: credentials)
      end
    end
  end
end

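As a usage sketch, the service takes a remote URL and a credentials hash through `params`; the values below are placeholders, and the `user`/`password` keys are an assumption based on what `RemoteMirror#credentials` returns:

```ruby
# Hypothetical standalone invocation; the mirror service in the next hunk
# performs this call with real mirror data.
Lfs::PushService.new(
  project,
  current_user,
  url: 'https://example.com/group/repo.git',
  credentials: { user: 'git', password: 'secret-token' }
).execute
```
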
@@ -31,6 +31,9 @@ module Projects
       remote_mirror.update_start!
       remote_mirror.ensure_remote!

+      # LFS objects must be sent first, or the push has dangling pointers
+      send_lfs_objects!(remote_mirror)
+
       response = remote_mirror.update_repository

       if response.divergent_refs.any?
@@ -43,6 +46,23 @@ module Projects
       end
     end

+    def send_lfs_objects!(remote_mirror)
+      return unless Feature.enabled?(:push_mirror_syncs_lfs, project)
+      return unless project.lfs_enabled?
+
+      # TODO: Support LFS sync over SSH
+      # https://gitlab.com/gitlab-org/gitlab/-/issues/249587
+      return unless remote_mirror.url =~ /\Ahttps?:\/\//i
+      return unless remote_mirror.password_auth?
+
+      Lfs::PushService.new(
+        project,
+        current_user,
+        url: remote_mirror.bare_url,
+        credentials: remote_mirror.credentials
+      ).execute
+    end
+
     def retry_or_fail(mirror, message, tries)
       if tries < MAX_TRIES
         mirror.mark_for_retry!(message)

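Because the `push_mirror_syncs_lfs` flag ships disabled (see its definition below), `send_lfs_objects!` is a no-op until the flag is turned on, for example from the Rails console:

```ruby
# Enable LFS sync for one project's push mirrors (actor-based gate)...
Feature.enable(:push_mirror_syncs_lfs, project)

# ...or for every project.
Feature.enable(:push_mirror_syncs_lfs)
```
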
@@ -4,7 +4,7 @@
 - show_access_requests = can_manage_members && @requesters.exists?
 - invited_active = params[:search_invited].present? || params[:invited_members_page].present?
 - vue_members_list_enabled = Feature.enabled?(:vue_group_members_list, @group)
-- data_attributes = { group_id: @group.id, current_user_id: current_user&.id }
+- data_attributes = { group_id: @group.id }

 - form_item_label_css_class = 'label-bold gl-mr-2 gl-mb-0 gl-py-2 align-self-md-center'

@@ -9,8 +9,6 @@ class UpdateMergeRequestsWorker # rubocop:disable Scalability/IdempotentWorker
   weight 3
   loggable_arguments 2, 3, 4

-  LOG_TIME_THRESHOLD = 90 # seconds
-
   # rubocop: disable CodeReuse/ActiveRecord
   def perform(project_id, user_id, oldrev, newrev, ref)
     project = Project.find_by(id: project_id)

@@ -0,0 +1,5 @@
---
title: Sync LFS objects when push mirroring
merge_request: 40137
author:
type: added

@@ -0,0 +1,5 @@
---
title: Remove Value Stream Total stage
merge_request: 42345
author:
type: removed

@@ -0,0 +1,5 @@
---
title: Make bridge/child pipelines clickable
merge_request: 41263
author:
type: added

@@ -0,0 +1,5 @@
---
title: Query projects by ids with GraphQL
merge_request: 42372
author:
type: added

@@ -0,0 +1,5 @@
---
title: Disable Sidekiq Exporter logs by default
merge_request: 42267
author:
type: changed

@@ -0,0 +1,7 @@
---
name: ci_bridge_pipeline_details
introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/41263
rollout_issue_url: https://gitlab.com/gitlab-org/gitlab/-/issues/250683
group: group::memory
type: development
default_enabled: true

@@ -0,0 +1,7 @@
---
name: push_mirror_syncs_lfs
introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/40137
rollout_issue_url:
group: group::source code
type: development
default_enabled: false

@@ -1136,6 +1136,7 @@ production: &base
     # Sidekiq exporter is webserver built in to Sidekiq to expose Prometheus metrics
     sidekiq_exporter:
       # enabled: true
+      # log_enabled: false
       # address: localhost
       # port: 8082

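For installations from source, turning the new log setting on means uncommenting and flipping the keys above; a minimal sketch of the resulting `gitlab.yml` block:

```yaml
sidekiq_exporter:
  enabled: true
  log_enabled: true
  address: localhost
  port: 8082
```

Omnibus installations use `sidekiq['exporter_log_enabled'] = true` in `/etc/gitlab/gitlab.rb` instead, as described in the log documentation further down.
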
@@ -784,6 +784,7 @@ Settings.monitoring['ip_whitelist'] ||= ['127.0.0.1/8']
 Settings.monitoring['unicorn_sampler_interval'] ||= 10
 Settings.monitoring['sidekiq_exporter'] ||= Settingslogic.new({})
 Settings.monitoring.sidekiq_exporter['enabled'] ||= false
+Settings.monitoring.sidekiq_exporter['log_enabled'] ||= false
 Settings.monitoring.sidekiq_exporter['address'] ||= 'localhost'
 Settings.monitoring.sidekiq_exporter['port'] ||= 8082
 Settings.monitoring['web_exporter'] ||= Settingslogic.new({})

@@ -160,6 +160,8 @@
   - 1
 - - merge_request_mergeability_check
   - 1
+- - merge_request_reset_approvals
+  - 1
 - - metrics_dashboard_prune_old_annotations
   - 1
 - - migrate_external_diffs

@@ -0,0 +1,14 @@
# frozen_string_literal: true

class RemoveCycleAnalyticsTotalStageData < ActiveRecord::Migration[6.0]
  DOWNTIME = false

  def up
    execute("DELETE FROM analytics_cycle_analytics_group_stages WHERE name='production'")
    execute("DELETE FROM analytics_cycle_analytics_project_stages WHERE name='production'")
  end

  def down
    # Migration is irreversible
  end
end

@@ -0,0 +1 @@
dde7a29268d925044d59455db87bfc1aa617eec6e30df1cc9dc531b52c909fe1

@@ -723,10 +723,15 @@ was initiated, such as `1509705644.log`

 ## `sidekiq_exporter.log` and `web_exporter.log`

-If Prometheus metrics and the Sidekiq Exporter are both enabled, Sidekiq will
-start a Web server and listen to the defined port (default: `8082`). Access logs
-will be generated in `/var/log/gitlab/gitlab-rails/sidekiq_exporter.log` for
-Omnibus GitLab packages or in `/home/git/gitlab/log/sidekiq_exporter.log` for
+If Prometheus metrics and the Sidekiq Exporter are both enabled, Sidekiq
+will start a Web server and listen to the defined port (default:
+`8082`). By default, Sidekiq Exporter access logs are disabled but can
+be enabled via the `sidekiq['exporter_log_enabled'] = true` option in `/etc/gitlab/gitlab.rb`
+for Omnibus installations, or via the `sidekiq_exporter.log_enabled` option
+in `gitlab.yml` for installations from source. When enabled,
+access logs will be generated in
+`/var/log/gitlab/gitlab-rails/sidekiq_exporter.log` for Omnibus GitLab
+packages or in `/home/git/gitlab/log/sidekiq_exporter.log` for
 installations from source.

 If Prometheus metrics and the Web Exporter are both enabled, Puma/Unicorn will

@@ -13981,6 +13981,11 @@ type Query {
   """
   first: Int

+  """
+  Filter projects by IDs
+  """
+  ids: [ID!]
+
   """
   Returns the last _n_ elements from the list.
   """

@@ -40936,6 +40936,24 @@
         },
         "defaultValue": null
       },
+      {
+        "name": "ids",
+        "description": "Filter projects by IDs",
+        "type": {
+          "kind": "LIST",
+          "name": null,
+          "ofType": {
+            "kind": "NON_NULL",
+            "name": null,
+            "ofType": {
+              "kind": "SCALAR",
+              "name": "ID",
+              "ofType": null
+            }
+          }
+        },
+        "defaultValue": null
+      },
       {
         "name": "after",
         "description": "Returns the elements in the list that come after the specified cursor.",

@@ -77,7 +77,7 @@ are very appreciative of the work done by translators and proofreaders!
 - Mongolian
   - Proofreaders needed.
 - Norwegian Bokmal
-  - Proofreaders needed.
+  - Imre Kristoffer Eilertsen - [GitLab](https://gitlab.com/DandelionSprout), [CrowdIn](https://crowdin.com/profile/DandelionSprout)
 - Polish
   - Filip Mech - [GitLab](https://gitlab.com/mehenz), [CrowdIn](https://crowdin.com/profile/mehenz)
   - Maksymilian Roman - [GitLab](https://gitlab.com/villaincandle), [CrowdIn](https://crowdin.com/profile/villaincandle)

@@ -45,8 +45,6 @@ There are seven stages that are tracked as part of the Value Stream Analytics ca
   - Time spent on code review
 - **Staging** (Continuous Deployment)
   - Time between merging and deploying to production
-- **Total** (Total)
-  - Total lifecycle time. That is, the velocity of the project or team. [Previously known](https://gitlab.com/gitlab-org/gitlab/-/issues/38317) as **Production**.

 ## Filter the analytics data

@@ -95,7 +93,7 @@ Note: A commit is associated with an issue by [crosslinking](../project/issues/c
 ## How the stages are measured

 Value Stream Analytics records stage time and data based on the project issues with the
-exception of the staging and total stages, where only data deployed to
+exception of the staging stage, where only data deployed to
 production are measured.

 Specifically, if your CI is not set up and you have not defined a `production`
@@ -112,7 +110,6 @@ Each stage of Value Stream Analytics is further described in the table below.
 | Test | Measures the median time to run the entire pipeline for that project. It's related to the time GitLab CI/CD takes to run every job for the commits pushed to that merge request defined in the previous stage. It is basically the start->finish time for all pipelines. |
 | Review | Measures the median time taken to review the merge request that has a closing issue pattern, between its creation and until it's merged. |
 | Staging | Measures the median time between merging the merge request with a closing issue pattern until the very first deployment to production. It's tracked by the environment set to `production` or matching `production/*` (case-sensitive, `Production` won't work) in your GitLab CI/CD configuration. If there isn't a production environment, this is not tracked. |
-| Total | The sum of all time (medians) taken to run the entire process, from issue creation to deploying the code to production. [Previously known](https://gitlab.com/gitlab-org/gitlab/-/issues/38317) as **Production**. |

 How this works, behind the scenes:

@@ -131,7 +128,7 @@ Value Stream Analytics dashboard will not present any data for:

 - Merge requests that do not close an issue.
 - Issues not labeled with a label present in the Issue Board or for issues not assigned a milestone.
-- Staging and production stages, if the project has no `production` or `production/*`
+- Staging stage, if the project has no `production` or `production/*`
   environment.

 ## Example workflow

@@ -158,9 +155,6 @@ environments is configured.
    request at 19:00. (stop of **Review** stage / start of **Staging** stage).
 1. Now that the merge request is merged, a deployment to the `production`
    environment starts and finishes at 19:30 (stop of **Staging** stage).
-1. The cycle completes and the sum of the median times of the previous stages
-   is recorded to the **Total** stage. That is the time between creating an
-   issue and deploying its relevant merge request to production.

 From the above example you can conclude the time it took each stage to complete
 as well as their total time:

@@ -171,10 +165,6 @@ as well as their total time:
 - **Test**: 5min
 - **Review**: 5h (19:00 - 14:00)
 - **Staging**: 30min (19:30 - 19:00)
-- **Total**: Since this stage measures the sum of median time of all
-  previous stages, we cannot calculate it if we don't know the status of the
-  stages before. In case this is the very first cycle that is run in the project,
-  then the **Total** time is 10h 30min (19:30 - 09:00)

 A few notes:

@@ -40,7 +40,7 @@ information directly in the merge request.

 ## Requirements

-To enable Container Scanning in your pipeline, you need the following:
+To enable container scanning in your pipeline, you need the following:

 - [GitLab Runner](https://docs.gitlab.com/runner/) with the [`docker`](https://docs.gitlab.com/runner/executors/docker.html)
   or [`kubernetes`](https://docs.gitlab.com/runner/install/kubernetes.html) executor.

@@ -72,7 +72,7 @@ To enable Container Scanning in your pipeline, you need the following:

 ## Configuration

-How you enable Container Scanning depends on your GitLab version:
+How you enable container scanning depends on your GitLab version:

 - GitLab 11.9 and later: [Include](../../../ci/yaml/README.md#includetemplate) the
   [`Container-Scanning.gitlab-ci.yml` template](https://gitlab.com/gitlab-org/gitlab/blob/master/lib/gitlab/ci/templates/Security/Container-Scanning.gitlab-ci.yml)

@@ -91,7 +91,7 @@ include:
 The included template:

 - Creates a `container_scanning` job in your CI/CD pipeline.
-- Pulls the built Docker image from your project's [Container Registry](../../packages/container_registry/index.md)
+- Pulls the built Docker image from your project's [container registry](../../packages/container_registry/index.md)
   (see [requirements](#requirements)) and scans it for possible vulnerabilities.

 GitLab saves the results as a

@@ -99,8 +99,8 @@ GitLab saves the results as a
 that you can download and analyze later. When downloading, you always receive the most-recent
 artifact.

-The following is a sample `.gitlab-ci.yml` that builds your Docker image, pushes it to the Container
-Registry, and scans the containers:
+The following is a sample `.gitlab-ci.yml` that builds your Docker image, pushes it to the container
+registry, and scans the containers:

 ```yaml
 variables:

@@ -127,7 +127,7 @@ include:
   - template: Container-Scanning.gitlab-ci.yml
 ```

-### Customizing the Container Scanning settings
+### Customizing the container scanning settings

 There may be cases where you want to customize how GitLab scans your containers. For example, you
 may want to enable more verbose output from Clair or Klar, access a Docker registry that requires

@@ -136,7 +136,7 @@ parameter in your `.gitlab-ci.yml` to set [environment variables](#available-var
 The environment variables you set in your `.gitlab-ci.yml` overwrite those in
 `Container-Scanning.gitlab-ci.yml`.

-This example [includes](../../../ci/yaml/README.md#include) the Container Scanning template and
+This example [includes](../../../ci/yaml/README.md#include) the container scanning template and
 enables verbose output from Clair by setting the `CLAIR_OUTPUT` environment variable to `High`:

 ```yaml

@@ -153,30 +153,30 @@ variables:

 #### Available variables

-Container Scanning can be [configured](#customizing-the-container-scanning-settings)
-using environment variables.
+You can [configure](#customizing-the-container-scanning-settings) container
+scanning by using the following environment variables:

-| Environment Variable | Default | Description |
-| -------------------- | ----------- | ------- |
-| `SECURE_ANALYZERS_PREFIX` | `"registry.gitlab.com/gitlab-org/security-products/analyzers"` | Set the Docker registry base address from which to download the analyzer. |
-| `KLAR_TRACE` | `"false"` | Set to true to enable more verbose output from klar. |
-| `CLAIR_TRACE` | `"false"` | Set to true to enable more verbose output from the clair server process. |
-| `DOCKER_USER` | `$CI_REGISTRY_USER` | Username for accessing a Docker registry requiring authentication. |
-| `DOCKER_PASSWORD` | `$CI_REGISTRY_PASSWORD` | Password for accessing a Docker registry requiring authentication. |
-| `CLAIR_OUTPUT` | `Unknown` | Severity level threshold. Vulnerabilities with severity level higher than or equal to this threshold are outputted. Supported levels are `Unknown`, `Negligible`, `Low`, `Medium`, `High`, `Critical` and `Defcon1`. |
-| `REGISTRY_INSECURE` | `"false"` | Allow [Klar](https://github.com/optiopay/klar) to access insecure registries (HTTP only). Should only be set to `true` when testing the image locally. |
-| `DOCKER_INSECURE` | `"false"` | Allow [Klar](https://github.com/optiopay/klar) to access secure Docker registries using HTTPS with bad (or self-signed) SSL certificates. |
-| `CLAIR_VULNERABILITIES_DB_URL` | `clair-vulnerabilities-db` | (**DEPRECATED - use `CLAIR_DB_CONNECTION_STRING` instead**) This variable is explicitly set in the [services section](https://gitlab.com/gitlab-org/gitlab/-/blob/898c5da43504eba87b749625da50098d345b60d6/lib/gitlab/ci/templates/Security/Container-Scanning.gitlab-ci.yml#L23) of the `Container-Scanning.gitlab-ci.yml` file and defaults to `clair-vulnerabilities-db`. This value represents the address that the [PostgreSQL server hosting the vulnerabilities definitions](https://hub.docker.com/r/arminc/clair-db) is running on and **shouldn't be changed** unless you're running the image locally as described in the [Running the standalone Container Scanning Tool](#running-the-standalone-container-scanning-tool) section. |
-| `CLAIR_DB_CONNECTION_STRING` | `postgresql://postgres:password@clair-vulnerabilities-db:5432/postgres?sslmode=disable&statement_timeout=60000` | This variable represents the [connection string](https://www.postgresql.org/docs/9.3/libpq-connect.html#AEN39692) to the [PostgreSQL server hosting the vulnerabilities definitions](https://hub.docker.com/r/arminc/clair-db) database and **shouldn't be changed** unless you're running the image locally as described in the [Running the standalone Container Scanning Tool](#running-the-standalone-container-scanning-tool) section. The host value for the connection string must match the [alias](https://gitlab.com/gitlab-org/gitlab/-/blob/898c5da43504eba87b749625da50098d345b60d6/lib/gitlab/ci/templates/Security/Container-Scanning.gitlab-ci.yml#L23) value of the `Container-Scanning.gitlab-ci.yml` template file, which defaults to `clair-vulnerabilities-db`. |
-| `CI_APPLICATION_REPOSITORY` | `$CI_REGISTRY_IMAGE/$CI_COMMIT_REF_SLUG` | Docker repository URL for the image to be scanned. |
-| `CI_APPLICATION_TAG` | `$CI_COMMIT_SHA` | Docker repository tag for the image to be scanned. |
-| `CLAIR_DB_IMAGE` | `arminc/clair-db:latest` | The Docker image name and tag for the [PostgreSQL server hosting the vulnerabilities definitions](https://hub.docker.com/r/arminc/clair-db). It can be useful to override this value with a specific version, for example, to provide a consistent set of vulnerabilities for integration testing purposes, or to refer to a locally hosted vulnerabilities database for an on-premise offline installation. |
-| `CLAIR_DB_IMAGE_TAG` | `latest` | (**DEPRECATED - use `CLAIR_DB_IMAGE` instead**) The Docker image tag for the [PostgreSQL server hosting the vulnerabilities definitions](https://hub.docker.com/r/arminc/clair-db). It can be useful to override this value with a specific version, for example, to provide a consistent set of vulnerabilities for integration testing purposes. |
-| `DOCKERFILE_PATH` | `Dockerfile` | The path to the `Dockerfile` to be used for generating remediations. By default, the scanner looks for a file named `Dockerfile` in the root directory of the project, so this variable should only be configured if your `Dockerfile` is in a non-standard location, such as a subdirectory. See [Solutions for vulnerabilities](#solutions-for-vulnerabilities-auto-remediation) for more details. |
-| `ADDITIONAL_CA_CERT_BUNDLE` | `""` | Bundle of CA certs that you want to trust. |
-| `SECURE_LOG_LEVEL` | `info` | Set the minimum logging level. Messages of this logging level or higher are output. From highest to lowest severity, the logging levels are: `fatal`, `error`, `warn`, `info`, `debug`. [Introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/10880) in GitLab 13.1. |
+| Environment Variable | Default | Description |
+| ------------------------------ | ------------- | ----------- |
+| `ADDITIONAL_CA_CERT_BUNDLE` | `""` | Bundle of CA certs that you want to trust. |
+| `CLAIR_DB_CONNECTION_STRING` | `postgresql://postgres:password@clair-vulnerabilities-db:5432/postgres?sslmode=disable&statement_timeout=60000` | This variable represents the [connection string](https://www.postgresql.org/docs/9.3/libpq-connect.html#AEN39692) to the [PostgreSQL server hosting the vulnerabilities definitions](https://hub.docker.com/r/arminc/clair-db) database and **shouldn't be changed** unless you're running the image locally as described in the [Running the standalone container scanning tool](#running-the-standalone-container-scanning-tool) section. The host value for the connection string must match the [alias](https://gitlab.com/gitlab-org/gitlab/-/blob/898c5da43504eba87b749625da50098d345b60d6/lib/gitlab/ci/templates/Security/Container-Scanning.gitlab-ci.yml#L23) value of the `Container-Scanning.gitlab-ci.yml` template file, which defaults to `clair-vulnerabilities-db`. |
+| `CLAIR_DB_IMAGE` | `arminc/clair-db:latest` | The Docker image name and tag for the [PostgreSQL server hosting the vulnerabilities definitions](https://hub.docker.com/r/arminc/clair-db). It can be useful to override this value with a specific version, for example, to provide a consistent set of vulnerabilities for integration testing purposes, or to refer to a locally hosted vulnerabilities database for an on-premise offline installation. |
+| `CLAIR_DB_IMAGE_TAG` | `latest` | (**DEPRECATED - use `CLAIR_DB_IMAGE` instead**) The Docker image tag for the [PostgreSQL server hosting the vulnerabilities definitions](https://hub.docker.com/r/arminc/clair-db). It can be useful to override this value with a specific version, for example, to provide a consistent set of vulnerabilities for integration testing purposes. |
+| `CLAIR_OUTPUT` | `Unknown` | Severity level threshold. Vulnerabilities with severity level higher than or equal to this threshold are outputted. Supported levels are `Unknown`, `Negligible`, `Low`, `Medium`, `High`, `Critical` and `Defcon1`. |
+| `CLAIR_TRACE` | `"false"` | Set to true to enable more verbose output from the clair server process. |
+| `CLAIR_VULNERABILITIES_DB_URL` | `clair-vulnerabilities-db` | (**DEPRECATED - use `CLAIR_DB_CONNECTION_STRING` instead**) This variable is explicitly set in the [services section](https://gitlab.com/gitlab-org/gitlab/-/blob/898c5da43504eba87b749625da50098d345b60d6/lib/gitlab/ci/templates/Security/Container-Scanning.gitlab-ci.yml#L23) of the `Container-Scanning.gitlab-ci.yml` file and defaults to `clair-vulnerabilities-db`. This value represents the address that the [PostgreSQL server hosting the vulnerabilities definitions](https://hub.docker.com/r/arminc/clair-db) is running on and **shouldn't be changed** unless you're running the image locally as described in the [Running the standalone container scanning tool](#running-the-standalone-container-scanning-tool) section. |
+| `CI_APPLICATION_REPOSITORY` | `$CI_REGISTRY_IMAGE/$CI_COMMIT_REF_SLUG` | Docker repository URL for the image to be scanned. |
+| `CI_APPLICATION_TAG` | `$CI_COMMIT_SHA` | Docker repository tag for the image to be scanned. |
+| `DOCKER_INSECURE` | `"false"` | Allow [Klar](https://github.com/optiopay/klar) to access secure Docker registries using HTTPS with bad (or self-signed) SSL certificates. |
+| `DOCKER_PASSWORD` | `$CI_REGISTRY_PASSWORD` | Password for accessing a Docker registry requiring authentication. |
+| `DOCKER_USER` | `$CI_REGISTRY_USER` | Username for accessing a Docker registry requiring authentication. |
+| `DOCKERFILE_PATH` | `Dockerfile` | The path to the `Dockerfile` to be used for generating remediations. By default, the scanner looks for a file named `Dockerfile` in the root directory of the project, so this variable should only be configured if your `Dockerfile` is in a non-standard location, such as a subdirectory. See [Solutions for vulnerabilities](#solutions-for-vulnerabilities-auto-remediation) for more details. |
+| `KLAR_TRACE` | `"false"` | Set to true to enable more verbose output from klar. |
+| `REGISTRY_INSECURE` | `"false"` | Allow [Klar](https://github.com/optiopay/klar) to access insecure registries (HTTP only). Should only be set to `true` when testing the image locally. |
+| `SECURE_ANALYZERS_PREFIX` | `"registry.gitlab.com/gitlab-org/security-products/analyzers"` | Set the Docker registry base address from which to download the analyzer. |
+| `SECURE_LOG_LEVEL` | `info` | Set the minimum logging level. Messages of this logging level or higher are output. From highest to lowest severity, the logging levels are: `fatal`, `error`, `warn`, `info`, `debug`. [Introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/10880) in GitLab 13.1. |

-### Overriding the Container Scanning template
+### Overriding the container scanning template

 If you want to override the job definition (for example, to change properties like `variables`), you
 must declare a `container_scanning` job after the template inclusion, and then

@@ -201,23 +201,23 @@ instead.
 To allowlist specific vulnerabilities, follow these steps:

 1. Set `GIT_STRATEGY: fetch` in your `.gitlab-ci.yml` file by following the instructions in
-   [overriding the Container Scanning template](#overriding-the-container-scanning-template).
+   [overriding the container scanning template](#overriding-the-container-scanning-template).
 1. Define the allowlisted vulnerabilities in a YAML file named `vulnerability-allowlist.yml`. This must use
    the format described in the [allowlist example file](https://gitlab.com/gitlab-org/security-products/analyzers/klar/-/raw/master/testdata/vulnerability-allowlist.yml).
 1. Add the `vulnerability-allowlist.yml` file to your project's Git repository.

-### Running Container Scanning in an offline environment
+### Running container scanning in an offline environment

 For self-managed GitLab instances in an environment with limited, restricted, or intermittent access
-to external resources through the internet, some adjustments are required for the Container Scanning job to
+to external resources through the internet, some adjustments are required for the container scanning job to
 successfully run. For more information, see [Offline environments](../offline_deployments/index.md).

-#### Requirements for offline Container Scanning
+#### Requirements for offline container scanning

-To use Container Scanning in an offline environment, you need:
+To use container scanning in an offline environment, you need:

 - GitLab Runner with the [`docker` or `kubernetes` executor](#requirements).
-- To configure a local Docker Container Registry with copies of the Container Scanning [analyzer](https://gitlab.com/gitlab-org/security-products/analyzers/klar) images, found in the [Container Scanning container registry](https://gitlab.com/gitlab-org/security-products/analyzers/klar/container_registry).
+- To configure a local Docker container registry with copies of the container scanning [analyzer](https://gitlab.com/gitlab-org/security-products/analyzers/klar) images, found in the [container scanning container registry](https://gitlab.com/gitlab-org/security-products/analyzers/klar/container_registry).

 NOTE: **Note:**
 GitLab Runner has a [default `pull policy` of `always`](https://docs.gitlab.com/runner/executors/docker.html#using-the-always-pull-policy),

@@ -227,9 +227,9 @@ in an offline environment if you prefer using only locally available Docker imag
 recommend keeping the pull policy setting to `always` if not in an offline environment, as this
 enables the use of updated scanners in your CI/CD pipelines.

-#### Make GitLab Container Scanning analyzer images available inside your Docker registry
+#### Make GitLab container scanning analyzer images available inside your Docker registry

-For Container Scanning, import the following default images from `registry.gitlab.com` into your
+For container scanning, import the following default images from `registry.gitlab.com` into your
 [local Docker container registry](../../packages/container_registry/index.md):

 ```plaintext

@@ -249,7 +249,7 @@ For details on saving and transporting Docker images as a file, see Docker's doc
 [`docker save`](https://docs.docker.com/engine/reference/commandline/save/), [`docker load`](https://docs.docker.com/engine/reference/commandline/load/),
 [`docker export`](https://docs.docker.com/engine/reference/commandline/export/), and [`docker import`](https://docs.docker.com/engine/reference/commandline/import/).

-#### Set Container Scanning CI job variables to use local Container Scanner analyzers
+#### Set container scanning CI job variables to use local container scanner analyzers

 1. [Override the container scanning template](#overriding-the-container-scanning-template) in your `.gitlab-ci.yml` file to refer to the Docker images hosted on your local Docker container registry:

@@ -267,10 +267,10 @@ For details on saving and transporting Docker images as a file, see Docker's doc
    self-signed certificate, then you must set `DOCKER_INSECURE: "true"` in the above
    `container_scanning` section of your `.gitlab-ci.yml`.

-#### Automating Container Scanning vulnerability database updates with a pipeline
+#### Automating container scanning vulnerability database updates with a pipeline

 It can be worthwhile to set up a [scheduled pipeline](../../../ci/pipelines/schedules.md) to
-automatically build a new version of the vulnerabilities database on a preset schedule. Automating
+build a new version of the vulnerabilities database on a preset schedule. Automating
 this with a pipeline means you won't have to do it manually each time. You can use the following
 `.gitlab-yml.ci` as a template:

@@ -293,9 +293,9 @@ build_latest_vulnerabilities:

 The above template works for a GitLab Docker registry running on a local installation, however, if you're using a non-GitLab Docker registry, you'll need to change the `$CI_REGISTRY` value and the `docker login` credentials to match the details of your local registry.

-## Running the standalone Container Scanning Tool
+## Running the standalone container scanning tool

-It's possible to run the [GitLab Container Scanning Tool](https://gitlab.com/gitlab-org/security-products/analyzers/klar)
+It's possible to run the [GitLab container scanning tool](https://gitlab.com/gitlab-org/security-products/analyzers/klar)
 against a Docker container without needing to run it within the context of a CI job. To scan an
 image directly, follow these steps:

@@ -329,10 +329,10 @@ The results are stored in `gl-container-scanning-report.json`.

 ## Reports JSON format

-The Container Scanning tool emits a JSON report file. For more information, see the
+The container scanning tool emits a JSON report file. For more information, see the
 [schema for this report](https://gitlab.com/gitlab-org/security-products/security-report-schemas/-/blob/master/dist/container-scanning-report-format.json).

-Here's an example Container Scanning report:
+Here's an example container scanning report:

 ```json-doc
 {

@@ -401,7 +401,7 @@ For more information about the vulnerabilities database update, check the

 ## Interacting with the vulnerabilities

-Once a vulnerability is found, you can [interact with it](../index.md#interacting-with-the-vulnerabilities).
+After a vulnerability is found, you can [interact with it](../index.md#interacting-with-the-vulnerabilities).

 ## Solutions for vulnerabilities (auto-remediation)

@@ -413,7 +413,7 @@ the [`DOCKERFILE_PATH`](#available-variables) environment variable. To ensure th
 has access to this
 file, it's necessary to set [`GIT_STRATEGY: fetch`](../../../ci/yaml/README.md#git-strategy) in
 your `.gitlab-ci.yml` file by following the instructions described in this document's
-[overriding the Container Scanning template](#overriding-the-container-scanning-template) section.
+[overriding the container scanning template](#overriding-the-container-scanning-template) section.

 Read more about the [solutions for vulnerabilities](../index.md#solutions-for-vulnerabilities-auto-remediation).

@@ -422,7 +422,7 @@ Read more about the [solutions for vulnerabilities](../index.md#solutions-for-vu
 ### `docker: Error response from daemon: failed to copy xattrs`

 When the runner uses the `docker` executor and NFS is used
-(for example, `/var/lib/docker` is on an NFS mount), Container Scanning might fail with
+(for example, `/var/lib/docker` is on an NFS mount), container scanning might fail with
 an error like the following:

 ```plaintext

@ -0,0 +1,327 @@
|
|||
---
|
||||
stage: Configure
|
||||
group: Configure
|
||||
info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://about.gitlab.com/handbook/engineering/ux/technical-writing/#designated-technical-writers
|
||||
---
|
||||
|
||||
# GitLab Kubernetes Agent
|
||||
|
||||
## Goals
|
||||
|
||||
The [GitLab Kubernetes Agent](https://gitlab.com/gitlab-org/cluster-integration/gitlab-agent) is an active in-cluster component for solving GitLab and Kubernetes integration tasks in a secure and cloud native way.
|
||||
|
||||
Features:
|
||||
|
||||
1. Makes it possible to integrate GitLab with a Kubernetes cluster behind a firewall or NAT
|
||||
1. Enables pull-based GitOps deployments by leveraging the [GitOps Engine](https://github.com/argoproj/gitops-engine)
|
||||
1. Allows for real-time access to API endpoints within a cluster.
|
||||
1. Many more features are planned. Please [review our roadmap](https://gitlab.com/groups/gitlab-org/-/epics/3329).
|
||||
|
||||
## Architecture
|
||||
|
||||
### GitLab Agent GitOps workflow
|
||||
|
||||
```mermaid
|
||||
sequenceDiagram
|
||||
participant D as Developer
|
||||
participant A as Application code repository
|
||||
participant M as Manifest repository
|
||||
participant K as Kubernetes agent
|
||||
participant C as Agent configuration repository
|
||||
K->C: Grab the configuration
|
||||
D->>+A: Pushing code changes
|
||||
A->>M: Updating manifest
|
||||
loop Regularly
|
||||
K-->>M: Watching changes
|
||||
M-->>K: Pulling and applying changes
|
||||
end
|
||||
```
|
||||
|
||||
Please refer to our [full architecture documentation in the Agent project](https://gitlab.com/gitlab-org/cluster-integration/gitlab-agent/-/blob/master/doc/architecture.md#high-level-architecture).
|
||||
|
||||
## Getting started with GitOps using the GitLab Agent and the GitLab Cloud Native Helm chart
|
||||
|
||||
There are several components that work in concert for the Agent to accomplish GitOps deployments:
|
||||
|
||||
1. A Kubernetes cluster that is properly configured
|
||||
1. A configuration repository that contains a `config.yaml` file. This `config.yaml` tells the Agent which repositories to synchronize with.
|
||||
1. A manifest repository that contains a `manifest.yaml`. This `manifest.yaml` (which can be autogenerated) is tracked by the Agent and any changes to the file are automatically applied to the cluster.
|
||||
|
||||
The setup process involves a few steps that, once completed, will enable GitOps deployments to work
|
||||
|
||||
1. Installing the Agent server via GitLab Helm chart
|
||||
1. Defining a configuration directory
|
||||
1. Creating an Agent record in GitLab
|
||||
1. Generating and copying a Secret token used to connect to the Agent
|
||||
1. Installing the Agent into the cluster
|
||||
1. Creating a `manifest.yaml`
|
||||
|
||||
### Installing the Agent server via Helm
|
||||
|
||||
Currently the GitLab Kubernetes Agent can only be deployed via our [Helm chart](https://gitlab.com/gitlab-org/charts/gitlab).
|
||||
|
||||
NOTE: We are working quickly to [include the Agent in Official Linux Package](https://gitlab.com/gitlab-org/gitlab/-/issues/223060).
|
||||
|
||||
If you don't already have GitLab installed via Helm please refer to our [installation documentation](https://docs.gitlab.com/charts/installation/)
|
||||
|
||||
When installing/upgrading the GitLab Helm chart please consider the following Helm 2 example (if using Helm 3 please modify):
|
||||
|
||||
```shell
|
||||
helm upgrade --force --install gitlab . \
|
||||
--timeout 600 \
|
||||
--set global.hosts.domain=<YOUR_DOMAIN> \
|
||||
--set global.hosts.externalIP=<YOUR_IP> \
|
||||
--set certmanager-issuer.email=<YOUR_EMAIL> \
|
||||
--set name=gitlab-instance \
|
||||
--set global.kas.enabled=true
|
||||
```
|
||||
|
||||
`global.kas.enabled=true` must be set in order for the Agent to be properly installed and configured.
|
||||
|
||||
### Defining a configuration repository
|
||||
|
||||
Next you will need a GitLab repository that will contain your Agent configuration.
|
||||
|
||||
The minimal repository layout looks like this:
|
||||
|
||||
`.gitlab/agents/<agent-name>/config.yaml`
|
||||
|
||||
The `config.yaml` file contents should look like this:
|
||||
|
||||
```yaml
|
||||
gitops:
|
||||
manifest_projects:
|
||||
- id: "path-to/your-awesome-project"
|
||||
```
|
||||
|
||||
### Creating an Agent record in GitLab
|
||||
|
||||
Next you will need to create an GitLab Rails Agent record so that your GitLab project so that the Agent itself can associate with a GitLab project. This process will also yield a Secret that you will use to configure the Agent in subsequent steps.
|
||||
|
||||
There are two ways to accomplish this:
|
||||
|
||||
1. Via the Rails console
|
||||
1. Via GraphQL
|
||||
|
||||
To do this you could either run `rails c` or via GraphQL. From `rails c`:
|
||||
|
||||
```ruby
|
||||
project = ::Project.find_by_full_path("path-to/your-awesome-project")
|
||||
agent = ::Clusters::Agent.create(name: "<agent-name>", project: project)
|
||||
token = ::Clusters::AgentToken.create(agent: agent)
|
||||
token.token # this will print out the token you need to use on the next step
|
||||
```
|
||||
|
||||
or using GraphQL:
|
||||
|
||||
with this approach, you'll need a premium license to use this feature.
|
||||
|
||||
If you are new to using the GitLab GraphQL API please refer to the [Getting started with the GraphQL API page](../../../api/graphql/getting_started.md) or check out the [GraphQL Explorer](https://gitlab.com/-/graphql-explorer).
|
||||
|
||||
```json
|
||||
mutation createAgent {
|
||||
createClusterAgent(input: { projectPath: "path-to/your-awesome-project", name: "<agent-name>" }) {
|
||||
clusterAgent {
|
||||
id
|
||||
name
|
||||
}
|
||||
errors
|
||||
}
|
||||
}
|
||||
|
||||
mutation createToken {
|
||||
clusterAgentTokenCreate(input: { clusterAgentId: <cluster-agent-id-taken-from-the-previous-mutation> }) {
|
||||
secret # This is the value you need to use on the next step
|
||||
token {
|
||||
createdAt
|
||||
id
|
||||
}
|
||||
errors
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
Note that GraphQL will only show you the token once, after you've created it.
|
||||
### Creating the Kubernetes secret

Once the token has been generated, it needs to be applied to the Kubernetes cluster.

If you didn't previously define or create a namespace, do that first:

```shell
kubectl create namespace <YOUR-DESIRED-NAMESPACE>
```

Run the following command to create your Secret:

```shell
kubectl create secret generic -n <YOUR-DESIRED-NAMESPACE> gitlab-agent-token --from-literal=token='YOUR_AGENT_TOKEN'
```
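Before moving on, it can be worth double-checking that the Secret landed in the right namespace. A minimal sketch:

```shell
# Confirms the secret exists and carries a "token" key (values stay hidden)
kubectl describe secret gitlab-agent-token -n <YOUR-DESIRED-NAMESPACE>
```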
### Installing the Agent into the cluster

You are now ready to install the in-cluster component of the Agent. Below is an example YAML file of the Kubernetes resources required for the Agent to be installed.

A few details worth highlighting in the example below:

1. You can replace `gitlab-agent` with `<YOUR-DESIRED-NAMESPACE>`.
1. For the `kas-address` (Kubernetes Agent Server), you can replace `grpc://host.docker.internal:5005` with the address of the `kas` server that was initialized via your Helm install.
1. If you defined your own secret name, replace `gitlab-agent-token` with your secret name.

`./resources.yml`

```yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: gitlab-agent
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: gitlab-agent
spec:
  replicas: 1
  selector:
    matchLabels:
      app: gitlab-agent
  template:
    metadata:
      labels:
        app: gitlab-agent
    spec:
      serviceAccountName: gitlab-agent
      containers:
      - name: agent
        image: "registry.gitlab.com/gitlab-org/cluster-integration/gitlab-agent/agentk:latest"
        args:
        - --token-file=/config/token
        - --kas-address
        - grpc://host.docker.internal:5005 # {"$openapi":"kas-address"}
        volumeMounts:
        - name: token-volume
          mountPath: /config
      volumes:
      - name: token-volume
        secret:
          secretName: gitlab-agent-token
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxSurge: 0
      maxUnavailable: 1
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: gitlab-agent-write
rules:
- resources:
  - '*'
  apiGroups:
  - '*'
  verbs:
  - create
  - update
  - delete
  - patch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: gitlab-agent-write-binding
roleRef:
  name: gitlab-agent-write
  kind: ClusterRole
  apiGroup: rbac.authorization.k8s.io
subjects:
- name: gitlab-agent
  kind: ServiceAccount
  namespace: gitlab-agent
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: gitlab-agent-read
rules:
- resources:
  - '*'
  apiGroups:
  - '*'
  verbs:
  - get
  - list
  - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: gitlab-agent-read-binding
roleRef:
  name: gitlab-agent-read
  kind: ClusterRole
  apiGroup: rbac.authorization.k8s.io
subjects:
- name: gitlab-agent
  kind: ServiceAccount
  namespace: gitlab-agent
```

Apply the resources to your namespace:

```shell
kubectl apply -n gitlab-agent -f ./resources.yml
```
The Agent pod should now be running alongside your other workloads:

```plaintext
$ kubectl get pods --all-namespaces
NAMESPACE     NAME                               READY   STATUS    RESTARTS   AGE
gitlab-agent  gitlab-agent-77689f7dcb-5skqk      1/1     Running   0          51s
kube-system   coredns-f9fd979d6-n6wcw            1/1     Running   0          14m
kube-system   etcd-minikube                      1/1     Running   0          14m
kube-system   kube-apiserver-minikube            1/1     Running   0          14m
kube-system   kube-controller-manager-minikube   1/1     Running   0          14m
kube-system   kube-proxy-j6zdh                   1/1     Running   0          14m
kube-system   kube-scheduler-minikube            1/1     Running   0          14m
kube-system   storage-provisioner                1/1     Running   0          14m
```
### Creating a `manifest.yaml`

In the step above, you configured `config.yaml` to point to the GitLab projects the Agent should synchronize. Within each of those projects, you need to create a `manifest.yaml` file for the Agent to monitor. This `manifest.yaml` can be autogenerated by a templating engine or other means.

Example `manifest.yaml`:

```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
spec:
  selector:
    matchLabels:
      app: nginx
  replicas: 2
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: nginx:1.14.2
        ports:
        - containerPort: 80
```

The above file creates a simple NGINX deployment.
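Once the Agent has synchronized the manifest, the Deployment should be visible in the cluster. A sketch of how you might verify it, assuming the manifest is applied to the default namespace:

```shell
# "nginx-deployment" and the "app: nginx" label come from the example manifest above
kubectl get deployment nginx-deployment
kubectl get pods -l app=nginx
```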
Each time you commit and push a change to `manifest.yaml`, the Agent observes the change. Example log:

```plaintext
2020-09-15_14:09:04.87946 gitlab-k8s-agent : time="2020-09-15T10:09:04-04:00" level=info msg="Config: new commit" agent_id=1 commit_id=e6a3651f1faa2e928fe6120e254c122451be4eea
```
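That log line comes from the in-cluster Agent pod; a sketch of how you could tail it, assuming the Deployment name and namespace from the installation example above:

```shell
# Follow the Agent's log stream
kubectl logs -f deployment/gitlab-agent -n gitlab-agent
```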
## Example projects

Basic GitOps example deploying NGINX: [Configuration repository](https://gitlab.com/gitlab-org/configure/examples/kubernetes-agent), [Manifest repository](https://gitlab.com/gitlab-org/configure/examples/gitops-project)

@@ -76,3 +76,6 @@ If you've accidentally started the import process with the wrong account, follow

1. Revoke GitLab access to your Bitbucket account, essentially reversing the process in the following procedure: [Import your Bitbucket repositories](#import-your-bitbucket-repositories).

1. Sign out of the Bitbucket account. Follow the procedure linked from the previous step.

NOTE: **Note:**
To import a repository including LFS objects from a Bitbucket server repository, use the [Repo by URL](../import/repo_by_url.md) importer.

Binary file not shown. (New image added: 31 KiB.)

@@ -121,6 +121,29 @@ indistinguishably.

Alternatively, you can **require**
[Code Owner's approvals for Protected Branches](../protected_branches.md#protected-branches-approval-by-code-owners). **(PREMIUM)**

#### Merge Request approval segregation of duties

> [Introduced](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/40491) in [GitLab Starter](https://about.gitlab.com/pricing/) 13.4.

Managers or operators with [Reporter permissions](../../permissions.md#project-members-permissions)
to a project sometimes need to be required approvers of a merge request,
before a merge to a protected branch begins. These approvers aren't allowed
to push or merge code to any branches.

To enable this access:

1. [Create a new group](../../group/index.md#create-a-new-group), and then
   [add the user to the group](../../group/index.md#add-users-to-a-group),
   ensuring you select the Reporter role for the user.
1. [Share the project with your group](../members/share_project_with_groups.md#sharing-a-project-with-a-group-of-users),
   based on the Reporter role.
1. Navigate to your project's **Settings > General**, and in the
   **Merge request approvals** section, click **Expand**.
1. [Add the group](../../group/index.md#create-a-new-group) to the permission list
   for the protected branch.

![Shared group added to the merge request approval settings](img/shared_group_added_to_approval_settings_v13_4.png)

#### Adding / editing a default approval rule

To add or edit the default merge request approval rule:

@@ -67,6 +67,14 @@ list.

![Merge request diff file navigation](img/merge_request_diff_file_navigation.png)

### Collapsed files in the Changes view

> [Introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/232820) in GitLab 13.4.

When you review changes in the **Changes** tab, files with a large number of changes are collapsed
to improve performance. When files are collapsed, a warning appears at the top of the changes.
Click **Expand file** on any file to view the changes for that file.

### File-by-file diff navigation

> - [Introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/222790) in GitLab 13.2.

@@ -18,8 +18,7 @@ module Gitlab
          params_for_code_stage,
          params_for_test_stage,
          params_for_review_stage,
          params_for_staging_stage,
          params_for_production_stage
          params_for_staging_stage
        ]
      end

@@ -86,16 +85,6 @@ module Gitlab
          end_event_identifier: :merge_request_first_deployed_to_production
        }
      end

      def self.params_for_production_stage
        {
          name: 'production',
          custom: false,
          relative_position: 7,
          start_event_identifier: :issue_created,
          end_event_identifier: :production_stage_end
        }
      end
    end
  end
end

@@ -6,7 +6,7 @@ module Gitlab
      module StageEvents
        class ProductionStageEnd < StageEvent
          def self.name
            _("Issue first depoloyed to production")
            _("Issue first deployed to production")
          end

          def self.identifier

@@ -10,14 +10,28 @@ module Gitlab
        end

        def has_details?
          false
          !!details_path
        end

        def details_path
          return unless Feature.enabled?(:ci_bridge_pipeline_details, subject.project, default_enabled: true)
          return unless can?(user, :read_pipeline, downstream_pipeline)

          project_pipeline_path(downstream_project, downstream_pipeline)
        end

        def has_action?
          false
        end

        def details_path
        private

        def downstream_pipeline
          subject.downstream_pipeline
        end

        def downstream_project
          downstream_pipeline&.project
        end
      end
    end

@@ -1,38 +0,0 @@
# frozen_string_literal: true

module Gitlab
  module CycleAnalytics
    class ProductionStage < BaseStage
      include ProductionHelper

      def start_time_attrs
        @start_time_attrs ||= issue_table[:created_at]
      end

      def end_time_attrs
        @end_time_attrs ||= mr_metrics_table[:first_deployed_to_production_at]
      end

      def name
        :production
      end

      def title
        s_('CycleAnalyticsStage|Total')
      end

      def legend
        _("Related Issues")
      end

      def description
        _("From issue creation until deploy to production")
      end

      def query
        # Limit to merge requests that have been deployed to production after `@from`
        query.where(mr_metrics_table[:first_deployed_to_production_at].gteq(@from))
      end
    end
  end
end

@@ -0,0 +1,101 @@
# frozen_string_literal: true
module Gitlab
  module Lfs
    # Gitlab::Lfs::Client implements a simple LFS client, designed to talk to
    # LFS servers as described in these documents:
    #   * https://github.com/git-lfs/git-lfs/blob/master/docs/api/batch.md
    #   * https://github.com/git-lfs/git-lfs/blob/master/docs/api/basic-transfers.md
    class Client
      attr_reader :base_url

      def initialize(base_url, credentials:)
        @base_url = base_url
        @credentials = credentials
      end

      def batch(operation, objects)
        body = {
          operation: operation,
          transfers: ['basic'],
          # We don't know `ref`, so can't send it
          objects: objects.map { |object| { oid: object.oid, size: object.size } }
        }

        rsp = Gitlab::HTTP.post(
          batch_url,
          basic_auth: basic_auth,
          body: body.to_json,
          headers: { 'Content-Type' => 'application/vnd.git-lfs+json' }
        )

        raise BatchSubmitError unless rsp.success?

        # HTTParty provides rsp.parsed_response, but it only kicks in for the
        # application/json content type in the response, which we can't rely on
        body = Gitlab::Json.parse(rsp.body)
        transfer = body.fetch('transfer', 'basic')

        raise UnsupportedTransferError.new(transfer.inspect) unless transfer == 'basic'

        body
      end

      def upload(object, upload_action, authenticated:)
        file = object.file.open

        params = {
          body_stream: file,
          headers: {
            'Content-Length' => object.size.to_s,
            'Content-Type' => 'application/octet-stream'
          }.merge(upload_action['header'] || {})
        }

        params[:basic_auth] = basic_auth unless authenticated

        rsp = Gitlab::HTTP.put(upload_action['href'], params)

        raise ObjectUploadError unless rsp.success?
      ensure
        file&.close
      end

      private

      attr_reader :credentials

      def batch_url
        base_url + '/info/lfs/objects/batch'
      end

      def basic_auth
        return unless credentials[:auth_method] == "password"

        { username: credentials[:user], password: credentials[:password] }
      end

      class BatchSubmitError < StandardError
        def message
          "Failed to submit batch"
        end
      end

      class UnsupportedTransferError < StandardError
        def initialize(transfer = nil)
          super
          @transfer = transfer
        end

        def message
          "Unsupported transfer: #{@transfer}"
        end
      end

      class ObjectUploadError < StandardError
        def message
          "Failed to upload object"
        end
      end
    end
  end
end

@@ -12,7 +12,11 @@ module Gitlab
    end

    def log_filename
      File.join(Rails.root, 'log', 'sidekiq_exporter.log')
      if settings['log_enabled']
        File.join(Rails.root, 'log', 'sidekiq_exporter.log')
      else
        File::NULL
      end
    end

    private

@@ -13905,7 +13905,7 @@ msgstr ""
msgid "Issue events"
msgstr ""

msgid "Issue first depoloyed to production"
msgid "Issue first deployed to production"
msgstr ""

msgid "Issue label"
@@ -25306,9 +25306,6 @@ msgstr ""
msgid "The private key to use when a client certificate is provided. This value is encrypted at rest."
msgstr ""

msgid "The production stage shows the total time it takes between creating an issue and deploying the code to production. The data will be automatically added once you have completed the full idea to production cycle."
msgstr ""

msgid "The project can be accessed by any logged in user."
msgstr ""

@@ -25399,9 +25396,6 @@ msgstr ""
msgid "The time taken by each data entry gathered by that stage."
msgstr ""

msgid "The total stage shows the time it takes between creating an issue and deploying the code to production. The data will be automatically added once you have completed the full idea to production cycle."
msgstr ""

msgid "The update action will time out after %{number_of_minutes} minutes. For big repositories, use a clone/push combination."
msgstr ""

@@ -368,6 +368,15 @@ module QA
        ENV['MAILHOG_HOSTNAME']
      end

      # Get the version of GitLab currently being tested against
      # @return String Version
      # @example
      #   > Env.deploy_version
      #   #=> 13.3.4-ee.0
      def deploy_version
        ENV['DEPLOY_VERSION']
      end

      private

      def remote_grid_credentials

@@ -0,0 +1,45 @@
# frozen_string_literal: true

module QA
  RSpec.describe 'Create' do
    describe 'Push mirror a repository over HTTP' do
      it 'configures and syncs LFS objects for a (push) mirrored repository', testcase: 'https://gitlab.com/gitlab-org/quality/testcases/-/issues/414' do
        Runtime::Feature.enable_and_verify('push_mirror_syncs_lfs')
        Runtime::Browser.visit(:gitlab, Page::Main::Login)
        Page::Main::Login.perform(&:sign_in_using_credentials)

        target_project = Resource::Project.fabricate_via_api! do |project|
          project.name = 'push-mirror-target-project'
        end
        target_project_uri = target_project.repository_http_location.uri
        target_project_uri.user = Runtime::User.username

        source_project_push = Resource::Repository::ProjectPush.fabricate! do |push|
          push.file_name = 'README.md'
          push.file_content = '# This is a test project'
          push.commit_message = 'Add README.md'
          push.use_lfs = true
        end
        source_project_push.project.visit!

        Page::Project::Menu.perform(&:go_to_repository_settings)
        Page::Project::Settings::Repository.perform do |settings|
          settings.expand_mirroring_repositories do |mirror_settings|
            # Configure the source project to push to the target project
            mirror_settings.repository_url = target_project_uri
            mirror_settings.mirror_direction = 'Push'
            mirror_settings.authentication_method = 'Password'
            mirror_settings.password = Runtime::User.password
            mirror_settings.mirror_repository
            mirror_settings.update target_project_uri
          end
        end

        # Check that the target project has the commit from the source
        target_project.visit!
        expect(page).to have_content('README.md')
        expect(page).to have_content('The rendered file could not be displayed because it is stored in LFS')
      end
    end
  end
end

@@ -76,9 +76,6 @@ RSpec.describe 'Value Stream Analytics', :js do

      click_stage('Staging')
      expect_build_to_be_present

      click_stage('Total')
      expect_issue_to_be_present
    end

    context "when I change the time period observed" do

@@ -0,0 +1,107 @@
import { shallowMount } from '@vue/test-utils';
import { GlLoadingIcon } from '@gitlab/ui';
import BoardSidebarItem from '~/boards/components/sidebar/board_editable_item.vue';

describe('boards sidebar remove issue', () => {
  let wrapper;

  const findLoader = () => wrapper.find(GlLoadingIcon);
  const findEditButton = () => wrapper.find('[data-testid="edit-button"]');
  const findTitle = () => wrapper.find('[data-testid="title"]');
  const findCollapsed = () => wrapper.find('[data-testid="collapsed-content"]');
  const findExpanded = () => wrapper.find('[data-testid="expanded-content"]');

  const createComponent = ({ props = {}, slots = {}, canUpdate = false } = {}) => {
    wrapper = shallowMount(BoardSidebarItem, {
      attachTo: document.body,
      provide: { canUpdate },
      propsData: props,
      slots,
    });
  };

  afterEach(() => {
    wrapper.destroy();
    wrapper = null;
  });

  describe('template', () => {
    it('renders title', () => {
      const title = 'Sidebar item title';
      createComponent({ props: { title } });

      expect(findTitle().text()).toBe(title);
    });

    it('hides edit button, loader and expanded content by default', () => {
      createComponent();

      expect(findEditButton().exists()).toBe(false);
      expect(findLoader().exists()).toBe(false);
      expect(findExpanded().isVisible()).toBe(false);
    });

    it('shows "None" if empty collapsed slot', () => {
      createComponent({});

      expect(findCollapsed().text()).toBe('None');
    });

    it('renders collapsed content by default', () => {
      const slots = { collapsed: '<div>Collapsed content</div>' };
      createComponent({ slots });

      expect(findCollapsed().text()).toBe('Collapsed content');
    });

    it('shows edit button if can update', () => {
      createComponent({ canUpdate: true });

      expect(findEditButton().exists()).toBe(true);
    });

    it('shows loading icon if loading', () => {
      createComponent({ props: { loading: true } });

      expect(findLoader().exists()).toBe(true);
    });

    it('shows expanded content and hides collapsed content when clicking edit button', async () => {
      const slots = { default: '<div>Select item</div>' };
      createComponent({ canUpdate: true, slots });
      findEditButton().vm.$emit('click');

      return wrapper.vm.$nextTick().then(() => {
        expect(findCollapsed().isVisible()).toBe(false);
        expect(findExpanded().isVisible()).toBe(true);
        expect(findExpanded().text()).toBe('Select item');
      });
    });
  });

  describe('collapsing an item by offclicking', () => {
    beforeEach(async () => {
      createComponent({ canUpdate: true });
      findEditButton().vm.$emit('click');
      await wrapper.vm.$nextTick();
    });

    it('hides expanded section and displays collapsed section', async () => {
      expect(findExpanded().isVisible()).toBe(true);
      document.body.click();

      await wrapper.vm.$nextTick();

      expect(findCollapsed().isVisible()).toBe(true);
      expect(findExpanded().isVisible()).toBe(false);
    });

    it('emits changed event', async () => {
      document.body.click();

      await wrapper.vm.$nextTick();

      expect(wrapper.emitted().changed[1][0]).toBe(false);
    });
  });
});

@@ -5,21 +5,21 @@ import { membersJsonString, membersParsed } from './mock_data';

describe('initGroupMembersApp', () => {
  let el;
  let vm;
  let wrapper;

  const setup = () => {
    const vm = initGroupMembersApp(el);
    vm = initGroupMembersApp(el);
    wrapper = createWrapper(vm);
  };

  const getGroupMembersApp = () => wrapper.find(GroupMembersApp);

  beforeEach(() => {
    el = document.createElement('div');
    el.setAttribute('data-members', membersJsonString);
    el.setAttribute('data-current-user-id', '123');
    el.setAttribute('data-group-id', '234');

    window.gon = { current_user_id: 123 };

    document.body.appendChild(el);
  });

@@ -31,28 +31,36 @@ describe('initGroupMembersApp', () => {
    wrapper = null;
  });

  it('parses and passes `currentUserId` prop to `GroupMembersApp`', () => {
  it('renders `GroupMembersApp`', () => {
    setup();

    expect(getGroupMembersApp().props('currentUserId')).toBe(123);
    expect(wrapper.find(GroupMembersApp).exists()).toBe(true);
  });

  it('does not pass `currentUserId` prop if not provided by the data attribute (user is not logged in)', () => {
    el.removeAttribute('data-current-user-id');
  it('sets `currentUserId` in Vuex store', () => {
    setup();

    expect(getGroupMembersApp().props('currentUserId')).toBeNull();
    expect(vm.$store.state.currentUserId).toBe(123);
  });

  it('parses and passes `groupId` prop to `GroupMembersApp`', () => {
    setup();
  describe('when `gon.current_user_id` is not set (user is not logged in)', () => {
    it('sets `currentUserId` as `null` in Vuex store', () => {
      window.gon = {};
      setup();

    expect(getGroupMembersApp().props('groupId')).toBe(234);
      expect(vm.$store.state.currentUserId).toBeNull();
    });
  });

  it('parses and passes `members` prop to `GroupMembersApp`', () => {
  it('parses and sets `data-group-id` as `sourceId` in Vuex store', () => {
    setup();

    expect(getGroupMembersApp().props('members')).toEqual(membersParsed);
    expect(vm.$store.state.sourceId).toBe(234);
  });

  it('parses and sets `members` in Vuex store', () => {
    setup();

    expect(vm.$store.state.members).toEqual(membersParsed);
  });
});

|
|||
|
|
@ -6,6 +6,7 @@ describe('pipeline graph job item', () => {
|
|||
let wrapper;
|
||||
|
||||
const findJobWithoutLink = () => wrapper.find('[data-testid="job-without-link"]');
|
||||
const findJobWithLink = () => wrapper.find('[data-testid="job-with-link"]');
|
||||
|
||||
const createWrapper = propsData => {
|
||||
wrapper = mount(JobItem, {
|
||||
|
|
@ -36,7 +37,7 @@ describe('pipeline graph job item', () => {
|
|||
};
|
||||
const mockJobWithoutDetails = {
|
||||
id: 4257,
|
||||
name: 'test',
|
||||
name: 'job_without_details',
|
||||
status: {
|
||||
icon: 'status_success',
|
||||
text: 'passed',
|
||||
|
|
@ -60,7 +61,7 @@ describe('pipeline graph job item', () => {
|
|||
|
||||
expect(link.attributes('href')).toBe(mockJob.status.details_path);
|
||||
|
||||
expect(link.attributes('title')).toEqual(`${mockJob.name} - ${mockJob.status.label}`);
|
||||
expect(link.attributes('title')).toBe(`${mockJob.name} - ${mockJob.status.label}`);
|
||||
|
||||
expect(wrapper.find('.ci-status-icon-success').exists()).toBe(true);
|
||||
|
||||
|
|
@ -84,11 +85,10 @@ describe('pipeline graph job item', () => {
|
|||
expect(wrapper.find('.ci-status-icon-success').exists()).toBe(true);
|
||||
expect(wrapper.find('a').exists()).toBe(false);
|
||||
|
||||
expect(trimText(wrapper.find('.ci-status-text').text())).toEqual(mockJob.name);
|
||||
expect(trimText(wrapper.find('.ci-status-text').text())).toBe(mockJobWithoutDetails.name);
|
||||
});
|
||||
|
||||
it('should apply hover class and provided class name', () => {
|
||||
expect(findJobWithoutLink().classes()).toContain(triggerActiveClass);
|
||||
expect(findJobWithoutLink().classes()).toContain('css-class-job-name');
|
||||
});
|
||||
});
|
||||
|
|
@ -139,9 +139,7 @@ describe('pipeline graph job item', () => {
|
|||
},
|
||||
});
|
||||
|
||||
expect(wrapper.find('.js-job-component-tooltip').attributes('title')).toEqual(
|
||||
'test - success',
|
||||
);
|
||||
expect(wrapper.find('.js-job-component-tooltip').attributes('title')).toBe('test - success');
|
||||
});
|
||||
});
|
||||
|
||||
|
|
@ -151,29 +149,39 @@ describe('pipeline graph job item', () => {
|
|||
job: delayedJobFixture,
|
||||
});
|
||||
|
||||
expect(wrapper.find('.js-pipeline-graph-job-link').attributes('title')).toEqual(
|
||||
expect(findJobWithLink().attributes('title')).toBe(
|
||||
`delayed job - delayed manual action (${wrapper.vm.remainingTime})`,
|
||||
);
|
||||
});
|
||||
});
|
||||
|
||||
describe('trigger job highlighting', () => {
|
||||
it('trigger job should stay highlighted when downstream is expanded', () => {
|
||||
createWrapper({
|
||||
job: mockJobWithoutDetails,
|
||||
pipelineExpanded: { jobName: mockJob.name, expanded: true },
|
||||
});
|
||||
it.each`
|
||||
job | jobName | expanded | link
|
||||
${mockJob} | ${mockJob.name} | ${true} | ${true}
|
||||
${mockJobWithoutDetails} | ${mockJobWithoutDetails.name} | ${true} | ${false}
|
||||
`(
|
||||
`trigger job should stay highlighted when downstream is expanded`,
|
||||
({ job, jobName, expanded, link }) => {
|
||||
createWrapper({ job, pipelineExpanded: { jobName, expanded } });
|
||||
const findJobEl = link ? findJobWithLink : findJobWithoutLink;
|
||||
|
||||
expect(findJobWithoutLink().classes()).toContain(triggerActiveClass);
|
||||
});
|
||||
expect(findJobEl().classes()).toContain(triggerActiveClass);
|
||||
},
|
||||
);
|
||||
|
||||
it('trigger job should not be highlighted when downstream is closed', () => {
|
||||
createWrapper({
|
||||
job: mockJobWithoutDetails,
|
||||
pipelineExpanded: { jobName: mockJob.name, expanded: false },
|
||||
});
|
||||
it.each`
|
||||
job | jobName | expanded | link
|
||||
${mockJob} | ${mockJob.name} | ${false} | ${true}
|
||||
${mockJobWithoutDetails} | ${mockJobWithoutDetails.name} | ${false} | ${false}
|
||||
`(
|
||||
`trigger job should not be highlighted when downstream is not expanded`,
|
||||
({ job, jobName, expanded, link }) => {
|
||||
createWrapper({ job, pipelineExpanded: { jobName, expanded } });
|
||||
const findJobEl = link ? findJobWithLink : findJobWithoutLink;
|
||||
|
||||
expect(findJobWithoutLink().classes()).not.toContain(triggerActiveClass);
|
||||
});
|
||||
expect(findJobEl().classes()).not.toContain(triggerActiveClass);
|
||||
},
|
||||
);
|
||||
});
|
||||
});
|
||||
|
|
|
|||
|
|
@@ -4,7 +4,7 @@ import { shallowMount, createLocalVue } from '@vue/test-utils';
import waitForPromises from 'helpers/wait_for_promises';
import ReleasesApp from '~/releases/components/app_index.vue';
import createStore from '~/releases/stores';
import listModule from '~/releases/stores/modules/list';
import createListModule from '~/releases/stores/modules/list';
import api from '~/api';
import {
  pageInfoHeadersWithoutPagination,
@@ -35,6 +35,8 @@ describe('Releases App ', () => {
  };

  const createComponent = (propsData = defaultProps) => {
    const listModule = createListModule({});

    fetchReleaseSpy = jest.spyOn(listModule.actions, 'fetchReleases');

    const store = createStore({

@@ -0,0 +1,175 @@
import Vuex from 'vuex';
import { mount, createLocalVue } from '@vue/test-utils';
import createStore from '~/releases/stores';
import createListModule from '~/releases/stores/modules/list';
import ReleasesPaginationGraphql from '~/releases/components/releases_pagination_graphql.vue';
import { historyPushState } from '~/lib/utils/common_utils';

jest.mock('~/lib/utils/common_utils', () => ({
  ...jest.requireActual('~/lib/utils/common_utils'),
  historyPushState: jest.fn(),
}));

const localVue = createLocalVue();
localVue.use(Vuex);

describe('~/releases/components/releases_pagination_graphql.vue', () => {
  let wrapper;
  let listModule;

  const cursors = {
    startCursor: 'startCursor',
    endCursor: 'endCursor',
  };

  const projectPath = 'my/project';

  const createComponent = pageInfo => {
    listModule = createListModule({ projectPath });

    listModule.state.graphQlPageInfo = pageInfo;

    listModule.actions.fetchReleasesGraphQl = jest.fn();

    wrapper = mount(ReleasesPaginationGraphql, {
      store: createStore({
        modules: {
          list: listModule,
        },
        featureFlags: {},
      }),
      localVue,
    });
  };

  afterEach(() => {
    wrapper.destroy();
    wrapper = null;
  });

  const findPrevButton = () => wrapper.find('[data-testid="prevButton"]');
  const findNextButton = () => wrapper.find('[data-testid="nextButton"]');

  const expectDisabledPrev = () => {
    expect(findPrevButton().attributes().disabled).toBe('disabled');
  };
  const expectEnabledPrev = () => {
    expect(findPrevButton().attributes().disabled).toBe(undefined);
  };
  const expectDisabledNext = () => {
    expect(findNextButton().attributes().disabled).toBe('disabled');
  };
  const expectEnabledNext = () => {
    expect(findNextButton().attributes().disabled).toBe(undefined);
  };

  describe('when there is only one page of results', () => {
    beforeEach(() => {
      createComponent({
        hasPreviousPage: false,
        hasNextPage: false,
      });
    });

    it('does not render anything', () => {
      expect(wrapper.isEmpty()).toBe(true);
    });
  });

  describe('when there is a next page, but not a previous page', () => {
    beforeEach(() => {
      createComponent({
        hasPreviousPage: false,
        hasNextPage: true,
      });
    });

    it('renders a disabled "Prev" button', () => {
      expectDisabledPrev();
    });

    it('renders an enabled "Next" button', () => {
      expectEnabledNext();
    });
  });

  describe('when there is a previous page, but not a next page', () => {
    beforeEach(() => {
      createComponent({
        hasPreviousPage: true,
        hasNextPage: false,
      });
    });

    it('renders a enabled "Prev" button', () => {
      expectEnabledPrev();
    });

    it('renders an disabled "Next" button', () => {
      expectDisabledNext();
    });
  });

  describe('when there is both a previous page and a next page', () => {
    beforeEach(() => {
      createComponent({
        hasPreviousPage: true,
        hasNextPage: true,
      });
    });

    it('renders a enabled "Prev" button', () => {
      expectEnabledPrev();
    });

    it('renders an enabled "Next" button', () => {
      expectEnabledNext();
    });
  });

  describe('button behavior', () => {
    beforeEach(() => {
      createComponent({
        hasPreviousPage: true,
        hasNextPage: true,
        ...cursors,
      });
    });

    describe('next button behavior', () => {
      beforeEach(() => {
        findNextButton().trigger('click');
      });

      it('calls fetchReleasesGraphQl with the correct after cursor', () => {
        expect(listModule.actions.fetchReleasesGraphQl.mock.calls).toEqual([
          [expect.anything(), { projectPath, after: cursors.endCursor }],
        ]);
      });

      it('calls historyPushState with the new URL', () => {
        expect(historyPushState.mock.calls).toEqual([
          [expect.stringContaining(`?after=${cursors.endCursor}`)],
        ]);
      });
    });

    describe('previous button behavior', () => {
      beforeEach(() => {
        findPrevButton().trigger('click');
      });

      it('calls fetchReleasesGraphQl with the correct before cursor', () => {
        expect(listModule.actions.fetchReleasesGraphQl.mock.calls).toEqual([
          [expect.anything(), { projectPath, before: cursors.startCursor }],
        ]);
      });

      it('calls historyPushState with the new URL', () => {
        expect(historyPushState.mock.calls).toEqual([
          [expect.stringContaining(`?before=${cursors.startCursor}`)],
        ]);
      });
    });
  });
});

@@ -0,0 +1,72 @@
import Vuex from 'vuex';
import { mount, createLocalVue } from '@vue/test-utils';
import { GlPagination } from '@gitlab/ui';
import ReleasesPaginationRest from '~/releases/components/releases_pagination_rest.vue';
import createStore from '~/releases/stores';
import createListModule from '~/releases/stores/modules/list';
import * as commonUtils from '~/lib/utils/common_utils';

commonUtils.historyPushState = jest.fn();

const localVue = createLocalVue();
localVue.use(Vuex);

describe('~/releases/components/releases_pagination_rest.vue', () => {
  let wrapper;
  let listModule;

  const projectId = 19;

  const createComponent = pageInfo => {
    listModule = createListModule({ projectId });

    listModule.state.pageInfo = pageInfo;

    listModule.actions.fetchReleasesRest = jest.fn();

    wrapper = mount(ReleasesPaginationRest, {
      store: createStore({
        modules: {
          list: listModule,
        },
        featureFlags: {},
      }),
      localVue,
    });
  };

  const findGlPagination = () => wrapper.find(GlPagination);

  afterEach(() => {
    wrapper.destroy();
    wrapper = null;
  });

  describe('when a page number is clicked', () => {
    const newPage = 2;

    beforeEach(() => {
      createComponent({
        perPage: 20,
        page: 1,
        total: 40,
        totalPages: 2,
        nextPage: 2,
      });

      findGlPagination().vm.$emit('input', newPage);
    });

    it('calls fetchReleasesRest with the correct page', () => {
      expect(listModule.actions.fetchReleasesRest.mock.calls).toEqual([
        [expect.anything(), { projectId, page: newPage }],
      ]);
    });

    it('calls historyPushState with the new URL', () => {
      expect(commonUtils.historyPushState.mock.calls).toEqual([
        [expect.stringContaining(`?page=${newPage}`)],
      ]);
    });
  });
});

@@ -0,0 +1,52 @@
import { shallowMount, createLocalVue } from '@vue/test-utils';
import Vuex from 'vuex';
import ReleasesPagination from '~/releases/components/releases_pagination.vue';
import ReleasesPaginationGraphql from '~/releases/components/releases_pagination_graphql.vue';
import ReleasesPaginationRest from '~/releases/components/releases_pagination_rest.vue';

const localVue = createLocalVue();
localVue.use(Vuex);

describe('~/releases/components/releases_pagination.vue', () => {
  let wrapper;

  const createComponent = useGraphQLEndpoint => {
    const store = new Vuex.Store({
      getters: {
        useGraphQLEndpoint: () => useGraphQLEndpoint,
      },
    });

    wrapper = shallowMount(ReleasesPagination, { store, localVue });
  };

  afterEach(() => {
    wrapper.destroy();
    wrapper = null;
  });

  const findRestPagination = () => wrapper.find(ReleasesPaginationRest);
  const findGraphQlPagination = () => wrapper.find(ReleasesPaginationGraphql);

  describe('when one of necessary feature flags is disabled', () => {
    beforeEach(() => {
      createComponent(false);
    });

    it('renders the REST pagination component', () => {
      expect(findRestPagination().exists()).toBe(true);
      expect(findGraphQlPagination().exists()).toBe(false);
    });
  });

  describe('when all the necessary feature flags are enabled', () => {
    beforeEach(() => {
      createComponent(true);
    });

    it('renders the GraphQL pagination component', () => {
      expect(findGraphQlPagination().exists()).toBe(true);
      expect(findRestPagination().exists()).toBe(false);
    });
  });
});

@@ -1,5 +1,6 @@
import Vuex from 'vuex';
import { GlFormInput } from '@gitlab/ui';
import { shallowMount, mount } from '@vue/test-utils';
import { shallowMount, mount, createLocalVue } from '@vue/test-utils';
import TagFieldExisting from '~/releases/components/tag_field_existing.vue';
import createStore from '~/releases/stores';
import createDetailModule from '~/releases/stores/modules/detail';
@@ -7,6 +8,9 @@ import createDetailModule from '~/releases/stores/modules/detail';
const TEST_TAG_NAME = 'test-tag-name';
const TEST_DOCS_PATH = '/help/test/docs/path';

const localVue = createLocalVue();
localVue.use(Vuex);

describe('releases/components/tag_field_existing', () => {
  let store;
  let wrapper;
@@ -14,6 +18,7 @@ describe('releases/components/tag_field_existing', () => {
  const createComponent = (mountFn = shallowMount) => {
    wrapper = mountFn(TagFieldExisting, {
      store,
      localVue,
    });
  };

@@ -6,7 +6,7 @@ import {
  receiveReleasesSuccess,
  receiveReleasesError,
} from '~/releases/stores/modules/list/actions';
import state from '~/releases/stores/modules/list/state';
import createState from '~/releases/stores/modules/list/state';
import * as types from '~/releases/stores/modules/list/mutation_types';
import api from '~/api';
import { gqClient, convertGraphQLResponse } from '~/releases/util';
@@ -27,7 +27,7 @@ describe('Releases State actions', () => {

  beforeEach(() => {
    mockedState = {
      ...state(),
      ...createState({}),
      featureFlags: {
        graphqlReleaseData: true,
        graphqlReleasesPage: true,

@@ -1,4 +1,4 @@
import state from '~/releases/stores/modules/list/state';
import createState from '~/releases/stores/modules/list/state';
import mutations from '~/releases/stores/modules/list/mutations';
import * as types from '~/releases/stores/modules/list/mutation_types';
import { parseIntPagination } from '~/lib/utils/common_utils';
@@ -9,7 +9,7 @@ describe('Releases Store Mutations', () => {
  let pageInfo;

  beforeEach(() => {
    stateCopy = state();
    stateCopy = createState({});
    pageInfo = parseIntPagination(pageInfoHeadersWithoutPagination);
  });

@@ -71,6 +71,14 @@ RSpec.describe Resolvers::ProjectsResolver do
          is_expected.to contain_exactly(project, private_project)
        end
      end

      context 'when ids filter is provided' do
        let(:filters) { { ids: [project.to_global_id.to_s] } }

        it 'returns matching project' do
          is_expected.to contain_exactly(project)
        end
      end
    end
  end
end

@@ -0,0 +1,49 @@
# frozen_string_literal: true

require 'spec_helper'

RSpec.describe Gitlab::Ci::Status::Bridge::Common do
  let_it_be(:user) { create(:user) }
  let_it_be(:bridge) { create(:ci_bridge) }
  let_it_be(:downstream_pipeline) { create(:ci_pipeline) }

  before_all do
    create(:ci_sources_pipeline,
           source_pipeline: bridge.pipeline,
           source_project: bridge.pipeline.project,
           source_job: bridge,
           pipeline: downstream_pipeline,
           project: downstream_pipeline.project)
  end

  subject do
    Gitlab::Ci::Status::Core
      .new(bridge, user)
      .extend(described_class)
  end

  describe '#details_path' do
    context 'when user has access to read downstream pipeline' do
      before do
        downstream_pipeline.project.add_developer(user)
      end

      it { expect(subject).to have_details }
      it { expect(subject.details_path).to include "pipelines/#{downstream_pipeline.id}" }

      context 'when ci_bridge_pipeline_details is disabled' do
        before do
          stub_feature_flags(ci_bridge_pipeline_details: false)
        end

        it { expect(subject).not_to have_details }
        it { expect(subject.details_path).to be_nil }
      end
    end

    context 'when user does not have access to read downstream pipeline' do
      it { expect(subject).not_to have_details }
      it { expect(subject.details_path).to be_nil }
    end
  end
end

@@ -306,48 +306,6 @@ RSpec.describe 'cycle analytics events' do
    end
  end

  describe '#production_events', :sidekiq_might_not_need_inline do
    let(:stage) { :production }
    let!(:context) { create(:issue, project: project, created_at: 2.days.ago) }

    before do
      merge_merge_requests_closing_issue(user, project, context)
      deploy_master(user, project)
    end

    it 'has the total time' do
      expect(events.first[:total_time]).not_to be_empty
    end

    it 'has a title' do
      expect(events.first[:title]).to eq(context.title)
    end

    it 'has the URL' do
      expect(events.first[:url]).not_to be_nil
    end

    it 'has an iid' do
      expect(events.first[:iid]).to eq(context.iid.to_s)
    end

    it 'has a created_at timestamp' do
      expect(events.first[:created_at]).to end_with('ago')
    end

    it "has the author's URL" do
      expect(events.first[:author][:web_url]).not_to be_nil
    end

    it "has the author's avatar URL" do
      expect(events.first[:author][:avatar_url]).not_to be_nil
    end

    it "has the author's name" do
      expect(events.first[:author][:name]).to eq(context.author.name)
    end
  end

  def setup(context)
    milestone = create(:milestone, project: project)
    context.update(milestone: milestone)

@@ -21,10 +21,6 @@ RSpec.describe Gitlab::CycleAnalytics::Permissions do
      expect(subject[:staging]).to eq(false)
    end

    it 'has no permissions to production stage' do
      expect(subject[:production]).to eq(false)
    end

    it 'has no permissions to code stage' do
      expect(subject[:code]).to eq(false)
    end
@@ -55,10 +51,6 @@ RSpec.describe Gitlab::CycleAnalytics::Permissions do
      expect(subject[:staging]).to eq(true)
    end

    it 'has permissions to production stage' do
      expect(subject[:production]).to eq(true)
    end

    it 'has permissions to code stage' do
      expect(subject[:code]).to eq(true)
    end
@@ -121,9 +113,5 @@ RSpec.describe Gitlab::CycleAnalytics::Permissions do
    it 'has no permissions to issue stage' do
      expect(subject[:issue]).to eq(false)
    end

    it 'has no permissions to production stage' do
      expect(subject[:production]).to eq(false)
    end
  end
end

@@ -1,9 +0,0 @@
# frozen_string_literal: true

require 'spec_helper'

RSpec.describe Gitlab::CycleAnalytics::ProductionStage do
  let(:stage_name) { 'Total' }

  it_behaves_like 'base stage'
end

@@ -0,0 +1,148 @@
# frozen_string_literal: true

require 'spec_helper'

RSpec.describe Gitlab::Lfs::Client do
  let(:base_url) { "https://example.com" }
  let(:username) { 'user' }
  let(:password) { 'password' }
  let(:credentials) { { user: username, password: password, auth_method: 'password' } }

  let(:basic_auth_headers) do
    { 'Authorization' => "Basic #{Base64.strict_encode64("#{username}:#{password}")}" }
  end

  let(:upload_action) do
    {
      "href" => "#{base_url}/some/file",
      "header" => {
        "Key" => "value"
      }
    }
  end

  subject(:lfs_client) { described_class.new(base_url, credentials: credentials) }

  describe '#batch' do
    let_it_be(:objects) { create_list(:lfs_object, 3) }

    context 'server returns 200 OK' do
      it 'makes a successful batch request' do
        stub = stub_batch(
          objects: objects,
          headers: basic_auth_headers
        ).to_return(
          status: 200,
          body: { 'objects' => 'anything', 'transfer' => 'basic' }.to_json,
          headers: { 'Content-Type' => 'application/vnd.git-lfs+json' }
        )

        result = lfs_client.batch('upload', objects)

        expect(stub).to have_been_requested
        expect(result).to eq('objects' => 'anything', 'transfer' => 'basic')
      end
    end

    context 'server returns 400 error' do
      it 'raises an error' do
        stub_batch(objects: objects, headers: basic_auth_headers).to_return(status: 400)

        expect { lfs_client.batch('upload', objects) }.to raise_error(/Failed/)
      end
    end

    context 'server returns 500 error' do
      it 'raises an error' do
        stub_batch(objects: objects, headers: basic_auth_headers).to_return(status: 400)

        expect { lfs_client.batch('upload', objects) }.to raise_error(/Failed/)
      end
    end

    context 'server returns an exotic transfer method' do
      it 'raises an error' do
        stub_batch(
          objects: objects,
          headers: basic_auth_headers
        ).to_return(
          status: 200,
          body: { 'transfer' => 'carrier-pigeon' }.to_json,
          headers: { 'Content-Type' => 'application/vnd.git-lfs+json' }
        )

        expect { lfs_client.batch('upload', objects) }.to raise_error(/Unsupported transfer/)
      end
    end

    def stub_batch(objects:, headers:, operation: 'upload', transfer: 'basic')
      objects = objects.map { |o| { oid: o.oid, size: o.size } }
      body = { operation: operation, 'transfers': [transfer], objects: objects }.to_json

      stub_request(:post, base_url + '/info/lfs/objects/batch').with(body: body, headers: headers)
    end
  end

  describe "#upload" do
    let_it_be(:object) { create(:lfs_object) }

    context 'server returns 200 OK to an authenticated request' do
      it "makes an HTTP PUT with expected parameters" do
        stub_upload(object: object, headers: upload_action['header']).to_return(status: 200)

        lfs_client.upload(object, upload_action, authenticated: true)
      end
    end

    context 'server returns 200 OK to an unauthenticated request' do
      it "makes an HTTP PUT with expected parameters" do
        stub = stub_upload(
          object: object,
          headers: basic_auth_headers.merge(upload_action['header'])
        ).to_return(status: 200)

        lfs_client.upload(object, upload_action, authenticated: false)

        expect(stub).to have_been_requested
      end
    end

    context 'LFS object has no file' do
      let(:object) { LfsObject.new }

      it 'makes an HTTP PUT with expected parameters' do
        stub = stub_upload(
          object: object,
          headers: upload_action['header']
        ).to_return(status: 200)

        lfs_client.upload(object, upload_action, authenticated: true)

        expect(stub).to have_been_requested
      end
    end

    context 'server returns 400 error' do
      it 'raises an error' do
        stub_upload(object: object, headers: upload_action['header']).to_return(status: 400)

        expect { lfs_client.upload(object, upload_action, authenticated: true) }.to raise_error(/Failed/)
      end
    end

    context 'server returns 500 error' do
      it 'raises an error' do
        stub_upload(object: object, headers: upload_action['header']).to_return(status: 500)

        expect { lfs_client.upload(object, upload_action, authenticated: true) }.to raise_error(/Failed/)
      end
    end

    def stub_upload(object:, headers:)
      stub_request(:put, upload_action['href']).with(
        body: object.file.read,
        headers: headers.merge('Content-Length' => object.size.to_s)
      )
    end
  end
end

@@ -15,6 +15,7 @@ RSpec.describe Gitlab::Metrics::Exporter::SidekiqExporter do
        monitoring: {
          sidekiq_exporter: {
            enabled: true,
            log_enabled: false,
            port: 0,
            address: '127.0.0.1'
          }
@@ -25,6 +26,29 @@ RSpec.describe Gitlab::Metrics::Exporter::SidekiqExporter do
    it 'does start thread' do
      expect(exporter.start).not_to be_nil
    end

    it 'does not enable logging by default' do
      expect(exporter.log_filename).to eq(File::NULL)
    end
  end

  context 'with logging enabled' do
    before do
      stub_config(
        monitoring: {
          sidekiq_exporter: {
            enabled: true,
            log_enabled: true,
            port: 0,
            address: '127.0.0.1'
          }
        }
      )
    end

    it 'returns a valid log filename' do
      expect(exporter.log_filename).to end_with('sidekiq_exporter.log')
    end
  end

  context 'when port is already taken' do

@@ -6,7 +6,11 @@ RSpec.describe Gitlab::UsageData::Topology do
  include UsageDataHelpers

  describe '#topology_usage_data' do
    subject { described_class.new.topology_usage_data }
    subject { topology.topology_usage_data }

    let(:topology) { described_class.new }
    let(:prometheus_client) { Gitlab::PrometheusClient.new('http://localhost:9090') }
    let(:fallback) { {} }

    before do
      # this pins down time shifts when benchmarking durations
@@ -16,8 +20,7 @@ RSpec.describe Gitlab::UsageData::Topology do
    shared_examples 'query topology data from Prometheus' do
      context 'tracking node metrics' do
        it 'contains node level metrics for each instance' do
          expect_prometheus_api_to(
            receive_ready_check_query,
          expect_prometheus_client_to(
            receive_app_request_volume_query,
            receive_query_apdex_ratio_query,
            receive_node_memory_query,
@@ -103,8 +106,7 @@ RSpec.describe Gitlab::UsageData::Topology do

      context 'and some node memory metrics are missing' do
        it 'removes the respective entries and includes the failures' do
          expect_prometheus_api_to(
            receive_ready_check_query,
          expect_prometheus_client_to(
            receive_app_request_volume_query(result: []),
            receive_query_apdex_ratio_query(result: []),
            receive_node_memory_query(result: []),
@@ -244,8 +246,7 @@ RSpec.describe Gitlab::UsageData::Topology do
        end

        it 'normalizes equivalent instance values and maps them to the same node' do
          expect_prometheus_api_to(
            receive_ready_check_query,
          expect_prometheus_client_to(
            receive_app_request_volume_query(result: []),
            receive_query_apdex_ratio_query(result: []),
            receive_node_memory_query(result: node_memory_response),
@@ -311,8 +312,7 @@ RSpec.describe Gitlab::UsageData::Topology do

      context 'and node metrics are missing but service metrics exist' do
        it 'still reports service metrics' do
          expect_prometheus_api_to(
            receive_ready_check_query,
          expect_prometheus_client_to(
            receive_app_request_volume_query(result: []),
            receive_query_apdex_ratio_query(result: []),
            receive_node_memory_query(result: []),
@@ -387,8 +387,7 @@ RSpec.describe Gitlab::UsageData::Topology do
        end

        it 'filters out unknown service data and reports the unknown services as a failure' do
          expect_prometheus_api_to(
            receive_ready_check_query,
          expect_prometheus_client_to(
            receive_app_request_volume_query(result: []),
            receive_query_apdex_ratio_query(result: []),
            receive_node_memory_query(result: []),
@@ -413,8 +412,7 @@ RSpec.describe Gitlab::UsageData::Topology do
      context 'and an error is raised when querying Prometheus' do
        context 'without timeout failures' do
          it 'returns empty result and executes subsequent queries as usual' do
            expect_prometheus_api_to(
              receive_ready_check_query,
            expect_prometheus_client_to(
              receive(:query).at_least(:once).and_raise(Gitlab::PrometheusClient::UnexpectedResponseError)
            )

@@ -446,8 +444,7 @@ RSpec.describe Gitlab::UsageData::Topology do

        with_them do
          it 'returns empty result and cancelled subsequent queries' do
            expect_prometheus_api_to(
              receive_ready_check_query,
            expect_prometheus_client_to(
              receive(:query).and_raise(exception)
            )

@@ -484,65 +481,25 @@ RSpec.describe Gitlab::UsageData::Topology do
      end
    end

    shared_examples 'try to query Prometheus with given address' do
      context 'Prometheus is ready' do
        it_behaves_like 'query topology data from Prometheus'
    context 'can reach a ready Prometheus client' do
      before do
        expect(topology).to receive(:with_prometheus_client).and_yield(prometheus_client)
      end

      context 'Prometheus is not ready' do
        before do
          # readiness check over HTTPS connection returns false
          expect_prometheus_api_to(receive_ready_check_query(result: false))
          # readiness check over HTTP connection also returns false
          expect_prometheus_api_to(receive_ready_check_query(result: false))
        end

        it_behaves_like 'returns empty result with no failures'
      end

      context 'Prometheus is not reachable' do
        before do
          # HTTPS connection is not reachable
          expect_prometheus_api_to(receive_ready_check_query(raise_error: Errno::ECONNREFUSED))
          # HTTP connection is also not reachable
          expect_prometheus_api_to(receive_ready_check_query(raise_error: Errno::ECONNREFUSED))
        end

        it_behaves_like 'returns empty result with no failures'
      end
      it_behaves_like 'query topology data from Prometheus'
    end

    context 'when Prometheus server address is available from Prometheus settings' do
    context 'can not reach a ready Prometheus client' do
      before do
        expect(Gitlab::Prometheus::Internal).to receive(:prometheus_enabled?).and_return(true)
        expect(Gitlab::Prometheus::Internal).to receive(:uri).and_return('http://prom:9090')
        expect(topology).to receive(:with_prometheus_client).and_return(fallback)
      end

      include_examples 'try to query Prometheus with given address'
    end

    context 'when Prometheus server address is available from Consul service discovery' do
      before do
        expect(Gitlab::Prometheus::Internal).to receive(:prometheus_enabled?).and_return(false)
        expect(Gitlab::Consul::Internal).to receive(:api_url).and_return('http://127.0.0.1:8500')
        expect(Gitlab::Consul::Internal).to receive(:discover_prometheus_server_address).and_return('prom.net:9090')
      end

      include_examples 'try to query Prometheus with given address'
    end

    context 'when Prometheus server address is not available' do
      before do
        expect(Gitlab::Prometheus::Internal).to receive(:prometheus_enabled?).and_return(false)
        expect(Gitlab::Consul::Internal).to receive(:api_url).and_return(nil)
      end

      include_examples 'returns empty result with no failures'
      it_behaves_like 'returns empty result with no failures'
    end

    context 'when top-level function raises error' do
      it 'returns empty result with generic failure' do
        allow(Gitlab::Prometheus::Internal).to receive(:prometheus_enabled?).and_raise(RuntimeError)
        expect(topology).to receive(:with_prometheus_client).and_raise(RuntimeError)

        expect(subject[:topology]).to eq({
          duration_s: 0,

@@ -1,53 +0,0 @@
# frozen_string_literal: true

require 'spec_helper'

RSpec.describe 'CycleAnalytics#production' do
  extend CycleAnalyticsHelpers::TestGeneration

  let_it_be(:project) { create(:project, :repository) }
  let_it_be(:from_date) { 10.days.ago }
  let_it_be(:user) { project.owner }
  let_it_be(:project_level) { CycleAnalytics::ProjectLevel.new(project, options: { from: from_date }) }

  subject { project_level }

  generate_cycle_analytics_spec(
    phase: :production,
    data_fn: -> (context) { { issue: context.build(:issue, project: context.project) } },
    start_time_conditions: [["issue is created", -> (context, data) { data[:issue].save! }]],
    before_end_fn: lambda do |context, data|
      context.create_merge_request_closing_issue(context.user, context.project, data[:issue])
      context.merge_merge_requests_closing_issue(context.user, context.project, data[:issue])
    end,
    end_time_conditions:
      [["merge request that closes issue is deployed to production", -> (context, data) { context.deploy_master(context.user, context.project) }],
       ["production deploy happens after merge request is merged (along with other changes)",
        lambda do |context, data|
          # Make other changes on master
          context.project.repository.commit("sha_that_does_not_matter")

          context.deploy_master(context.user, context.project)
        end]])

  context "when a regular merge request (that doesn't close the issue) is merged and deployed" do
    it "returns nil" do
      merge_request = create(:merge_request)
      MergeRequests::MergeService.new(project, user).execute(merge_request)
      deploy_master(user, project)

      expect(subject[:production].project_median).to be_nil
    end
  end

  context "when the deployment happens to a non-production environment" do
    it "returns nil" do
      issue = build(:issue, project: project)
      merge_request = create_merge_request_closing_issue(user, project, issue)
      MergeRequests::MergeService.new(project, user).execute(merge_request)
      deploy_master(user, project, environment: 'staging')

      expect(subject[:production].project_median).to be_nil
    end
  end
end
@@ -2901,6 +2901,20 @@ RSpec.describe Project do
    end
  end

  describe '#lfs_objects_for_repository_types' do
    let(:project) { create(:project) }

    it 'returns LFS objects of the specified type only' do
      none, design, wiki = *[nil, :design, :wiki].map do |type|
        create(:lfs_objects_project, project: project, repository_type: type).lfs_object
      end

      expect(project.lfs_objects_for_repository_types(nil)).to contain_exactly(none)
      expect(project.lfs_objects_for_repository_types(nil, :wiki)).to contain_exactly(none, wiki)
      expect(project.lfs_objects_for_repository_types(:design)).to contain_exactly(design)
    end
  end

  context 'forks' do
    include ProjectForksHelper
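The new `#lfs_objects_for_repository_types` examples pin down the filtering contract, including `nil` for join rows with no repository type. A hedged sketch of a definition consistent with those expectations follows; the actual app/models/project.rb code may differ, and the `lfs_objects_projects` association is assumed from the factory used above:

# Hypothetical sketch only, not the actual Project implementation.
def lfs_objects_for_repository_types(*types)
  # ActiveRecord expands an array containing nil into `IN (...) OR IS NULL`,
  # which is exactly what the nil/:wiki expectation above relies on.
  LfsObject
    .joins(:lfs_objects_projects)
    .where(lfs_objects_projects: { project_id: id, repository_type: types })
end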
@@ -142,6 +142,20 @@ RSpec.describe RemoteMirror, :mailer do
    end
  end

  describe '#bare_url' do
    it 'returns the URL without any credentials' do
      remote_mirror = build(:remote_mirror, url: 'http://user:pass@example.com/foo')

      expect(remote_mirror.bare_url).to eq('http://example.com/foo')
    end

    it 'returns an empty string when the URL is nil' do
      remote_mirror = build(:remote_mirror, url: nil)

      expect(remote_mirror.bare_url).to eq('')
    end
  end

  describe '#update_repository' do
    it 'performs update including options' do
      git_remote_mirror = stub_const('Gitlab::Git::RemoteMirror', spy)
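The `#bare_url` examples above fully specify the observable behaviour: credentials are stripped and a nil URL maps to an empty string. Below is a minimal standalone sketch of that behaviour, assuming plain stdlib URI handling rather than the model's actual implementation:

require 'uri'

# Hedged sketch: mirrors the expectations in the spec above, not the real
# RemoteMirror#bare_url definition.
def bare_url(url)
  return '' if url.nil? || url.empty?

  uri = URI.parse(url)
  uri.userinfo = nil # drop the user:pass portion
  uri.to_s
rescue URI::InvalidURIError
  url
end

bare_url('http://user:pass@example.com/foo') # => "http://example.com/foo"
bare_url(nil)                                # => ""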
@@ -73,15 +73,6 @@ RSpec.describe 'value stream analytics events' do
      expect(json_response['events'].first['date']).not_to be_empty
    end

    it 'lists the production events', :sidekiq_might_not_need_inline do
      get project_cycle_analytics_production_path(project, format: :json)

      first_issue_iid = project.issues.sort_by_attribute(:created_desc).pluck(:iid).first.to_s

      expect(json_response['events']).not_to be_empty
      expect(json_response['events'].first['iid']).to eq(first_issue_iid)
    end

    context 'specific branch' do
      it 'lists the test events', :sidekiq_might_not_need_inline do
        branch = project.merge_requests.first.source_branch
@@ -0,0 +1,93 @@
# frozen_string_literal: true

require 'spec_helper'

RSpec.describe Lfs::PushService do
  let(:logger) { service.send(:logger) }
  let(:lfs_client) { service.send(:lfs_client) }

  let_it_be(:project) { create(:forked_project_with_submodules) }
  let_it_be(:remote_mirror) { create(:remote_mirror, project: project, enabled: true) }
  let_it_be(:lfs_object) { create_linked_lfs_object(project, :project) }

  let(:params) { { url: remote_mirror.bare_url, credentials: remote_mirror.credentials } }

  subject(:service) { described_class.new(project, nil, params) }

  describe "#execute" do
    it 'uploads the object when upload is requested' do
      stub_lfs_batch(lfs_object)

      expect(lfs_client)
        .to receive(:upload)
        .with(lfs_object, upload_action_spec(lfs_object), authenticated: true)

      expect(service.execute).to eq(status: :success)
    end

    it 'does nothing if there are no LFS objects' do
      lfs_object.destroy!

      expect(lfs_client).not_to receive(:upload)

      expect(service.execute).to eq(status: :success)
    end

    it 'does not upload the object when upload is not requested' do
      stub_lfs_batch(lfs_object, upload: false)

      expect(lfs_client).not_to receive(:upload)

      expect(service.execute).to eq(status: :success)
    end

    it 'returns a failure when submitting a batch fails' do
      expect(lfs_client).to receive(:batch) { raise 'failed' }

      expect(service.execute).to eq(status: :error, message: 'failed')
    end

    it 'returns a failure when submitting an upload fails' do
      stub_lfs_batch(lfs_object)
      expect(lfs_client).to receive(:upload) { raise 'failed' }

      expect(service.execute).to eq(status: :error, message: 'failed')
    end

    context 'non-project-repository LFS objects' do
      let_it_be(:nil_lfs_object) { create_linked_lfs_object(project, nil) }
      let_it_be(:wiki_lfs_object) { create_linked_lfs_object(project, :wiki) }
      let_it_be(:design_lfs_object) { create_linked_lfs_object(project, :design) }

      it 'only tries to upload the project-repository LFS object' do
        stub_lfs_batch(nil_lfs_object, lfs_object, upload: false)

        expect(service.execute).to eq(status: :success)
      end
    end
  end

  def create_linked_lfs_object(project, type)
    create(:lfs_objects_project, project: project, repository_type: type).lfs_object
  end

  def stub_lfs_batch(*objects, upload: true)
    expect(lfs_client)
      .to receive(:batch).with('upload', containing_exactly(*objects))
      .and_return('transfer' => 'basic', 'objects' => objects.map { |o| object_spec(o, upload: upload) })
  end

  def batch_spec(*objects, upload: true)
    { 'transfer' => 'basic', 'objects' => objects.map { |o| object_spec(o, upload: upload) } }
  end

  def object_spec(object, upload: true)
    { 'oid' => object.oid, 'size' => object.size, 'authenticated' => true }.tap do |spec|
      spec['actions'] = { 'upload' => upload_action_spec(object) } if upload
    end
  end

  def upload_action_spec(object)
    { 'href' => "https://example.com/#{object.oid}/#{object.size}", 'header' => { 'Key' => 'value' } }
  end
end
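The new spec above makes the service's calling convention readable at a glance: positional project and user (nil here), params carrying `url` and `credentials`, and a result hash keyed by `:status`. A hedged usage sketch built only from those expectations:

# Sketch assembled from the spec expectations above; parameter handling in
# the real service may differ.
service = Lfs::PushService.new(
  project,
  nil, # no acting user in the mirror-sync case
  url: remote_mirror.bare_url,
  credentials: remote_mirror.credentials
)

result = service.execute
# => { status: :success } on success,
# => { status: :error, message: '...' } when the batch or an upload fails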
@@ -3,9 +3,10 @@
require 'spec_helper'

RSpec.describe Projects::UpdateRemoteMirrorService do
  let(:project) { create(:project, :repository) }
  let(:remote_project) { create(:forked_project_with_submodules) }
  let(:remote_mirror) { create(:remote_mirror, project: project, enabled: true) }
  let_it_be(:project) { create(:project, :repository, lfs_enabled: true) }
  let_it_be(:remote_project) { create(:forked_project_with_submodules) }
  let_it_be(:remote_mirror) { create(:remote_mirror, project: project, enabled: true) }

  let(:remote_name) { remote_mirror.remote_name }

  subject(:service) { described_class.new(project, project.creator) }
@@ -127,5 +128,63 @@ RSpec.describe Projects::UpdateRemoteMirrorService do
      expect(remote_mirror.last_error).to include("refs/heads/develop")
    end
  end

  context "sending lfs objects" do
    let_it_be(:lfs_pointer) { create(:lfs_objects_project, project: project) }

    before do
      stub_lfs_setting(enabled: true)
    end

    context 'feature flag enabled' do
      before do
        stub_feature_flags(push_mirror_syncs_lfs: true)
      end

      it 'pushes LFS objects to a HTTP repository' do
        expect_next_instance_of(Lfs::PushService) do |service|
          expect(service).to receive(:execute)
        end

        execute!
      end

      it 'does nothing to an SSH repository' do
        remote_mirror.update!(url: 'ssh://example.com')

        expect_any_instance_of(Lfs::PushService).not_to receive(:execute)

        execute!
      end

      it 'does nothing if LFS is disabled' do
        expect(project).to receive(:lfs_enabled?) { false }

        expect_any_instance_of(Lfs::PushService).not_to receive(:execute)

        execute!
      end

      it 'does nothing if non-password auth is specified' do
        remote_mirror.update!(auth_method: 'ssh_public_key')

        expect_any_instance_of(Lfs::PushService).not_to receive(:execute)

        execute!
      end
    end

    context 'feature flag disabled' do
      before do
        stub_feature_flags(push_mirror_syncs_lfs: false)
      end

      it 'does nothing' do
        expect_any_instance_of(Lfs::PushService).not_to receive(:execute)

        execute!
      end
    end
  end
end
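The 'does nothing' examples above jointly describe the guard conditions for LFS mirroring: feature flag on, LFS enabled, an HTTP(S) remote, and password-based auth. A hedged sketch of a guard consistent with those specs follows; the method name and helpers are assumptions, not the actual Projects::UpdateRemoteMirrorService code:

# Hypothetical guard, reconstructed from the spec expectations above.
def sync_lfs_objects!(remote_mirror)
  return unless Feature.enabled?(:push_mirror_syncs_lfs, project)
  return unless project.lfs_enabled?
  return unless remote_mirror.url.to_s.start_with?('http://', 'https://')
  return if remote_mirror.auth_method == 'ssh_public_key'

  Lfs::PushService.new(
    project,
    current_user,
    url: remote_mirror.bare_url,
    credentials: remote_mirror.credentials
  ).execute
end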
@@ -224,17 +224,8 @@ module UsageDataHelpers
    )
  end

  def expect_prometheus_api_to(*receive_matchers)
    expect_next_instance_of(Gitlab::PrometheusClient) do |client|
      receive_matchers.each { |m| expect(client).to m }
    end
  end

  def allow_prometheus_queries
    allow_next_instance_of(Gitlab::PrometheusClient) do |client|
      allow(client).to receive(:aggregate).and_return({})
      allow(client).to receive(:query).and_return({})
    end
  def expect_prometheus_client_to(*receive_matchers)
    receive_matchers.each { |m| expect(prometheus_client).to m }
  end

  def for_defined_days_back(days: [29, 2])
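Read together with the topology spec hunks earlier, this helper change completes a single refactor: expectations no longer stub the next Gitlab::PrometheusClient instance but target a shared double yielded by with_prometheus_client. A sketch of the resulting wiring; the `prometheus_client` double's definition is an assumption, while the other two pieces appear verbatim in the hunks above:

# Assumed definition; the spec's actual double setup is not shown in this commit.
let(:prometheus_client) { instance_double(Gitlab::PrometheusClient) }

before do
  # The code under test now yields the client instead of instantiating one.
  expect(topology).to receive(:with_prometheus_client).and_yield(prometheus_client)
end

def expect_prometheus_client_to(*receive_matchers)
  receive_matchers.each { |m| expect(prometheus_client).to m }
end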