Add latest changes from gitlab-org/gitlab@master

This commit is contained in:
parent c1553a2569
commit aefada43de
@@ -609,7 +609,6 @@ export default {
'ee/app/assets/javascripts/work_items/components/work_item_links/work_item_rolled_up_health_status.vue',
'ee/app/assets/javascripts/work_items/components/work_item_progress.vue',
'ee/app/assets/javascripts/work_items/components/work_item_rolledup_dates.vue',
'ee/app/assets/javascripts/work_items/components/work_item_weight.vue',
'ee/app/assets/javascripts/workspaces/common/components/workspaces_list/workspaces_table.vue',
'ee/app/assets/javascripts/workspaces/dropdown_group/components/workspace_dropdown_item.vue',
'ee/app/assets/javascripts/workspaces/user/pages/list.vue',
@@ -1 +1 @@
-8ee24aca568eff50b309b6555a96545d71b77979
+5ba826bb0ae2842ea51fbfb42d6cdb5a0fd12f2f
@@ -68,6 +68,17 @@
"Submodule",
"TreeEntry"
],
+"EventTargetType": [
+"Design",
+"Issue",
+"MergeRequest",
+"Milestone",
+"Note",
+"Project",
+"Snippet",
+"UserCore",
+"WikiPage"
+],
"Eventable": [
"BoardEpic",
"Epic"
@@ -21,7 +21,7 @@ export default {
import('ee_component/dora/components/change_failure_rate_charts.vue'),
ProjectQualitySummary: () => import('ee_component/project_quality_summary/app.vue'),
},
-piplelinesTabEvent: 'p_analytics_ci_cd_pipelines',
+pipelinesTabEvent: 'p_analytics_ci_cd_pipelines',
deploymentFrequencyTabEvent: 'p_analytics_ci_cd_deployment_frequency',
leadTimeTabEvent: 'p_analytics_ci_cd_lead_time',
timeToRestoreServiceTabEvent: 'visit_ci_cd_time_to_restore_service_tab',

@@ -94,7 +94,7 @@ export default {
<gl-tab
:title="__('Pipelines')"
data-testid="pipelines-tab"
-@click="trackEvent($options.piplelinesTabEvent)"
+@click="trackEvent($options.pipelinesTabEvent)"
>
<component :is="pipelineChartsComponent" />
</gl-tab>
@@ -0,0 +1,13 @@
<script>
export default {
name: 'DashboardHeader',
};
</script>
<template>
<div>
<h2 class="gl-heading-2 gl-mt-3">
<slot></slot>
</h2>
<slot name="description"></slot>
</div>
</template>
@@ -20,6 +20,7 @@ import {
} from '../constants';
import getPipelineCountByStatus from '../graphql/queries/get_pipeline_count_by_status.query.graphql';
import getProjectPipelineStatistics from '../graphql/queries/get_project_pipeline_statistics.query.graphql';
+import DashboardHeader from './dashboard_header.vue';
import StatisticsList from './statistics_list.vue';

const defaultAnalyticsValues = {

@@ -51,6 +52,7 @@ export default {
GlColumnChart,
GlChartSeriesLabel,
GlSkeletonLoader,
+DashboardHeader,
StatisticsList,
CiCdAnalyticsCharts,
},

@@ -316,9 +318,9 @@ export default {
<gl-alert v-if="showFailureAlert" :variant="failure.variant" @dismiss="hideAlert">{{
failure.text
}}</gl-alert>
-<div class="gl-mb-3">
-<h4>{{ s__('PipelineCharts|CI/CD Analytics') }}</h4>
-</div>
+<dashboard-header>
+{{ s__('PipelineCharts|Pipelines') }}
+</dashboard-header>
<gl-skeleton-loader v-if="loading" :lines="5" />
<statistics-list v-else :counts="formattedCounts" />
<h4>{{ __('Pipelines charts') }}</h4>
@@ -10,6 +10,7 @@ import {
DATE_RANGE_LAST_180_DAYS,
} from '../constants';
import getPipelineAnalytics from '../graphql/queries/get_pipeline_analytics.query.graphql';
+import DashboardHeader from './dashboard_header.vue';
import StatisticsList from './statistics_list.vue';
import PipelineDurationChart from './pipeline_duration_chart.vue';
import PipelineStatusChart from './pipeline_status_chart.vue';

@@ -18,6 +19,7 @@ export default {
components: {
GlCollapsibleListbox,
GlFormGroup,
+DashboardHeader,
StatisticsList,
PipelineDurationChart,
PipelineStatusChart,

@@ -106,7 +108,9 @@ export default {
</script>
<template>
<div>
-<h2>{{ s__('PipelineCharts|Pipelines') }}</h2>
+<dashboard-header>
+{{ s__('PipelineCharts|Pipelines') }}
+</dashboard-header>
<div class="gl-mb-4 gl-bg-subtle gl-p-4 gl-pb-2">
<gl-form-group :label="__('Date range')" label-for="date-range">
<gl-collapsible-listbox
@@ -131,7 +131,9 @@ export default {
@click="$emit('edit')"
>{{ __('Edit') }}
</gl-button>
-<gl-link v-else :href="headerLinkHref">{{ headerLinkTitle }}</gl-link>
+<gl-link v-else-if="headerLinkHref && headerLinkTitle" :href="headerLinkHref">{{
+headerLinkTitle
+}}</gl-link>
</template>
<span
v-if="showEmptyState && !$scopedSlots.content"
@@ -1,11 +1,12 @@
<script>
-import { GlButton, GlOutsideDirective as Outside } from '@gitlab/ui';
+import { GlButton, GlLoadingIcon, GlOutsideDirective as Outside } from '@gitlab/ui';
import { Mousetrap } from '~/lib/mousetrap';
import { keysFor, SIDEBAR_CLOSE_WIDGET } from '~/behaviors/shortcuts/keybindings';

export default {
components: {
GlButton,
+GlLoadingIcon,
},
directives: {
Outside,

@@ -56,6 +57,7 @@ export default {
<h3 class="gl-heading-5 gl-mb-0">
<slot name="title"></slot>
</h3>
+<gl-loading-icon v-if="isUpdating" />
<gl-button
v-if="canUpdate && !isEditing"
key="edit-button"
@@ -55,8 +55,6 @@ class ProjectsController < Projects::ApplicationController
push_force_frontend_feature_flag(:work_items, !!@project&.work_items_feature_flag_enabled?)
push_force_frontend_feature_flag(:work_items_beta, !!@project&.work_items_beta_feature_flag_enabled?)
push_force_frontend_feature_flag(:work_items_alpha, !!@project&.work_items_alpha_feature_flag_enabled?)
-# FF to enable setting to allow webhook execution on 30D and 60D notification delivery too
-push_frontend_feature_flag(:extended_expiry_webhook_execution_setting, @project&.namespace)
end

layout :determine_layout

@@ -472,10 +470,7 @@ class ProjectsController < Projects::ApplicationController
emails_enabled
]

-if ::Feature.enabled?(:extended_expiry_webhook_execution_setting, @project&.namespace) &&
-can?(current_user, :admin_project, project)
-attributes << :extended_prat_expiry_webhooks_execute
-end
+attributes << :extended_prat_expiry_webhooks_execute if can?(current_user, :admin_project, project)

attributes
end
@@ -33,6 +33,15 @@ module Types
null: true,
experiment: { milestone: '17.10' },
description: 'User preferences for the given work item type and namespace.'

+field :activity,
+Users::ActivityStreamType,
+description: 'Recent user activity.',
+experiment: { milestone: '17.10' }
+
+def activity
+object if Feature.enabled?(:activity_stream_graphql, current_user)
+end
end
# rubocop:enable Graphql/AuthorizeTypes
end
@@ -29,8 +29,34 @@ module Types
description: 'When this event was updated.',
null: false

+field :project, Types::ProjectType,
+description: 'Project of this event.',
+null: true
+
+field :target, Types::Users::EventTargetType,
+description: 'The target of the event',
+calls_gitaly: true
+
def author
Gitlab::Graphql::Loaders::BatchModelLoader.new(User, object.author_id).find
end

+def project
+Gitlab::Graphql::Loaders::BatchModelLoader.new(Project, object.project_id).find
+end
+
+def target
+# If we don't have target info, bail
+return unless object.target_type && object.target_id
+
+Gitlab::Graphql::Loaders::BatchModelLoader.new(target_type_class, object.target_id).find
+end
+
+private
+
+def target_type_class
+klass = object.target_type&.safe_constantize
+klass if klass.is_a?(Class)
+end
end
end
@@ -0,0 +1,30 @@
# frozen_string_literal: true

module Types
module Users
class ActivityStreamType < BaseObject
graphql_name 'ActivityStream'
description 'Activity streams associated with a user'

authorize :read_user_profile

field :followed_users_activity,
Types::EventType.connection_type,
description: 'Activity from users followed by the current user.',
experiment: { milestone: '17.10' } do
argument :target, EventTargetEnum, default_value: EventFilter::ALL, description: "Event target."
end

def followed_users_activity(target: nil, last: 20)
scope = current_user.followees
user_events(scope, target, last)
end

private

def user_events(scope, target, last)
UserRecentEventsFinder.new(current_user, scope, EventFilter.new(target), { limit: last }).execute
end
end
end
end
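As a quick orientation, here is a minimal sketch of exercising the new field from a Rails console, assuming a user who follows other users and has the `activity_stream_graphql` flag enabled (`user` is hypothetical; the query shape follows the schema added above):

```ruby
# Sketch only: run a GraphQL query against the new ActivityStream field.
query = <<~GRAPHQL
  query {
    currentUser {
      activity {
        followedUsersActivity(target: ISSUE) {
          nodes { id createdAt }
        }
      }
    }
  }
GRAPHQL

result = GitlabSchema.execute(query, context: { current_user: user })
result.to_h.dig('data', 'currentUser', 'activity', 'followedUsersActivity', 'nodes')
```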
@@ -0,0 +1,15 @@
# frozen_string_literal: true

module Types
module Users
class EventTargetEnum < BaseEnum
graphql_name 'EventTarget'
description 'Event target'

mock_filter = ::EventFilter.new('')
mock_filter.filters.each do |target_type|
value target_type.upcase, value: target_type, description: "#{target_type.titleize} events"
end
end
end
end
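The enum's values are metaprogrammed from `EventFilter`; a hedged sketch of what that loop expands to, grounded in the `EventTarget` values listed in this commit's generated docs (ALL, COMMENTS, DESIGNS, EPIC, ISSUE, MERGED, PUSH, TEAM, WIKI):

```ruby
# Sketch only: mirror the class body's loop to list the generated values.
::EventFilter.new('').filters.each do |target_type|
  puts "value #{target_type.upcase} (#{target_type.titleize} events)"
end
```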
@@ -0,0 +1,40 @@
# frozen_string_literal: true

module Types
module Users
class EventTargetType < BaseUnion
graphql_name 'EventTargetType'
description 'Represents an object that can be the subject of an event.'

possible_types Types::IssueType, Types::MilestoneType,
Types::MergeRequestType, Types::ProjectType,
Types::SnippetType, Types::UserType, Types::Wikis::WikiPageType,
Types::DesignManagement::DesignType, Types::Notes::NoteType

def self.resolve_type(object, _context)
case object
when Issue
Types::IssueType
when Milestone
Types::MilestoneType
when MergeRequest
Types::MergeRequestType
when Note
Types::Notes::NoteType
when Project
Types::ProjectType
when Snippet
Types::SnippetType
when User
Types::UserType
when WikiPage::Meta
Types::Wikis::WikiPageType
when ::DesignManagement::Design
Types::DesignManagement::DesignType
else
raise "Unsupported event target type: #{object.class.name}"
end
end
end
end
end
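A hedged console sketch of the union's type resolution; the mapping simply mirrors the `case` statement above, and unexpected classes raise:

```ruby
# Sketch only: Issue and MergeRequest are the usual ActiveRecord models.
Types::Users::EventTargetType.resolve_type(Issue.new, {})        # => Types::IssueType
Types::Users::EventTargetType.resolve_type(MergeRequest.new, {}) # => Types::MergeRequestType
Types::Users::EventTargetType.resolve_type(Object.new, {})       # raises "Unsupported event target type: Object"
```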
@@ -2012,7 +2012,6 @@ class Project < ApplicationRecord
# seven_days interval but we have a setting to allow webhook execution
# for thirty_days and sixty_days interval too.
if hooks_scope == :resource_access_token_hooks &&
-::Feature.enabled?(:extended_expiry_webhook_execution_setting, self.namespace) &&
data[:interval] != :seven_days &&
!self.extended_prat_expiry_webhooks_execute?
@@ -1,34 +1,40 @@
# frozen_string_literal: true

module Users
-class AutoBanService < BaseService
+class AutoBanService
Error = Class.new(StandardError)

def initialize(user:, reason:)
@user = user
@reason = reason
end

def execute
-if user.ban
-record_custom_attribute
-ban_duplicate_users
-success
-else
-messages = user.errors.full_messages
-error(messages.uniq.join('. '))
-end
+ban_user
end

def execute!
-user.ban!
-record_custom_attribute
-ban_duplicate_users
-success
+result = ban_user
+
+raise Error, result[:message] if result[:status] == :error
end

private

attr_reader :user, :reason

+def ban_user
+result = ::Users::BanService.new(admin_bot).execute(user)
+
+record_custom_attribute if result[:status] == :success
+
+result
+end
+
+def admin_bot
+Users::Internal.admin_bot
+end
+
def ban_duplicate_users
AntiAbuse::BanDuplicateUsersWorker.perform_async(user.id)
end
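The refactor routes banning through `Users::BanService` executed as the admin bot; a hedged usage sketch (`user` is hypothetical; the result hash and the `Error` class are taken from the diff above):

```ruby
service = Users::AutoBanService.new(user: user, reason: 'automatic ban')

# Non-bang variant returns the BanService result hash.
result = service.execute
result[:status] # => :success or :error

# Bang variant raises instead of returning an error result.
begin
  service.execute!
rescue Users::AutoBanService::Error => e
  Rails.logger.warn(e.message)
end
```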
@@ -1,5 +1,7 @@
- page_title _('CI/CD Analytics')

+= render ::Layouts::PageHeadingComponent.new(_("CI/CD Analytics"))
+
#js-project-pipelines-charts-app{ data: { project_path: @project.full_path,
project_id: @project.id,
should_render_dora_charts: should_render_dora_charts.to_s,
@@ -10,14 +10,27 @@ module Pages

feature_category :pages

+MAX_NUM_DELETIONS = 10000
+BATCH_SIZE = 1000
+
def perform
-scope = PagesDeployment.expired
+scope = PagesDeployment.active.expired

iterator = Gitlab::Pagination::Keyset::Iterator.new(scope: scope)
+count = 0
+start = Time.current

-iterator.each_batch do |deployments|
+iterator.each_batch(of: BATCH_SIZE) do |deployments|
deployments.each(&:deactivate)
+count += deployments.length
+
+break if count >= MAX_NUM_DELETIONS
end
+
+log_extra_metadata_on_done(:deactivate_expired_pages_deployments, {
+deactivated_deployments: count,
+duration: Time.current - start
+})
end
end
end
@@ -129,12 +129,7 @@ module PersonalAccessTokens
# project bot does not have more than 1 token
expiring_user_token = project_bot.personal_access_tokens.first

-# If feature flag is not enabled webhooks will only execute if interval is seven_days
-resource_namespace = bot_resource_namepace(project_bot.resource_bot_resource)
-if Feature.enabled?(:extended_expiry_webhook_execution_setting, resource_namespace) ||
-interval == :seven_days
-execute_web_hooks(project_bot, expiring_user_token, { interval: interval })
-end
+execute_web_hooks(project_bot, expiring_user_token, { interval: interval })

interval_days = PersonalAccessToken.notification_interval(interval)
deliver_bot_notifications(project_bot, expiring_user_token.name, days_to_expire: interval_days)

@@ -221,13 +216,5 @@ module PersonalAccessTokens
NotificationService.new
end
strong_memoize_attr :notification_service
-
-def bot_resource_namepace(resource)
-if resource.is_a?(Project)
-resource.namespace
-else
-resource
-end
-end
end
end
@@ -72,9 +72,9 @@ module.exports = {
fileName: './config/webpack.config.js',
},
cache: {
-// Use Yarn's cache directory
folder: './tmp/cache/depcruise-cache',
-strategy: 'content',
+// NOTE: if we want to store cache on CI, set the value to 'content'
+strategy: 'metadata',
// With compression the cache is around 2MB
// Without Compression, cache is 20 times larger
compress: true,
@@ -1,9 +1,9 @@
---
-name: extended_expiry_webhook_execution_setting
-feature_issue_url: https://gitlab.com/gitlab-org/gitlab/-/issues/499732
-introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/178266
-rollout_issue_url: https://gitlab.com/gitlab-org/gitlab/-/issues/513684
-milestone: '17.9'
-group: group::authentication
-type: gitlab_com_derisk
+name: activity_stream_graphql
+feature_issue_url: https://gitlab.com/gitlab-org/gitlab/-/issues/514804
+introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/183872
+rollout_issue_url: https://gitlab.com/gitlab-org/gitlab/-/issues/524717
+milestone: '17.10'
+group: group::personal productivity
+type: beta
+default_enabled: false
@@ -0,0 +1,9 @@
# frozen_string_literal: true

class AddActualStateUpdatedAtToWorkspaces < Gitlab::Database::Migration[2.2]
milestone '17.11'

def change
add_column :workspaces, :actual_state_updated_at, :datetime_with_timezone, null: false, default: '1970-01-01'
end
end
@@ -0,0 +1 @@
9f7a9198505162ceb254c9c9ed141395be5a34306e0619c9f1ef4ae0323fc8c7
@@ -24660,6 +24660,7 @@ CREATE TABLE workspaces (
workspaces_agent_config_version integer NOT NULL,
desired_config_generator_version integer,
project_ref text,
+actual_state_updated_at timestamp with time zone DEFAULT '1970-01-01 00:00:00+00'::timestamp with time zone NOT NULL,
CONSTRAINT check_15543fb0fa CHECK ((char_length(name) <= 64)),
CONSTRAINT check_157d5f955c CHECK ((char_length(namespace) <= 64)),
CONSTRAINT check_2b401b0034 CHECK ((char_length(deployment_resource_version) <= 64)),
@@ -204,7 +204,7 @@ To [change the default color theme](../user/profile/preferences.md#change-the-co

1. [Reconfigure and restart GitLab](restart_gitlab.md#reconfigure-a-linux-package-installation).

As of GitLab 17.8, `gitlab_default_theme` can specify [a value from 1 to 10](https://gitlab.com/gitlab-org/omnibus-gitlab/-/blob/17.8.0+ee.0/files/gitlab-config-template/gitlab.rb.template?ref_type=tags#L137) to set the default theme.

| Value | Color |
| ------ | ------ |
@@ -156,7 +156,7 @@ The OpenID Connect provider provides you with a client's details and secret for
-but you can override these icons by specifying this parameter. GitLab accepts both
-local paths and absolute URLs.
+GitLab includes icons for most major social login platforms,
+but you can override these icons by specifying an external URL or
+an absolute or relative path to your own icon file.
- For local absolute paths, configure the provider settings as `icon: <path>/<to>/<your-icon>`.
- Store the icon file in `/opt/gitlab/embedded/service/gitlab-rails/public/<path>/<to>/<your-icon>`.
@@ -429,7 +429,7 @@ This situation can occur:
```

1. Reconfigure GitLab:

```shell
sudo gitlab-ctl reconfigure
```
@@ -243,8 +243,6 @@ To request acceleration of a feature, check if an issue already exists in [epic

Secondary site HTTP proxying is enabled by default on a secondary site when it uses a unified URL, meaning, it is configured with the same `external_url` as the primary site. Disabling proxying in this case tends not to be helpful due to completely different behavior being served at the same URL, depending on routing.

### What happens if you disable secondary proxying

Disabling the proxying feature flag has the following general effects:
@@ -394,7 +394,7 @@ to see how to monitor the progress and health of the online garbage collector.
The container registry supports two types of migrations:

- **Regular schema migrations**: Changes to the database structure that must run before deploying new application code. These should be fast (no more than a few minutes) to avoid deployment delays.

- **Post-deployment migrations**: Changes to the database structure that can run while the application is running. Used for longer operations like creating indexes on large tables, avoiding startup delays and extended upgrade downtimes.

By default, the registry applies both regular schema and post-deployment migrations simultaneously. To reduce downtime during upgrades, you can skip post-deployment migrations and apply them manually after the application starts.
@@ -489,7 +489,7 @@ control over how the Pages daemon runs and serves content in your environment.
| `sentry_dsn` | The address for sending Sentry crash reporting to. |
| `sentry_enabled` | Enable reporting and logging with Sentry, true/false. |
| `sentry_environment` | The environment for Sentry crash reporting. |
-| `status_uri` | The URL path for a status page, for example, `/@status`. |
+| `status_uri` | The URL path for a status page, for example, `/@status`. Configure to enable health check endpoint on GitLab Pages. |
| `tls_max_version` | Specifies the maximum TLS version ("tls1.2" or "tls1.3"). |
| `tls_min_version` | Specifies the minimum TLS version ("tls1.2" or "tls1.3"). |
| `use_http2` | Enable HTTP2 support. |
@@ -19826,6 +19826,33 @@ Representation of a GitLab user.
| <a id="achievementupdatedat"></a>`updatedAt` | [`Time!`](#time) | Timestamp the achievement was last updated. |
| <a id="achievementuserachievements"></a>`userAchievements` {{< icon name="warning-solid" >}} | [`UserAchievementConnection`](#userachievementconnection) | **Introduced** in GitLab 15.10. **Status**: Experiment. Recipients for the achievement. |

### `ActivityStream`

Activity streams associated with a user.

#### Fields with arguments

##### `ActivityStream.followedUsersActivity`

Activity from users followed by the current user.

{{< details >}}
**Introduced** in GitLab 17.10.
**Status**: Experiment.
{{< /details >}}

Returns [`EventConnection`](#eventconnection).

This field returns a [connection](#connections). It accepts the
four standard [pagination arguments](#pagination-arguments):
`before: String`, `after: String`, `first: Int`, and `last: Int`.

###### Arguments

| Name | Type | Description |
| ---- | ---- | ----------- |
| <a id="activitystreamfollowedusersactivitytarget"></a>`target` | [`EventTarget!`](#eventtarget) | Event target. |

### `AddOnPurchase`

Represents AddOn purchase for Namespace.
@@ -23621,6 +23648,7 @@ The currently authenticated GitLab user.
| Name | Type | Description |
| ---- | ---- | ----------- |
| <a id="currentuseractive"></a>`active` | [`Boolean`](#boolean) | Indicates if the user is active. |
+| <a id="currentuseractivity"></a>`activity` {{< icon name="warning-solid" >}} | [`ActivityStream`](#activitystream) | **Introduced** in GitLab 17.10. **Status**: Experiment. Recent user activity. |
| <a id="currentuseravatarurl"></a>`avatarUrl` | [`String`](#string) | URL of the user's avatar. |
| <a id="currentuserbio"></a>`bio` | [`String`](#string) | Bio of the user. |
| <a id="currentuserbot"></a>`bot` | [`Boolean!`](#boolean) | Indicates if the user is a bot. |
@@ -26061,6 +26089,8 @@ Representing an event.
| <a id="eventauthor"></a>`author` | [`UserCore!`](#usercore) | Author of this event. |
| <a id="eventcreatedat"></a>`createdAt` | [`Time!`](#time) | When this event was created. |
| <a id="eventid"></a>`id` | [`ID!`](#id) | ID of the event. |
+| <a id="eventproject"></a>`project` | [`Project`](#project) | Project of this event. |
+| <a id="eventtarget"></a>`target` | [`EventTargetType`](#eventtargettype) | The target of the event. |
| <a id="eventupdatedat"></a>`updatedAt` | [`Time!`](#time) | When this event was updated. |

### `ExternalAuditEventDestination`
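A hedged example of how a client would select the new `Event.target` union members documented above (field names are taken from this commit's schema docs; actual results depend on the experiment flags being enabled):

```ruby
# Sketch only: inline fragments pick per-type fields off the union.
query = <<~GRAPHQL
  query {
    currentUser {
      activity {
        followedUsersActivity {
          nodes {
            id
            target {
              ... on Issue { iid title }
              ... on MergeRequest { iid title }
              ... on Project { fullPath }
            }
          }
        }
      }
    }
  }
GRAPHQL
```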
@@ -42251,6 +42281,22 @@ Event action.
| <a id="eventactionreopened"></a>`REOPENED` | Reopened action. |
| <a id="eventactionupdated"></a>`UPDATED` | Updated action. |

### `EventTarget`

Event target.

| Value | Description |
| ----- | ----------- |
| <a id="eventtargetall"></a>`ALL` | All events. |
| <a id="eventtargetcomments"></a>`COMMENTS` | Comments events. |
| <a id="eventtargetdesigns"></a>`DESIGNS` | Designs events. |
| <a id="eventtargetepic"></a>`EPIC` | Epic events. |
| <a id="eventtargetissue"></a>`ISSUE` | Issue events. |
| <a id="eventtargetmerged"></a>`MERGED` | Merged events. |
| <a id="eventtargetpush"></a>`PUSH` | Push events. |
| <a id="eventtargetteam"></a>`TEAM` | Team events. |
| <a id="eventtargetwiki"></a>`WIKI` | Wiki events. |

### `ExclusionScannerEnum`

Enum for the security scanners used with exclusions.
@@ -45843,6 +45889,22 @@ One of:

- [`NugetDependencyLinkMetadata`](#nugetdependencylinkmetadata)

#### `EventTargetType`

Represents an object that can be the subject of an event.

One of:

- [`Design`](#design)
- [`Issue`](#issue)
- [`MergeRequest`](#mergerequest)
- [`Milestone`](#milestone)
- [`Note`](#note)
- [`Project`](#project)
- [`Snippet`](#snippet)
- [`UserCore`](#usercore)
- [`WikiPage`](#wikipage)

#### `ExpressionValue`

Represents possible value types for an expression.
@@ -116,6 +116,52 @@ Example response:
}
```

## Update a service account user

{{< history >}}

- [Introduced](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/182607/) in GitLab 17.10.

{{< /history >}}

Updates a service account user in a given top-level group.

{{< alert type="note" >}}

This endpoint only works on top-level groups.

{{< /alert >}}

```plaintext
PATCH /groups/:id/service_accounts/:user_id
```

Parameters:

| Attribute | Type | Required | Description |
|:-----------|:---------------|:---------|:----------------------------------------------------------------|
| `id` | integer/string | yes | The ID or [URL-encoded path of the target group](rest/_index.md#namespaced-paths). |
| `user_id` | integer | yes | The ID of the service account user. |
| `name` | string | no | Name of the user. |
| `username` | string | no | Username of the user. |

Example request:

```shell
curl --request PATCH --header "PRIVATE-TOKEN: <your_access_token>" "https://gitlab.example.com/api/v4/groups/345/service_accounts/57" --data "name=Updated Service Account"
```

Example response:

```json
{
"id": 57,
"username": "service_account_group_345_6018816a18e515214e0c34c2b33523fc",
"name": "Updated Service Account",
"email": "service_account_group_345_<random_hash>@noreply.gitlab.example.com"
}
```

## Delete a service account user

{{< history >}}
@@ -971,14 +971,14 @@ environment where custom or private certificates are used (for example, Zscaler
error pulling image configuration: download failed after attempts=6: tls: failed to verify certificate: x509: certificate signed by unknown authority
```

This error occurs because Docker commands in a Docker-in-Docker environment
use two separate containers:

- The **build container** runs the Docker client (`/usr/bin/docker`) and executes your job's script commands.
- The **service container** (often named `svc`) runs the Docker daemon that processes most Docker commands.

When your organization uses custom certificates, both containers need these certificates.
Without proper certificate configuration in both containers, Docker operations that connect to external
registries or services will fail with certificate errors.

To resolve this issue:
@@ -479,9 +479,9 @@ the `stop` trigger is automatically enabled.
In the following example, the `deploy_review` job calls a `stop_review` job to clean up and stop
the environment.

- When the [**Pipelines must succeed**](../../user/project/merge_requests/auto_merge.md#require-a-successful-pipeline-for-merge) setting is turned on,
  you can configure the [`allow_failure: true`](../yaml/_index.md#allow_failure)
  keyword on the `stop_review` job to prevent it from
  blocking your pipelines and merge requests.

```yaml
@@ -0,0 +1,72 @@
---
stage: AI-powered
group: Custom Models
info: Any user with at least the Maintainer role can merge updates to this content. For details, see https://docs.gitlab.com/ee/development/development_processes.html#development-guidelines-review.
title: Amazon Q integration for testing and evaluation
---

> This guide combines and builds on top of the following guides and sources. It describes Amazon Q setup for testing and evaluation purposes:
>
> - [Set up GitLab Duo with Amazon Q](../../user/duo_amazon_q/setup.md)
> - [code-suggestions development guide](../code_suggestions/_index.md)

This guide describes how to set up Amazon Q in a GitLab Linux package running in a VM, using the staging AI Gateway. The reason we need a GitLab Linux package instance instead of GDK is that the GitLab instance needs an HTTPS URL that can be accessed by Amazon Q.

## Install and configure a GitLab Linux package on a virtual machine

1. Create a VM in either GCP or AWS:

   - Follow this [guide](../../install/google_cloud_platform/_index.md) on how to create a VM in GCP.
   - Create a VM in AWS:
     1. Go to the [cloud sandbox](https://gitlabsandbox.cloud/cloud), and log in with OKTA.
     1. Click "Create Individual Account", and choose `aws-***` (not `aws-services-***` or `aws-dedicated-***`). This creates an AWS sandbox and displays login credentials.
     1. Configure an EC2 machine with a similar spec to the GCP VM.

   A few things to note:

   - You need to enable both HTTP and HTTPS traffic under the firewall settings.
   - Copy the external IP of the VM instance created.

1. Install GitLab:
   1. Follow this [guide](https://about.gitlab.com/install/#ubuntu) on how to install the GitLab Linux package.
      We need to set up the external URL and an initial password. Install GitLab using the following command:

      ```shell
      sudo GITLAB_ROOT_PASSWORD="your_password" EXTERNAL_URL="https://<vm-instance-external-ip>.nip.io" apt install gitlab-ee
      ```

      This uses nip.io as the DNS service so the GitLab instance can be accessed over HTTPS.

1. Configure the newly installed GitLab instance:
   1. SSH into the VM, and add the following config to `/etc/gitlab/gitlab.rb`:

      ```ruby
      gitlab_rails['env'] = {
        "GITLAB_LICENSE_MODE" => "test",
        "CUSTOMER_PORTAL_URL" => "https://customers.staging.gitlab.com",
        "AI_GATEWAY_URL" => "https://cloud.staging.gitlab.com/ai"
      }
      ```

   1. Apply the config changes with `sudo gitlab-ctl reconfigure`.
1. Obtain and activate a self-managed Ultimate license:
   1. Go to the [staging customers portal](https://customers.staging.gitlab.com/) and select "Sign in with GitLab.com account".
   1. Instead of clicking "Buy new subscription", go to the [product page](https://customers.staging.gitlab.com/subscriptions/new?plan_id=2c92a00c76f0c6c20176f2f9328b33c9) directly. For the reasoning behind this, see [buy_subscription](https://gitlab.com/gitlab-org/customers-gitlab-com/-/blob/8aa922840091ad5c5d96ada43d0065a1b6198841/doc/flows/buy_subscription.md).
   1. Purchase the subscription using [a test credit card](https://gitlab.com/gitlab-org/customers-gitlab-com/#testing-credit-card-information). An activation code will be given. Do not purchase a Duo Pro add-on, because currently Duo Pro and Q are mutually exclusive.
   1. Go to the GitLab instance created earlier (`https://<vm-instance-external-ip>.nip.io`) and log in with the root account. Then on the left sidebar, go to **Admin > Subscription**, and enter the activation code.

## Create and configure an AWS sandbox

1. Follow the [same step](#install-and-configure-a-gitlab-linux-package-on-a-virtual-machine) described above on how to create an AWS sandbox if you don't have one already.
1. Log in to the newly created AWS account and create an **Identity Provider** following these [instructions](../../user/duo_amazon_q/setup.md#create-an-iam-identity-provider) with slight modifications:

   - Provider URL: `https://glgo.staging.runway.gitlab.net/cc/oidc/<your_gitlab_instance_id>`
   - Audience: `gitlab-cc-<your_gitlab_instance_id>`

   The GitLab instance ID can be found at `<gitlab_url>/admin/ai/amazon_q_settings`.
1. Create a new role using the identity provider. For this, we can follow [this section](../../user/duo_amazon_q/setup.md#create-an-iam-role) exactly.

## Add Amazon Q to GitLab

1. Follow [Enter the ARN in GitLab and enable Amazon Q](../../user/duo_amazon_q/setup.md#enter-the-arn-in-gitlab-and-enable-amazon-q) exactly.
1. [Invite the Amazon Q user to your project](../../user/duo_amazon_q/setup.md#add-the-amazon-q-user-to-your-project). For this step, we do not need to configure the AI Gateway again, because we've already done it when configuring our GitLab instance.
1. Now Q should be working. We can test it like [this](https://gitlab.com/gitlab-com/ops-sub-department/aws-gitlab-ai-integration/integration-motion-planning/-/wikis/integration-docs#testing-q).
@@ -77,7 +77,7 @@ end
We must not make breaking changes to our REST API v4, even in major GitLab releases. See [what is a breaking change](#what-is-a-breaking-change) and [what is not a breaking change](#what-is-not-a-breaking-change).

Our REST API maintains its own versioning independent of GitLab versioning.
The current REST API version is `4`. Because [we commit to follow semantic versioning for our REST API](../api/rest/_index.md), we cannot make breaking changes to it. A major version change for our REST API (most likely, `5`) is currently not planned, or scheduled.

The exception is API features that are [marked as experimental or beta](#experimental-beta-and-generally-available-features). These features can be removed or changed at any time.
@@ -26,7 +26,7 @@ Specifically, avoid invoking the following classes:

### For projects

Instead of using `Projects::DestroyService`, use `Projects::MarkForDeletionService`.

```ruby
Projects::MarkForDeletionService.new(project, current_user).execute

@@ -34,7 +34,7 @@ Projects::MarkForDeletionService.new(project, current_user).execute

### For groups

Instead of using `Groups::DestroyService`, use `Groups::MarkForDeletionService`.

```ruby
Groups::MarkForDeletionService.new(group, current_user).execute
@@ -1103,6 +1103,16 @@ unless we've gone through a legal review and have been told to promote the partn

This guidance follows the [Use of Third-party Trademarks](https://handbook.gitlab.com/handbook/legal/policies/product-third-party-trademarks-guidelines/#dos--donts-for-use-of-third-party-trademarks-in-gitlab).

## GitLab AI vendor model

Use **GitLab AI vendor model** to refer to a [language model](#language-model-large-language-model)
that is hosted by GitLab, and that customers access through the GitLab-hosted
[AI gateway](#ai-gateway).

Do not use this term when the [language model is hosted by a customer](#self-hosted-model),
or when the customer is using the [GitLab Duo Self-Hosted](#gitlab-duo-self-hosted)
feature.

## GitLab Dedicated

Use **GitLab Dedicated** to refer to the product offering. It refers to a GitLab instance that's hosted and managed by GitLab for customers.
@@ -74,7 +74,7 @@ Below are some examples of common Kibana filters.

[See example filter](https://log.gprd.gitlab.net/app/r/s/cWkK1).

As mentioned [above](#logs-of-each-graphql-query), `json.meta.caller_id` appears as `graphql:<operation_name>` for queries that
came from the GitLab frontend, otherwise as `graphql:unknown`. This filter can be used to identify internal versus external queries.

1. Combine the [subcomponent filter](#logs-of-each-graphql-query) with the following Kibana filter:
@@ -76,7 +76,7 @@ To set up publishing through GitLab CI/CD:

### Example CI/CD configuration

Below is an example `.gitlab-ci.yml` configuration for publishing an NPM package. This codeblock isn't meant to be used as-is and will require changes depending on your configuration. This means that you will need to modify the example below to include the location of your npmjs publishing token.

```yaml
stages:
@@ -37,8 +37,19 @@ generate them from a cron-based Sidekiq job:
- For Geo related metrics, check `Geo::MetricsUpdateService`.
- For other "global" / instance-wide metrics, check: `Metrics::GlobalMetricsUpdateService`.

-When exporting data from Sidekiq in an installation with more than one Sidekiq instance,
-you are not guaranteed that the same exporter will always be queried.
-You can read more and understand the caveats in [issue 406583](https://gitlab.com/gitlab-org/gitlab/-/issues/406583),
+{{< alert type="warning" >}}
+
+When exporting metrics from Sidekiq in a multi-instance deployment:
+
+- The same exporter is not guaranteed to be queried consistently.
+- This is especially problematic for gauge metrics, as each Sidekiq worker will continue reporting the last recorded value
+  until that specific worker runs the metric collection code again.
+- This can lead to inconsistent or stale metrics data across your monitoring system.
+
+For more reliable metrics collection, consider creating the exporter as a custom exporter
+in [`gitlab-exporter`](https://gitlab.com/gitlab-org/ruby/gems/gitlab-exporter/)
+
+{{< /alert >}}
+
+For more details, see [issue 406583](https://gitlab.com/gitlab-org/gitlab/-/issues/406583),
where we also discuss a possible solution using a push-gateway.
@@ -90,7 +90,7 @@ version `19.5`, this would need three deployments of the service:
- One for service version `18.11`, which provides support for all GitLab `18.x` versions
- One for service version `19.5`, which provides support for GitLab versions `19.0`-`19.5`.

Once version 18.0 is released, unused code from versions 17.x can be safely removed, since a legacy deployment will be present.
Then, once version 20.0 is released, and GitLab version 17.x is not supported anymore, the legacy deployment can also be removed.

#### Publishing images
@@ -119,7 +119,7 @@ There is a risk in skipping end-to-end tests. Use caution and discretion when ap

#### Dynamic parallel job scaling

To maintain consistent pipeline run times, the CI/CD job count for each particular E2E test suite is scaled dynamically based on total run time of tests in the suite.
The [`generate_e2e_pipelines`](https://gitlab.com/gitlab-org/gitlab/-/blob/master/qa/tasks/ci.rake?ref_type=heads) Rake task creates CI/CD YAML files that:

- Create the correct number of parallel jobs.
@@ -183,7 +183,7 @@ More threads would lead to excessive swapping and lower performance.
[Redis](https://redis.io/) stores all user sessions and background tasks
and requires about 25 kB per user on average.

-In GitLab 16.0 and later, Redis 6.x or 7.x is required.
+In GitLab 16.0 and later, Redis 6.2.x or 7.x is required.
For more information about end-of-life dates, see the
[Redis documentation](https://redis.io/docs/latest/operate/rs/installing-upgrading/product-lifecycle/).
@@ -246,7 +246,7 @@ To copy the API token, select **Copy**.
To migrate from one Jira site to another in GitLab and maintain your Jira issues integration:

1. Follow the steps in [configure the integration](#configure-the-integration).
1. Enter the new Jira site URL (for example, `https://myjirasite.atlassian.net`).

To update existing Jira issue references in GitLab to use the new Jira site URL, you must
[invalidate the Markdown cache](../../administration/invalidate_markdown_cache.md#invalidate-the-cache).
@@ -6,9 +6,9 @@ title: Solution Components
---

This documentation section covers a variety of Solution components developed and provided by GitLab.
To download and run these solution components, ask your account team for an invitation code.

The use of any Solution component is subject to the [GitLab Subscription Agreement](https://handbook.gitlab.com/handbook/legal/subscription-agreement/) (the "Agreement") and constitutes Free Software as defined within the Agreement.

## DevSecOps Workflow
@@ -6,9 +6,9 @@ title: GitLab AI Solution Components
---

This documentation section covers a variety of Solution components developed and provided by GitLab.
To download and run these solution components, ask your account team for an invitation code.

The use of any Solution component is subject to the [GitLab Subscription Agreement](https://handbook.gitlab.com/handbook/legal/subscription-agreement/) (the "Agreement") and constitutes Free Software as defined within the Agreement.

## GitLab Duo Workflow
@@ -20,14 +20,14 @@ title: Duo Workflow Use Case for Applying Coding Style
1. Obtain the invitation code from your account team.
1. Download the solution component from [the solution component webstore](https://cloud.gitlab-accelerator-marketplace.com) by using your invitation code.

## Duo Workflow Use Case: Improve Java Application with Style Guide

This document describes the GitLab Duo Workflow Solution with a prompt and context library. The purpose of the solution is to improve application code based on a defined style.

This solution provides a GitLab issue as the prompt and the style guide as the context, designed to automate Java style guidelines to codebases using GitLab Duo Workflow. The prompt and context library enables Duo Workflow to:

1. Access centralized style guide content stored in a GitLab repository,
1. Understand domain-specific coding standards, and
1. Apply consistent formatting to Java code while preserving functionality.

For detailed information about GitLab Duo Workflow, review [the document here](../../../user/duo_workflow/_index.md).

@@ -57,7 +57,7 @@ To run the agentic workflow to review and apply style to your application, you n

1. **Set up the prompt and context library** by cloning the `Enterprise Code Quality Standards` project
1. **Create a GitLab issue** `Review and Apply Style` with the prompt content from the library file `.gitlab/workflows/java-style-workflow.md`
1. **In the issue** `Review and Apply Style`, configure the workflow variables as detailed in the [Configuration section](#configuration-guide)
1. **In your VS Code**, with the project `Enterprise Code Quality Standards`, start the Duo Workflow with a simple [workflow prompt](#example-duo-workflow-prompt)
1. **Work with the Duo Workflow** by reviewing the proposed plan and automated tasks; if needed, add further input to the workflow
1. **Review and commit** the styled code changes to your repository
@@ -77,7 +77,7 @@ This simple prompt is powerful because it instructs Duo Workflow to:

## Configuration Guide

The prompt is defined in the `.gitlab/workflows/java-style-workflow.md` file in the solution package. This file serves as your template for creating GitLab issues that instruct the workflow agent to build out the plan to automate the style guide review on your application and apply the changes.

The first section of `.gitlab/workflows/java-style-workflow.md` defines the variables you need to configure for the prompt.
@@ -121,7 +121,7 @@ TARGET_FILES=asset-management-api/src/main/java/com/royal/reserve/bank/asset/man

### Important Notes About AI-Generated Code

**⚠️ Important Disclaimer**:

GitLab Workflow uses Agentic AI which is non-deterministic, meaning:
@@ -195,12 +195,12 @@ The AI assistant first creates an execution plan with specific tools:
1. Access the Java style guidelines by retrieving content from the Enterprise Java Standards project using `run_read_only_git_command` on `https://gitlab.com/gl-demo-ultimate-zhenderson/sandbox/enterprise-java-standards.git` for file `coding-style/java/guidelines/java-coding-standards.md`, supported by: `run_read_only_git_command`
1. Read and analyze the current content of `AssetManagementService.java` using the `read_file` tool to understand its structure and identify areas that need style updates, supported by `read_file`
1. Search for any similar files in the project using `find_files` to ensure consistent style application across related service classes, supported by `find_files`
1. Edit `AssetManagementService.java` using the `edit_file` tool to apply the style guidelines, focusing on:

   - Package and import organization
   - Class and method documentation
   - Code indentation and spacing
   - Annotation placement
   - Method parameter formatting, supported by `edit_file`

1. Verify all Lombok annotations (@Service, @RequiredArgsConstructor, @Slf4j) are properly formatted according to the style guide using `grep_files` to check annotation patterns across the codebase, supported by `grep_files`
@@ -20,14 +20,14 @@ title: Duo Workflow Use Case for Applying Coding Style
1. Obtain the invitation code from your account team.
1. Download the solution component from [the solution component webstore](https://cloud.gitlab-accelerator-marketplace.com) by using your invitation code.

## Duo Workflow Use Case: Improve Java Application with Style Guide

This document describes the GitLab Duo Workflow Solution with a prompt and context library. The purpose of the solution is to improve application code based on a defined style.

This solution provides a GitLab issue as the prompt and the style guide as the context, designed to automate Java style guidelines to codebases using GitLab Duo Workflow. The prompt and context library enables Duo Workflow to:

1. Access centralized style guide content stored in a GitLab repository,
1. Understand domain-specific coding standards, and
1. Apply consistent formatting to Java code while preserving functionality.

For detailed information about GitLab Duo Workflow, review [the document here](../../user/duo_workflow/_index.md).

@@ -57,7 +57,7 @@ To run the agentic workflow to review and apply style to your application, you n

1. **Set up the prompt and context library** by cloning the `Enterprise Code Quality Standards` project
1. **Create a GitLab issue** `Review and Apply Style` with the prompt content from the library file `.gitlab/workflows/java-style-workflow.md`
1. **In the issue** `Review and Apply Style`, configure the workflow variables as detailed in the [Configuration section](#configuration-guide)
1. **In your VS Code**, with the project `Enterprise Code Quality Standards`, start the Duo Workflow with a simple [workflow prompt](#example-duo-workflow-prompt)
1. **Work with the Duo Workflow** by reviewing the proposed plan and automated tasks; if needed, add further input to the workflow
1. **Review and commit** the styled code changes to your repository
@@ -77,7 +77,7 @@ This simple prompt is powerful because it instructs Duo Workflow to:

## Configuration Guide

The prompt is defined in the `.gitlab/workflows/java-style-workflow.md` file in the solution package. This file serves as your template for creating GitLab issues that instruct the workflow agent to build out the plan to automate the style guide review on your application and apply the changes.

The first section of `.gitlab/workflows/java-style-workflow.md` defines the variables you need to configure for the prompt.
@@ -121,7 +121,7 @@ TARGET_FILES=asset-management-api/src/main/java/com/royal/reserve/bank/asset/man

### Important Notes About AI-Generated Code

**⚠️ Important Disclaimer**:

GitLab Workflow uses Agentic AI which is non-deterministic, meaning:
@@ -195,12 +195,12 @@ The AI assistant first creates an execution plan with specific tools:
1. Access the Java style guidelines by retrieving content from the Enterprise Java Standards project using `run_read_only_git_command` on `https://gitlab.com/gl-demo-ultimate-zhenderson/sandbox/enterprise-java-standards.git` for file `coding-style/java/guidelines/java-coding-standards.md`, supported by: `run_read_only_git_command`
1. Read and analyze the current content of `AssetManagementService.java` using the `read_file` tool to understand its structure and identify areas that need style updates, supported by `read_file`
1. Search for any similar files in the project using `find_files` to ensure consistent style application across related service classes, supported by `find_files`
1. Edit `AssetManagementService.java` using the `edit_file` tool to apply the style guidelines, focusing on:

   - Package and import organization
   - Class and method documentation
   - Code indentation and spacing
   - Annotation placement
   - Method parameter formatting, supported by `edit_file`

1. Verify all Lombok annotations (@Service, @RequiredArgsConstructor, @Slf4j) are properly formatted according to the style guide using `grep_files` to check annotation patterns across the codebase, supported by `grep_files`
@ -19,9 +19,9 @@ With the ServiceNow DevOps Change Velocity integration, it's able to track infor
It automates the creation of change requests and automatically approves them based on the policy criteria when it's integrated with GitLab CI/CD pipelines.

This document shows you how to:

1. Integrate ServiceNow with GitLab using Change Velocity for change management,
1. Automatically create the change request in ServiceNow from the GitLab CI/CD pipeline,
1. Approve the change request in ServiceNow if it requires CAB review and approval,
1. Start the production deployment based on the change request approval.
@ -43,7 +43,7 @@ There are multiple ways to integrate GitLab with ServiceNow. The followings opti
## ServiceNow DevOps Change Velocity

Upon installing and configuring DevOps Change Velocity from the ServiceNow store, enable change control through automated change creation in the DevOps Change Workspace directly.

### Built-in Change Request Process
@ -53,8 +53,8 @@ The normal change process requires the change request to be approved before the
#### Set up the Pipeline and Change Request Jobs

Use the `gitlab-ci-workflow1.yml` sample pipeline in the solution repository as a starting point.
Check below for the steps to enable the automatic change creation and pass the change attributes through the pipeline. A minimal sketch of the idea follows.

Note: for more detailed instructions, see [the ServiceNow documentation](https://www.servicenow.com/docs/bundle/yokohama-it-service-management/page/product/enterprise-dev-ops/task/automate-devops-change-request.html)
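The following is a sketch of the idea only, not the contents of `gitlab-ci-workflow1.yml`; the stage and job names are hypothetical placeholders. ServiceNow watches the stage that has change control enabled and holds the pipeline at that point until the change request is approved.

```yaml
stages:
  - build
  - change_control
  - deploy_production

# Change control is enabled for this stage in ServiceNow, so running this job
# triggers automatic change request creation.
change_gate:
  stage: change_control
  script:
    - echo "ServiceNow creates and tracks the change request for this stage"

# This job starts only after the change request for the previous stage is approved.
deploy_production:
  stage: deploy_production
  script:
    - ./deploy.sh production   # hypothetical deployment script
  environment: production
```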
@ -76,19 +76,19 @@ Below are the high-level steps:
#### Run Pipeline with Change Management

Once the above steps are completed, the project CD pipeline can incorporate the jobs illustrated in the `gitlab-ci-workflow1.yml` sample pipeline. Below are the Change Management steps:

1. In ServiceNow, change control is enabled for one of the stages in the pipeline.
1. In GitLab, the pipeline job with the change control function runs.
1. In ServiceNow, a change request is automatically created.
1. In ServiceNow, approve the change request.
1. The pipeline resumes and begins the next job for deploying to the production environment upon the approval of the change request.
### Custom Actions with Velocity Container Image

Use the ServiceNow custom actions via the DevOps Change Velocity Docker image to set the Change Request title, description, change plan, rollback plan, data related to artifacts to be deployed, and package registration. This allows you to customize the change request descriptions instead of passing the pipeline metadata as the change request description.

#### Set up the Pipeline and Change Request Jobs
@ -126,7 +126,7 @@ Use the `gitlab-ci-workflow2.yml` sample pipeline in this repository as an examp
#### Run Pipeline with Custom Change Management

Use the `gitlab-ci-workflow2.yml` sample pipeline as a starting point.
Once the above steps are completed, the project CD pipeline can incorporate the jobs illustrated in the `gitlab-ci-workflow2.yml` sample pipeline. Below are the Change Management steps:

1. In ServiceNow, change control is enabled for one of the stages in the pipeline.
1. In GitLab, the pipeline job with the change control function runs.
@ -134,5 +134,5 @@ Once the above steps are completed, the project CD pipeline can incorporate the
1. In GitLab, the change request number and other information can be found in the pipeline details. The pipeline job remains running until the change request is approved, then it proceeds to the next job.
1. In ServiceNow, approve the change request.
1. In GitLab, the pipeline job resumes and begins the next job, which is the deployment to the production environment, upon the approval of the change request.
@ -109,7 +109,7 @@ If you already have a security policy project but don't have dependency and/or l
1. Make sure to:

   - Maintain the existing YAML structure
   - Place these sections at the same level as other top-level sections
   - Set `user_approvers_ids` and/or `group_approvers_ids` and/or `role_approvers` (only one is needed)
   - Replace `YOUR_USER_ID_HERE` or `YOUR_GROUP_ID_HERE` with appropriate user/group IDs (ensure you paste the user/group IDs, e.g. 1234567, and NOT the usernames)
   - Replace `YOUR_PROJECT_ID_HERE` if you'd like to exclude any projects from the policy (ensure you paste the project IDs, e.g. 1234, and NOT the project names/paths)
   - Set `approvals_required` to the number of approvals you want to require, for example `approvals_required: 1`

   A minimal sketch of how these fields fit together is shown below.
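This is a sketch only, assuming the `require_approval` action of a merge request approval policy; the policy name and all IDs are placeholders.

```yaml
approval_policy:
  - name: Require approval on dependency and license findings
    # ...rules omitted for brevity; keep your existing rules section...
    actions:
      - type: require_approval
        approvals_required: 1
        user_approvers_ids:
          - 1234567   # placeholder user ID, not a username
```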
@ -23,7 +23,7 @@ title: Secret Detection
- GitLab Ultimate tier
- Administrator access to your GitLab instance or group
- [Secret Detection](../../user/application_security/secret_detection/_index.md) enabled for your projects

## Configure Secret Detection Custom Rules
@ -91,7 +91,7 @@ To set the access and authentication, follow these steps:
This guide covers the steps to configure the policy to run secret detection for all projects using a centralized custom ruleset.

### Configure Secret Detection Policy

To run secret detection automatically in the pipeline as the enforced global policy, set up the policy at the highest level, in this case the top group level. Follow the steps outlined below to create the new secret detection policy.
@ -102,11 +102,11 @@ To run secret detection automatically in the pipeline as the enforced global pol
1. Set the **Policy scope** by selecting either "All projects in this group" (and optionally set exceptions) or "Specific projects" (and select the projects from the dropdown).
1. Under the **Actions** section, select "Dependency scanning" instead of "Secret Detection" (default).
1. Under the **Conditions** section, you can optionally change "Triggers:" to "Schedules:" if you want to run the scan on a schedule instead of at every commit.
1. Set up access to the custom ruleset: add CI variables with the value of the bot user, group variable and the URL of the custom ruleset project.

   Since the custom ruleset is hosted in a different project and considered as the remote ruleset, the `SECRET_DETECTION_RULESET_GIT_REFERENCE` must be used.

   ```yaml
   variables:
     SECRET_DETECTION_RULESET_GIT_REFERENCE: "group_[group_id]_bot_[random_number]:$SECRET_DETECTION_GROUP_TOKEN@[custom ruleset project URL]"
     SECRET_DETECTION_HISTORIC_SCAN: "true"
   ```
@ -121,7 +121,7 @@ For detailed information about this CI variable, see [this document for details]
Upon creating the policy, for reference, here is the complete policy configuration:

```yaml
---
scan_execution_policy:
- name: Scan Execution for Secret Detection with Custom Rules
@ -12,7 +12,7 @@ title: Security Metrics and KPIs
{{< /details >}}

The document describes the installation, configuration, and user guide of the GitLab Security Metrics and KPIs Solution Component. This security solution component provides metrics and KPIs that can be viewed by business units, time range, vulnerability severity, and security types. It can provide a snapshot of the security posture on a monthly and quarterly basis with PDF documents. The dashboard and visualization of data are displayed as a Dashboard in Splunk.

![Security Metrics Dashboard]()
@ -12,9 +12,9 @@ title: Auto DevOps
{{< /details >}}

Auto DevOps turns your code into production-ready applications without the usual configuration overhead.
The entire DevOps lifecycle is pre-configured using industry best practices. Start with the defaults
to ship quickly, then customize when you need more control. No complex configuration files or deep
DevOps expertise is required.

With Auto DevOps you get:
@ -24,7 +24,7 @@ Use CI/CD pipelines to automatically build, test, and deploy your code.
| [Use Auto DevOps to deploy an application](../topics/autodevops/cloud_deployments/auto_devops_with_gke.md) | Deploy an application to Google Kubernetes Engine (GKE). | |
| [Using Buildah in a rootless container with GitLab Runner Operator on OpenShift](../ci/docker/buildah_rootless_tutorial.md) | Learn how to set up GitLab Runner Operator on OpenShift to build Docker images with Buildah in a rootless container | |
| [Automatically build and publish packages with CI/CD](../user/packages/pypi_repository/auto_publish_tutorial.md) | Learn how to automatically build, test, and publish a PyPI package to the package registry. | |
| [Structure the package registry for enterprise scale](../user/packages/package_registry/enterprise_structure_tutorial.md) | Set up your organization to upload, manage, and consume packages at scale. | |
| [Set up CI/CD steps](setup_steps/_index.md) | Learn how to set up the steps component and configure a CI/CD pipeline to use the step in a job. | |
| [Build and sign Python packages with GitLab CI/CD](../user/packages/package_registry/pypi_cosign_tutorial.md) | Learn how to build a secure pipeline for Python packages using GitLab CI/CD and Sigstore Cosign. | |
@ -38,14 +38,15 @@ or the [Support team](https://about.gitlab.com/support/).
The zero-downtime upgrade process has the following requirements:

- Zero-downtime upgrades are only supported on multi-node GitLab environments built with the Linux package that have Load Balancing and available HA mechanisms configured as follows:
  - External Load Balancer configured for Rails nodes with health checks enabled against the [Readiness](../administration/monitoring/health_check.md#readiness) (`/-/readiness`) endpoint.
  - Internal Load Balancer configured for any PgBouncer and Praefect components with TCP health checks enabled.
  - HA mechanisms configured for the Consul, Postgres, Redis components if present.
    - Any of these components that are not deployed in a HA fashion need to be upgraded separately with downtime.
    - For databases, the [Linux package only supports HA for the main GitLab database](https://gitlab.com/groups/gitlab-org/-/epics/7814). For any other databases, such as the [Praefect database](#praefect-gitaly-cluster), a third party database solution is required to achieve HA and subsequently to avoid downtime.
- **You can only upgrade one minor release at a time**. So from `16.1` to `16.2`, not to `16.3`. If you skip releases, database modifications may be run in the wrong sequence [and leave the database schema in a broken state](https://gitlab.com/gitlab-org/gitlab/-/issues/321542).
- You have to use [post-deployment migrations](../development/database/post_deployment_migrations.md).
- [Zero-downtime upgrades are not available with the GitLab Charts](https://docs.gitlab.com/charts/installation/upgrade.html). Support is available with the [GitLab Operator](https://docs.gitlab.com/operator/gitlab_upgrades.html) but there are [known limitations](https://docs.gitlab.com/operator/#known-issues) with this deployment method and as such it's not covered in this guide at this time.

In addition to the above, be aware of the following considerations:
|
- Certain major or minor releases may require a set of background migrations to be finished. While this doesn't require downtime (if the above conditions are met), it's required that you [wait for background migrations to complete](background_migrations.md) between each major or minor release upgrade.
  - The time necessary to complete these migrations can be reduced by increasing the number of Sidekiq workers that can process jobs in the `background_migration` queue. To see the size of this queue, [check for background migrations before upgrading](background_migrations.md).
- Zero downtime upgrades can be performed for [Gitaly](#gitaly) when it's set up in its Cluster or Sharded setups due to a graceful reload mechanism. The [Praefect (Gitaly Cluster)](#praefect-gitaly-cluster) component can also be directly upgraded without downtime; however, the GitLab Linux package does not offer HA and subsequently Zero Downtime support for its database - a third party database solution is required to avoid downtime.
- [PostgreSQL major version upgrades](../administration/postgresql/replication_and_failover.md#near-zero-downtime-upgrade-of-postgresql-in-a-patroni-cluster) are a separate process and not covered by zero-downtime upgrades (smaller upgrades are covered).
- Zero-downtime upgrades are supported for the noted GitLab components you've deployed with the GitLab Linux package. If you've deployed select components through a supported third party service, such as PostgreSQL in AWS RDS or Redis in GCP Memorystore, upgrades for those services need to be performed separately as per their standard processes.
- As a general guideline, the larger amount of data you have, the more time is needed for the upgrade to complete. In testing, any database smaller than 10 GB shouldn't generally take longer than an hour, but your mileage may vary.

{{< alert type="note" >}}
@ -153,7 +155,7 @@ This process applies to both Gitaly Sharded and Cluster setups. Run through the
   sudo gitlab-ctl restart consul node-exporter logrotate
   ```

#### Praefect (Gitaly Cluster)

For Gitaly Cluster setups, you must deploy and upgrade Praefect in a similar way by using a graceful reload.
@ -165,6 +167,12 @@ In the future this functionality may be changed, [refer to this Epic](https://gi
{{< /alert >}}

{{< alert type="note" >}}

This section focuses exclusively on the Praefect component, not its [required PostgreSQL database](../administration/gitaly/praefect.md#postgresql). The [GitLab Linux package does not offer HA](https://gitlab.com/groups/gitlab-org/-/epics/7814) and subsequently Zero Downtime support for the Praefect database. A third party database solution is required to avoid downtime.

{{< /alert >}}

One additional step for Praefect is that it also needs to run through its database migrations to upgrade its data.
Migrations need to be run on only one Praefect node to avoid clashes. This is best done by selecting one of the
nodes to be a deploy node. This target node is configured to run migrations while the rest are not. We'll refer to this as the **Praefect deploy node** below:
@ -21,7 +21,7 @@ For a description of the advantages of GitOps, see [the OpenGitOps initiative](h
- For scaling, Flux supports [vertical](https://fluxcd.io/flux/installation/configuration/vertical-scaling/) and [horizontal sharding](https://fluxcd.io/flux/installation/configuration/sharding/).
- For Flux-specific guidance, see the [Flux guides](https://fluxcd.io/flux/guides/) in the Flux documentation.
- To simplify maintenance, you should run a single GitLab agent for Kubernetes installation per cluster. You can share the agent connection with impersonation features across the GitLab domain.
- Consider using the Flux `OCIRepository` for storing and retrieving manifests (see the sketch after this list).
  You can use GitLab pipelines to build and push the OCI images to the container registry.
- To shorten the feedback loop, trigger an immediate GitOps reconciliation from the related GitLab pipeline.
- You should sign generated OCI images, and deploy only images signed and verified by Flux.
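As a minimal sketch, an `OCIRepository` that points Flux at manifests pushed to the GitLab container registry might look as follows. The URL, tag, names, and interval are hypothetical placeholders.

```yaml
apiVersion: source.toolkit.fluxcd.io/v1beta2
kind: OCIRepository
metadata:
  name: app-manifests
  namespace: flux-system
spec:
  interval: 5m
  url: oci://registry.gitlab.example.com/my-group/my-app/manifests
  ref:
    tag: 1.0.0   # a pinned tag; see the semver variant later in this page
```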
@ -29,7 +29,7 @@ For a description of the advantages of GitOps, see [the OpenGitOps initiative](h
### OCI containers

When you use OCI containers instead of Git repositories, the source of truth for the manifests is still the Git repository.
You can think of the OCI container as a caching layer between the Git repository and the cluster.

There are several benefits to using OCI containers:
@ -37,7 +37,7 @@ There are several benefits to using OCI containers:
- OCI was designed for scalability. Although the GitLab Git repositories scale well, they were not designed for this use case.
- A single Git repository can be the source of several OCI containers, each packaging a small set of manifests.
  This way, if you need to retrieve a set of manifests, you don't need to download the whole Git repository.
- OCI repositories can follow a well-known versioning scheme, and Flux can be configured to auto-update following that scheme (see the sketch after this list).
  For example, if you use semantic versioning, Flux can deploy all the minor and patch changes automatically, while major versions require a manual update.
- OCI images can be signed, and the signature can be verified by Flux.
- OCI repositories can be scanned by the container registry, even after the image is built.
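A sketch of the semver-tracking variant, reusing the hypothetical `OCIRepository` from the earlier example; the version range is a placeholder.

```yaml
apiVersion: source.toolkit.fluxcd.io/v1beta2
kind: OCIRepository
metadata:
  name: app-manifests
  namespace: flux-system
spec:
  interval: 5m
  url: oci://registry.gitlab.example.com/my-group/my-app/manifests
  ref:
    semver: ">=1.0.0 <2.0.0"   # minor and patch tags deploy automatically
```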
@ -51,7 +51,7 @@ If the organization moves its verified domains to another paid group, its enterp
### Identifying unclaimed users

If a user is not automatically claimed as an Enterprise User, their existing access will not be revoked.
A group with domain verification enabled can have both claimed and unclaimed users as members.

The only difference between a member claimed as an Enterprise User and one that isn't is that a Group Owner cannot [manage unclaimed users](#manage-enterprise-users-in-a-namespace).
@ -19,9 +19,9 @@ title: GitLab-managed Terraform/OpenTofu state
{{< /history >}}

Managing infrastructure state files across teams requires both security and reliability. GitLab-managed
OpenTofu state eliminates the typical challenges of state management.
With minimal configuration, your OpenTofu states become a natural extension of your GitLab project.
This integration keeps your infrastructure definitions, code, and state all in one secure location. A minimal CI sketch of the idea follows.
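This is a sketch only, assuming a hypothetical OpenTofu container image, a state named `production`, and a configuration that declares `backend "http" {}`; the address variables follow the GitLab Terraform/OpenTofu state HTTP backend shape.

```yaml
plan:
  image:
    name: ghcr.io/opentofu/opentofu:latest   # hypothetical image choice
    entrypoint: [""]
  variables:
    TF_STATE_NAME: production
    TF_HTTP_ADDRESS: "$CI_API_V4_URL/projects/$CI_PROJECT_ID/terraform/state/$TF_STATE_NAME"
    TF_HTTP_USERNAME: gitlab-ci-token
    TF_HTTP_PASSWORD: $CI_JOB_TOKEN
  script:
    - tofu init
    - tofu plan
```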
With GitLab-managed OpenTofu state, you:
@ -523,8 +523,8 @@ correct location:
{{< alert type="note" >}}

If you protect a Maven package before publishing it, the package will be rejected with a `403 Forbidden` error and an `Authorization failed` error message.
Ensure the Maven package is not protected when publishing.
For more information about package protection rules, see [how to protect a package](../../../user/packages/package_registry/package_protection_rules.md#protect-a-package).

{{< /alert >}}
@ -129,7 +129,7 @@ in your package project root directory where `package.json` is found:
For example, to trigger only on any tag push:

In Yarn 1:

```yaml
image: node:lts
@ -62,7 +62,7 @@ In both situations, you are redirected to the sign-in page and see the following
{{< alert type="note" >}}

Your account can have more than one verified email address, and any email address
associated with your account can be verified. However, only the primary email address
can be used to sign in once the password is reset.

{{< /alert >}}
@ -12,7 +12,7 @@ title: Deploy tokens
{{< /details >}}

Deploy tokens provide secure access to GitLab resources without tying permissions to individual user accounts. Use them with Git operations, container registries, and package registries, giving your deployment
automation access to exactly what it needs. A minimal CI sketch follows.
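As a sketch, a CI job can authenticate to the container registry with a project deploy token. This assumes a deploy token named `gitlab-deploy-token`, which GitLab exposes to CI/CD as `CI_DEPLOY_USER` and `CI_DEPLOY_PASSWORD`; the job and image names are placeholders.

```yaml
pull_release_image:
  image: docker:latest
  services:
    - docker:dind
  script:
    - docker login -u "$CI_DEPLOY_USER" -p "$CI_DEPLOY_PASSWORD" "$CI_REGISTRY"
    - docker pull "$CI_REGISTRY_IMAGE:latest"
```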
With deploy tokens, you have:
@ -12,7 +12,7 @@ title: Releases
{{< /details >}}

Create a release to package your project at critical milestones. Releases combine code, binaries, documentation,
and release notes into a complete snapshot of your project.
When a release is created, GitLab automatically tags your code, archives a snapshot, and generates
audit-ready evidence. This creates a permanent record that's perfect for compliance requirements and
@ -172,6 +172,7 @@ To set this default:
- [Added](https://gitlab.com/gitlab-org/gitlab/-/issues/463016) 60 day and 30 days triggers to project and group access tokens webhooks in GitLab 17.9 [with a flag](../../../administration/feature_flags.md) named `extended_expiry_webhook_execution_setting`. Disabled by default.
- [Generally available](https://gitlab.com/gitlab-org/gitlab/-/issues/513684) in GitLab 17.10. Feature flag `extended_expiry_webhook_execution_setting` removed.

{{< /history >}}
@ -34,8 +34,8 @@ A project transfer includes:
- Membership invitations

{{< alert type="note" >}}

Members with [inherited membership](../members/_index.md#membership-types)
in the project lose access unless they are also members of the target group.
The project inherits new member permissions from the group you transfer it to.
@ -0,0 +1,13 @@
# frozen_string_literal: true

module ActiveContext
  class Embeddings
    # Wraps the Vertex AI text embeddings call and normalizes the result so
    # callers always receive an array of embedding vectors.
    def self.generate_embeddings(content)
      embeddings = Gitlab::Llm::VertexAi::Embeddings::Text
        .new(content, user: nil, tracking_context: { action: 'embedding' }, unit_primitive: 'semantic_search_issue')
        .execute

      embeddings.all?(Array) ? embeddings : [embeddings]
    end
  end
end
@ -0,0 +1,37 @@
# frozen_string_literal: true

module ActiveContext
  module Preprocessors
    module Embeddings
      extend ActiveSupport::Concern

      IndexingError = Class.new(StandardError)

      # Vertex bulk limit is 250 so we choose a lower batch size
      # Gitlab::Llm::VertexAi::Embeddings::Text::BULK_LIMIT
      BATCH_SIZE = 100

      class_methods do
        def bulk_embeddings(refs)
          unless respond_to?(:embedding_content)
            raise IndexingError, "#{self} should implement :embedding_content method"
          end

          refs.each_slice(BATCH_SIZE) do |batch|
            contents = batch.map { |ref| embedding_content(ref) }
            embeddings = ActiveContext::Embeddings.generate_embeddings(contents)

            batch.each_with_index do |ref, index|
              ref.embedding = embeddings[index]
            end
          end

          refs
        rescue StandardError => e
          ::ActiveContext::Logger.exception(e)
          refs # we will generate each embedding on the fly if bulk fails
        end
      end
    end
  end
end
@ -0,0 +1,104 @@
# frozen_string_literal: true

RSpec.describe ActiveContext::Preprocessors::Embeddings do
  let(:reference_class) do
    Class.new(Test::References::MockWithDatabaseRecord) do
      include ::ActiveContext::Preprocessors::Embeddings

      add_preprocessor :bulk_embeddings do |refs|
        bulk_embeddings(refs)
      end

      attr_accessor :embedding
    end
  end

  let(:reference_1) { reference_class.new(collection_id, partition, object_id) }
  let(:reference_2) { reference_class.new(collection_id, partition, object_id) }

  let(:mock_adapter) { double }
  let(:mock_collection) { double(name: collection_name, partition_for: partition) }
  let(:mock_object) { double(id: object_id) }
  let(:mock_relation) { double(find_by: mock_object) }
  let(:mock_connection) { double(id: connection_id) }

  let(:connection_id) { 3 }
  let(:partition) { 2 }
  let(:collection_id) { 1 }
  let(:object_id) { 5 }
  let(:collection_name) { 'mock_collection' }
  let(:embeddings) { [[1, 2], [3, 4]] }
  let(:embedding_content) { 'some text' }

  subject(:preprocess_refs) { ActiveContext::Reference.preprocess_references([reference_1, reference_2]) }

  before do
    allow(ActiveContext).to receive(:adapter).and_return(mock_adapter)
    allow(ActiveContext::CollectionCache).to receive(:fetch).and_return(mock_collection)
    allow(ActiveContext::Logger).to receive(:exception).and_return(nil)
    allow(reference_class).to receive(:model_klass).and_return(mock_relation)
  end

  context 'when the reference klass implements :embedding_content' do
    before do
      allow(reference_class).to receive(:embedding_content).and_return(embedding_content)
    end

    it 'generates embeddings in bulk and sets the embeddings for each reference' do
      expect(ActiveContext::Embeddings).to receive(:generate_embeddings)
        .with([embedding_content, embedding_content])
        .and_return(embeddings)

      preprocess_refs

      expect(reference_1.embedding).to eq(embeddings.first)
      expect(reference_2.embedding).to eq(embeddings.last)
    end

    context 'when generating for a single reference' do
      it 'generates embeddings in bulk and sets the embeddings for the reference' do
        expect(ActiveContext::Embeddings).to receive(:generate_embeddings)
          .with([embedding_content])
          .and_return([embeddings.first])

        ActiveContext::Reference.preprocess_references([reference_1])

        expect(reference_1.embedding).to eq(embeddings.first)
      end
    end

    context 'when generate_embeddings returns an error' do
      let(:error) { StandardError }

      before do
        allow(ActiveContext::Embeddings).to receive(:generate_embeddings).and_raise(error)
      end

      it 'logs and returns all references without embeddings' do
        expect(::ActiveContext::Logger).to receive(:exception).with(error)

        expect(preprocess_refs).to eq([reference_1, reference_2])

        expect(reference_1.embedding).to be_nil
        expect(reference_2.embedding).to be_nil
      end
    end
  end

  context 'when the reference does not implement :embedding_content' do
    it 'logs and does not raise an error' do
      expect(ActiveContext::Embeddings).not_to receive(:generate_embeddings)
      expect(::ActiveContext::Logger).to receive(:exception)
        .with(ActiveContext::Preprocessors::Embeddings::IndexingError)

      expect { preprocess_refs }.not_to raise_error
    end

    it 'returns references without embeddings' do
      expect(preprocess_refs).to eq([reference_1, reference_2])

      expect(reference_1.embedding).to be_nil
      expect(reference_2.embedding).to be_nil
    end
  end
end
@ -119,6 +119,14 @@ class EventFilter
    end
  end

  def filters
    [ALL, PUSH, MERGED, ISSUE, COMMENTS, TEAM, WIKI, DESIGNS]
  end

  def ==(other)
    other.is_a?(self.class) && filter == other.filter
  end

  private

  def in_operator_params(array_data:, scope: nil, in_column: nil, in_values: nil, order_hint_column: nil)
@ -213,10 +221,6 @@ class EventFilter
  def design_events(events)
    events.for_design
  end

  def filters
    [ALL, PUSH, MERGED, ISSUE, COMMENTS, TEAM, WIKI, DESIGNS]
  end
end
# rubocop: enable CodeReuse/ActiveRecord
@ -197,6 +197,15 @@ semgrep-sast:
        - '**/*.swift'
        - '**/*.m'
        - '**/*.kt'
        - '**/*.properties'
        - '**/application*.yml'
        - '**/management*.yml'
        - '**/actuator*.yml'
        - '**/bootstrap*.yml'
        - '**/application*.yaml'
        - '**/management*.yaml'
        - '**/actuator*.yaml'
        - '**/bootstrap*.yaml'
    ## In case gitlab-advanced-sast already covers all the files that semgrep-sast would have scanned
    - if: $CI_COMMIT_BRANCH &&
        $GITLAB_FEATURES =~ /\bsast_advanced\b/ &&
@ -230,6 +239,15 @@ semgrep-sast:
        - '**/*.m'
        - '**/*.rb'
        - '**/*.kt'
        - '**/*.properties'
        - '**/application*.yml'
        - '**/management*.yml'
        - '**/actuator*.yml'
        - '**/bootstrap*.yml'
        - '**/application*.yaml'
        - '**/management*.yaml'
        - '**/actuator*.yaml'
        - '**/bootstrap*.yaml'

sobelow-sast:
  extends: .sast-analyzer
@ -250,6 +250,15 @@ semgrep-sast:
        - '**/*.swift'
        - '**/*.m'
        - '**/*.kt'
        - '**/*.properties'
        - '**/application*.yml'
        - '**/management*.yml'
        - '**/actuator*.yml'
        - '**/bootstrap*.yml'
        - '**/application*.yaml'
        - '**/management*.yaml'
        - '**/actuator*.yaml'
        - '**/bootstrap*.yaml'
    ## In case gitlab-advanced-sast already covers all the files that semgrep-sast would have scanned
    - if: $CI_PIPELINE_SOURCE == "merge_request_event" &&
        $GITLAB_FEATURES =~ /\bsast_advanced\b/ &&
@ -283,6 +292,15 @@ semgrep-sast:
        - '**/*.m'
        - '**/*.rb'
        - '**/*.kt'
        - '**/*.properties'
        - '**/application*.yml'
        - '**/management*.yml'
        - '**/actuator*.yml'
        - '**/bootstrap*.yml'
        - '**/application*.yaml'
        - '**/management*.yaml'
        - '**/actuator*.yaml'
        - '**/bootstrap*.yaml'
    - if: $CI_OPEN_MERGE_REQUESTS # Don't add it to a *branch* pipeline if it's already in a merge request pipeline.
      when: never
    # If there's no open merge request, add it to a *branch* pipeline instead.
@ -308,6 +326,15 @@ semgrep-sast:
        - '**/*.swift'
        - '**/*.m'
        - '**/*.kt'
        - '**/*.properties'
        - '**/application*.yml'
        - '**/management*.yml'
        - '**/actuator*.yml'
        - '**/bootstrap*.yml'
        - '**/application*.yaml'
        - '**/management*.yaml'
        - '**/actuator*.yaml'
        - '**/bootstrap*.yaml'
    ## In case gitlab-advanced-sast already covers all the files that semgrep-sast would have scanned
    - if: $CI_COMMIT_BRANCH &&
        $GITLAB_FEATURES =~ /\bsast_advanced\b/ &&
@ -341,6 +368,15 @@ semgrep-sast:
        - '**/*.m'
        - '**/*.rb'
        - '**/*.kt'
        - '**/*.properties'
        - '**/application*.yml'
        - '**/management*.yml'
        - '**/actuator*.yml'
        - '**/bootstrap*.yml'
        - '**/application*.yaml'
        - '**/management*.yaml'
        - '**/actuator*.yaml'
        - '**/bootstrap*.yaml'

sobelow-sast:
  extends: .sast-analyzer
@ -45,14 +45,7 @@ module Gitlab
        end
        command :set_parent, :epic do |parent_param|
          if quick_action_target.instance_of?(WorkItem)
            parent = extract_work_items(parent_param).first

            if parent && current_user.can?(:read_work_item, parent)
              @updates[:set_parent] = parent
              @execution_message[:set_parent] = success_msg[:set_parent]
            else
              @execution_message[:set_parent] = _("This parent does not exist or you don't have sufficient permission.")
            end
            handle_set_parent(parent_param)
          elsif quick_action_target.instance_of?(Issue)
            handle_set_epic(parent_param)
          end
@ -210,6 +203,35 @@ module Gitlab
      # overridden in EE
      def handle_set_epic(parent_param); end

      # rubocop:disable Gitlab/ModuleWithInstanceVariables -- @updates is already defined and part of
      # Gitlab::QuickActions::Dsl implementation
      def handle_set_parent(parent_param)
        parent = extract_work_items(parent_param).first
        child = quick_action_target

        message =
          if parent && current_user.can?(:read_work_item, parent)
            if child&.work_item_parent == parent
              format(_('Work item %{work_item_reference} has already been added to parent %{parent_reference}.'),
                work_item_reference: child&.to_reference, parent_reference: parent.to_reference)
            elsif parent.confidential? && !child&.confidential?
              _("Cannot assign a confidential parent to a non-confidential work item. Make the work item " \
                "confidential and try again")
            elsif ::WorkItems::HierarchyRestriction.find_by_parent_type_id_and_child_type_id(parent.work_item_type_id,
              child&.work_item_type_id).nil?
              _("Cannot assign this work item type to parent type")
            else
              @updates[:set_parent] = parent
              success_msg[:set_parent]
            end
          else
            _("This parent does not exist or you don't have sufficient permission.")
          end

        @execution_message[:set_parent] = message
      end
      # rubocop:enable Gitlab/ModuleWithInstanceVariables

      # overridden in EE
      def show_epic_alias?; end
    end
@ -11694,6 +11694,12 @@ msgstr ""
msgid "Cannot assign a confidential epic to a non-confidential issue. Make the issue confidential and try again"
msgstr ""

msgid "Cannot assign a confidential parent to a non-confidential work item. Make the work item confidential and try again"
msgstr ""

msgid "Cannot assign this work item type to parent type"
msgstr ""

msgid "Cannot be merged automatically"
msgstr ""
@ -42621,9 +42627,6 @@ msgstr ""
msgid "PipelineCharts|An unknown error occurred while processing CI/CD analytics."
msgstr ""

msgid "PipelineCharts|CI/CD Analytics"
msgstr ""

msgid "PipelineCharts|Failure rate"
msgstr ""
@ -48241,6 +48244,9 @@ msgstr ""
msgid "RegistrationFeatures|use this feature"
msgstr ""

msgid "Registration|There are no seats left on your GitLab instance. Please contact your GitLab administrator."
msgstr ""

msgid "Registries enqueued to be resynced"
msgstr ""
@ -54673,6 +54679,9 @@ msgstr ""
msgid "Service account token expiration"
msgstr ""

msgid "Service account was successfully updated."
msgstr ""

msgid "Service accounts"
msgstr ""
@ -54715,6 +54724,9 @@ msgstr ""
msgid "ServiceAccount|User does not have permission to delete a service account."
msgstr ""

msgid "ServiceAccount|You are not authorized to update service accounts in this namespace."
msgstr ""

msgid "ServiceDesk|%{customEmail} with SMTP host %{smtpAddress} is %{badgeStart}verified%{badgeEnd}"
msgstr ""
@ -63362,6 +63374,9 @@ msgstr[1] ""
msgid "User is blocked"
msgstr ""

msgid "User is not a service account"
msgstr ""

msgid "User is not allowed to resolve thread"
msgstr ""
@ -66167,6 +66182,9 @@ msgstr ""
msgid "Work in progress limit: %{wipLimit}"
msgstr ""

msgid "Work item %{work_item_reference} has already been added to parent %{parent_reference}."
msgstr ""

msgid "Work item not supported"
msgstr ""
@ -65,7 +65,7 @@
    "@gitlab/fonts": "^1.3.0",
    "@gitlab/query-language-rust": "0.4.2",
    "@gitlab/svgs": "3.123.0",
    "@gitlab/ui": "111.0.0",
    "@gitlab/vue-router-vue3": "npm:vue-router@4.5.0",
    "@gitlab/vuex-vue3": "npm:vuex@4.1.0",
    "@gitlab/web-ide": "^0.0.1-dev-20250309164831",
@ -7,7 +7,7 @@ To see the full list of circular dependencies, run the command ${chalk.bold.cyan
If you have fixed existing circular dependencies or find false positives, you can add/remove them from the
exclusions list in the 'config/dependency-cruiser.js' file.\n
${chalk.italic('If the above command fails because of memory issues, increase the memory by prepending it with the following')}
${chalk.bold.cyan('NODE_OPTIONS="--max-old-space-size=4096"')}
`);
};
@ -51,7 +51,7 @@ class StaticAnalysis
        Task.new(%w[scripts/lint-vendored-gems.sh], 10),
        Task.new(%w[yarn run check-dependencies], 1),
        Task.new(%w[scripts/gemfile_lock_changed.sh], 1),
        Task.new(%w[yarn run deps:check:all --no-cache], 60)
      ].compact.freeze

      def run_tasks!(options = {})
@ -953,28 +953,6 @@ RSpec.describe ProjectsController, feature_category: :groups_and_projects do
      expect(project.emails_disabled?).to eq(!result)
      expect(project.extended_prat_expiry_webhooks_execute?).to eq(result)
    end

    context 'when extended_expiry_webhook_execution_setting feature flag is false' do
      before do
        stub_feature_flags(extended_expiry_webhook_execution_setting: false)
      end

      it "does not update extended_expiry_webhook_execution_setting" do
        put :update, params: {
          namespace_id: project.namespace,
          id: project.path,
          project: {
            project_setting_attributes: {
              extended_prat_expiry_webhooks_execute: boolean_value
            }
          }
        }

        project.reload

        expect(project.extended_prat_expiry_webhooks_execute?).to be false
      end
    end
  end
end
@ -659,8 +659,8 @@ RSpec.describe 'Jobs', :clean_gitlab_redis_shared_state, feature_category: :grou
    context 'when deployment does not have a deployable' do
      let!(:second_deployment) { create(:deployment, :success, environment: environment, deployable: nil) }

      it 'has a href of #' do
        expect(page).to have_selector('[data-testid="job-deployment-link"][href="#"]')
      end
    end
  end
@ -1267,6 +1267,7 @@ export const mockPipelineWithoutMR = {
  path: 'pipeline/28029444',
  ref: {
    name: 'test-branch',
    path: 'test-branch',
  },
};
@ -9,12 +9,12 @@ exports[`NewCluster renders the cluster component correctly 1`] = `
  </h4>
  <p>
    Enter details about your cluster.
    <a
      class="gl-link"
      href="/help/user/project/clusters/add_existing_cluster"
    >
      How do I use a certificate to connect to my cluster?
    </a>
  </p>
</div>
`;
@ -27,7 +27,7 @@ describe('BrowserSupportAlert', () => {
    createComponent({ mountFn: mount });
    expect(findLink().attributes()).toMatchObject({
      target: '_blank',
      rel: 'noopener noreferrer',
    });
  });
});
@ -79,13 +79,11 @@ describe('OrganizationGroupsNewApp', () => {
    expect(findAllParagraphs().at(0).text()).toMatchInterpolatedText(
      'Groups allow you to manage and collaborate across multiple projects. Members of a group have access to all of its projects.',
    );
    expect(findAllLinks().at(0).props('href')).toBe(helpPagePath('user/group/index'));
    expect(findAllParagraphs().at(1).text()).toContain(
      'Groups can also be nested by creating subgroups.',
    );
    expect(findAllLinks().at(1).props('href')).toBe(helpPagePath('user/group/subgroups/index'));
  });

  it('renders form and passes correct props', () => {
@ -34,13 +34,14 @@ exports[`packages_list_app renders 1`] = `
  class="gl-mb-0 gl-mt-4 gl-text-subtle"
>
  Learn how to
  <a
    class="gl-link"
    href="/help/user/packages/terraform_module_registry/_index"
    rel="noopener noreferrer"
    target="_blank"
  >
    publish and share your packages
  </a>
  with GitLab.
</p>
<div
@ -226,7 +226,7 @@ exports[`PypiInstallation renders all the messages 1`] = `
  class="gl-link"
  data-testid="pypi-docs-link"
  href="/help/user/packages/pypi_repository/_index"
  rel="noopener noreferrer"
  target="_blank"
>
  see the documentation
@ -0,0 +1,30 @@
import { shallowMount } from '@vue/test-utils';
import DashboardHeader from '~/projects/pipelines/charts/components/dashboard_header.vue';

describe('DashboardHeader', () => {
  let wrapper;

  const createComponent = ({ ...options }) => {
    wrapper = shallowMount(DashboardHeader, { ...options });
  };

  it('shows heading', () => {
    createComponent({
      slots: {
        default: 'My Heading',
      },
    });

    expect(wrapper.find('h2').text()).toBe('My Heading');
  });

  it('shows description', () => {
    createComponent({
      slots: {
        description: '<p>My Description</p>',
      },
    });

    expect(wrapper.find('p').text()).toContain('My Description');
  });
});
@ -108,15 +108,34 @@ describe('Branch rule protection', () => {
  });

  describe('When `edit_branch_rules` FF is disabled', () => {
    it('does not render `Edit` button', () => {
      createComponent({ editBranchRules: false });

      expect(findEditButton().exists()).toBe(false);
    });

    describe('when headerLinkHref and headerLinkTitle are set', () => {
      beforeEach(() => {
        createComponent({ editBranchRules: false });
      });

      it('renders link to manage branch protections', () => {
        expect(findLink().text()).toBe(protectionPropsMock.headerLinkTitle);
        expect(findLink().attributes('href')).toBe(protectionPropsMock.headerLinkHref);
      });
    });

    describe('when headerLinkHref and headerLinkTitle are not set', () => {
      beforeEach(() => {
        createComponent(
          { editBranchRules: false },
          { headerLinkHref: null, headerLinkTitle: null },
        );
      });

      it('does not render link to manage branch protections', () => {
        expect(findLink().exists()).toBe(false);
      });
    });

    it('renders a protection row for status checks', () => {
@ -1,3 +1,4 @@
import { GlLoadingIcon } from '@gitlab/ui';
import { nextTick } from 'vue';
import { shallowMountExtended } from 'helpers/vue_test_utils_helper';
import { Mousetrap } from '~/lib/mousetrap';
@ -111,11 +112,17 @@ describe('WorkItemSidebarWidget component', () => {
  });

  describe('when updating', () => {
    beforeEach(() => {
      createComponent({ canUpdate: true, isUpdating: true });
    });

    it('renders Edit button as disabled', () => {
      expect(findEditButton().props('disabled')).toBe(true);
    });

    it('shows loading icon', () => {
      expect(wrapper.findComponent(GlLoadingIcon).exists()).toBe(true);
    });
  });
});
@ -7,5 +7,7 @@ RSpec.describe Types::EventType do
  specify { expect(described_class).to require_graphql_authorizations(:read_event) }

  specify do
    expect(described_class).to have_graphql_fields(:id, :author, :action, :project, :target, :created_at, :updated_at)
  end
end
@ -0,0 +1,48 @@
# frozen_string_literal: true

require 'spec_helper'

RSpec.describe GitlabSchema.types['ActivityStream'], feature_category: :user_profile do
  include GraphqlHelpers

  let_it_be(:current_user) { create(:user) }
  let_it_be(:project) { create(:project, :public) }

  before_all do
    project.add_developer(current_user)
  end

  specify { expect(described_class.graphql_name).to eq('ActivityStream') }

  specify { expect(described_class).to require_graphql_authorizations(:read_user_profile) }

  it 'exposes the expected fields' do
    expected_fields = %i[followed_users_activity]

    expect(described_class).to have_graphql_fields(*expected_fields)
  end

  describe "#followed_users_activity" do
    let_it_be(:followed_user) { create(:user) }
    let_it_be(:joined_project_event) { create(:event, :joined, project: project, author: followed_user) }
    let_it_be(:issue) { create(:issue, project: project) }
    let_it_be(:closed_issue_event) { create(:event, :closed, author: followed_user, project: project, target: issue) }
    let(:scope) { current_user.followees }
    let(:filter) { EventFilter.new('ALL') }
    let(:params) { { limit: 20 } }
    let(:field) { resolve_field(:followed_users_activity, current_user, ctx: { current_user: current_user }) }

    before do
      current_user.follow(followed_user)
    end

    it 'calls UserRecentEventsFinder' do
      expect_next_instance_of(UserRecentEventsFinder, current_user, scope, filter, params) do |finder|
        expect(finder).to receive(:execute).and_call_original
      end
      expect(field.items.length).to be(2)
      expect(field.items.first.action).to eq "closed"
      expect(field.items.second.action).to eq "joined"
    end
  end
end
@ -0,0 +1,12 @@
# frozen_string_literal: true

require 'spec_helper'

RSpec.describe GitlabSchema.types['EventTarget'], feature_category: :user_profile do
  specify { expect(described_class.graphql_name).to eq('EventTarget') }

  it 'exposes all the existing event target types' do
    expected = EventFilter.new('').filters.map(&:upcase) # varies between foss/ee

    expect(described_class.values.keys).to match_array(expected)
  end
end
@ -0,0 +1,40 @@
# frozen_string_literal: true

require 'spec_helper'

RSpec.describe Types::Users::EventTargetType, feature_category: :user_profile do
  it 'returns possible types' do
    expect(described_class.possible_types).to include(Types::IssueType, Types::MilestoneType,
      Types::MergeRequestType, Types::ProjectType,
      Types::SnippetType, Types::UserType, Types::Wikis::WikiPageType,
      Types::DesignManagement::DesignType, Types::Notes::NoteType)
  end

  describe '.resolve_type' do
    using RSpec::Parameterized::TableSyntax

    where(:factory, :graphql_type) do
      :issue | Types::IssueType
      :milestone | Types::MilestoneType
      :merge_request | Types::MergeRequestType
      :note | Types::Notes::NoteType
      :project | Types::ProjectType
      :project_snippet | Types::SnippetType
      :user | Types::UserType
      :wiki_page_meta | Types::Wikis::WikiPageType
      :design | Types::DesignManagement::DesignType
    end

    with_them do
      it 'correctly maps type in object to GraphQL type' do
        expect(described_class.resolve_type(build(factory), {})).to eq(graphql_type)
      end
    end

    it 'raises an error if the type is not supported' do
      expect do
        described_class.resolve_type(build(:group), {})
      end.to raise_error(RuntimeError, /Unsupported event target type/)
    end
  end
end
@ -120,11 +120,17 @@ RSpec.describe Keeps::OverdueFinalizeBackgroundMigration, feature_category: :too
    end
  end

  context 'when using multiple databases' do
    before do
      skip_if_shared_database(:ci)
    end

    context 'when schema is gitlab_ci' do
      let(:gitlab_schema) { 'gitlab_ci' }

      it 'returns the database name' do
        expect(database_name).to eq('ci')
      end
    end
  end
end
@ -6453,16 +6453,6 @@ RSpec.describe Project, factory_default: :keep, feature_category: :groups_and_pr
      it_behaves_like 'webhook is added to execution list'
    end

    context 'when feature flag is disabled' do
      let(:data) { { interval: :thirty_days } }

      before do
        stub_feature_flags(extended_expiry_webhook_execution_setting: false)
      end

      it_behaves_like 'webhook is added to execution list'
    end

    context 'when setting extended_prat_expiry_webhooks_execute is disabled' do
      before do
        project.update!(extended_prat_expiry_webhooks_execute: false)
@@ -0,0 +1,123 @@
# frozen_string_literal: true

require 'spec_helper'

RSpec.describe 'ActivityStream GraphQL Query', feature_category: :user_profile do
  include GraphqlHelpers

  let_it_be(:user) { create(:user) }
  let_it_be(:followed_user_1) { create(:user) }
  let_it_be(:followed_user_2) { create(:user) }
  let_it_be(:project) { create(:project, :public) }
  let(:graphql_response) { post_graphql(query, current_user: user) }
  let(:activity_stream) { graphql_data_at(:current_user, :activity, :followed_users_activity, :nodes) }
  let(:query) do
    <<~GRAPHQL
      query UserActivity {
        currentUser {
          activity {
            followedUsersActivity {
              nodes {
                author {
                  name
                }
                action
                project {
                  name
                }
                target {
                  ... on Design {
                    id
                  }
                  ... on Issue {
                    title
                  }
                  ... on Note {
                    id
                  }
                  ... on MergeRequest {
                    title
                  }
                  ... on Milestone {
                    title
                  }
                  ... on Project {
                    fullPath
                  }
                  ... on Snippet {
                    title
                  }
                  ... on UserCore {
                    username
                  }
                  ... on WikiPage {
                    title
                  }
                }
              }
            }
          }
        }
      }
    GRAPHQL
  end

  before do
    user.follow(followed_user_1)
    user.follow(followed_user_2)
  end

  context 'when there are no events in the activity stream' do
    it 'returns empty nodes array' do
      graphql_response

      expect(activity_stream).to eq([])
    end
  end

  context 'when there are events in the activity stream' do
    let_it_be(:joined_project_event) { create(:event, :joined, project: project, author: followed_user_1) }
    let_it_be(:issue) { create(:issue, project: project) }
    let_it_be(:closed_issue_event) { create(:event, :closed, author: followed_user_1, project: project, target: issue) }
    let_it_be(:left_event) { create(:event, :left, author: followed_user_2, target: project) }

    it 'returns followed user\'s activity' do
      graphql_response

      expect(activity_stream).to eq(
        [
          {
            "action" => "LEFT",
            "author" => { "name" => followed_user_2.name },
            "project" => nil,
            "target" => { "fullPath" => project.full_path }
          },
          {
            "action" => "CLOSED",
            "author" => { "name" => followed_user_1.name },
            "project" => { "name" => project.name },
            "target" => { "title" => issue.title }
          },
          {
            "action" => "JOINED",
            "author" => { "name" => followed_user_1.name },
            "project" => { "name" => project.name },
            "target" => nil
          }
        ]
      )
    end
  end

  context 'when the activity_stream_graphql feature flag is disabled' do
    before do
      stub_feature_flags(activity_stream_graphql: false)
    end

    it 'returns `nil`' do
      graphql_response

      expect(activity_stream).to be_nil
    end
  end
end
@@ -3316,7 +3316,7 @@ RSpec.describe QuickActions::InterpretService, feature_category: :text_editors d
      let_it_be(:parent_ref) { parent.to_reference(project) }

      context 'on a work item' do
        context 'when the parent reference is valid' do
        context 'with a valid parent reference' do
          let(:content) { "/set_parent #{parent_ref}" }

          it 'returns success message' do
@@ -3342,11 +3342,54 @@ RSpec.describe QuickActions::InterpretService, feature_category: :text_editors d
            expect(updates).to be_empty
            expect(message).to eq("This parent does not exist or you don't have sufficient permission.")
            expect(task_work_item.reload.work_item_parent).to be_nil
          end
        end

        context 'when the parent is already set to the same work item' do
          let_it_be(:task_work_item_with_parent) do
            create(:work_item, :task, project: project, work_item_parent: parent)
          end

          it 'does not assign the parent and returns an appropriate error' do
            _, updates, message = service.execute(content, task_work_item_with_parent)

            expect(updates).to be_empty
            expect(message).to eq("Work item #{task_work_item_with_parent.to_reference} has already been added to " \
              "parent #{parent.to_reference}.")
            expect(task_work_item_with_parent.reload.work_item_parent).to eq parent
          end
        end

        context 'when the child is not confidential but the parent is confidential' do
          let_it_be(:confidential_parent) { create(:work_item, :issue, :confidential, project: project) }
          let(:content) { "/set_parent #{confidential_parent.to_reference(project)}" }

          it 'does not assign the parent and returns an appropriate error' do
            _, updates, message = service.execute(content, task_work_item)

            expect(updates).to be_empty
            expect(message).to eq("Cannot assign a confidential parent to a non-confidential work item. Make the " \
              "work item confidential and try again")
            expect(task_work_item.reload.work_item_parent).to be_nil
          end
        end

        context 'when the child and parent are incompatible types' do
          let(:other_task_work_item) { create(:work_item, :task, project: project) }
          let(:content) { "/set_parent #{other_task_work_item.to_reference(project)}" }

          it 'does not assign the parent and returns an appropriate error' do
            _, updates, message = service.execute(content, task_work_item)

            expect(updates).to be_empty
            expect(message).to eq("Cannot assign this work item type to parent type")
            expect(task_work_item.reload.work_item_parent).to be_nil
          end
        end
      end

      context 'when the parent reference is invalid' do
      context 'with an invalid parent reference' do
        let(:content) { "/set_parent not_a_valid_parent" }

        it 'does not assign the parent and returns an appropriate error' do
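Condensing the failure modes above into one hedged sketch of the validation order they exercise; this illustrates the messages only, not the service's actual control flow.

# Hypothetical helper; the real quick-action code path is more involved, and
# the type-compatibility check here is a simplified stand-in.
def set_parent_error(child, parent)
  if child.work_item_parent == parent
    "Work item #{child.to_reference} has already been added to parent #{parent.to_reference}."
  elsif parent.confidential? && !child.confidential?
    'Cannot assign a confidential parent to a non-confidential work item. ' \
      'Make the work item confidential and try again'
  elsif child.work_item_type == parent.work_item_type # e.g. task under task; illustrative check
    'Cannot assign this work item type to parent type'
  end
end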
@@ -51,7 +51,7 @@ RSpec.describe Users::AutoBanService, feature_category: :instance_resiliency do
        response = execute

        expect(response[:status]).to eq(:error)
        expect(response[:message]).to match('State cannot transition via "ban"')
        expect(response[:message]).to match('You cannot ban blocked users.')
      end

      it 'does not modify the BannedUser record or user state' do
@@ -76,7 +76,7 @@ RSpec.describe Users::AutoBanService, feature_category: :instance_resiliency do
      end

      it 'raises an error and does not ban the user', :aggregate_failures do
        expect { execute! }.to raise_error(StateMachines::InvalidTransition)
        expect { execute! }.to raise_error(described_class::Error)
          .and not_change { Users::BannedUser.count }
          .and not_change { user.state }
      end
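Taken together, the two assertion changes suggest the service now raises a domain error instead of leaking StateMachines::InvalidTransition. A guess at the shape, with all internals assumed from the specs alone:

# Hypothetical sketch; the Error class and blocked-user guard are inferred
# from the updated expectations, and ban_user! stands in for the real work.
module Users
  class AutoBanService
    Error = Class.new(StandardError)

    def execute
      execute!
      { status: :success }
    rescue Error => e
      { status: :error, message: e.message }
    end

    def execute!
      raise Error, 'You cannot ban blocked users.' if user.blocked?

      ban_user! # assumed to perform the state transition and BannedUser insert
    end
  end
end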
@@ -1,6 +1,10 @@
# frozen_string_literal: true

RSpec.shared_examples 'sets work item parent' do
  after do
    noteable.reload
  end

  it 'leaves the note empty' do
    expect(execute(note)).to be_empty
  end
@@ -5,9 +5,9 @@ require 'spec_helper'
RSpec.describe Pages::DeactivateExpiredDeploymentsCronWorker, feature_category: :pages do
  subject(:worker) { described_class.new }

  let(:expired_pages_deployment) { create(:pages_deployment, expires_at: 3.minutes.ago) }
  let(:not_yet_expired_pages_deployment) { create(:pages_deployment, expires_at: 1.hour.from_now) }
  let(:never_expire_pages_deployment) { create(:pages_deployment, expires_at: nil) }
  let!(:expired_pages_deployment) { create(:pages_deployment, expires_at: 3.minutes.ago) }
  let!(:not_yet_expired_pages_deployment) { create(:pages_deployment, expires_at: 1.hour.from_now) }
  let!(:never_expire_pages_deployment) { create(:pages_deployment, expires_at: nil) }

  it 'deactivates all expired pages deployments' do
    expect { worker.perform }
@@ -15,4 +15,34 @@ RSpec.describe Pages::DeactivateExpiredDeploymentsCronWorker, feature_category:
      .and not_change { not_yet_expired_pages_deployment.reload.active? }
      .and not_change { never_expire_pages_deployment.reload.active? }
  end

  it 'logs extra metadata on done' do
    expect(worker).to receive(:log_extra_metadata_on_done).with(:deactivate_expired_pages_deployments, {
      deactivated_deployments: 1,
      duration: be > 0
    })

    worker.perform
  end

  it 'uses the expected values for batching and limiting' do
    expect(Pages::DeactivateExpiredDeploymentsCronWorker::MAX_NUM_DELETIONS).to be(10000)
    expect(Pages::DeactivateExpiredDeploymentsCronWorker::BATCH_SIZE).to be(1000)
  end

  describe 'batching and limiting' do
    before do
      stub_const('Pages::DeactivateExpiredDeploymentsCronWorker::MAX_NUM_DELETIONS', 9)
      stub_const('Pages::DeactivateExpiredDeploymentsCronWorker::BATCH_SIZE', 5)

      11.times do # we already have 1 deployment from the outer scope
        create(:pages_deployment, expires_at: 3.minutes.ago)
      end
    end

    it 'processes a maximum number of deletions, but will complete the last batch of deletions' do
      expect { worker.perform }
        .to change { PagesDeployment.active.expired.count }.from(12).to(2)
    end
  end
end
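Putting the new assertions together, a sketch of the perform loop they describe: iterate active expired deployments in BATCH_SIZE batches and stop once MAX_NUM_DELETIONS is reached, but finish the in-flight batch — hence 12 drops to 2 with a limit of 9 and batches of 5. The deactivation call and timing helper are assumptions.

# Hypothetical perform; internals inferred from the specs above.
def perform
  started_at = ::Gitlab::Metrics::System.monotonic_time
  deactivated = 0

  PagesDeployment.active.expired.each_batch(of: BATCH_SIZE) do |batch|
    deactivated += batch.count
    batch.each(&:deactivate) # assumed per-record deactivation API
    break if deactivated >= MAX_NUM_DELETIONS # checked between batches
  end

  log_extra_metadata_on_done(:deactivate_expired_pages_deployments, {
    deactivated_deployments: deactivated,
    duration: ::Gitlab::Metrics::System.monotonic_time - started_at
  })
end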