Add latest changes from gitlab-org/gitlab@master

This commit is contained in:
GitLab Bot 2024-01-26 18:10:25 +00:00
parent 20d3e87b4f
commit 77e516bc6a
49 changed files with 8932 additions and 185 deletions

View File

@ -80,7 +80,6 @@ Layout/SpaceInsideParens:
- 'ee/spec/support/shared_examples/models/concerns/blob_replicator_strategy_shared_examples.rb'
- 'ee/spec/support/shared_examples/services/geo/geo_request_service_shared_examples.rb'
- 'ee/spec/workers/elastic/migration_worker_spec.rb'
- 'ee/spec/workers/security/auto_fix_worker_spec.rb'
- 'qa/qa/page/group/settings/group_deploy_tokens.rb'
- 'qa/qa/specs/features/ee/browser_ui/10_govern/scan_result_policy_vulnerabilities_spec.rb'
- 'qa/qa/tools/delete_subgroups.rb'

View File

@ -58,7 +58,6 @@ RSpec/AnyInstanceOf:
- 'ee/spec/workers/geo/registry_sync_worker_spec.rb'
- 'ee/spec/workers/project_cache_worker_spec.rb'
- 'ee/spec/workers/repository_import_worker_spec.rb'
- 'ee/spec/workers/security/auto_fix_worker_spec.rb'
- 'ee/spec/workers/vulnerability_exports/export_deletion_worker_spec.rb'
- 'spec/controllers/admin/sessions_controller_spec.rb'
- 'spec/controllers/application_controller_spec.rb'

View File

@ -702,7 +702,6 @@ RSpec/NamedSubject:
- 'ee/spec/models/vulnerabilities/finding_spec.rb'
- 'ee/spec/models/vulnerabilities/state_transition_spec.rb'
- 'ee/spec/models/weight_note_spec.rb'
- 'ee/spec/models/zoekt/indexed_namespace_spec.rb'
- 'ee/spec/policies/epic_policy_spec.rb'
- 'ee/spec/policies/group_policy_spec.rb'
- 'ee/spec/policies/merge_request_policy_spec.rb'

View File

@ -38,7 +38,6 @@ Search/NamespacedClass:
- 'ee/app/models/elastic/reindexing_task.rb'
- 'ee/app/models/elasticsearch_indexed_namespace.rb'
- 'ee/app/models/elasticsearch_indexed_project.rb'
- 'ee/app/models/zoekt/indexed_namespace.rb'
- 'ee/app/presenters/ee/search_service_presenter.rb'
- 'ee/app/services/ee/search_service.rb'
- 'ee/app/services/elastic/bookkeeping_shard_service.rb'

2
DEI.md
View File

@ -8,6 +8,8 @@ The DEI.md file was originally created in the CHAOSS project. This comment provi
Please use the DEI.md Guide at https://github.com/badging/ProjectBadging/blob/main/Guide.DEI.md when creating your DEI.md file
-->
![CHAOSS DEI Bronze Badge](https://images.ctfassets.net/xz1dnu24egyd/5qxlqiIMLUYwuinHpFm67P/edc10e44c37235cef20c0f910a947669/dei-bronze-badge.svg)
## GitLab
The scope of this DEI.md file is intended to cover the entire organization of GitLab. Any specific differences for a particular GitLab project may be noted within this document.

View File

@ -0,0 +1,167 @@
<script>
import {
  GlBadge,
  GlIcon,
  GlLink,
  GlSkeletonLoader,
  GlSprintf,
  GlTooltipDirective as GlTooltip,
} from '@gitlab/ui';
import ClipboardButton from '~/vue_shared/components/clipboard_button.vue';
import TimeAgoTooltip from '~/vue_shared/components/time_ago_tooltip.vue';
import { s__, __ } from '~/locale';
import DeploymentStatusLink from '~/environments/environment_details/components/deployment_status_link.vue';
import DeploymentCommit from '~/environments/components/commit.vue';
import { FINISHED_STATUSES } from '../utils';

/**
 * Header for the deployment details page.
 *
 * Shows the deployment status, an approval badge, a link to the environment,
 * the commit short SHA with a copy button, who triggered the deployment and
 * when — or a skeleton loader while data is being fetched.
 */
export default {
  components: {
    GlBadge,
    GlIcon,
    GlLink,
    GlSkeletonLoader,
    GlSprintf,
    ClipboardButton,
    TimeAgoTooltip,
    DeploymentStatusLink,
    DeploymentCommit,
  },
  directives: {
    GlTooltip,
  },
  props: {
    // GraphQL Deployment object; may be `{}` while the parent query resolves,
    // so every computed below must tolerate missing fields.
    deployment: {
      required: true,
      type: Object,
    },
    // GraphQL Environment object; `name` and `path` are read below.
    environment: {
      required: true,
      type: Object,
    },
    // When true, render the skeleton loader instead of the header content.
    loading: {
      required: false,
      default: false,
      type: Boolean,
    },
  },
  computed: {
    iid() {
      return this.deployment.iid;
    },
    status() {
      return this.deployment.status?.toLowerCase() ?? '';
    },
    job() {
      return this.deployment.job;
    },
    // The approval badge is only relevant while the deployment can still run.
    needsApproval() {
      return (
        !this.isFinished(this.deployment.status) &&
        this.deployment.approvalSummary?.status === 'PENDING_APPROVAL'
      );
    },
    environmentName() {
      return this.environment.name;
    },
    environmentPath() {
      return this.environment.path;
    },
    commit() {
      return this.deployment.commit || {};
    },
    commitPath() {
      // Use optional chaining: `commit` is absent while the deployment query
      // is still resolving (same guard as `commit`/`shortSha` above); the
      // bare `this.deployment.commit.webUrl` would throw in that window.
      return this.deployment.commit?.webUrl || '';
    },
    shortSha() {
      return this.deployment.commit?.shortId;
    },
    createdAt() {
      return this.deployment.createdAt || '';
    },
    finishedAt() {
      return this.deployment.finishedAt || '';
    },
    triggerer() {
      return this.deployment.triggerer;
    },
    triggererUsername() {
      return this.triggerer?.username;
    },
    triggererUrl() {
      return this.triggerer?.webUrl;
    },
    timeagoText() {
      return this.isFinished(this.deployment.status)
        ? this.$options.i18n.finishedTimeagoText
        : this.$options.i18n.startedTimeagoText;
    },
    timeagoTime() {
      // Finished deployments show when they finished; in-flight ones show start.
      return this.isFinished(this.deployment.status) ? this.finishedAt : this.createdAt;
    },
  },
  methods: {
    isFinished(status) {
      return FINISHED_STATUSES.includes(status);
    },
  },
  i18n: {
    // `commitSha` is referenced as the commit link's tooltip title in the
    // template below; it was previously missing from this map, leaving the
    // tooltip title undefined.
    commitSha: __('Commit SHA'),
    copyButton: __('Copy commit SHA'),
    needsApproval: s__('Deployment|Needs Approval'),
    startedTimeagoText: s__('Deployment|Started %{timeago} by %{username}'),
    finishedTimeagoText: s__('Deployment|Finished %{timeago} by %{username}'),
  },
};
</script>
<template>
  <div v-if="loading" class="gl-mt-4">
    <gl-skeleton-loader class="gl-mt-3" :height="20" viewbox="0 0 400 20">
      <rect width="26" height="8" rx="4" />
      <rect width="26" x="28" height="8" rx="4" />
      <rect width="36" x="56" height="8" rx="4" />
      <rect width="82" x="94" height="8" rx="4" />
      <rect width="176" y="10" height="8" rx="4" />
    </gl-skeleton-loader>
  </div>
  <div v-else>
    <div class="gl-display-flex gl-gap-3 gl-mb-3 gl-align-items-center">
      <deployment-status-link :status="status" :deployment-job="job" />
      <gl-badge v-if="needsApproval" variant="warning">
        {{ $options.i18n.needsApproval }}
      </gl-badge>
      <div class="gl-display-flex gl-align-items-center">
        <gl-icon name="environment" class="gl-mr-2" />
        <gl-link :href="environmentPath">{{ environmentName }}</gl-link>
      </div>
      <div v-if="shortSha" class="gl-font-monospace gl-display-flex gl-align-items-center">
        <gl-icon ref="deployment-commit-icon" name="commit" class="gl-mr-2" />
        <gl-link v-gl-tooltip :title="$options.i18n.commitSha" :href="commitPath">
          {{ shortSha }}
        </gl-link>
        <clipboard-button
          :text="shortSha"
          category="tertiary"
          :title="$options.i18n.copyButton"
          size="small"
        />
      </div>
      <time-ago-tooltip
        v-if="timeagoTime"
        :time="timeagoTime"
        class="gl-display-flex gl-align-items-center"
      >
        <template #default="{ timeAgo }">
          <gl-icon name="calendar" class="gl-mr-2" />
          <span class="gl-mr-2 gl-white-space-nowrap">
            <gl-sprintf :message="timeagoText">
              <template #timeago>{{ timeAgo }}</template>
              <template #username>
                <gl-link :href="triggererUrl">@{{ triggererUsername }}</gl-link>
              </template>
            </gl-sprintf>
          </span>
        </template>
      </time-ago-tooltip>
    </div>
    <deployment-commit :commit="commit" />
  </div>
</template>

View File

@ -0,0 +1,75 @@
<script>
import { GlAlert, GlSprintf } from '@gitlab/ui';
import { captureException } from '~/sentry/sentry_browser_wrapper';
import { s__ } from '~/locale';
import deploymentQuery from '../graphql/queries/deployment.query.graphql';
import environmentQuery from '../graphql/queries/environment.query.graphql';
import DeploymentHeader from './deployment_header.vue';
// Top-level component for the deployment details page. Runs two GraphQL
// queries (the deployment by iid, and its environment by name) and renders
// the header — or a danger alert when either query fails.
export default {
  components: {
    GlAlert,
    GlSprintf,
    DeploymentHeader,
  },
  // Provided by the page entry point (read from the mount element's dataset).
  inject: ['projectPath', 'deploymentIid', 'environmentName'],
  apollo: {
    deployment: {
      query: deploymentQuery,
      variables() {
        return { fullPath: this.projectPath, iid: this.deploymentIid };
      },
      update(data) {
        return data?.project?.deployment;
      },
      error(error) {
        // Report to Sentry; show the user a generic, translated message.
        captureException(error);
        this.errorMessage = this.$options.i18n.errorMessage;
      },
    },
    environment: {
      query: environmentQuery,
      variables() {
        return { fullPath: this.projectPath, name: this.environmentName };
      },
      update(data) {
        return data?.project?.environment;
      },
      error(error) {
        // Both queries share one error message slot; last failure wins.
        captureException(error);
        this.errorMessage = this.$options.i18n.errorMessage;
      },
    },
  },
  data() {
    // Empty objects keep the header renderable before the queries resolve.
    return { deployment: {}, environment: {}, errorMessage: '' };
  },
  computed: {
    hasError() {
      return Boolean(this.errorMessage);
    },
  },
  i18n: {
    header: s__('Deployment|Deployment #%{iid}'),
    errorMessage: s__(
      'Deployment|There was an issue fetching the deployment, please try again later.',
    ),
  },
};
</script>
<template>
  <div>
    <h1 class="page-title gl-font-size-h-display">
      <gl-sprintf :message="$options.i18n.header">
        <template #iid>{{ deploymentIid }}</template>
      </gl-sprintf>
    </h1>
    <gl-alert v-if="hasError" variant="danger">{{ errorMessage }}</gl-alert>
    <deployment-header
      v-else
      :deployment="deployment"
      :environment="environment"
      :loading="$apollo.queries.deployment.loading"
    />
  </div>
</template>

View File

@ -0,0 +1,3 @@
# CE stub of the ApprovalSummary fragment (resolved via the `ee_else_ce`
# import in deployment.query.graphql). Selects only `iid`; presumably the
# EE counterpart adds deployment-approval fields — verify against the EE file.
fragment ApprovalSummary on Deployment {
  iid
}

View File

@ -0,0 +1,45 @@
#import "~/graphql_shared/fragments/user.fragment.graphql"
#import "~/environments/graphql/fragments/deployment_job.fragment.graphql"
#import "ee_else_ce/deployments/graphql/fragments/approval_summary.fragment.graphql"
# Fetches one deployment (by project-relative iid) with everything the
# deployment details page renders: status, job/pipeline, commit, triggerer
# and timestamps. ApprovalSummary is EE-or-CE depending on the distribution.
query fetchDeployment($fullPath: ID!, $iid: ID!) {
  project(fullPath: $fullPath) {
    id
    deployment(iid: $iid) {
      id
      ...ApprovalSummary
      status
      ref
      tag
      job {
        ...DeploymentJob
        # Manual, non-retried jobs in the same pipeline (e.g. actions the
        # page may surface alongside the deployment job).
        deploymentPipeline: pipeline {
          id
          jobs(whenExecuted: ["manual"], retried: false) {
            nodes {
              ...DeploymentJob
              scheduledAt
            }
          }
        }
      }
      commit {
        id
        shortId
        message
        webUrl
        authorGravatar
        authorName
        authorEmail
        author {
          ...User
        }
      }
      triggerer {
        ...User
      }
      createdAt
      finishedAt
    }
  }
}

View File

@ -0,0 +1,10 @@
# Looks up an environment by name within a project, returning only the
# minimal fields (id, name, path) the deployment details header links to.
query fetchEnvironment($fullPath: ID!, $name: String!) {
  project(fullPath: $fullPath) {
    id
    environment(name: $name) {
      id
      name
      path
    }
  }
}

View File

@ -0,0 +1,31 @@
import Vue from 'vue';
import VueApollo from 'vue-apollo';
import createDefaultClient from '~/lib/graphql';
import ShowDeployment from './components/show_deployment.vue';

Vue.use(VueApollo);

/**
 * Mounts the deployment details app on the element with the given DOM id.
 *
 * Reads `projectPath`, `deploymentIid` and `environmentName` from the mount
 * element's data attributes and provides them to the component tree.
 *
 * @param {string} selector - id of the mount element.
 * @returns {Vue|null} the mounted Vue instance, or null when the element is absent.
 */
export const initializeShowDeployment = (selector = 'js-deployment-details') => {
  const mountEl = document.getElementById(selector);
  if (!mountEl) {
    return null;
  }

  const { projectPath, deploymentIid, environmentName } = mountEl.dataset;

  return new Vue({
    el: mountEl,
    apolloProvider: new VueApollo({ defaultClient: createDefaultClient() }),
    provide: {
      projectPath,
      deploymentIid,
      environmentName,
    },
    render: (createElement) => createElement(ShowDeployment),
  });
};

View File

@ -0,0 +1,3 @@
// All deployment status values as returned by the GraphQL API (uppercase).
export const STATUSES = ['RUNNING', 'SUCCESS', 'FAILED', 'CANCELED', 'BLOCKED'];
// Terminal states: the deployment has ended and its status will not change.
export const FINISHED_STATUSES = ['SUCCESS', 'FAILED', 'CANCELED'];
// Non-terminal states: the deployment is still running or awaiting action.
export const UPCOMING_STATUSES = ['RUNNING', 'BLOCKED'];

View File

@ -33,7 +33,7 @@ export default (
type: 'string',
param: '',
symbol: '',
icon: 'admin',
icon: 'pencil-square',
tag: __('Yes or No'),
lowercaseValueOnSubmit: true,
capitalizeTokenValue: true,
@ -198,7 +198,7 @@ export default (
type: 'string',
param: '',
symbol: '',
icon: 'cloud-gear',
icon: 'environment',
tag: 'environment',
};

View File

@ -23,6 +23,9 @@ export default {
modalBodyStableVersions: s__(
'VersionCheck|You are currently on version %{currentVersion}! We strongly recommend upgrading your GitLab installation to one of the following versions immediately: %{latestStableVersions}.',
),
additionalAvailablePatch: s__(
'VersionCheck|Additionally, there is an available stable patch for your current GitLab minor version: %{latestStableVersionOfMinor}',
),
modalDetails: s__('VersionCheck|%{details}'),
learnMore: s__('VersionCheck|Learn more about this critical security release.'),
primaryButtonText: s__('VersionCheck|Upgrade now'),
@ -53,6 +56,11 @@ export default {
required: false,
default: () => [],
},
latestStableVersionOfMinor: {
type: String,
required: false,
default: '',
},
},
data() {
return {
@ -76,6 +84,12 @@ export default {
latestStableVersionsStrings() {
return this.latestStableVersions?.length > 0 ? this.latestStableVersions.join(', ') : '';
},
showLatestStableVersionOfMinor() {
return (
this.latestStableVersionOfMinor &&
!this.latestStableVersionsStrings.includes(this.latestStableVersionOfMinor)
);
},
},
created() {
if (getHideAlertModalCookie(this.currentVersion)) {
@ -136,6 +150,13 @@ export default {
<span class="gl-font-weight-bold">{{ latestStableVersionsStrings }}</span>
</template>
</gl-sprintf>
<div v-if="showLatestStableVersionOfMinor" class="gl-mt-6">
<gl-sprintf :message="$options.i18n.additionalAvailablePatch">
<template #latestStableVersionOfMinor>
<span class="gl-font-weight-bold">{{ latestStableVersionOfMinor }}</span>
</template>
</gl-sprintf>
</div>
</div>
<div v-if="details" data-testid="alert-modal-details" class="gl-mb-6">
{{ modalDetails }}

View File

@ -36,7 +36,11 @@ const mountSecurityPatchUpgradeAlertModal = (el) => {
const { currentVersion, version } = el.dataset;
try {
const { details, latestStableVersions } = convertObjectPropsToCamelCase(JSON.parse(version));
const {
details,
latestStableVersions,
latestStableVersionOfMinor,
} = convertObjectPropsToCamelCase(JSON.parse(version));
return new Vue({
el,
@ -46,6 +50,7 @@ const mountSecurityPatchUpgradeAlertModal = (el) => {
currentVersion,
details,
latestStableVersions,
latestStableVersionOfMinor,
},
});
},

View File

@ -0,0 +1,3 @@
// Page entry point: mounts the deployment details app onto the
// #js-deployment-details element rendered by the HAML show view.
import { initializeShowDeployment } from '~/deployments/show';
initializeShowDeployment();

View File

@ -7,7 +7,6 @@ module Projects
before_action :check_oauth_state, only: :slack_auth
before_action :authorize_admin_project!
before_action :slack_integration, only: [:edit, :update]
before_action :service, only: [:destroy, :edit, :update]
layout 'project_settings'
@ -66,10 +65,6 @@ module Projects
@slack_integration ||= project.gitlab_slack_application_integration.slack_integration
end
def service
@service = project.gitlab_slack_application_integration
end
def slack_integration_params
params.require(:slack_integration).permit(:alias)
end

View File

@ -28,8 +28,10 @@ module Packages
::Packages::CreatePackageFileService.new(created_package, file_params).execute
created_package
ServiceResponse.success(payload: { package: created_package })
end
rescue ActiveRecord::RecordInvalid => e
ServiceResponse.error(message: e.message, reason: :invalid_parameter)
end
private

View File

@ -22,15 +22,15 @@
.admin-dashboard.gl-mt-3
.h3.gl-mb-5.gl-mt-0= _('Instance overview')
.row
- component_params = { body_options: { class: 'gl-display-flex gl-justify-content-space-between gl-align-items-baseline' } }
- component_params = { body_options: { class: 'gl-display-flex gl-justify-content-space-between gl-align-items-flex-start' } }
.col-md-4.gl-mb-6
= render Pajamas::CardComponent.new(**component_params) do |c|
- c.with_body do
%span
.gl-display-flex.gl-align-items-center
= sprite_icon('project', size: 16, css_class: 'gl-text-gray-700')
%h3.gl-heading-2.gl-ml-3{ class: 'gl-mb-0!' }= approximate_count_with_delimiters(@counts, Project)
.gl-heading-4{ class: 'gl-mb-0!' }= s_('AdminArea|Projects')
%div
.gl-text-gray-700
= sprite_icon('project', size: 16)
= s_('AdminArea|Projects')
%h3.gl-heading-2{ class: 'gl-mb-0!' }= approximate_count_with_delimiters(@counts, Project)
= render Pajamas::ButtonComponent.new(href: new_project_path) do
= s_('AdminArea|New project')
- c.with_footer do
@ -39,10 +39,12 @@
.col-md-4.gl-mb-6
= render Pajamas::CardComponent.new(**component_params) do |c|
- c.with_body do
%span
%div
.gl-text-gray-700
= sprite_icon('users', size: 16)
= s_('AdminArea|Users')
.gl-display-flex.gl-align-items-center
= sprite_icon('users', size: 16, css_class: 'gl-text-gray-700')
%h3.gl-heading-2.gl-ml-3{ class: 'gl-mb-0!' }= approximate_count_with_delimiters(@counts, User)
%h3.gl-heading-2{ class: 'gl-mb-0!' }= approximate_count_with_delimiters(@counts, User)
%span.gl-outline-0.gl-ml-3{ tabindex: "0", data: { container: "body",
toggle: "popover",
placement: "top",
@ -51,8 +53,6 @@
content: s_("AdminArea|All users created in the instance, including users who are not %{billable_users_link_start}billable users%{billable_users_link_end}.").html_safe % { billable_users_link_start: billable_users_link_start, billable_users_link_end: '</a>'.html_safe },
} }
= sprite_icon('question-o', size: 16, css_class: 'gl-text-blue-600')
.gl-heading-4{ class: 'gl-mb-0!' }
= s_('AdminArea|Users')
= render Pajamas::ButtonComponent.new(href: new_admin_user_path) do
= s_('AdminArea|New user')
- c.with_footer do
@ -63,11 +63,12 @@
.col-md-4.gl-mb-6
= render Pajamas::CardComponent.new(**component_params) do |c|
- c.with_body do
%span
%div
.gl-text-gray-700
= sprite_icon('group', size: 16)
= s_('AdminArea|Groups')
.gl-display-flex.gl-align-items-center
= sprite_icon('group', size: 16, css_class: 'gl-text-gray-700')
%h3.gl-heading-2.gl-ml-3{ class: 'gl-mb-0!' }= approximate_count_with_delimiters(@counts, Group)
.gl-heading-4{ class: 'gl-mb-0!' }= s_('AdminArea|Groups')
%h3.gl-heading-2{ class: 'gl-mb-0!' }= approximate_count_with_delimiters(@counts, Group)
= render Pajamas::ButtonComponent.new(href: new_admin_group_path) do
= s_('AdminArea|New group')
- c.with_footer do

View File

@ -2,3 +2,7 @@
- add_to_breadcrumbs @environment.name, project_environment_path(@project, @environment)
- breadcrumb_title _("Deployment #%{iid}") % { iid: @deployment.iid }
- page_title _("Deployment #%{iid}") % { iid: @deployment.iid }
#js-deployment-details{ data: { project_path: @project.full_path,
environment_name: @environment.name,
deployment_iid: @deployment.iid } }

View File

@ -17,4 +17,4 @@
.footer-block.row-content-block
= form.submit _('Save changes'), pajamas_button: true
&nbsp;
= link_button_to _('Cancel'), edit_project_settings_integration_path(@project, @service)
= link_button_to _('Cancel'), edit_project_settings_integration_path(@project, @slack_integration.integration)

View File

@ -1,8 +0,0 @@
---
name: remove_request_stats_for_tracing
introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/133951
rollout_issue_url: https://gitlab.com/gitlab-org/gitlab/-/issues/428041
milestone: '16.5'
type: development
group: group::code review
default_enabled: false

View File

@ -0,0 +1,468 @@
---
owning-stage: "~devops::platforms"
group: Delivery
description: 'Cells: Application Deployment'
creation-date: "2024-01-09"
authors: [ "@nolith", "@skarbek" ]
coach:
approvers: []
---
Disclaimer: This blueprint requires more cross-functional alignment - [Confidence Level] --> Low
# Application Deployment with a Cellular Architecture
This blueprint describes a deployment strategy that can support the new scaling dimension introduced by the Cell Architecture.
The complexity of this transition will demand participation from many teams in the Platforms section to take ownership of the features necessary to reach the production grade rating on this architecture.
## Introduction
### Preamble
From a high-level perspective, a Cell Cluster is a system made of only 3 items:
1. **Router** - An HA routing system deployed independently from the GitLab application.
1. **Primary Cell** - The GitLab installation that is the leader for all the cluster wide data and services. This will be the legacy GitLab.com deployment.
1. Zero or more **Secondary Cells** - GitLab installations authoritative for a limited number of Organizations. Those Cells are deployed using GitLab Dedicated tools.
```plantuml
@startuml
actor User
cloud router {
component Router as R
}
component "Primary Cell" as Primary
collections "Secondary Cells" as Secondary
User ==> R
R ==> Primary
R ==> Secondary
Primary --> Secondary : "DB selective replica"
Secondary --> Primary : "internal API"
@enduml
```
As we can see from the diagram, users interact with the system through the router only. Cells communicate with the Primary Cell using internal API calls and have a local copy of all the database rows necessary to operate.
It is important to note that even if a Secondary Cell supports GitLab Geo out of the box, we will not be able to provide this feature to our users until the Router supports it.
### Key Terms
- Deployment - The GitLab application and its components being installed into infrastructure
- `auto-deploy` version - The active version that creates a package viable for deployment
- ring - A logical partition of the cell cluster. In order to deploy to the next ring a package must be validated inside the current ring
- `perimeter` - the ring marking the "definition of done" for Release Managers, a package validated inside the perimeter is allowed to rollout in the rest of the fleet
- `graduated` version - The version deemed safe to deploy to cells outside of the perimeter
- `.com` - refers to our old existing or currently running infrastructure
- Primary Cell - The GitLab installation that is the leader for all the cluster wide data and services. Initially this will be the legacy GitLab.com deployment. This implicitly includes .com as our legacy infrastructure.
- Secondary Cell(s) - GitLab installation(s) authoritative for a limited number of Organizations. Cell(s) are deployed using GitLab Dedicated tools.
### Ring deployment
The scale of the Cell project deployment together with the strong user partitioning maps well with a [ring deployment](https://configcat.com/ring-deployment/) approach.
```plantuml
@startuml
skinparam frame {
borderColor<<perimeter>> red
}
left to right direction
frame "Ring 3" as r3 {
component "Cell4" as c4
component "Cell5" as c5
component "Cell6" as c6
component "Cell7" as c7
component "Cell8" as c8
component "Cell9" as c9
frame "Ring 2" as r2 {
component "Cell1" as c1
component "Cell2" as c2
component "Cell3" as c3
frame "Ring 1" <<perimeter>> as r1 {
frame "Ring 0" as r0 {
component "Canary stage" <<legacy>> as cny
component "QA Cell" as QA
note as ring0_note
Ring 0 goes in parallel with canary
QA tests executed on **canary and QA Cell**
end note
}
component "Main stage\nPrimary Cell" <<legacy>> as Primary
note as perimeter_note
The perimeter marks the definition of done for an auto_deploy package.
When post-deployment migrations are executed inside the perimeter,
the package is ready to be pulled by the outer rings
**outside of the release managers coordinator pipeline**
end note
}
note as baking_areas
A package cannot rollout to the next ring before it is successfully
installed inside the current ring.
end note
}
}
@enduml
```
In the image above we are showing a possible ring layout with a cluster made of the Primary Cell and 10 Secondary cells, the upper bound of the Cell 1.0 milestone.
The general rule is that:
1. The deployment process progresses from Ring 0 to the outer rings
1. Rings are a collection of Cells sharing the same risk factor associated to a deployment.
1. Deployments can get halted at any stage and the package will not reach the outer rings.
1. We define the "perimeter" ring that marks the "definition of done" for the Release Managers.
- Crossing perimeter is the logical point in time of a given package lifecycle after the PDM has successfully run on the Main Stage. Effectively, between Ring 1 and Ring 2 as described throughout this document.
- A successful run of the Post Deploy Migrations inside the perimeter marks a package as `graduated`.
- A `graduated` package is a valid candidate for the monthly release.
- A `graduated` package is rolled out to the rest of the rings automatically.
- Deployments must be automated: deployments inside the perimeter are the responsibility of Release Managers; deployments outside of it are the responsibility of Team:Ops.
### Reference materials
- [Cell 1.0 blueprint](https://gitlab.com/gitlab-org/gitlab/-/blob/master/doc/architecture/blueprints/cells/iterations/cells-1.0.md)
- [The merge request for this blueprint](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/141427)
- [Delivery Point of View on Cells](https://gitlab.com/gitlab-com/Product/-/issues/12770)
- [GitLab.com deployment process before Cells](https://gitlab.com/gitlab-com/content-sites/handbook/-/blob/21f6898110466b5c581a881db0ce343bf9cb1a72/content/handbook/engineering/deployments-and-releases/deployments/index.md)
## Goals and Non-Goals
### Goals
- Limit the increase in cognitive load for the release manager; in doing so, we defined the perimeter as the clear handover point where a package is no longer a release manager's responsibility.
- Limit the blast radius of failures by partitioning the cell cluster into rings, automated validation occurs between each ring.
- Ensure deployments are reliably automated
- Ensure automatic handling of failed deployments
- Provide observability into package rollouts and deployments
### Non-Goals
- Extending `release-tools` to take ownership of the Cell Application Deployments. A smaller, more specific piece of software will allow us to keep the tooling focused on one job.
- Introduce major changes related to Release Management
- Lifecycle management of Cells
- Management of routing traffic to/from Cells
- Individual component deploys
## Stakeholders
We have several teams partaking in the operations of Cell, the first distinction is between teams implementing and maintaining the tools and teams utilizing those tools.
| Areas | Features | Owners |
|-----------------------------------|----------------------------------------------------------|---------------------------|
| Integration with Dedicated tools* | | |
| | integration with Release Managers' workflows | Delivery:Deployments |
| | deployment mechanics using `Instrumentor` and `AMP` | Foundations |
| SSOT for cluster state* | | |
| | Investigate GitOps model | Delivery:Deployments |
| | Investigate CRD + operator | Delivery:Deployments |
| Ring-based deployment automation | | |
| | propagating changes inside a ring perimeter | Delivery:Deployments |
| | orchestrating changes propagation outside ring perimeter | Foundations |
| | emergency brake: stopping a package rollout | Delivery:Deployments |
| Rollback capabilities | | |
| | rollback with downtime (for QA Cell in ring 0) | Delivery:Deployments |
| | delayed PDM for rollback support | Environment Automation* |
| Cell Lifecycle Automation | | |
| | Cell reference architecture | Foundations |
| | Cell dimensioning | Foundations |
| | Cell provisioning | Foundations |
| | Cell deprovisioning | Foundations |
| Observability | | |
| | Cell health metric | Scalability:Observability |
| | Fleet health metric | Scalability:Observability |
| | Paging EOC | Foundations |
| | Package States | Delivery:Deployments |
> \* These items may require contributions from Delivery:Deployments. This work should be heavily collaborated on as it'll help ensure appropriate alignment to meet the needs of the owning team and customer teams.
The users of those features are the Release Managers, the Engineer On Call, and the Team:Ops.
The following list defines the tasks those groups can perform in the cell cluster:
1. Release Managers
- Command deployments inside the perimeter
- Declare "graduated" packages
- Rollback deployments inside the perimeter
1. Engineer On Call
- Receive alerts for failed deployments
- Can pause a package rollout (not reaching the next ring)
- Drive investigation for failed deployments
1. Team:Ops
- Cells Administration
- Provisioning
- Deprovisioning
- Re-balancing
- Cell-Ring association
## Requirements
Before we can integrate Secondary Cells to our deployment pipeline, we need a few items immediately:
1. The router should exist, it must be HA, and have an independent deployment pipeline
- This is required for appropriate testing. As noted below, we'll need a QA cell to direct a deployment to for which QA will execute tests against. A router will need to route QA tests to the appropriate Cell.
1. Assets Deployment
- This already exists today for .com. Today this is handled via HAProxy, but with Cells, the routing layer will become the responsible party to redirect assets in a similar fashion.
- If assets are chosen to be managed differently, this changes both how Delivery need to deploy said assets in order to provide as close to Zero Downtime Upgrades as possible, and configuration to the Cell installation to support routing to assets properly.
1. Feature Flags
- We are assuming that the current Feature Flags workflows and tooling will just work on the Primary Cell and that Secondary Cells will not be affected.
- The use of feature flags to mitigate incidents is limited to only the Primary Cell.
- Tooling may need to mature to ensure that Cells do not drift for long periods of time with feature flags. This ensures that customers have a similar experience if their work expands across Cells and that we as operators of .com need not worry about version drift and the implications of code differing behind the feature flag.
- Further guidance, documentation will need to be developed for this area. Engineers shouldn't care what cell an organization is a part of. Thus Feature Flag toggles abstract away the need for engineers to care.
## Proposed plan of action
From a delivery perspective not much changes between the 3 proposed Cells iterations (1.0, 1.5, and 2.0). The split is an iterative approach based on cutting the scope of the features available for Organizations bound to a given Cell. From a deployment point of view, it should be possible to have multiple Secondary Cells from the first iteration so we have to figure out a roadmap to get there that is independent from the Cell architecture version.
### Iterations
#### Cells 1.0
The intent in this iteration is to focus our efforts on building and integrating our own tooling that builds and manages Cells. The following milestones, and their exit criterion, are a collaborative effort of the Platforms section and spans across many teams.
1. The Dedicated technology stack expansion:
- Instrumentor and AMP support GCP
- A cell is defined as a reference architecture in Instrumentor
1. Control Plane for Cells - Cell Cluster Coordinator
- Switchboard is currently leveraged by Dedicated but is not an appropriate tool for Cells. We should evaluate the capabilities of other tooling created by Dedicated, `amp` and `instrumentor`, to determine how they could be integrated into a deployment workflow.
- Implement Cell deployment converging the entire infrastructure of the cell (current dedicated capability)
- Implement the concept of Rings: initially only Rings 0 and 2
1. First Secondary Cell: the QA Cell in Ring 0
- Build integration with our current tooling to perform deployments to the QA cell via the Coordinator
- The QA Cell runs its own QA smoke tests
- The QA Cell is updated in parallel with the production canary stage: QA cell failures are considered soft and do not block auto_deploy
1. Control Plane for Cells - Individual dashboards and alerting
- observability is at least on par with the legacy infrastructure
- alerting is at least on par with the legacy infrastructure
1. First Customer Secondary Cell: Ring 2
- release-tools can `graduate` a package after the PDM execution
- the Coordinator can manage Ring 2 deployments
1. Support for multiple Secondary Cells
- the Coordinator can converge multiple cells in the same Ring to the desired version
> - Limitations:
> - all Secondary Cells will be in the same ring, Ring 2
> - Rollbacks are possible but require downtime to achieve on all secondary cells
#### Cells 1.5 and 2.0
The following features can be distributed between Cell 1.5 and 2.0, they are all improving the operational aspects and we should prioritize them as we learn more about operating Cells.
1. Control Plane for Cells - Additional rings
- Secondary Cells can be spread over multiple rings
- Deployment to the next ring starts automatically after the current ring converged
- Emergency brake: ability to block package rollout to the next ring
1. The QA Cell becomes a blocker for auto-deploy
1. Control Plane for Cells - Cluster dashboards and alerting
- A dashboard should indicate what package is expected for any given Cell and Ring deployment
- Any cell not running the desired version should be easily visible and alert if not converged in a reasonable amount of time
- Deployment health metrics to block package rollout inside a ring (z-score on the four golden signals?)
1. The Post Deploy Migration (PDM) step of deployments needs to be segregated from the application deployment to ensure we have the ability to perform rollbacks on Cells.
- Without this capability, a Cell must suffer downtime in order for a rollback to complete successfully. This is disruptive and should not be considered a wise solution.
- The separation of the PDM on the primary Cell already functions as desired. Thus our Primary Cell will have rollbacks as an option to mitigate incidents.
1. Modified tooling that enables us to target only Deploying the GitLab application. Currently the destined tooling to be leveraged employs a strategy where the entire installation is converged. This includes the infrastructure and the version of GitLab which creates a lengthy CI pipeline and long running jobs.
1. Automated Rollbacks - if a deployment fails for any reason, a rollback procedure should be initiated automatically to minimize disruption to the affected Cell. We should be able to use a health metric for this.
The focus here is productionalizing what has been built and cleaning up areas of tech debt incurred during the MVP stage of the first iteration.
#### Mindmap
```mermaid
%%{init: {'theme':'default'}}%%
mindmap
root((Cells Deployment))
Core concepts 📚
Current .com infra is the Primary Cell
It is possible to have multiple Cells in Cell 1.0
Cell isn't an HA solution
Secondary Cells talks to the Primary Cell using internal API
Prerequisites 🏗️
router
HA solution
independent deployment
Dedicated
Support GCP
Cell reference architecture
Decisions 📔
Ring style deployment
Ring 0: Current Canary + a new QA Cell
Ring 1: Current Main stage
Ring 2+: New Secondary Cells for customers
The Perimeter
Marks the handover point between Release Managers and Team:Ops
Inside: Ring 0 and 1
Outside: Ring 2+
Running PDM inside the perimeter graduates a package
A graduated package is a valid candidate for the monthly release
We are not porting staging to Cell
A new QA Cell will validate the package without affecting users
Procedures needing upgrades 🦾
Rollbacks
auto-deploy rollout
Post Deployment Migration
Deployment health metrics
Risk area ☢️
There is no dogfooding in Cell 1.0 and 1.5
```
#### Deployment coordinator and Cell Cluster Coordinator
In the context of `auto deploy` we have an external coordinator pipeline, inside the `release-tools` project, that takes care of orchestrating package generation and rollout invoking the specific tool for each job.
In today's GitLab.com infrastructure, deployments are executed by specific tools (`deployer` and `gitlab-com/k8s-workloads`) that can be independently operated by SRE and Release Managers, with the introduction of the Cell cluster we will face new operational challenges like a simple cluster overview, package rollout status, feature flag configuration, provisioning and deprovisioning.
The GitLab Dedicated stack features its own method of controlling installs of GitLab, primarily through a slew of tools, Switchboard, Amp, and Tenctl. The use of Switchboard is not geared towards Cells and thus cannot be leveraged. Other tooling such as Instrumentor and Amp may have a place or modifications to enable them to be more portable for usage between both the Dedicated team and Cells. We'll need to evaluate these tools, their interactions with Cells, and how we may leverage them. Pending how the work is scheduled, this may be a highly collaborative effort with team members working closely across team boundaries to ensure requirements are met during the initial period or MVP for Cells.
In this paragraph we describe an ideal interaction where a data store is updated with a desired version to be deployed, and a Cell Cluster Coordinator is created to support Cell deployments.
In Cell 1.0, inside the perimeter, we will have a single Secondary Cell, the QA Cell.
We should expand release-tools to command some-tool to perform a Cell update on demand.
```plantuml
@startuml
participant "release-tools" as rt
participant "Cell Cluster Coordinator" as sb
participant "AMP Cluster" as AMP
collections "Secondary Cells" as cells
participant QA
rt -> sb: update QA Cell
note right: In parallel with canary stage rollout
sb -> AMP: schedule deployment job
AMP -> cells: QA Cell: version rollout
cells --> AMP
AMP --> sb
sb --> rt
rt --> QA: test QA Cell
note over rt,QA: It does not replace canary QA
QA -> cells: run tests
cells --> QA
QA --> rt
@enduml
```
As we mentioned before, when we run post-deployment migrations in Ring 1, release-tools will mark that version as `graduated` and thus be capable to rollout outside of the perimeter.
Cell Cluster Coordinator will be leveraged to help coordinate automated version upgrades to further rings with automated checks before and after deployments to ensure we are deploying to the correct cells of a desired ring and validate instances are healthy before and after deployments, rolling back in the face of failure, and alerting the appropriate teams as necessary.
```plantuml
@startuml
participant "Cell Cluster Coordinator" as sb
participant "AMP Cluster" as AMP
collections "Secondary Cells" as cells
loop
sb -> sb: pull ring-version mapping
opt version mismatch
sb -> AMP: schedule configure job
AMP -> cells: CellX: version rollout
cells --> AMP
AMP --> sb
opt the ring now runs the same version
sb -> sb: promote version to the next ring
end
end
end
@enduml
```
### Procedures
#### Auto-Deploy
Auto-deploy shall continue to work as it does today as our Primary Cell is equivalent to our legacy .com infrastructure. Thus our existing procedures related to auto-deploy can still be continued to be leveraged. Think hot-patching, rollbacks, auto-deploy picking, the PDM, the existing auto-deploy schedule, etc. A new procedure will be added to ensure that `release-tools` knows to trigger a deployment after a PDM is executed to the next Ring. Currently `release-tools` doesn't understand anything related to Ring Deployments, this is functionality that will need to be added.
- Auto-deploy is limited to Rings 0 and 1:
- Ring 0 contains a QA Cell plus the canary stage of the .com infra
- Ring 1 contains main stage of the .com infra - this is the cut off for release tools
- All cells will deploy the same way; this eliminates needing to deal with differing deployment technologies
- `release-tools` will interact with the Coordinator to pilot the deployments to Ring 0 as part of its coordinator pipeline
- Release-tools must be able to `graduate` a package:
- A `graduate` version of GitLab is any `auto-deploy` version which has a successful deploy onto the Main Stage of Production and the [Post Deploy Migration (PDM)](https://gitlab.com/gitlab-org/release/docs/-/blob/master/general/post_deploy_migration/readme.md) has completed.
- This could mean we expect to see a single package deploy each day to our Secondary Cells. Currently, the PDM is only run 1 time per day. Note that there are exceptions to this rule.
- This will enable us to use our existing procedures to remediate high severity incidents where application code may be at fault.
- We do not want to run official released versions of GitLab as these are produced far slower than auto-deploys thus we risk missing SLA's on incident response. In the cell architecture, most issues should be found in the Primary Cell and fixed prior to being deployed to any Secondary Cell.
- We'll need new procedures, runbooks, and documentation such that when a problem is found through manual testing, we have some ability to halt deployments of what may be labeled a `graduated` package from actually being deployed.
- It would be wise to track these failure cases as realistically, QA should be finding issues to enable us to run automated deployments.
Note that currently, some smaller components deploy themselves to the .com infrastructure. Notably, Zoekt, Container Registry, and Mailroom, have their own cadence of providing newer versions to .com. This aspect will not be carried over into secondary cells, as currently, the tooling we'll leverage does not allow a segregation of components to enable this functionality. Instead, we'll rely on the current defined versions as specified in the default branch which built the `auto-deploy` package. This mimics how our releases are accomplished and thus should carry over well with Cells.
#### Rollbacks
Long term, we should aim to modify the deployment tooling such that Cells are provided a grace period to enable each of them to be able to be safely rolled back in the event of a deployment failure, or mitigating a failure that is noticed post deployment. Currently for the legacy .com or the Primary Cell, we hold the PDM to execute 1 time per day at the discretion of Release Managers. The tooling that performs deployments to Cells currently does not have a way to NOT run the PDM, thus there does not currently exist a way to rollback without inducing downtime on a particular Cell. Procedures and tooling updates will be required in this area.
#### Hot patching
Hot patching is one source of our ability to mitigate problems. If we rely on `graduate` versions, the hot patcher has no place for secondary cells. It could still be leveraged for our Primary Cell, however. Though, it would be wise if we can eliminate hot patching in favor of safer deployment methodologies.
> For reference, we've only hot patched production 1 time for year 2023.
#### Deployment Health Metrics
Currently we do not automate a deployment to the Main stage of the .com legacy infrastructure, or the Primary Cell. In order to reduce operational overhead we should be able to rely on existing metrics which form a health indicator for a given installation and automatically trigger a deployment at the appropriate time. This deployment health indicator would also need to be carried into each of our cells. Tooling that triggers a deployment at various rings should be made aware to continue or halt a deploy given the status of earlier rings and the health state of the next target ring.
#### Feature Flags
Feature Flags are discussed in [data-stores#83](https://gitlab.com/gitlab-org/enablement-section/data-stores/-/issues/83).
#### Package Rollout Policy
We have an implicit procedure driven by our current use of auto-deploys. This will become more prominent with Cells. As implied in various formats above, auto-deploy shall operate relatively similarly to how it operates today. Cells becomes an addition to the existing `release-tools` pipeline with triggers in differing areas. When and what we trigger will need to be keenly defined. It is expected that Secondary Cells only receive `graduated` versions of GitLab. Thus, we'll leverage the use of our Post Deployment Migration pipeline as the gatekeeper for when a package is considered `graduated`. In an ideal world, when the PDM is executed successfully on the Primary Cell, that package is then considered `graduated` and can be deployed to any outer ring. This same concept is already leveraged when we build releases for self managed customers. This break point is already natural to Release Managers and thus is a good carry over for Cell deployments.
We should aim to deploy to Cells as quickly as possible. For all Cells that exist in a single ring, we should have the ability to deploy in parallel. Doing so minimizes the version drift between Cells and reduces potential issues. If the version drifts too greatly, auto-deploy shall pause itself and an investigation into the reason why we are too far behind begins. Ideally we know about this situation ahead of time. We should aim to be no greater than 1 `graduate` package behind our PDM. Thus the expectation is that for every PDM, is a deployment to our Cells, every day. There are days which the PDM is skipped. We'll need to evaluate on a case-by-case basis why the PDM is halted to determine the detriment this will incur on our Cell deployments.
Rings outside of the perimeter are self-managed by the orchestration engine. Once `release-tools` graduates a package it can forget about it. The orchestration engine will converge the desired GitLab version to all Cells in Ring 2, the first ring outside of the perimeter, and move to the next ring only when all Cells have converged.
### FAQ
**Will Developers see indicators on MR's as they are deployed to various Cells?**
No. Our current labeling schema is primarily to showcase that the commit landed in production, the PDM successfully executed, which signals to us that the observed commit is safe for being placed in a release for self-managed customers. Being that after we reach this point, issues with a package should be minimal, there's no need to update issues/MR's with the status as we move forward into our many Rings of deployments. Developers should not need to care what version is deployed to what Cell.
**A P1/S1 issue exists, how do we mitigate this on Cells?**
Cells are still a part of .com, thus our existing [bug](https://handbook.gitlab.com/handbook/engineering/infrastructure/engineering-productivity/issue-triage/#severity-slos) and [vulnerability](https://handbook.gitlab.com/handbook/security/threat-management/vulnerability-management/#remediation-slas) SLA's for remediation apply. We can deploy whatever we want to secondary cells so long as it's considered `graduated`. If a high priority issue comes about, we should be able to freely leverage our existing procedures to update our code base and any given auto-deploy branch for mitigation, and maybe after some extra rounds of testing, or perhaps a slower roll out, we can deploy that auto-deploy package into our cells. This provides us with the same mitigation methods that we leverage today. The problem that this causes is that there could exist some code that may not have been fully vetted. We can still rely on rollbacks in this case and revisit any necessary patch for the next round of auto-deployments and evaluate the fix for another attempt to remediate our cells.
**What changes are expected from a Developer's perspective?**
Release and Auto-Deploy procedures should largely remain the same. We're shifting where code lands. Any changes in this realm would increase the most the closer we are to Iteration 2.0 when various environments or stages to GitLab begin to change.
**All tiers but one have a failed deploy, what triggers a rollback of that package for all cells?**
This depends on various characteristics that we'll probably want to iterate on and develop processes for. Example, if we fail on the very first cell on the first Tier, we should investigate that cell, but also ensure that this is not systemic to all cells. This can only be handled on a case-by-case basis. If we reach the last tier and last cell and some failure would occur, there should be no reason to rollback any other cell as enough time should have passed by for us to catch application failures.
**What happens with self-managed releases?**
Theoretically not much changes. Currently we use Production, or .com's Main Stage as our proving grounds for changes that are destined to be releasable for self-managed. This does not change as in the Cellular architecture, this notion for this exists in the same place. The vocabulary changes, in this case, a `graduated` package is now considered safe for a release.
**What happens to PreProd**
This instance specifically tests the hybrid installation of a GitLab package and Helm chart when we create release candidates. It's our last step prior to a release being tagged. This is not impacted by the Cells work. Though we may change how preprod is managed.
**What happens with Staging**
Staging is crucial for long term instance testing of a deployment alongside QA. Hypothetically staging could completely go away in favor of a deployment to Tier 0. Reference the above Iteration 3 {+TODO add proper link+}
**What happens to Ops**
No need to change. But if Cell management becomes easy, it would be prudent to make this installation operate as similar as possible to avoid overloading operations teams with unique knowledge for our many instances.
This same answer could be provided for the Dev instance.

View File

@ -194,13 +194,7 @@ See [Cells: Routing Service](routing-service.md).
### 4. Cell deployment
We will run many Cells.
To manage them easier, we need to have consistent deployment procedures for Cells, including a way to deploy, manage, migrate, and monitor.
We are very likely to use tooling made for [GitLab Dedicated](https://about.gitlab.com/dedicated/) with its control planes.
1. **Extend GitLab Dedicated to support GCP.**
1. TBD
See [Cell: Application deployment](application-deployment.md).
### 5. Migration

View File

@ -424,7 +424,9 @@ Use the asynchronous index helpers on your local environment to test changes for
1. Enable the feature flags by running `Feature.enable(:database_async_index_creation)` and `Feature.enable(:database_reindexing)` in the Rails console.
1. Run `bundle exec rails db:migrate` so that it creates an entry in the `postgres_async_indexes` table.
1. Run `bundle exec rails gitlab:db:reindex` so that the index is created asynchronously.
<!-- markdownlint-disable MD044 -->
1. Run `bundle exec rails gitlab:db:execute_async_index_operations:all` so that the index is created asynchronously on all databases.
<!-- markdownlint-enable MD044 -->
1. To verify the index, open the PostgreSQL console using the [GDK](https://gitlab.com/gitlab-org/gitlab-development-kit/-/blob/main/doc/howto/postgresql.md) command `gdk psql` and run the command `\d <index_name>` to check that your newly created index exists.
## Drop indexes asynchronously

View File

@ -816,6 +816,13 @@ the following are the names of GitLab Duo features:
After the first use, use the feature name without **GitLab Duo**.
## GitLab Duo Pro
Always use **GitLab Duo Pro** for the add-on. Do not use **Duo Pro** unless approved by legal.
You can use **the GitLab Duo Pro add-on** (with this capitalization) but you do not need to use **add-on**
and should leave it off when you can.
## GitLab Flavored Markdown
When possible, spell out [**GitLab Flavored Markdown**](../../../user/markdown.md).

View File

@ -63,6 +63,8 @@ DETAILS:
**Tier:** Free, Premium, Ultimate
**Offering:** Self-managed
> - [Introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/427549) in GitLab 16.8.
Administrators can enforce 2FA for administrator users in a self-managed instance.
1. On the left sidebar, at the bottom, select **Admin Area**.

View File

@ -154,7 +154,7 @@ REST endpoint to create a runner:
1. Save the returned `token` value in a secure location or your secrets management
solution. The `token` value is returned only once in the API response.
## With the `gitlab_user_runner` Terraform resource
### With the `gitlab_user_runner` Terraform resource
To create the runner configuration with Terraform, use the
[`gitlab_user_runner` Terraform resource](https://gitlab.com/gitlab-org/terraform-provider-gitlab/-/blob/main/docs/resources/user_runner.md?ref_type=heads)

View File

@ -5,15 +5,13 @@ module Gitlab
# Finding is a data object representing a secret finding identified within a blob
class Finding
attr_reader :blob_id, :status, :line_number, :type, :description
attr_accessor :occurrences
def initialize(blob_id, status, line_number = nil, type = nil, description = nil, occurrences = nil) # rubocop:disable Metrics/ParameterLists -- all params are needed
def initialize(blob_id, status, line_number = nil, type = nil, description = nil)
@blob_id = blob_id
@status = status
@line_number = line_number
@type = type
@description = description
@occurrences = occurrences
end
def ==(other)
@ -26,8 +24,7 @@ module Gitlab
status: status,
line_number: line_number,
type: type,
description: description,
occurrences: occurrences
description: description
}
end

View File

@ -102,7 +102,9 @@ RSpec.describe Gitlab::SecretDetection::Scan, feature_category: :secret_detectio
data: "GR134894112312312312312312312\nglft-12312312312312312312"), # gitleaks:allow
new_blob(id: 555, data: "data with no secret"),
new_blob(id: 666, data: "data with no secret"),
new_blob(id: 777, data: "\nglptt-1231231231231231231212312312312312312312") # gitleaks:allow
new_blob(id: 777, data: "\nglptt-1231231231231231231212312312312312312312"), # gitleaks:allow
new_blob(id: 888,
data: "glpat-12312312312312312312;GR134894112312312312312312312") # gitleaks:allow
]
end
@ -144,6 +146,20 @@ RSpec.describe Gitlab::SecretDetection::Scan, feature_category: :secret_detectio
2,
ruleset['rules'][1]['id'],
ruleset['rules'][1]['description']
),
Gitlab::SecretDetection::Finding.new(
blobs[7].id,
Gitlab::SecretDetection::Status::FOUND,
1,
ruleset['rules'][0]['id'],
ruleset['rules'][0]['description']
),
Gitlab::SecretDetection::Finding.new(
blobs[7].id,
Gitlab::SecretDetection::Status::FOUND,
1,
ruleset['rules'][2]['id'],
ruleset['rules'][2]['description']
)
]
)

View File

@ -299,10 +299,12 @@ module API
validate_fips! if Gitlab::FIPS.enabled?
::Packages::Pypi::CreatePackageService
service_response = ::Packages::Pypi::CreatePackageService
.new(project, current_user, declared_params.merge(build: current_authenticated_job))
.execute
bad_request!(service_response.message) if service_response.error?
created!
rescue ObjectStorage::RemoteStoreError => e
Gitlab::ErrorTracking.track_exception(e, extra: { file_name: params[:name], project_id: authorized_user_project.id })

View File

@ -59,11 +59,9 @@ module Gitlab
end
def compare(start_sha, head_sha, straight: false)
include_stats = !Feature.enabled?(:remove_request_stats_for_tracing, project)
compare = CompareService.new(project, head_sha).execute(project, start_sha, straight: straight)
compare.diffs(paths: paths, expanded: true, ignore_whitespace_change: @ignore_whitespace_change, include_stats:
include_stats)
false)
end
end
end

View File

@ -24,7 +24,7 @@ module Gitlab
increase_total_counter(event_name)
increase_weekly_total_counter(event_name)
update_unique_counter(event_name, kwargs)
update_unique_counters(event_name, kwargs)
trigger_snowplow_event(event_name, category, kwargs) if send_snowplow_event
if Feature.enabled?(:internal_events_for_product_analytics)
@ -61,21 +61,37 @@ module Gitlab
Gitlab::Redis::SharedState.with { |redis| redis.incr(redis_counter_key) }
end
def update_unique_counter(event_name, kwargs)
unique_property = EventDefinitions.unique_property(event_name)
return unless unique_property
def update_unique_counters(event_name, kwargs)
unique_properties = EventDefinitions.unique_properties(event_name)
return if unique_properties.empty?
unique_method = :id
unless kwargs.has_key?(unique_property)
message = "#{event_name} should be triggered with a named parameter '#{unique_property}'."
Gitlab::AppJsonLogger.warn(message: message)
return
if Feature.disabled?(:redis_hll_property_name_tracking, type: :wip)
unique_properties = handle_legacy_property_names(unique_properties, event_name)
end
unique_value = kwargs[unique_property].public_send(unique_method) # rubocop:disable GitlabSecurity/PublicSend
unique_properties.each do |property_name|
unless kwargs[property_name]
message = "#{event_name} should be triggered with a named parameter '#{property_name}'."
Gitlab::AppJsonLogger.warn(message: message)
next
end
UsageDataCounters::HLLRedisCounter.track_event(event_name, values: unique_value)
unique_value = kwargs[property_name].id
UsageDataCounters::HLLRedisCounter.track_event(event_name, values: unique_value, property_name: property_name)
end
end
def handle_legacy_property_names(unique_properties, event_name)
# make sure we're not incrementing the user_id counter with project_id value
return [:user] if event_name.to_s == 'user_visited_dashboard'
return unique_properties if unique_properties.length == 1
# in case a new event got defined with multiple unique_properties, raise an error
raise Gitlab::InternalEvents::EventDefinitions::InvalidMetricConfiguration,
"The same event cannot have several unique properties defined. " \
"Event: #{event_name}, unique values: #{unique_properties}"
end
def trigger_snowplow_event(event_name, category, kwargs)

View File

@ -13,16 +13,20 @@ module Gitlab
nil
end
def unique_property(event_name)
unique_value = events[event_name]&.to_s
def unique_properties(event_name)
unique_values = events.fetch(event_name, [])
return unless unique_value
unique_values.filter_map do |unique_value|
next unless unique_value # legacy events include `nil` unique_value
unless VALID_UNIQUE_VALUES.include?(unique_value)
raise(InvalidMetricConfiguration, "Invalid unique value '#{unique_value}' for #{event_name}")
unique_value = unique_value.to_s
unless VALID_UNIQUE_VALUES.include?(unique_value)
raise(InvalidMetricConfiguration, "Invalid unique value '#{unique_value}' for #{event_name}")
end
unique_value.split('.').first.to_sym
end
unique_value.split('.').first.to_sym
end
def known_event?(event_name)
@ -52,17 +56,12 @@ module Gitlab
end
def process_events(all_events, metric_events)
metric_events.each do |event_name, event_unique_attribute|
unless all_events[event_name]
all_events[event_name] = event_unique_attribute
next
end
metric_events.each do |event_name, event_unique_property|
all_events[event_name] ||= []
next if event_unique_attribute.nil? || event_unique_attribute == all_events[event_name]
next if all_events[event_name].include?(event_unique_property)
raise InvalidMetricConfiguration,
"The same event cannot have several unique properties defined. " \
"Event: #{event_name}, unique values: #{event_unique_attribute}, #{all_events[event_name]}"
all_events[event_name] << event_unique_property
end
end
end

View File

@ -17303,12 +17303,18 @@ msgstr ""
msgid "Deployment|Created"
msgstr ""
msgid "Deployment|Deployment #%{iid}"
msgstr ""
msgid "Deployment|Deployment ID"
msgstr ""
msgid "Deployment|Failed"
msgstr ""
msgid "Deployment|Finished %{timeago} by %{username}"
msgstr ""
msgid "Deployment|Flux sync failed"
msgstr ""
@ -17339,12 +17345,18 @@ msgstr ""
msgid "Deployment|Skipped"
msgstr ""
msgid "Deployment|Started %{timeago} by %{username}"
msgstr ""
msgid "Deployment|Success"
msgstr ""
msgid "Deployment|Sync status is unknown. %{linkStart}How do I configure Flux for my deployment?%{linkEnd}"
msgstr ""
msgid "Deployment|There was an issue fetching the deployment, please try again later."
msgstr ""
msgid "Deployment|Triggerer"
msgstr ""
@ -54182,6 +54194,9 @@ msgstr ""
msgid "VersionCheck|%{details}"
msgstr ""
msgid "VersionCheck|Additionally, there is an available stable patch for your current GitLab minor version: %{latestStableVersionOfMinor}"
msgstr ""
msgid "VersionCheck|Important notice - Critical security release"
msgstr ""

View File

@ -0,0 +1,106 @@
// Unit tests for the deployment header component
// (~/deployments/components/deployment_header.vue), driven by GraphQL
// fixture data for a single deployment and its environment.
import { GlSkeletonLoader } from '@gitlab/ui';
import mockDeploymentFixture from 'test_fixtures/graphql/deployments/graphql/queries/deployment.query.graphql.json';
import mockEnvironmentFixture from 'test_fixtures/graphql/deployments/graphql/queries/environment.query.graphql.json';
import { mountExtended } from 'helpers/vue_test_utils_helper';
import DeploymentStatusLink from '~/environments/environment_details/components/deployment_status_link.vue';
import DeploymentHeader from '~/deployments/components/deployment_header.vue';
import DeploymentCommit from '~/environments/components/commit.vue';
import ClipboardButton from '~/vue_shared/components/clipboard_button.vue';
import TimeAgoTooltip from '~/vue_shared/components/time_ago_tooltip.vue';
// Pull the deployment and environment objects out of the GraphQL fixtures.
const {
data: {
project: { deployment },
},
} = mockDeploymentFixture;
const {
data: {
project: { environment },
},
} = mockEnvironmentFixture;
describe('~/deployments/components/deployment_header.vue', () => {
let wrapper;
// Mounts the component with fixture data; individual tests may override
// props (e.g. `loading`, or a modified `deployment`) via `propsData`.
const createComponent = ({ propsData = {} } = {}) => {
wrapper = mountExtended(DeploymentHeader, {
propsData: {
deployment,
environment,
loading: false,
...propsData,
},
});
};
describe('loading', () => {
it('shows a skeleton loader while loading', () => {
createComponent({ propsData: { loading: true } });
expect(wrapper.findComponent(GlSkeletonLoader).exists()).toBe(true);
});
});
// Default fixture deployment: renders the "Finished … by …" variant.
describe('finished deployment', () => {
beforeEach(() => {
createComponent();
});
it('shows the deployment status link', () => {
const link = wrapper.findComponent(DeploymentStatusLink);
expect(link.props('status')).toBe(deployment.status.toLowerCase());
expect(link.props('deploymentJob')).toEqual(deployment.job);
});
it('shows a link to the environment name', () => {
const link = wrapper.findByRole('link', { name: environment.name });
expect(link.attributes('href')).toBe(environment.path);
});
it('shows a link to the commit', () => {
const link = wrapper.findByRole('link', { name: deployment.commit.shortId });
expect(link.attributes('href')).toBe(deployment.commit.webUrl);
});
it('has a clipboard button that copies the commit SHA', () => {
const button = wrapper.findComponent(ClipboardButton);
expect(button.props()).toMatchObject({
text: deployment.commit.shortId,
title: 'Copy commit SHA',
});
});
it('shows when the deployment finished', () => {
const timeago = wrapper.findComponent(TimeAgoTooltip);
expect(timeago.text()).toBe(`Finished 6 months ago by @${deployment.triggerer.username}`);
});
it('shows the commit message for the deployment', () => {
const commit = wrapper.findComponent(DeploymentCommit);
expect(commit.props('commit')).toEqual(deployment.commit);
});
});
// With `finishedAt` cleared and a running status the header falls back to
// the "Started … by …" variant.
describe('unfinished deployment', () => {
beforeEach(() => {
createComponent({
propsData: {
deployment: {
...deployment,
status: 'running',
finishedAt: null,
},
},
});
});
it('shows when the deployment was created', () => {
const timeago = wrapper.findComponent(TimeAgoTooltip);
expect(timeago.text()).toBe(`Started 1 year ago by @${deployment.triggerer.username}`);
});
});
});

View File

@ -0,0 +1,101 @@
// Unit tests for the deployment details page container
// (~/deployments/components/show_deployment.vue): GraphQL error handling
// and rendering of the header once both queries resolve.
import VueApollo from 'vue-apollo';
import Vue from 'vue';
import { mount } from '@vue/test-utils';
import { GlAlert } from '@gitlab/ui';
import mockDeploymentFixture from 'test_fixtures/graphql/deployments/graphql/queries/deployment.query.graphql.json';
import mockEnvironmentFixture from 'test_fixtures/graphql/deployments/graphql/queries/environment.query.graphql.json';
import { captureException } from '~/sentry/sentry_browser_wrapper';
import ShowDeployment from '~/deployments/components/show_deployment.vue';
import DeploymentHeader from '~/deployments/components/deployment_header.vue';
import deploymentQuery from '~/deployments/graphql/queries/deployment.query.graphql';
import environmentQuery from '~/deployments/graphql/queries/environment.query.graphql';
import waitForPromises from 'helpers/wait_for_promises';
import createMockApollo from 'helpers/mock_apollo_helper';
// Stub Sentry so calls to `captureException` can be asserted on.
jest.mock('~/sentry/sentry_browser_wrapper');
Vue.use(VueApollo);
// Provide/inject values read by the component; the name and iid come from
// the fixtures so the mocked query responses line up with the request.
const PROJECT_PATH = 'group/project';
const ENVIRONMENT_NAME = mockEnvironmentFixture.data.project.environment.name;
const DEPLOYMENT_IID = mockDeploymentFixture.data.project.deployment.iid;
describe('~/deployments/components/show_deployment.vue', () => {
let wrapper;
let mockApollo;
let deploymentQueryResponse;
let environmentQueryResponse;
beforeEach(() => {
// Fresh mocks per test; each test decides whether the queries resolve
// with fixture data or reject with an error.
deploymentQueryResponse = jest.fn();
environmentQueryResponse = jest.fn();
});
// Mounts the component with both queries mocked and waits for the Apollo
// requests to settle before assertions run.
const createComponent = () => {
mockApollo = createMockApollo([
[deploymentQuery, deploymentQueryResponse],
[environmentQuery, environmentQueryResponse],
]);
wrapper = mount(ShowDeployment, {
apolloProvider: mockApollo,
provide: {
projectPath: PROJECT_PATH,
environmentName: ENVIRONMENT_NAME,
deploymentIid: DEPLOYMENT_IID,
},
});
return waitForPromises();
};
const findHeader = () => wrapper.findComponent(DeploymentHeader);
const findAlert = () => wrapper.findComponent(GlAlert);
describe('errors', () => {
it('shows an error message when the deployment query fails', async () => {
deploymentQueryResponse.mockRejectedValue(new Error());
await createComponent();
expect(findAlert().text()).toBe(
'There was an issue fetching the deployment, please try again later.',
);
});
it('shows an error message when the environment query fails', async () => {
environmentQueryResponse.mockRejectedValue(new Error());
await createComponent();
expect(findAlert().text()).toBe(
'There was an issue fetching the deployment, please try again later.',
);
});
it('captures exceptions for sentry', async () => {
const error = new Error('oops!');
deploymentQueryResponse.mockRejectedValue(error);
await createComponent();
expect(captureException).toHaveBeenCalledWith(error);
});
});
describe('header', () => {
beforeEach(() => {
deploymentQueryResponse.mockResolvedValue(mockDeploymentFixture);
environmentQueryResponse.mockResolvedValue(mockEnvironmentFixture);
return createComponent();
});
it('shows a header containing the deployment iid', () => {
expect(wrapper.find('h1').text()).toBe(
`Deployment #${mockDeploymentFixture.data.project.deployment.iid}`,
);
});
it('shows the header component, binding the environment and deployment', () => {
expect(findHeader().props()).toMatchObject({
deployment: mockDeploymentFixture.data.project.deployment,
environment: mockEnvironmentFixture.data.project.environment,
});
});
});
});

View File

@ -0,0 +1,53 @@
# frozen_string_literal: true
require 'spec_helper'
# Generates the GraphQL JSON fixtures consumed by the deployments frontend
# specs (deployment.query.graphql.json and environment.query.graphql.json).
RSpec.describe 'Deployments (JavaScript fixtures)', feature_category: :continuous_delivery do
include ApiHelpers
include JavaScriptFixturesHelpers
let_it_be(:admin) { create(:admin, username: 'administrator', email: 'admin@example.gitlab.com') }
let_it_be(:group) { create(:group, path: 'deployment-group') }
let_it_be(:project) { create(:project, :repository, group: group, path: 'releases-project') }
let_it_be(:environment) do
create(:environment, project: project)
end
let_it_be(:pipeline) { create(:ci_pipeline, project: project) }
let_it_be(:build) { create(:ci_build, :success, pipeline: pipeline) }
# Fixed created_at/finished_at dates keep the generated fixture stable for
# time-based assertions in the frontend specs.
let_it_be(:deployment) do
create(:deployment,
:success,
environment: environment,
deployable: build,
created_at: Date.new(2019, 1, 1),
finished_at: Date.new(2020, 1, 1))
end
describe GraphQL::Query, type: :request do
include GraphqlHelpers
deployment_query_path = 'deployments/graphql/queries/deployment.query.graphql'
# NOTE: the example name doubles as the fixture output path.
it "graphql/#{deployment_query_path}.json" do
query = get_graphql_query_as_string(deployment_query_path)
post_graphql(query, current_user: admin, variables: { fullPath: project.full_path, iid: deployment.iid })
expect_graphql_errors_to_be_empty
expect(graphql_data_at(:project, :deployment)).to be_present
end
environment_query_path = 'deployments/graphql/queries/environment.query.graphql'
it "graphql/#{environment_query_path}.json" do
query = get_graphql_query_as_string(environment_query_path)
post_graphql(query, current_user: admin, variables: { fullPath: project.full_path, name: environment.name })
expect_graphql_errors_to_be_empty
expect(graphql_data_at(:project, :environment)).to be_present
end
end
end

View File

@ -2,7 +2,6 @@ import { GlModal, GlLink, GlSprintf } from '@gitlab/ui';
import { shallowMountExtended } from 'helpers/vue_test_utils_helper';
import { mockTracking, unmockTracking } from 'helpers/tracking_helper';
import { stubComponent, RENDER_ALL_SLOTS_TEMPLATE } from 'helpers/stub_component';
import { sprintf } from '~/locale';
import SecurityPatchUpgradeAlertModal from '~/gitlab_version_check/components/security_patch_upgrade_alert_modal.vue';
import * as utils from '~/gitlab_version_check/utils';
import {
@ -16,7 +15,6 @@ describe('SecurityPatchUpgradeAlertModal', () => {
let wrapper;
let trackingSpy;
const hideMock = jest.fn();
const { i18n } = SecurityPatchUpgradeAlertModal;
const defaultProps = {
currentVersion: '11.1.1',
@ -72,14 +70,12 @@ describe('SecurityPatchUpgradeAlertModal', () => {
});
it('renders the modal title correctly', () => {
expect(findGlModalTitle().text()).toBe(i18n.modalTitle);
expect(findGlModalTitle().text()).toBe('Important notice - Critical security release');
});
it('renders modal body without suggested versions', () => {
expect(findGlModalBody().text()).toBe(
sprintf(i18n.modalBodyNoStableVersions, {
currentVersion: defaultProps.currentVersion,
}),
`You are currently on version ${defaultProps.currentVersion}! We strongly recommend upgrading your GitLab installation immediately.`,
);
});
@ -99,7 +95,7 @@ describe('SecurityPatchUpgradeAlertModal', () => {
describe('Learn more link', () => {
it('renders with correct text and link', () => {
expect(findGlLink().text()).toBe(i18n.learnMore);
expect(findGlLink().text()).toBe('Learn more about this critical security release.');
expect(findGlLink().attributes('href')).toBe(ABOUT_RELEASES_PAGE);
});
@ -112,7 +108,7 @@ describe('SecurityPatchUpgradeAlertModal', () => {
describe('Remind me button', () => {
it('renders with correct text', () => {
expect(findGlRemindButton().text()).toBe(i18n.secondaryButtonText);
expect(findGlRemindButton().text()).toBe('Remind me again in 3 days');
});
it(`tracks click ${TRACKING_LABELS.REMIND_ME_BTN} when clicked`, async () => {
@ -137,7 +133,7 @@ describe('SecurityPatchUpgradeAlertModal', () => {
describe('Upgrade button', () => {
it('renders with correct text and link', () => {
expect(findGlUpgradeButton().text()).toBe(i18n.primaryButtonText);
expect(findGlUpgradeButton().text()).toBe('Upgrade now');
expect(findGlUpgradeButton().attributes('href')).toBe(UPGRADE_DOCS_URL);
});
@ -165,10 +161,11 @@ describe('SecurityPatchUpgradeAlertModal', () => {
it('renders modal body with suggested versions', () => {
expect(findGlModalBody().text()).toBe(
sprintf(i18n.modalBodyStableVersions, {
currentVersion: defaultProps.currentVersion,
latestStableVersions: latestStableVersions.join(', '),
}),
`You are currently on version ${
defaultProps.currentVersion
}! We strongly recommend upgrading your GitLab installation to one of the following versions immediately: ${latestStableVersions.join(
', ',
)}.`,
);
});
});
@ -181,7 +178,53 @@ describe('SecurityPatchUpgradeAlertModal', () => {
});
it('renders modal details', () => {
expect(findGlModalDetails().text()).toBe(sprintf(i18n.modalDetails, { details }));
expect(findGlModalDetails().text()).toBe(details);
});
});
describe('template with latestStableVersionOfMinor', () => {
describe('when value is null', () => {
const latestStableVersionOfMinor = null;
beforeEach(() => {
createComponent({ latestStableVersionOfMinor });
});
it('does not render the additional text', () => {
expect(findGlModalBody().text()).not.toContain(
`Additionally, there is an available stable patch for your current GitLab minor version: ${latestStableVersionOfMinor}`,
);
});
});
describe('when value is already included in latestStableVersions', () => {
const latestStableVersionOfMinor = '11.1.2';
const latestStableVersions = ['11.3.1', '11.2.1', '11.1.2'];
beforeEach(() => {
createComponent({ latestStableVersionOfMinor, latestStableVersions });
});
it('does not render the additional text', () => {
expect(findGlModalBody().text()).not.toContain(
`Additionally, there is an available stable patch for your current GitLab minor version: ${latestStableVersionOfMinor}`,
);
});
});
describe('when value is not already included in latestStableVersions', () => {
const latestStableVersionOfMinor = '11.1.2';
const latestStableVersions = ['11.4.1', '11.3.1', '11.2.1'];
beforeEach(() => {
createComponent({ latestStableVersionOfMinor, latestStableVersions });
});
it('does render the additional text', () => {
expect(findGlModalBody().text()).toContain(
`Additionally, there is an available stable patch for your current GitLab minor version: ${latestStableVersionOfMinor}`,
);
});
});
});

View File

@ -120,66 +120,30 @@ RSpec.describe Gitlab::Diff::PositionTracer do
describe 'when requesting diffs' do
shared_examples 'it does not call diff stats' do
it 'does not call diff stats' do
expect_next_instance_of(Gitlab::GitalyClient::CommitService) do |instance|
expect(instance).not_to receive(:diff_stats)
expect_next_instance_of(Compare) do |instance|
expect(instance).to receive(:diffs).with(hash_including(include_stats: false)).and_call_original
end
diff_files
end
end
shared_examples 'it calls diff stats' do
it 'calls diff stats' do
expect_next_instance_of(Gitlab::GitalyClient::CommitService) do |instance|
expect(instance).to receive(:diff_stats).and_call_original
end
context 'ac diffs' do
let(:diff_files) { subject.ac_diffs.diff_files }
diff_files
end
it_behaves_like 'it does not call diff stats'
end
context 'when remove_request_stats_for_tracing is true' do
context 'ac diffs' do
let(:diff_files) { subject.ac_diffs.diff_files }
context 'bd diffs' do
let(:diff_files) { subject.bd_diffs.diff_files }
it_behaves_like 'it does not call diff stats'
end
context 'bd diffs' do
let(:diff_files) { subject.bd_diffs.diff_files }
it_behaves_like 'it does not call diff stats'
end
context 'cd diffs' do
let(:diff_files) { subject.cd_diffs.diff_files }
it_behaves_like 'it does not call diff stats'
end
it_behaves_like 'it does not call diff stats'
end
context 'when remove_request_stats_for_tracing is false' do
before do
stub_feature_flags(remove_request_stats_for_tracing: false)
end
context 'cd diffs' do
let(:diff_files) { subject.cd_diffs.diff_files }
context 'ac diffs' do
let(:diff_files) { subject.ac_diffs.diff_files }
it_behaves_like 'it calls diff stats'
end
context 'bd diffs' do
let(:diff_files) { subject.bd_diffs.diff_files }
it_behaves_like 'it calls diff stats'
end
context 'cd diffs' do
let(:diff_files) { subject.cd_diffs.diff_files }
it_behaves_like 'it calls diff stats'
end
it_behaves_like 'it does not call diff stats'
end
end
end

View File

@ -29,22 +29,22 @@ RSpec.describe Gitlab::InternalEvents::EventDefinitions, feature_category: :prod
allow(definition2).to receive(:events).and_return(events2)
end
describe ".unique_property" do
describe ".unique_properties" do
context 'when event has valid unique value with a period', :aggregate_failures do
let(:events1) { { 'event1' => :'user.id' } }
let(:events2) { { 'event2' => :'project.id' } }
it 'is returned' do
expect(described_class.unique_property('event1')).to eq(:user)
expect(described_class.unique_property('event2')).to eq(:project)
expect(described_class.unique_properties('event1')).to eq([:user])
expect(described_class.unique_properties('event2')).to eq([:project])
end
end
context 'when event has no periods in unique property', :aggregate_failures do
let(:events1) { { 'event1' => :plan_id } }
context 'when event has no periods in unique property' do
let(:events1) { { 'event1' => :user_id } }
it 'fails' do
expect { described_class.unique_property('event1') }
expect { described_class.unique_properties('event1') }
.to raise_error(described_class::InvalidMetricConfiguration, /Invalid unique value/)
end
end
@ -53,29 +53,39 @@ RSpec.describe Gitlab::InternalEvents::EventDefinitions, feature_category: :prod
let(:events1) { { 'event1' => :'project.namespace.id' } }
it 'fails' do
expect { described_class.unique_property('event1') }
expect { described_class.unique_properties('event1') }
.to raise_error(described_class::InvalidMetricConfiguration, /Invalid unique value/)
end
end
context 'when event does not have unique property' do
it 'returns nil' do
expect(described_class.unique_property('event1')).to be_nil
it 'returns an empty array' do
expect(described_class.unique_properties('event1')).to eq([])
end
end
context 'when an event has multiple unique properties' do
let(:events1) { { 'event1' => :'user.id' } }
let(:events2) { { 'event1' => :'project.id' } }
it "returns all the properties" do
expect(described_class.unique_properties('event1')).to match_array([:user, :project])
end
end
context 'when an event has nil property' do
let(:events1) { { 'event1' => :'user.id' } }
let(:events2) { { 'event1' => nil } }
it "ignores the nil property" do
expect(described_class.unique_properties('event1')).to eq([:user])
end
end
end
describe ".load_configurations" do
context 'when unique property for event is ambiguous' do
let(:events1) { { 'event1' => :user_id } }
let(:events2) { { 'event1' => :project_id } }
it 'logs error when loading' do
expect(Gitlab::ErrorTracking).to receive(:track_and_raise_for_dev_exception)
.with(described_class::InvalidMetricConfiguration)
described_class.load_configurations
end
it 'raises no errors' do
described_class.load_configurations
end
end

View File

@ -13,7 +13,7 @@ RSpec.describe Gitlab::InternalEvents, :snowplow, feature_category: :product_ana
allow(redis).to receive(:incr)
allow(Gitlab::Redis::SharedState).to receive(:with).and_yield(redis)
allow(Gitlab::Tracking).to receive(:tracker).and_return(fake_snowplow)
allow(Gitlab::InternalEvents::EventDefinitions).to receive(:unique_property).and_return(unique_property)
allow(Gitlab::InternalEvents::EventDefinitions).to receive(:unique_properties).and_return(unique_properties)
allow(fake_snowplow).to receive(:event)
end
@ -29,9 +29,12 @@ RSpec.describe Gitlab::InternalEvents, :snowplow, feature_category: :product_ana
end
end
def expect_redis_hll_tracking
def expect_redis_hll_tracking(value_override = nil, property_name_override = nil)
expected_value = value_override || unique_value
expected_property_name = property_name_override || property_name
expect(Gitlab::UsageDataCounters::HLLRedisCounter).to have_received(:track_event)
.with(event_name, values: unique_value)
.with(event_name, values: expected_value, property_name: expected_property_name)
end
def expect_redis_tracking
@ -93,8 +96,9 @@ RSpec.describe Gitlab::InternalEvents, :snowplow, feature_category: :product_ana
let(:fake_snowplow) { instance_double(Gitlab::Tracking::Destinations::Snowplow) }
let(:event_name) { 'g_edit_by_web_ide' }
let(:category) { 'InternalEventTracking' }
let(:unique_property) { :user }
let(:unique_properties) { [:user] }
let(:unique_value) { user.id }
let(:property_name) { :user }
let(:redis_arguments) { [event_name, Date.today.strftime('%G-%V')] }
context 'when only user is passed' do
@ -123,8 +127,9 @@ RSpec.describe Gitlab::InternalEvents, :snowplow, feature_category: :product_ana
end
context 'when namespace is not passed' do
let(:unique_property) { :namespace }
let(:unique_properties) { [:namespace] }
let(:unique_value) { project.namespace.id }
let(:property_name) { :namespace }
it 'uses id from projects namespace' do
described_class.track_event(event_name, user: user, project: project)
@ -209,9 +214,17 @@ RSpec.describe Gitlab::InternalEvents, :snowplow, feature_category: :product_ana
.with(message: /should be triggered with a named parameter/)
end
it 'logs warning on nil property', :aggregate_failures do
expect { described_class.track_event(event_name, user: nil) }.not_to raise_error
expect_redis_tracking
expect(Gitlab::AppJsonLogger).to have_received(:warn)
.with(message: /should be triggered with a named parameter/)
end
context 'when unique property is missing' do
before do
allow(Gitlab::InternalEvents::EventDefinitions).to receive(:unique_property)
allow(Gitlab::InternalEvents::EventDefinitions).to receive(:unique_properties)
.and_raise(Gitlab::InternalEvents::EventDefinitions::InvalidMetricConfiguration)
end
@ -226,12 +239,13 @@ RSpec.describe Gitlab::InternalEvents, :snowplow, feature_category: :product_ana
context 'when unique key is defined' do
let(:event_name) { 'p_ci_templates_terraform_base_latest' }
let(:unique_value) { project.id }
let(:property_names) { [:project] }
let(:property_name) { :project }
before do
allow(Gitlab::InternalEvents::EventDefinitions).to receive(:unique_property)
allow(Gitlab::InternalEvents::EventDefinitions).to receive(:unique_properties)
.with(event_name)
.and_return(property_name)
.and_return(property_names)
end
it 'is used when logging to RedisHLL', :aggregate_failures do
@ -243,6 +257,10 @@ RSpec.describe Gitlab::InternalEvents, :snowplow, feature_category: :product_ana
end
context 'when property is missing' do
let(:unique_value) { project.id }
let(:property_names) { [:project] }
let(:property_name) { :project }
it 'logs error' do
expect { described_class.track_event(event_name, merge_request_id: 1) }.not_to raise_error
@ -251,6 +269,62 @@ RSpec.describe Gitlab::InternalEvents, :snowplow, feature_category: :product_ana
end
end
context 'when there are multiple unique keys' do
let(:property_names) { [:project, :user] }
context "with the property_name tracking feature flag enabled" do
before do
stub_feature_flags(redis_hll_property_name_tracking: true)
end
it 'all of them are used when logging to RedisHLL', :aggregate_failures do
described_class.track_event(event_name, user: user, project: project)
expect_redis_tracking
expect_redis_hll_tracking(user.id, :user)
expect_redis_hll_tracking(project.id, :project)
expect_snowplow_tracking
end
end
context "with the property_name tracking feature flag disabled" do
context "with multiple property_names defined" do
it 'logs an error', :aggregate_failures do
described_class.track_event(event_name, user: user, project: project)
expect(Gitlab::ErrorTracking).to have_received(:track_and_raise_for_dev_exception).with(
Gitlab::InternalEvents::EventDefinitions::InvalidMetricConfiguration, anything
)
expect(Gitlab::UsageDataCounters::HLLRedisCounter).not_to have_received(:track_event)
end
end
context "with single property_names defined" do
let(:property_names) { [:project] }
it 'logs to RedisHLL only once' do
described_class.track_event(event_name, user: user, project: project)
expect(Gitlab::UsageDataCounters::HLLRedisCounter).to have_received(:track_event).once
end
end
context "when event_name is user_visited_dashboard" do
let(:event_name) { 'user_visited_dashboard' }
it 'logs to RedisHLL only once with user_id' do
# make it defined also on FOSS tests
allow(Gitlab::InternalEvents::EventDefinitions).to receive(:known_event?).with(event_name).and_return(true)
described_class.track_event(event_name, user: user, project: project)
expect(Gitlab::UsageDataCounters::HLLRedisCounter).to have_received(:track_event).once
.with(event_name, values: user.id, property_name: :user)
end
end
end
end
context 'when send_snowplow_event is false' do
it 'logs to Redis and RedisHLL but not Snowplow' do
described_class.track_event(event_name, send_snowplow_event: false, user: user, project: project)
@ -266,9 +340,9 @@ RSpec.describe Gitlab::InternalEvents, :snowplow, feature_category: :product_ana
let(:event_name) { 'p_ci_templates_terraform_base_latest' }
before do
allow(Gitlab::InternalEvents::EventDefinitions).to receive(:unique_property)
allow(Gitlab::InternalEvents::EventDefinitions).to receive(:unique_properties)
.with(event_name)
.and_return(nil)
.and_return([])
end
it 'logs to Redis and Snowplow but not RedisHLL', :aggregate_failures do

View File

@ -27,7 +27,7 @@ RSpec.describe Gitlab::UsageDataCounters::CiTemplateUniqueCounter, feature_categ
expect(Gitlab::UsageDataCounters::HLLRedisCounter)
.to receive(:track_event).with(template_name, values: project.id).once
expect(Gitlab::UsageDataCounters::HLLRedisCounter)
.to receive(:track_event).with('ci_template_included', values: project.id).once
.to receive(:track_event).with('ci_template_included', values: project.id, property_name: :project).once
subject
end

View File

@ -8,6 +8,8 @@ RSpec.describe ProtectedBranch, feature_category: :source_code_management do
describe 'Associations' do
it { is_expected.to belong_to(:project) }
it { is_expected.to belong_to(:group) }
it { is_expected.to have_many(:merge_access_levels).inverse_of(:protected_branch) }
it { is_expected.to have_many(:push_access_levels).inverse_of(:protected_branch) }
end
describe 'Validation' do

View File

@ -5,6 +5,7 @@ require 'spec_helper'
RSpec.describe ProtectedTag, feature_category: :source_code_management do
describe 'Associations' do
it { is_expected.to belong_to(:project).touch(true) }
it { is_expected.to have_many(:create_access_levels).inverse_of(:protected_tag) }
end
describe 'Validation' do

View File

@ -113,8 +113,8 @@ RSpec.describe Packages::Pypi::CreatePackageService, :aggregate_failures, featur
context 'with an invalid metadata' do
let(:requires_python) { 'x' * 256 }
it 'raises an error' do
expect { subject }.to raise_error(ActiveRecord::RecordInvalid)
it_behaves_like 'returning an error service response', /Pypi package metadata invalid/ do
it { is_expected.to have_attributes(reason: :invalid_parameter) }
end
end
@ -122,8 +122,13 @@ RSpec.describe Packages::Pypi::CreatePackageService, :aggregate_failures, featur
let(:package) { created_package }
end
it_behaves_like 'assigns build to package'
it_behaves_like 'assigns status to package'
it_behaves_like 'assigns build to package' do
let(:subject) { super().payload.fetch(:package) }
end
it_behaves_like 'assigns status to package' do
let(:subject) { super().payload.fetch(:package) }
end
context 'with an existing package' do
before do
@ -137,11 +142,14 @@ RSpec.describe Packages::Pypi::CreatePackageService, :aggregate_failures, featur
params[:md5_digest] = md5
end
it 'throws an error' do
it_behaves_like 'returning an error service response', /File name has already been taken/ do
it { is_expected.to have_attributes(reason: :invalid_parameter) }
end
it 'does not create a pypi package' do
expect { subject }
.to change { Packages::Package.pypi.count }.by(0)
.and change { Packages::PackageFile.count }.by(0)
.and raise_error(/File name has already been taken/)
end
context 'with a pending_destruction package' do

View File

@ -3040,7 +3040,6 @@
- './ee/spec/workers/requirements_management/process_requirements_reports_worker_spec.rb'
- './ee/spec/workers/sbom/ingest_reports_worker_spec.rb'
- './ee/spec/workers/scan_security_report_secrets_worker_spec.rb'
- './ee/spec/workers/security/auto_fix_worker_spec.rb'
- './ee/spec/workers/security/create_orchestration_policy_worker_spec.rb'
- './ee/spec/workers/security/orchestration_policy_rule_schedule_namespace_worker_spec.rb'
- './ee/spec/workers/security/orchestration_policy_rule_schedule_worker_spec.rb'

View File

@ -10,7 +10,7 @@ RSpec.shared_examples 'tracking unique visits' do |method|
ids.each do |id|
expect(Gitlab::UsageDataCounters::HLLRedisCounter)
.to receive(:track_event).with(id, values: anything)
.to receive(:track_event).with(id, hash_including(values: anything))
end
# allow other method calls in addition to the expected one
@ -24,7 +24,7 @@ RSpec.shared_examples 'tracking unique visits' do |method|
ids.each do |id|
expect(Gitlab::UsageDataCounters::HLLRedisCounter)
.to receive(:track_event).with(id, values: anything)
.to receive(:track_event).with(id, hash_including(values: anything))
end
# allow other method calls in addition to the expected one

7517
vendor/spdx.json vendored

File diff suppressed because one or more lines are too long