Add latest changes from gitlab-org/gitlab@master

GitLab Bot 2023-11-09 21:12:54 +00:00
parent 519f46346b
commit da7b4c2be2
40 changed files with 730 additions and 155 deletions

View File

@@ -4263,7 +4263,6 @@ Layout/LineLength:
- 'spec/requests/api/graphql/mutations/snippets/create_spec.rb'
- 'spec/requests/api/graphql/mutations/work_items/create_from_task_spec.rb'
- 'spec/requests/api/graphql/mutations/work_items/delete_spec.rb'
- 'spec/requests/api/graphql/namespace/root_storage_statistics_spec.rb'
- 'spec/requests/api/graphql/namespace_query_spec.rb'
- 'spec/requests/api/graphql/packages/package_spec.rb'
- 'spec/requests/api/graphql/project/alert_management/alert/notes_spec.rb'

View File

@@ -5,8 +5,6 @@ import {
BatchV1Api,
WatchApi,
EVENT_DATA,
EVENT_TIMEOUT,
EVENT_ERROR,
} from '@gitlab/cluster-client';
import { humanizeClusterErrors } from '../../helpers/k8s_integration_helper';
import k8sPodsQuery from '../queries/k8s_pods.query.graphql';
@@ -63,60 +61,57 @@ const handleClusterError = async (err) => {
throw errorData;
};
const watchPods = ({ configuration, namespace, client }) => {
const path = namespace ? `/api/v1/namespaces/${namespace}/pods` : '/api/v1/pods';
const config = new Configuration(configuration);
const watcherApi = new WatchApi(config);
watcherApi
.subscribeToStream(path, { watch: true })
.then((watcher) => {
let result = [];
watcher.on(EVENT_DATA, (data) => {
result = data.map((item) => {
return { status: { phase: item.status.phase } };
});
client.writeQuery({
query: k8sPodsQuery,
variables: { configuration, namespace },
data: { k8sPods: result },
});
});
})
.catch((err) => {
handleClusterError(err);
});
};
export default {
k8sPods(_, { configuration, namespace }, { client }) {
const config = new Configuration(configuration);
if (!gon.features?.k8sWatchApi) {
const coreV1Api = new CoreV1Api(config);
const podsApi = namespace
? coreV1Api.listCoreV1NamespacedPod({ namespace })
: coreV1Api.listCoreV1PodForAllNamespaces();
const coreV1Api = new CoreV1Api(config);
const podsApi = namespace
? coreV1Api.listCoreV1NamespacedPod({ namespace })
: coreV1Api.listCoreV1PodForAllNamespaces();
return podsApi
.then((res) => res?.items || [])
.catch(async (err) => {
try {
await handleClusterError(err);
} catch (error) {
throw new Error(error.message);
}
});
}
return podsApi
.then((res) => {
if (gon.features?.k8sWatchApi) {
watchPods({ configuration, namespace, client });
}
const path = namespace ? `/api/v1/namespaces/${namespace}/pods` : '/api/v1/pods';
const watcherApi = new WatchApi(config);
return watcherApi.subscribeToStream(path, { watch: true }).then((watcher) => {
let result = [];
return new Promise((resolve, reject) => {
watcher.on(EVENT_DATA, (data) => {
result = data.map((item) => {
return { status: { phase: item.status.phase } };
});
resolve(result);
setTimeout(() => {
client.writeQuery({
query: k8sPodsQuery,
variables: { configuration, namespace },
data: { k8sPods: result },
});
}, 0);
});
watcher.on(EVENT_TIMEOUT, () => {
resolve(result);
});
watcher.on(EVENT_ERROR, (errorData) => {
const error = errorData?.message ? new Error(errorData.message) : errorData;
reject(error);
});
return res?.items || [];
})
.catch(async (err) => {
try {
await handleClusterError(err);
} catch (error) {
throw new Error(error.message);
}
});
});
},
k8sServices(_, { configuration, namespace }) {
const coreV1Api = new CoreV1Api(new Configuration(configuration));

View File

@@ -72,6 +72,8 @@ export default {
<template>
<merge-checks-message :check="check">
<action-buttons v-if="!isLoading" :tertiary-buttons="tertiaryActionsButtons" />
<template #failed>
<action-buttons v-if="!isLoading" :tertiary-buttons="tertiaryActionsButtons" />
</template>
</merge-checks-message>
</template>

View File

@@ -57,6 +57,7 @@ export default {
<div class="gl-display-flex">{{ failureReason }}</div>
</div>
<slot></slot>
<slot v-if="check.status === 'FAILED'" name="failed"></slot>
</div>
</div>
</template>

View File

@@ -184,7 +184,9 @@ export default {
<template>
<merge-checks-message :check="check">
<action-buttons v-if="!isLoading" :tertiary-buttons="tertiaryActionsButtons" />
<template #failed>
<action-buttons v-if="!isLoading" :tertiary-buttons="tertiaryActionsButtons" />
</template>
<gl-modal
ref="modal"
:modal-id="$options.modal.id"

View File

@@ -32,6 +32,8 @@ export default {
<template>
<merge-checks-message :check="check">
<action-buttons v-if="check.status === 'FAILED'" :tertiary-buttons="tertiaryActionsButtons" />
<template #failed>
<action-buttons :tertiary-buttons="tertiaryActionsButtons" />
</template>
</merge-checks-message>
</template>

View File

@@ -17,7 +17,7 @@ const defaultRender = (apolloProvider) => ({
data() {
return { service: {}, mr: { conflictResolutionPath: 'https://gitlab.com' } };
},
template: '<merge-checks :mr="mr" />',
template: '<merge-checks :mr="mr" :service="service" />',
});
const Template = ({ canMerge, failed, pushToSourceBranch }) => {

View File

@@ -124,6 +124,10 @@ class BulkImports::Entity < ApplicationRecord
entity_type.pluralize
end
def portable_class
entity_type.classify.constantize
end
def base_resource_url_path
"/#{pluralized_name}/#{encoded_source_full_path}"
end
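
A quick sketch of what `portable_class` resolves to, with return values mirroring the model specs added later in this commit (`build` is the FactoryBot helper used in those specs):

```ruby
# entity_type is "group" or "project"; classify.constantize maps it to a model.
build(:bulk_import_entity, :group_entity).portable_class   # => Group
build(:bulk_import_entity, :project_entity).portable_class # => Project
```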

View File

@@ -5,6 +5,8 @@ module BulkImports
include ApplicationWorker
include ExclusiveLeaseGuard
DEFER_ON_HEALTH_DELAY = 5.minutes
data_consistency :always # rubocop:disable SidekiqLoadBalancing/WorkerDataConsistency
feature_category :importers
sidekiq_options dead: false, retry: 3
@@ -16,6 +18,26 @@ module BulkImports
new.perform_failure(msg['args'].first, exception)
end
defer_on_database_health_signal(:gitlab_main, [], DEFER_ON_HEALTH_DELAY) do |job_args, schema, tables|
batch = ::BulkImports::BatchTracker.find(job_args.first)
pipeline_tracker = batch.tracker
pipeline_schema = ::BulkImports::PipelineSchemaInfo.new(
pipeline_tracker.pipeline_class,
pipeline_tracker.entity.portable_class
)
if pipeline_schema.db_schema && pipeline_schema.db_table
schema = pipeline_schema.db_schema
tables = [pipeline_schema.db_table]
end
[schema, tables]
end
def self.defer_on_database_health_signal?
Feature.enabled?(:bulk_import_deferred_workers)
end
def perform(batch_id)
@batch = ::BulkImports::BatchTracker.find(batch_id)

View File

@@ -7,6 +7,8 @@ module BulkImports
FILE_EXTRACTION_PIPELINE_PERFORM_DELAY = 10.seconds
DEFER_ON_HEALTH_DELAY = 5.minutes
data_consistency :always
feature_category :importers
sidekiq_options dead: false, retry: 3
@@ -21,6 +23,25 @@ module BulkImports
new.perform_failure(msg['args'][0], msg['args'][2], exception)
end
defer_on_database_health_signal(:gitlab_main, [], DEFER_ON_HEALTH_DELAY) do |job_args, schema, tables|
pipeline_tracker = ::BulkImports::Tracker.find(job_args.first)
pipeline_schema = ::BulkImports::PipelineSchemaInfo.new(
pipeline_tracker.pipeline_class,
pipeline_tracker.entity.portable_class
)
if pipeline_schema.db_schema && pipeline_schema.db_table
schema = pipeline_schema.db_schema
tables = [pipeline_schema.db_table]
end
[schema, tables]
end
def self.defer_on_database_health_signal?
Feature.enabled?(:bulk_import_deferred_workers)
end
# Keep _stage parameter for backwards compatibility.
def perform(pipeline_tracker_id, _stage, entity_id)
@entity = ::BulkImports::Entity.find(entity_id)

View File

@@ -201,10 +201,10 @@ module WorkerAttributes
!!get_class_attribute(:big_payload)
end
def defer_on_database_health_signal(gitlab_schema, tables = [], delay_by = DEFAULT_DEFER_DELAY)
def defer_on_database_health_signal(gitlab_schema, tables = [], delay_by = DEFAULT_DEFER_DELAY, &block)
set_class_attribute(
:database_health_check_attrs,
{ gitlab_schema: gitlab_schema, tables: tables, delay_by: delay_by }
{ gitlab_schema: gitlab_schema, tables: tables, delay_by: delay_by, block: block }
)
end
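
A minimal sketch (hypothetical `ExampleWorker` and `ExampleRecord`, modeled on the two BulkImports workers above) of the new block form: the block receives the job arguments plus the statically configured schema and tables, and returns the `[schema, tables]` pair to health-check.

```ruby
class ExampleWorker
  include ApplicationWorker

  # Lazily narrow the health check to the table a given job actually touches.
  # ExampleRecord is a hypothetical model looked up from the job arguments.
  defer_on_database_health_signal(:gitlab_main, [], 5.minutes) do |job_args, schema, tables|
    record = ExampleRecord.find(job_args.first)
    [schema, [record.class.table_name]]
  end

  def perform(id); end
end
```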

View File

@@ -0,0 +1,8 @@
---
name: bulk_import_deferred_workers
introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/136137
rollout_issue_url: https://gitlab.com/gitlab-org/gitlab/-/issues/431032
milestone: '16.6'
type: development
group: group::import and integrate
default_enabled: false

View File

@@ -0,0 +1,23 @@
---
key_path: redis_hll_counters.quickactions.i_quickactions_request_changes_monthly
description: Count of unique users who use the `/request_changes` quick action on merge requests
product_section: dev
product_stage: create
product_group: code_review
value_type: number
status: active
milestone: '16.6'
time_frame: 28d
data_source: redis_hll
data_category: optional
instrumentation_class: RedisHLLMetric
options:
events:
- i_quickactions_request_changes
distribution:
- ce
- ee
tier:
- free
- premium
- ultimate
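
For context, a hedged sketch of how a Redis HLL event like this is typically recorded; `Gitlab::UsageDataCounters::HLLRedisCounter.track_event` is the usual low-level entry point, though this commit does not touch it:

```ruby
# Assumed usage: counts the current user once against the HLL set for this event.
Gitlab::UsageDataCounters::HLLRedisCounter.track_event(
  'i_quickactions_request_changes',
  values: current_user.id
)
```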

View File

@@ -0,0 +1,23 @@
---
key_path: redis_hll_counters.quickactions.i_quickactions_request_changes_weekly
description: Count of unique users who use the `/request_changes` quick action on merge requests
product_section: dev
product_stage: create
product_group: code_review
value_type: number
status: active
milestone: '16.6'
time_frame: 7d
data_source: redis_hll
data_category: optional
instrumentation_class: RedisHLLMetric
options:
events:
- i_quickactions_request_changes
distribution:
- ce
- ee
tier:
- free
- premium
- ultimate

View File

@@ -0,0 +1,13 @@
- title: "Deprecation of `lfs_check` feature flag" # (required) Clearly explain the change, or planned change. For example, "The `confidential` field for a `Note` is deprecated" or "CI/CD job names will be limited to 250 characters."
removal_milestone: "16.9" # (required) The milestone when this feature is planned to be removed
announcement_milestone: "16.6" # (required) The milestone when this feature was first announced as deprecated.
breaking_change: false # (required) Change to false if this is not a breaking change.
reporter: derekferguson # (required) GitLab username of the person reporting the change
stage: Create # (required) String value of the stage that the feature was created in. e.g., Growth
issue_url: https://gitlab.com/gitlab-org/gitlab/-/issues/233550 # (required) Link to the deprecation issue in GitLab
body: | # (required) Do not modify this line, instead modify the lines below.
In GitLab 16.9, we will remove the `lfs_check` feature flag. This feature flag was [introduced 4 years ago](https://gitlab.com/gitlab-org/gitlab-foss/-/issues/60588) and controls whether the LFS integrity check is enabled. The feature flag is enabled by default, but some customers experienced performance issues with the LFS integrity check and explicitly disabled it.
After [dramatically improving the performance](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/61355) of the LFS integrity check, we are ready to remove the feature flag. After the flag is removed, the feature will automatically be turned on for any environment in which it is currently disabled.
If this feature flag is disabled for your environment, and you are concerned about performance issues, please enable it and monitor the performance before it is removed in 16.9. If you see any performance issues after enabling it, please let us know in [this feedback issue](https://gitlab.com/gitlab-org/gitlab/-/issues/233550).
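
If you want to verify or re-enable the flag ahead of the removal, a sketch using the standard feature-flag API in a Rails console (not specific to this commit):

```ruby
Feature.enabled?(:lfs_check) # check the current state
Feature.enable(:lfs_check)   # turn the LFS integrity check back on
```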

View File

@@ -68,6 +68,25 @@ compared with the pros and cons of alternatives.
## Design and implementation details
### Workspace & Artifacts
- We will store implementation details like metrics, budgets, and development & architectural patterns here in the docs
- We will store large bodies of research, the results of audits, etc. in the [wiki](https://gitlab.com/gitlab-com/create-stage/new-diffs/-/wikis/home) of the [New Diffs project](https://gitlab.com/gitlab-com/create-stage/new-diffs)
- We will store audio & video recordings on the public YouTube channel in the Code Review / New Diffs playlist
- We will store drafts, meeting notes, and other temporary documents in public Google docs
### Definitions
#### Maintainability
Maintainable projects are _simple_ projects.
Simplicity is the opposite of complexity. This uses a definition of simple and complex [described by Rich Hickey in "Simple Made Easy"](https://www.infoq.com/presentations/Simple-Made-Easy/) (Strange Loop, 2011).
- Maintainable code is simple (single task, single concept, separate from other things).
- Maintainable projects expand on simple code by having simple structure (folders define classes of behaviors, e.g. you can be assured that a component directory will never initiate a network call, because that would be complecting visual display with data access)
- Maintainable applications flow out of simple organization and simple code. The old saying is that a cluttered desk is representative of a cluttered mind. Rigorous discipline on simplicity will be reflected in our output (the product). By being strict about working simply, we will naturally produce applications whose behavior our users can more easily reason about.
<!--
This section should contain enough information that the specifics of your
change are understandable. This may include API specs (though not always

View File

@@ -55,6 +55,8 @@ merge request targeting that branch.
## Configuring Browser Performance Testing
> Support for the `SITESPEED_DOCKER_OPTIONS` variable [introduced](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/134024) in GitLab 16.6.
This example shows how to run the [sitespeed.io container](https://hub.docker.com/r/sitespeedio/sitespeed.io/)
on your code by using GitLab CI/CD and [sitespeed.io](https://www.sitespeed.io)
with Docker-in-Docker.

View File

@@ -0,0 +1,38 @@
---
stage: Solutions Architecture
group: Solutions Architecture
info: This page is owned by the Solutions Architecture team.
description: GitLab partnership certifications and designations from AWS.
---
# GitLab partnership certifications and designations from AWS
The certifications and designations outlined here can be validated on [GitLab's partner page at AWS](https://partners.amazonaws.com/partners/001E0000018YWFfIAO/GitLab,%20Inc.).
All AWS partner qualifications require submission and validation of extensive checklists, along with backing evidence that AWS uses to determine whether to grant the qualification.
## DevOps Software / ISV Competency
This competency validates that GitLab delivers DevOps solutions that work with and on AWS. [AWS Program Information](https://aws.amazon.com/devops/partner-solutions/)
## DevSecOps Specialty Category
[AWS Program Information](https://aws.amazon.com/blogs/apn/aws-devops-competency-expands-to-include-devsecops-category/) [GitLab Announcement](https://about.gitlab.com/blog/2023/09/25/aws-devsecops-competency-partner/)
## Public Sector Partner
This designation indicates that GitLab has been deemed qualified to work with AWS Public Sector customers. In fact, we have an entire organization dedicated to this practice. [AWS Program Information](https://aws.amazon.com/partners/programs/public-sector/)
## AWS Graviton
GitLab Instances and Runners have been tested and work on AWS Graviton. For Amazon Linux, we maintain YUM packages for the ARM architecture. [AWS Program Information](https://aws.amazon.com/ec2/graviton/partners/)
## Amazon Linux Ready
GitLab Instances and Runners have been validated on Amazon Linux 2 and Amazon Linux 2023. This includes YUM packages and package repositories for both, plus over 2,300 CI tests run against both before packaging. [AWS Program Information](https://aws.amazon.com/amazon-linux/partners/)
## AWS Marketplace Seller
GitLab is an AWS Marketplace seller, and you can purchase and deploy GitLab through the AWS Marketplace. [AWS Program Information](https://aws.amazon.com/marketplace/partners/management-tour)
![AWS Partner Designations Logo](img/all-aws-partner-designations.png){: .right}

Binary file added (image, 12 KiB); not shown.

View File

@@ -6,12 +6,13 @@ info: This page is owned by the Solutions Architecture team.
# AWS Solutions
This documentation covers solutions relating to GitLab and Amazon Web Services (AWS).
This documentation covers solutions relating to leveraging GitLab with and on Amazon Web Services (AWS).
[GitLab AWS Integration Index](gitlab_aws_integration.md)
[GitLab Instances on AWS EKS](gitlab_instance_on_aws.md)
[SRE Considerations Gitaly on AWS](gitaly_sre_for_aws.md)
[Provision GitLab on a single EC2 instance in AWS](gitlab_single_box_on_aws.md)
- [GitLab partnership certifications and designations from AWS](gitlab_aws_partner_designations.md)
- [GitLab AWS Integration Index](gitlab_aws_integration.md)
- [GitLab Instances on AWS EKS](gitlab_instance_on_aws.md)
- [SRE Considerations Gitaly on AWS](gitaly_sre_for_aws.md)
- [Provision GitLab on a single EC2 instance in AWS](gitlab_single_box_on_aws.md)
## Cloud platform Well-Architected compliance

View File

@@ -1257,6 +1257,29 @@ Previous work helped [align the vulnerabilities calls for pipeline security tabs
</div>
</div>
<div class="milestone-wrapper" data-milestone="16.9">
## GitLab 16.9
<div class="deprecation " data-milestone="16.9">
### Deprecation of `lfs_check` feature flag
<div class="deprecation-notes">
- Announced in GitLab <span class="milestone">16.6</span>
- Removal in GitLab <span class="milestone">16.9</span>
- To discuss this change or learn more, see the [deprecation issue](https://gitlab.com/gitlab-org/gitlab/-/issues/233550).
</div>
In GitLab 16.9, we will remove the `lfs_check` feature flag. This feature flag was [introduced 4 years ago](https://gitlab.com/gitlab-org/gitlab-foss/-/issues/60588) and controls whether the LFS integrity check is enabled. The feature flag is enabled by default, but some customers experienced performance issues with the LFS integrity check and explicitly disabled it.
After [dramatically improving the performance](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/61355) of the LFS integrity check, we are ready to remove the feature flag. After the flag is removed, the feature will automatically be turned on for any environment in which it is currently disabled.
If this feature flag is disabled for your environment, and you are concerned about performance issues, please enable it and monitor the performance before it is removed in 16.9. If you see any performance issues after enabling it, please let us know in [this feedback issue](https://gitlab.com/gitlab-org/gitlab/-/issues/233550).
</div>
</div>
<div class="milestone-wrapper" data-milestone="16.8">
## GitLab 16.8

View File

@@ -117,9 +117,33 @@ Specific information applies to installations using Geo:
SELECT id FROM push_rules WHERE LENGTH(delete_branch_regex) > 511;
```
To find out if a push rule belongs to a project, group, or instance, run this script
in the [Rails console](../../administration/operations/rails_console.md#starting-a-rails-console-session):
```ruby
# Replace `delete_branch_regex` with the name of the field used in the constraint
long_rules = PushRule.where("length(delete_branch_regex) > 511")
array = long_rules.map do |lr|
  if lr.project
    "Push rule with ID #{lr.id} is configured in project #{lr.project.full_name}"
  elsif lr.group
    "Push rule with ID #{lr.id} is configured in group #{lr.group.full_name}"
  else
    "Push rule with ID #{lr.id} is configured at the instance level"
  end
end
puts "Total long rules: #{array.count}"
puts array.join("\n")
```
Reduce the value length of the regex field for affected push rules records, then
retry the migration.
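For example, a hedged sketch of shortening a single affected rule in the console (the ID and replacement regex are illustrative):

```ruby
rule = PushRule.find(42) # an ID reported by the script above
rule.update!(delete_branch_regex: 'release/.*') # any value of 511 characters or fewer
```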
If you have too many affected push rules and can't update them through the GitLab UI,
contact [GitLab support](https://about.gitlab.com/support/).
### Self-compiled installations
- A new method of configuring paths for the GitLab secret and custom hooks is preferred in GitLab 16.4 and later:

View File

@@ -132,23 +132,6 @@ To improve this metric, you should consider:
- Improving the efficacy of code review processes.
- Adding more automated testing.
## DORA metrics in GitLab
The DORA metrics are displayed on the following charts:
- [Value Streams Dashboard](value_streams_dashboard.md), which helps you identify trends, patterns, and opportunities for improvement. DORA metrics are displayed in the [metrics comparison panel](value_streams_dashboard.md#devsecops-metrics-comparison-panel) and the [DORA Performers score panel](value_streams_dashboard.md#dora-performers-score-panel).
- [CI/CD analytics charts](ci_cd_analytics.md), which show pipeline success rates and duration, and the history of DORA metrics over time.
- Insights reports for [groups](../group/insights/index.md) and [projects](../group/value_stream_analytics/index.md), where you can also use [DORA query parameters](../../user/project/insights/index.md#dora-query-parameters) to create custom charts.
The table below provides an overview of the DORA metrics' data aggregation in different charts.
| Metric name | Measured values | Data aggregation in the [Value Streams Dashboard](value_streams_dashboard.md) | Data aggregation in [CI/CD analytics charts](ci_cd_analytics.md) | Data aggregation in [Custom insights reporting](../../user/project/insights/index.md#dora-query-parameters) |
|---------------------------|-------------------|-----------------------------------------------------|------------------------|----------|
| Deployment frequency | Number of successful deployments | daily average per month | daily average | `day` (default) or `month` |
| Lead time for changes | Number of seconds to successfully deliver a commit into production | daily median per month | median time | `day` (default) or `month` |
| Time to restore service | Number of seconds an incident was open for | daily median per month | daily median | `day` (default) or `month` |
| Change failure rate | percentage of deployments that cause an incident in production | daily median per month | percentage of failed deployments | `day` (default) or `month` |
## DORA custom calculation rules **(ULTIMATE ALL EXPERIMENT)**
> [Introduced](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/96561) in GitLab 15.4 [with a flag](../../administration/feature_flags.md) named `dora_configuration`. Disabled by default. This feature is an [Experiment](../../policy/experiment-beta-support.md).
@@ -211,7 +194,9 @@ and use it to automatically:
1. [Create an incident when an alert is triggered](../../operations/incident_management/manage_incidents.md#automatically-when-an-alert-is-triggered).
1. [Close incidents via recovery alerts](../../operations/incident_management/manage_incidents.md#automatically-close-incidents-via-recovery-alerts).
### Supported DORA metrics in GitLab
## DORA metrics in GitLab
GitLab supports the following DORA metrics:
| Metric | Level | API | UI chart | Comments |
|---------------------------|-------------------|-----------------------------------------------------|------------------------|----------|
@@ -221,3 +206,22 @@ and use it to automatically:
| `lead_time_for_changes` | Group | [GitLab 13.10 and later](../../api/dora/metrics.md) | GitLab 14.0 and later | Unit in seconds. Aggregation method is median. |
| `time_to_restore_service` | Project and group | [GitLab 14.9 and later](../../api/dora/metrics.md) | GitLab 15.1 and later | Unit in days. Aggregation method is median. |
| `change_failure_rate` | Project and group | [GitLab 14.10 and later](../../api/dora/metrics.md) | GitLab 15.2 and later | Percentage of deployments. |
### DORA metrics charts
The DORA metrics are displayed on the following charts:
- [Value Streams Dashboard](value_streams_dashboard.md), which helps you identify trends, patterns, and opportunities for improvement. DORA metrics are displayed in the [metrics comparison panel](value_streams_dashboard.md#devsecops-metrics-comparison-panel) and the [DORA Performers score panel](value_streams_dashboard.md#dora-performers-score-panel).
- [CI/CD analytics charts](ci_cd_analytics.md), which show pipeline success rates and duration, and the history of DORA metrics over time.
- Insights reports for [groups](../group/insights/index.md) and [projects](../group/value_stream_analytics/index.md), where you can also use [DORA query parameters](../../user/project/insights/index.md#dora-query-parameters) to create custom charts.
### DORA metrics data aggregation
The table below provides an overview of the DORA metrics' data aggregation in different charts.
| Metric name | Measured values | Data aggregation in the [Value Streams Dashboard](value_streams_dashboard.md) | Data aggregation in [CI/CD analytics charts](ci_cd_analytics.md) | Data aggregation in [Custom insights reporting](../../user/project/insights/index.md#dora-query-parameters) |
|---------------------------|-------------------|-----------------------------------------------------|------------------------|----------|
| Deployment frequency | Number of successful deployments | daily average per month | daily average | `day` (default) or `month` |
| Lead time for changes | Number of seconds to successfully deliver a commit into production | daily median per month | median time | `day` (default) or `month` |
| Time to restore service | Number of seconds an incident was open for | daily median per month | daily median | `day` (default) or `month` |
| Change failure rate | percentage of deployments that cause an incident in production | daily median per month | percentage of failed deployments | `day` (default) or `month` |

View File

@@ -113,6 +113,18 @@ To onboard a project:
1. Select **Set up product analytics**.
Your instance is being created, and the project onboarded.
### Onboard an internal project
GitLab team members can enable Product Analytics on their internal projects on GitLab.com (Ultimate) during the experiment phase.
1. Send a message to the Product Analytics team (`#g_analyze_product_analytics`) informing them of the repository to be enabled.
1. Using ChatOps, enable both the `product_analytics_dashboards` and `combined_analytics_dashboards` feature flags:
```plaintext
/chatops run feature set product_analytics_dashboards true --project=FULLPATH_TO_PROJECT
/chatops run feature set combined_analytics_dashboards true --project=FULLPATH_TO_PROJECT
```
## Instrument your application
To instrument code to collect data, use one or more of the existing SDKs:
@@ -304,19 +316,6 @@ To view product analytics usage quota:
The usage quota excludes projects that are not onboarded with product analytics.
## Onboarding GitLab internal projects
GitLab team members can enable Product Analytics on their own internal projects on GitLab.com during the experiment phase.
1. Send a message to the Product Analytics team (`#g_analyze_product_analytics`) informing them of the repository to be enabled.
1. Ensure that the project is within an Ultimate namespace.
1. Using ChatOps, enable both the `product_analytics_dashboards` and `combined_analytics_dashboards`
```plaintext
/chatops run feature set product_analytics_dashboards true --project=FULLPATH_TO_PROJECT
/chatops run feature set combined_analytics_dashboards true --project=FULLPATH_TO_PROJECT
```
## Troubleshooting
### No events are collected

View File

@@ -0,0 +1,36 @@
# frozen_string_literal: true
module BulkImports
class PipelineSchemaInfo
def initialize(pipeline_class, portable_class)
@pipeline_class = pipeline_class
@portable_class = portable_class
end
def db_schema
return unless relation
return unless association
Gitlab::Database::GitlabSchema.tables_to_schema[association.table_name]
end
def db_table
return unless relation
return unless association
association.table_name
end
private
attr_reader :pipeline_class, :portable_class
def relation
@relation ||= pipeline_class.try(:relation)
end
def association
@association ||= portable_class.reflect_on_association(relation)
end
end
end
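
A usage sketch, with return values taken from the specs added later in this commit:

```ruby
info = BulkImports::PipelineSchemaInfo.new(
  BulkImports::Common::Pipelines::LabelsPipeline,
  Project
)

info.db_schema # => :gitlab_main_cell (schema of the association's table)
info.db_table  # => "labels"
```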

View File

@@ -172,6 +172,25 @@ module Gitlab
end
end
desc { _('Request changes') }
explanation { _('Request changes to the current merge request.') }
types MergeRequest
condition do
Feature.enabled?(:mr_request_changes, current_user) &&
quick_action_target.persisted? &&
quick_action_target.find_reviewer(current_user)
end
command :request_changes do
result = ::MergeRequests::UpdateReviewerStateService.new(project: quick_action_target.project, current_user: current_user)
.execute(quick_action_target, "requested_changes")
@execution_message[:request_changes] = if result[:status] == :success
_('Changes requested to the current merge request.')
else
result[:message]
end
end
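# Example (not part of this diff): with the `mr_request_changes` flag enabled,
# a reviewer comments `/request_changes` on a merge request to move their
# review state to `requested_changes`.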
desc { _('Approve a merge request') }
explanation { _('Approve the current merge request.') }
types MergeRequest

View File

@@ -80,13 +80,20 @@ module Gitlab
end
health_check_attrs = worker_class.database_health_check_attrs
job_base_model = Gitlab::Database.schemas_to_base_models[health_check_attrs[:gitlab_schema]].first
tables, schema = health_check_attrs.values_at(:tables, :gitlab_schema)
if health_check_attrs[:block].respond_to?(:call)
schema, tables = health_check_attrs[:block].call(job['args'], schema, tables)
end
job_base_model = Gitlab::Database.schemas_to_base_models[schema].first
health_context = Gitlab::Database::HealthStatus::Context.new(
DatabaseHealthStatusChecker.new(job['jid'], worker_class.name),
job_base_model.connection,
health_check_attrs[:tables],
health_check_attrs[:gitlab_schema]
tables,
schema
)
Gitlab::Database::HealthStatus.evaluate(health_context).any?(&:stop?)

View File

@@ -9802,6 +9802,9 @@ msgstr ""
msgid "Changes"
msgstr ""
msgid "Changes requested to the current merge request."
msgstr ""
msgid "Changes saved."
msgstr ""
@@ -30157,9 +30160,6 @@ msgstr ""
msgid "Metrics:"
msgstr ""
msgid "Metrics|Check again"
msgstr ""
msgid "Metrics|Create metric"
msgstr ""
@@ -30199,9 +30199,6 @@ msgstr ""
msgid "Metrics|New metric"
msgstr ""
msgid "Metrics|No metrics to display."
msgstr ""
msgid "Metrics|PromQL query is valid"
msgstr ""
@@ -40405,6 +40402,9 @@ msgstr ""
msgid "Request changes"
msgstr ""
msgid "Request changes to the current merge request."
msgstr ""
msgid "Request data is too large"
msgstr ""

View File

@@ -3,6 +3,7 @@ import { CoreV1Api, AppsV1Api, BatchV1Api, WatchApi } from '@gitlab/cluster-clie
import axios from '~/lib/utils/axios_utils';
import { resolvers } from '~/environments/graphql/resolvers';
import { CLUSTER_AGENT_ERROR_MESSAGES } from '~/environments/constants';
import k8sPodsQuery from '~/environments/graphql/queries/k8s_pods.query.graphql';
import { k8sPodsMock, k8sServicesMock, k8sNamespacesMock } from '../mock_data';
describe('~/frontend/environments/graphql/resolvers', () => {
@@ -28,16 +29,16 @@ describe('~/frontend/environments/graphql/resolvers', () => {
describe('k8sPods', () => {
const client = { writeQuery: jest.fn() };
describe('when k8sWatchApi feature is disabled', () => {
const mockPodsListFn = jest.fn().mockImplementation(() => {
return Promise.resolve({
items: k8sPodsMock,
});
const mockPodsListFn = jest.fn().mockImplementation(() => {
return Promise.resolve({
items: k8sPodsMock,
});
});
const mockNamespacedPodsListFn = jest.fn().mockImplementation(mockPodsListFn);
const mockAllPodsListFn = jest.fn().mockImplementation(mockPodsListFn);
const mockNamespacedPodsListFn = jest.fn().mockImplementation(mockPodsListFn);
const mockAllPodsListFn = jest.fn().mockImplementation(mockPodsListFn);
describe('when k8sWatchApi feature is disabled', () => {
beforeEach(() => {
jest
.spyOn(CoreV1Api.prototype, 'listCoreV1NamespacedPod')
@@ -90,63 +91,68 @@ describe('~/frontend/environments/graphql/resolvers', () => {
describe('when k8sWatchApi feature is enabled', () => {
const mockWatcher = WatchApi.prototype;
const mockPodsListFn = jest.fn().mockImplementation(() => {
const mockPodsListWatcherFn = jest.fn().mockImplementation(() => {
return Promise.resolve(mockWatcher);
});
const mockOnDataFn = jest.fn().mockImplementation((eventName, callback) => {
if (eventName === 'data') {
callback(k8sPodsMock);
}
});
const mockOnErrorFn = jest.fn().mockImplementation((eventName, callback) => {
if (eventName === 'error') {
const error = { message: 'API error' };
callback(error);
}
});
const mockOnTimeoutFn = jest.fn().mockImplementation((eventName, callback) => {
if (eventName === 'timeout') {
callback({});
callback([]);
}
});
beforeEach(() => {
gon.features = { k8sWatchApi: true };
jest.spyOn(mockWatcher, 'subscribeToStream').mockImplementation(mockPodsListFn);
jest.spyOn(mockWatcher, 'on').mockImplementation(mockOnDataFn);
});
describe('when the pods data is present', () => {
beforeEach(() => {
gon.features = { k8sWatchApi: true };
it('should request namespaced pods from the cluster_client library if namespace is specified', () => {
mockResolvers.Query.k8sPods(null, { configuration, namespace }, { client });
jest
.spyOn(CoreV1Api.prototype, 'listCoreV1NamespacedPod')
.mockImplementation(mockNamespacedPodsListFn);
jest
.spyOn(CoreV1Api.prototype, 'listCoreV1PodForAllNamespaces')
.mockImplementation(mockAllPodsListFn);
jest.spyOn(mockWatcher, 'subscribeToStream').mockImplementation(mockPodsListWatcherFn);
jest.spyOn(mockWatcher, 'on').mockImplementation(mockOnDataFn);
});
expect(mockPodsListFn).toHaveBeenCalledWith(`/api/v1/namespaces/${namespace}/pods`, {
watch: true,
it('should request namespaced pods from the cluster_client library if namespace is specified', async () => {
await mockResolvers.Query.k8sPods(null, { configuration, namespace }, { client });
expect(mockPodsListWatcherFn).toHaveBeenCalledWith(
`/api/v1/namespaces/${namespace}/pods`,
{
watch: true,
},
);
});
it('should request all pods from the cluster_client library if namespace is not specified', async () => {
await mockResolvers.Query.k8sPods(null, { configuration, namespace: '' }, { client });
expect(mockPodsListWatcherFn).toHaveBeenCalledWith(`/api/v1/pods`, { watch: true });
});
it('should update cache with the new data when received from the library', async () => {
await mockResolvers.Query.k8sPods(null, { configuration, namespace: '' }, { client });
expect(client.writeQuery).toHaveBeenCalledWith({
query: k8sPodsQuery,
variables: { configuration, namespace: '' },
data: { k8sPods: [] },
});
});
});
it('should request all pods from the cluster_client library if namespace is not specified', () => {
mockResolvers.Query.k8sPods(null, { configuration, namespace: '' }, { client });
expect(mockPodsListFn).toHaveBeenCalledWith(`/api/v1/pods`, { watch: true });
});
it('should return data when received from the library', async () => {
const pods = await mockResolvers.Query.k8sPods(null, { configuration }, { client });
it('should not watch pods from the cluster_client library when the pods data is not present', async () => {
jest.spyOn(CoreV1Api.prototype, 'listCoreV1NamespacedPod').mockImplementation(
jest.fn().mockImplementation(() => {
return Promise.resolve({
items: [],
});
}),
);
expect(pods).toEqual(k8sPodsMock);
});
it('should throw an error when received from the library', async () => {
jest.spyOn(mockWatcher, 'on').mockImplementation(mockOnErrorFn);
await mockResolvers.Query.k8sPods(null, { configuration, namespace }, { client });
await expect(
mockResolvers.Query.k8sPods(null, { configuration }, { client }),
).rejects.toThrow('API error');
});
it('should return empty data when received timeout event from the library', async () => {
jest.spyOn(mockWatcher, 'on').mockImplementation(mockOnTimeoutFn);
await expect(
mockResolvers.Query.k8sPods(null, { configuration }, { client }),
).resolves.toEqual([]);
expect(mockPodsListWatcherFn).not.toHaveBeenCalled();
});
});
});

View File

@@ -74,7 +74,12 @@ describe('Merge request merge checks conflicts component', () => {
sourceBranchProtected,
rendersConflictButton,
}) => {
factory({ mr: { conflictResolutionPath }, pushToSourceBranch, sourceBranchProtected });
factory({
status: 'FAILED',
mr: { conflictResolutionPath },
pushToSourceBranch,
sourceBranchProtected,
});
await waitForPromises();

View File

@@ -67,7 +67,7 @@ function createWrapper({ propsData = {}, provideData = {}, handler = mockQueryHa
service: {},
check: {
identifier: 'need_rebase',
status: 'failed',
status: 'FAILED',
},
...propsData,
},

View File

@@ -0,0 +1,60 @@
# frozen_string_literal: true
require 'spec_helper'
RSpec.describe BulkImports::PipelineSchemaInfo, feature_category: :importers do
let(:entity) { build(:bulk_import_entity, :project_entity) }
let(:tracker) { build(:bulk_import_tracker, entity: entity, pipeline_name: pipeline_name) }
let(:pipeline_name) { BulkImports::Common::Pipelines::LabelsPipeline.to_s }
subject { described_class.new(tracker.pipeline_class, tracker.entity.portable_class) }
describe '#db_schema' do
context 'when pipeline defines a relation name which is an association' do
it 'returns the schema name of the table used by the association' do
expect(subject.db_schema).to eq(:gitlab_main_cell)
end
end
context 'when pipeline does not define a relation name' do
let(:pipeline_name) { BulkImports::Common::Pipelines::EntityFinisher.to_s }
it 'returns nil' do
expect(subject.db_schema).to eq(nil)
end
end
context 'when pipeline relation name is not an association' do
let(:pipeline_name) { BulkImports::Projects::Pipelines::CommitNotesPipeline.to_s }
it 'returns nil' do
expect(subject.db_schema).to eq(nil)
end
end
end
describe '#db_table' do
context 'when pipeline defines a relation name which is an association' do
it 'returns the name of the table used by the association' do
expect(subject.db_table).to eq('labels')
end
end
context 'when pipeline does not define a relation name' do
let(:pipeline_name) { BulkImports::Common::Pipelines::EntityFinisher.to_s }
it 'returns nil' do
expect(subject.db_table).to eq(nil)
end
end
context 'when pipeline relation name is not an association' do
let(:pipeline_name) { BulkImports::Projects::Pipelines::CommitNotesPipeline.to_s }
it 'returns nil' do
expect(subject.db_table).to eq(nil)
end
end
end
end

View File

@@ -185,6 +185,21 @@ RSpec.describe Gitlab::SidekiqMiddleware::SkipJobs, feature_category: :scalabili
TestWorker.perform_async(*job['args'])
end
end
context 'when a block is provided' do
before do
TestWorker.defer_on_database_health_signal(*health_signal_attrs.values) do
[:gitlab_ci, [:ci_pipelines]]
end
end
it 'uses the lazy evaluated schema and tables returned by the block' do
expect(Gitlab::Database::HealthStatus::Context).to receive(:new)
.with(anything, anything, [:ci_pipelines], :gitlab_ci).and_call_original
expect { |b| subject.call(TestWorker.new, job, queue, &b) }.to yield_control
end
end
end
end
end

View File

@@ -248,6 +248,24 @@ RSpec.describe BulkImports::Entity, type: :model, feature_category: :importers d
end
end
describe '#portable_class' do
context 'when entity is group' do
it 'returns Group class' do
entity = build(:bulk_import_entity, :group_entity)
expect(entity.portable_class).to eq(Group)
end
end
context 'when entity is project' do
it 'returns Project class' do
entity = build(:bulk_import_entity, :project_entity)
expect(entity.portable_class).to eq(Project)
end
end
end
describe '#export_relations_url_path' do
context 'when entity is group' do
it 'returns group export relations url' do

View File

@@ -6,8 +6,15 @@ RSpec.describe 'rendering namespace statistics', feature_category: :metrics do
include GraphqlHelpers
let(:namespace) { user.namespace }
let!(:statistics) { create(:namespace_root_storage_statistics, namespace: namespace, packages_size: 5.gigabytes, uploads_size: 3.gigabytes) }
let(:user) { create(:user) }
let!(:statistics) do
create(
:namespace_root_storage_statistics,
namespace: namespace,
packages_size: 5.gigabytes,
uploads_size: 3.gigabytes
)
end
let(:query) do
graphql_query_for(

View File

@@ -2187,6 +2187,67 @@ RSpec.describe QuickActions::InterpretService, feature_category: :team_planning
end
end
context 'request_changes command' do
let(:merge_request) { create(:merge_request, source_project: project) }
let(:content) { '/request_changes' }
context "when `mr_request_changes` feature flag is disabled" do
before do
stub_feature_flags(mr_request_changes: false)
end
it 'does not call MergeRequests::UpdateReviewerStateService' do
expect(MergeRequests::UpdateReviewerStateService).not_to receive(:new)
service.execute(content, merge_request)
end
end
context "when the user is a reviewer" do
before do
create(:merge_request_reviewer, merge_request: merge_request, reviewer: current_user)
end
it 'calls MergeRequests::UpdateReviewerStateService with requested_changes' do
expect_next_instance_of(
MergeRequests::UpdateReviewerStateService,
project: project, current_user: current_user
) do |service|
expect(service).to receive(:execute).with(merge_request, "requested_changes").and_return({ status: :success })
end
_, _, message = service.execute(content, merge_request)
expect(message).to eq('Changes requested to the current merge request.')
end
it 'returns error message from MergeRequests::UpdateReviewerStateService' do
expect_next_instance_of(
MergeRequests::UpdateReviewerStateService,
project: project, current_user: current_user
) do |service|
expect(service).to receive(:execute).with(merge_request, "requested_changes").and_return({ status: :error, message: 'Error' })
end
_, _, message = service.execute(content, merge_request)
expect(message).to eq('Error')
end
end
context "when the user is not a reviewer" do
it 'does not call MergeRequests::UpdateReviewerStateService' do
expect(MergeRequests::UpdateReviewerStateService).not_to receive(:new)
service.execute(content, merge_request)
end
end
it_behaves_like 'approve command unavailable' do
let(:issuable) { issue }
end
end
it_behaves_like 'issues link quick action', :relate do
let(:user) { developer }
end

View File

@@ -13,6 +13,10 @@ RSpec.describe BulkImports::PipelineBatchWorker, feature_category: :importers do
@context = context
end
def self.relation
'labels'
end
def run
@context.tracker.finish!
end
@@ -202,4 +206,55 @@ RSpec.describe BulkImports::PipelineBatchWorker, feature_category: :importers do
expect(batch.reload).to be_failed
end
end
context 'with stop signal from database health check' do
around do |example|
with_sidekiq_server_middleware do |chain|
chain.add Gitlab::SidekiqMiddleware::SkipJobs
Sidekiq::Testing.inline! { example.run }
end
end
before do
stub_feature_flags("drop_sidekiq_jobs_#{described_class.name}": false)
stop_signal = instance_double("Gitlab::Database::HealthStatus::Signals::Stop", stop?: true)
allow(Gitlab::Database::HealthStatus).to receive(:evaluate).and_return([stop_signal])
end
it 'defers the job by set time' do
expect_next_instance_of(described_class) do |worker|
expect(worker).not_to receive(:perform).with(batch.id)
end
expect(described_class).to receive(:perform_in).with(described_class::DEFER_ON_HEALTH_DELAY, batch.id)
described_class.perform_async(batch.id)
end
it 'lazy evaluates schema and tables', :aggregate_failures do
block = described_class.database_health_check_attrs[:block]
job_args = [batch.id]
schema, table = block.call([job_args])
expect(schema).to eq(:gitlab_main_cell)
expect(table).to eq(['labels'])
end
context 'when `bulk_import_deferred_workers` feature flag is disabled' do
it 'does not defer job execution' do
stub_feature_flags(bulk_import_deferred_workers: false)
expect_next_instance_of(described_class) do |worker|
expect(worker).to receive(:perform).with(batch.id)
end
expect(described_class).not_to receive(:perform_in)
described_class.perform_async(batch.id)
end
end
end
end

View File

@@ -9,6 +9,10 @@ RSpec.describe BulkImports::PipelineWorker, feature_category: :importers do
def run; end
def self.relation
'labels'
end
def self.file_extraction_pipeline?
false
end
@@ -155,6 +159,62 @@ RSpec.describe BulkImports::PipelineWorker, feature_category: :importers do
end
end
context 'with stop signal from database health check' do
around do |example|
with_sidekiq_server_middleware do |chain|
chain.add Gitlab::SidekiqMiddleware::SkipJobs
Sidekiq::Testing.inline! { example.run }
end
end
before do
stub_feature_flags("drop_sidekiq_jobs_#{described_class.name}": false)
stop_signal = instance_double("Gitlab::Database::HealthStatus::Signals::Stop", stop?: true)
allow(Gitlab::Database::HealthStatus).to receive(:evaluate).and_return([stop_signal])
end
it 'defers the job by set time' do
expect_next_instance_of(described_class) do |worker|
expect(worker).not_to receive(:perform).with(pipeline_tracker.id, pipeline_tracker.stage, entity.id)
end
expect(described_class).to receive(:perform_in).with(
described_class::DEFER_ON_HEALTH_DELAY,
pipeline_tracker.id,
pipeline_tracker.stage,
entity.id
)
described_class.perform_async(pipeline_tracker.id, pipeline_tracker.stage, entity.id)
end
it 'lazy evaluates schema and tables', :aggregate_failures do
block = described_class.database_health_check_attrs[:block]
job_args = [pipeline_tracker.id, pipeline_tracker.stage, entity.id]
schema, table = block.call([job_args])
expect(schema).to eq(:gitlab_main_cell)
expect(table).to eq(['labels'])
end
context 'when `bulk_import_deferred_workers` feature flag is disabled' do
it 'does not defer job execution' do
stub_feature_flags(bulk_import_deferred_workers: false)
expect_next_instance_of(described_class) do |worker|
expect(worker).to receive(:perform).with(pipeline_tracker.id, pipeline_tracker.stage, entity.id)
end
expect(described_class).not_to receive(:perform_in)
described_class.perform_async(pipeline_tracker.id, pipeline_tracker.stage, entity.id)
end
end
end
context 'when pipeline is finished' do
let(:pipeline_tracker) do
create(

View File

@@ -75,7 +75,8 @@ RSpec.describe Ci::PipelineSuccessUnlockArtifactsWorker, feature_category: :buil
expect(described_class.database_health_check_attrs).to eq(
gitlab_schema: :gitlab_ci,
delay_by: described_class::DEFAULT_DEFER_DELAY,
tables: [:ci_job_artifacts]
tables: [:ci_job_artifacts],
block: nil
)
end
end

View File

@@ -37,7 +37,7 @@ RSpec.describe WorkerAttributes, feature_category: :shared do
:worker_has_external_dependencies? | :worker_has_external_dependencies! | false | [] | true
:idempotent? | :idempotent! | false | [] | true
:big_payload? | :big_payload! | false | [] | true
:database_health_check_attrs | :defer_on_database_health_signal | nil | [:gitlab_main, [:users], 1.minute] | { gitlab_schema: :gitlab_main, tables: [:users], delay_by: 1.minute }
:database_health_check_attrs | :defer_on_database_health_signal | nil | [:gitlab_main, [:users], 1.minute] | { gitlab_schema: :gitlab_main, tables: [:users], delay_by: 1.minute, block: nil }
end
# rubocop: enable Layout/LineLength