Add latest changes from gitlab-org/gitlab@master

GitLab Bot 2023-07-11 00:09:14 +00:00
parent 505a610385
commit 1326fc930c
24 changed files with 67 additions and 128 deletions

View File

@@ -44,7 +44,7 @@ default:
workflow:
name: '$PIPELINE_NAME'
rules:
- if: '$CI_PROJECT_PATH == "gitlab/gitaly" && $CI_PIPELINE_SOURCE == "parent_pipeline" && $GITALY_TEST'
- if: '$CI_PROJECT_PATH == "gitlab-org/gitaly" && $CI_PIPELINE_SOURCE == "parent_pipeline" && $GITALY_TEST'
variables:
PIPELINE_NAME: 'Gitaly Rails Test Pipeline'
# If `$FORCE_GITLAB_CI` is set, create a pipeline.

View File

@@ -68,7 +68,7 @@ export default {
text: this.primaryText,
attributes: {
variant: this.primaryVariant,
'data-qa-selector': 'confirm_ok_button',
'data-testid': 'confirm-ok-button',
},
};
},
@@ -110,6 +110,7 @@ export default {
ref="modal"
modal-id="confirmationModal"
body-class="gl-display-flex"
data-testid="confirmation-modal"
:size="size"
:title="title"
:action-primary="primaryAction"

View File

@@ -0,0 +1,17 @@
# frozen_string_literal: true
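# Removes the partial index on events (project_id, id DESC) that covers merged
# events (action = 7); the down migration re-creates it concurrently.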
class RemoveIndexEventsOnProjectIdAndIdDescOnMergedAction < Gitlab::Database::Migration[2.1]
INDEX_NAME = 'index_events_on_project_id_and_id_desc_on_merged_action'
disable_ddl_transaction!
def up
remove_concurrent_index(:events, [:project_id, :id], order: { id: :desc },
where: "action = 7", name: INDEX_NAME)
end
def down
add_concurrent_index(:events, [:project_id, :id], order: { id: :desc },
where: "action = 7", name: INDEX_NAME)
end
end
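
A quick, illustrative check from a Rails console (assumes a standard GitLab Rails environment; this is not part of the migration itself):

```ruby
# Illustrative Rails console check: the partial index should be absent after
# `up` runs, and present again after `down`.
ActiveRecord::Base.connection.index_exists?(
  :events, [:project_id, :id],
  name: 'index_events_on_project_id_and_id_desc_on_merged_action'
)
# => false once the migration has been applied
```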

View File

@@ -0,0 +1 @@
e7cf459527a96936684778c46bfc3b3490d314f4dbf5e3a3daca8670f1fcaeaf

View File

@@ -31249,8 +31249,6 @@ CREATE INDEX index_events_on_project_id_and_created_at ON events USING btree (pr
CREATE INDEX index_events_on_project_id_and_id ON events USING btree (project_id, id);
CREATE INDEX index_events_on_project_id_and_id_desc_on_merged_action ON events USING btree (project_id, id DESC) WHERE (action = 7);
CREATE UNIQUE INDEX index_events_on_target_type_and_target_id_and_fingerprint ON events USING btree (target_type, target_id, fingerprint);
CREATE INDEX index_evidences_on_release_id ON evidences USING btree (release_id);

View File

@@ -55,8 +55,6 @@ on a machine:
- With multiple disks mounted as a single mount-point (like with a RAID array).
- Using LVM.
Gitaly may work with NFS or a mounted Storage Appliance, but it is [not officially supported](../nfs.md#gitaly-with-nfs-not-supported) because Git requires low latency.
Each project can have up to 3 different repositories:
- A project repository, where the source code is stored.

View File

@@ -80,10 +80,8 @@ on a machine:
- With multiple disks mounted as a single mount-point (like with a RAID array).
- Using LVM.
GitLab does not require a special file system and can work with:
- NFS.
- A mounted Storage Appliance (there may be performance limitations when using a remote file system).
GitLab does not require a special file system and can work with a mounted Storage Appliance. However, there can be
performance limitations and consistency issues when using a remote file system.
Geo triggers garbage collection in Gitaly to [deduplicate forked repositories](../../../development/git_object_deduplication.md#git-object-deduplication-and-gitlab-geo) on Geo secondary sites.

View File

@@ -377,9 +377,6 @@ This can be risky because anything that prevents your Gitaly clients from reachi
servers causes all Gitaly requests to fail. For example, any sort of network, firewall, or name
resolution problems.
Additionally, you must [disable Rugged](../nfs.md#improving-nfs-performance-with-gitlab)
if previously enabled manually.
Gitaly makes the following assumptions:
- Your `gitaly1.internal` Gitaly server can be reached at `gitaly1.internal:8075` from your Gitaly

View File

@@ -17,7 +17,6 @@ The following tables are intended to guide you to choose the right combination o
|------------|--------------|----------------|-----------------|-------------|-----------------|
|Gitaly Cluster | Very high - tolerant of node failures | RTO for a single node of 10 s with no manual intervention | Data is stored on multiple nodes | Good - While writes may take slightly longer due to voting, read distribution improves read speeds | **Trade-off** - Slight decrease in write speed for redundant, strongly-consistent storage solution. **Risks** - [Does not support snapshot backups](../gitaly/index.md#snapshot-backup-and-recovery-limitations), GitLab backup task can be slow for large data sets |
|Gitaly Shards | Single storage location is a single point of failure | Would need to restore only shards which failed | Single point of failure | Good - can allocate repositories to shards to spread load | **Trade-off** - Need to manually configure repositories into different shards to balance loads / storage space **Risks** - Single point of failure relies on recovery process when single-node failure occurs |
|Gitaly + NFS | Single storage location is a single point of failure | Single node failure requires restoration from backup | Single point of failure | Average - NFS is not ideally suited to large quantities of small reads / writes which can have a detrimental impact on performance | **Trade-off** - Familiar administration though NFS is not ideally suited to Git demands **Risks** - Many instances of NFS compatibility issues which provide very poor customer experiences |
## Geo capabilities

View File

@@ -52,13 +52,10 @@ Before deploying Gitaly Cluster, review:
- [Configuration guidance](configure_gitaly.md) and [Repository storage options](../repository_storage_paths.md) to make
sure that Gitaly Cluster is the best setup for you.
If you have:
If you have not yet migrated to Gitaly Cluster, you have two options:
- Not yet migrated to Gitaly Cluster and want to continue using NFS, remain on the service you are using. However, NFS
is [no longer supported](../../update/deprecations.md#nfs-for-git-repository-storage).
- Not yet migrated to Gitaly Cluster but want to migrate away from NFS, you have two options:
- A sharded Gitaly instance.
- Gitaly Cluster.
- A sharded Gitaly instance.
- Gitaly Cluster.
Contact your [Customer Success Manager](https://about.gitlab.com/job-families/sales/customer-success-management/) or customer support if you have any questions.

View File

@@ -1487,7 +1487,7 @@ repository storage redundancy.
For a replication factor:
- Of `1`: NFS, Gitaly, and Gitaly Cluster have roughly the same storage requirements.
- Of `1`: Gitaly and Gitaly Cluster have roughly the same storage requirements.
- More than `1`: The amount of required storage is `used space * replication factor`. `used space`
should include any planned future growth.
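For example (illustrative numbers): with 300 GB of used space, including planned growth, a replication factor of `3` requires roughly `300 GB * 3 = 900 GB` of storage in total across the Gitaly Cluster nodes.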
@@ -1628,20 +1628,8 @@ You should use [repository-specific primary nodes](#repository-specific-primary-
> - [Introduced](https://gitlab.com/gitlab-org/gitaly/-/issues/3492) in GitLab 13.12, with primary elections run when Praefect starts or the cluster's consensus of a Gitaly node's health changes.
> - [Changed](https://gitlab.com/gitlab-org/gitaly/-/merge_requests/3543) in GitLab 14.1, primary elections are run lazily.
Gitaly Cluster supports electing repository-specific primary Gitaly nodes. Repository-specific
Gitaly primary nodes are enabled in `/etc/gitlab/gitlab.rb` by setting
`praefect['failover_election_strategy'] = 'per_repository'`.
Praefect's [deprecated election strategies](#deprecated-election-strategies):
- Elected a primary Gitaly node for each virtual storage, which was used as the primary node for
each repository in the virtual storage.
- Prevented horizontal scaling of a virtual storage. The primary Gitaly node needed a replica of
each repository and thus became the bottleneck.
The `per_repository` election strategy solves this problem by electing a primary Gitaly node separately for each
repository. Combined with [configurable replication factors](#configure-replication-factor), you can
horizontally scale storage capacity and distribute write load across Gitaly nodes.
Gitaly Cluster elects a primary Gitaly node separately for each repository. Combined with
[configurable replication factors](#configure-replication-factor), you can horizontally scale storage capacity and distribute write load across Gitaly nodes.
Primary elections are run lazily. Praefect doesn't immediately elect a new primary node if the current
one is unhealthy. A new primary is elected if a request must be served while the current primary is unavailable.
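
As a minimal sketch (assuming Omnibus GitLab), the election strategy setting that the removed text above described lives in `/etc/gitlab/gitlab.rb` on the Praefect nodes. On current releases `per_repository` is already the default, so setting it explicitly is usually unnecessary:

```ruby
# /etc/gitlab/gitlab.rb on a Praefect node -- illustrative only;
# `per_repository` is the default election strategy on current releases.
praefect['failover_election_strategy'] = 'per_repository'
```

Apply the change with `sudo gitlab-ctl reconfigure`.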

View File

@@ -38,8 +38,7 @@ From left to right, the performance bar displays:
[Gitaly](../../gitaly/index.md) calls. Select to display a modal window with more
details.
- **Rugged calls**: the time taken (in milliseconds) and the total number of
[Rugged](../../nfs.md#improving-nfs-performance-with-gitlab) calls.
Select to display a modal window with more details.
Rugged calls. Select to display a modal window with more details.
- **Redis calls**: the time taken (in milliseconds) and the total number of
Redis calls. Select to display a modal window with more details.
- **Elasticsearch calls**: the time taken (in milliseconds) and the total number of

View File

@@ -15,36 +15,11 @@ is recommended over NFS where possible, due to better performance.
When eliminating the usage of NFS, there are [additional steps you need to take](object_storage.md#alternatives-to-file-system-storage)
beyond moving to Object Storage.
File system performance can impact overall GitLab performance, especially for
actions that read or write to Git repositories. For steps you can use to test
file system performance, see
NFS cannot be used for repository storage.
For steps you can use to test file system performance, see
[File System Performance Benchmarking](operations/filesystem_benchmarking.md).
## Gitaly with NFS not supported
Technical and engineering support for using NFS to store Git repository data is officially at end-of-life. No product
changes or troubleshooting are provided through engineering, security, or paid support channels.
If an issue is reproducible, or if it happens intermittently but regularly, GitLab Support can investigate it, provided that the
issue can be reproduced without NFS. To reproduce without NFS, migrate the affected repositories to a different Gitaly
shard, such as a Gitaly Cluster or a standalone Gitaly VM backed with block storage.
## Known kernel version incompatibilities
RedHat Enterprise Linux (RHEL) and CentOS v7.7 and v7.8 ship with kernel
version `3.10.0-1127`, which [contains a bug](https://bugzilla.redhat.com/show_bug.cgi?id=1783554) that causes
[uploads to fail to copy over NFS](https://gitlab.com/gitlab-org/gitlab/-/issues/218999). The
following GitLab versions include a fix to work properly with that
kernel version:
- [12.10.12](https://about.gitlab.com/releases/2020/06/25/gitlab-12-10-12-released/)
- [13.0.7](https://about.gitlab.com/releases/2020/06/25/gitlab-13-0-7-released/)
- [13.1.1](https://about.gitlab.com/releases/2020/06/24/gitlab-13-1-1-released/)
- 13.2 and up
If you are using that kernel version, be sure to upgrade GitLab to avoid
errors.
## Fast lookup of authorized SSH keys
The [fast SSH key lookup](operations/fast_ssh_key_lookup.md) feature can improve
@@ -59,26 +34,6 @@ is moved to NFS.
We are investigating the use of
[fast lookup as the default](https://gitlab.com/groups/gitlab-org/-/epics/3104).
## Improving NFS performance with GitLab
NFS performance with GitLab can in some cases be improved with
[direct Git access](gitaly/index.md#direct-access-to-git-in-gitlab) using [Rugged](https://github.com/libgit2/rugged).
Depending on the GitLab version, GitLab [automatically detects](gitaly/index.md#automatic-detection) if Rugged can and should
be used per storage.
If the Rugged feature flag is explicitly set to either `true` or `false`, GitLab uses the value explicitly set. If you
previously enabled Rugged using the feature flag and you want to use automatic detection instead, you must unset
the feature flag:
```shell
sudo gitlab-rake gitlab:features:unset_rugged
```
From GitLab 12.7, Rugged is only automatically enabled for use with Puma if the
[Puma thread count is set to `1`](../install/requirements.md#puma-settings). To use Rugged with a Puma thread count of
more than `1`, enable Rugged using the [feature flag](../development/gitaly.md#legacy-rugged-code).
## NFS server
Installing the `nfs-kernel-server` package allows you to share directories with
@@ -361,33 +316,6 @@ sudo ufw allow from <client_ip_address> to any port nfs
## Known issues
### Upgrade to Gitaly Cluster or disable caching if experiencing data loss
WARNING:
NFS for Git repositories
[has been removed](../update/deprecations.md#nfs-for-git-repository-storage).
Customers and users have reported data loss on high-traffic repositories when using NFS for Git repositories.
For example, we have seen:
- [Inconsistent updates after a push](https://gitlab.com/gitlab-org/gitaly/-/issues/2589).
- `git ls-remote` [returning the wrong (or no) branches](https://gitlab.com/gitlab-org/gitaly/-/issues/3083),
causing Jenkins to intermittently re-run all pipelines for a repository.
The problem may be partially mitigated by adjusting caching using the following NFS client mount options:
| Setting | Description |
| ------- | ----------- |
| `lookupcache=positive` | Tells the NFS client to honor `positive` cache results but invalidates any `negative` cache results. Negative cache results cause problems with Git. Specifically, a `git push` can fail to register uniformly across all NFS clients. The negative cache causes the clients to 'remember' that the files did not exist previously. |
| `actimeo=0` | Sets the time to zero that the NFS client caches files and directories before requesting fresh information from a server. |
| `noac` | Tells the NFS client not to cache file attributes and forces application writes to become synchronous so that local changes to a file become visible on the server immediately. |
WARNING:
The `actimeo=0` and `noac` options both result in a significant reduction in performance, possibly leading to timeouts.
You may be able to avoid timeouts and data loss using `actimeo=0` and `lookupcache=positive` _without_ `noac`; however,
we expect the performance reduction to still be significant. Upgrade to
[Gitaly Cluster](gitaly/praefect.md) as soon as possible.
### Avoid using cloud-based file systems
GitLab strongly recommends against using cloud-based file systems such as:
@@ -447,10 +375,3 @@ On Ubuntu 16.04, use:
```shell
sudo perf trace --no-syscalls --event 'nfs4:*' -p $(pgrep -fd ',' puma)
```
### Warnings `garbage found: .../repositories/@hashed/...git/objects/pack/.nfs...` in Gitaly logs
Warnings like `garbage found: .../repositories/@hashed/...git/objects/pack/.nfs...` in [Gitaly logs](logs/index.md#gitaly-logs)
occur when `lookupcache=positive` is not set, which we recommend as an
[NFS mount option](#mount-options).
See [Gitaly issue #3175](https://gitlab.com/gitlab-org/gitaly/-/issues/3175) for more details.

View File

@@ -383,7 +383,7 @@ blocking all jobs on that worker from proceeding. If Rugged calls performed by S
background task processing.
By default, Rugged is used when Git repository data is stored on local storage or on an NFS mount.
[Using Rugged is recommended when using NFS](../nfs.md#improving-nfs-performance-with-gitlab), but if
Using Rugged is recommended when using NFS, but if
you are using local storage, disabling Rugged can improve Sidekiq performance:
```shell

View File

@@ -57,6 +57,9 @@ For human interactions, use credentials tied to users such as Personal Access To
To help detect a potential secret leak, you can use the
[Audit Event](../../../administration/audit_event_streaming/examples.md#example-payloads-for-ssh-events-with-deploy-key) feature.
WARNING:
Deploy keys work even if the user who created them is removed from the group or project.
## View deploy keys
To view the deploy keys available to a project:

View File

@@ -98,7 +98,7 @@ module Sidebars
::Sidebars::MenuItem.new(
title: _('Model experiments'),
link: project_ml_experiments_path(context.project),
super_sidebar_parent: Sidebars::Projects::SuperSidebarMenus::DeployMenu,
super_sidebar_parent: Sidebars::Projects::SuperSidebarMenus::AnalyzeMenu,
active_routes: { controller: %w[projects/ml/experiments projects/ml/candidates] },
item_id: :model_experiments
)

View File

@@ -25,7 +25,8 @@ module Sidebars
:code_review,
:merge_request_analytics,
:issues,
:insights
:insights,
:model_experiments
].each { |id| add_item(::Sidebars::NilMenuItem.new(item_id: id)) }
end
end

View File

@@ -20,8 +20,7 @@ module Sidebars
:releases,
:feature_flags,
:packages_registry,
:container_registry,
:model_experiments
:container_registry
].each { |id| add_item(::Sidebars::NilMenuItem.new(item_id: id)) }
end
end

View File

@@ -11,6 +11,7 @@ module QA
base.view 'app/assets/javascripts/lib/utils/confirm_via_gl_modal/confirm_modal.vue' do
element :confirm_ok_button
element :confirmation_modal
end
base.view 'app/assets/javascripts/vue_shared/components/confirm_danger/confirm_danger_modal.vue' do
@@ -37,6 +38,20 @@ module QA
def click_confirmation_ok_button
click_element(:confirm_ok_button)
end
# Click the confirmation button if the confirmation modal is present
# Can be used when the modal may not always appear in a test. For example, if the modal is behind a feature flag
#
# @return [void]
def click_confirmation_ok_button_if_present
# In the case of changing access levels[1], the modal appears while there's a request in progress, so we need
# to skip the loading check, otherwise it will time out.
#
# [1]: https://gitlab.com/gitlab-org/gitlab/-/blob/4a99af809b86047ce3c8985e6582748bbd23fc84/qa/qa/page/component/members/members_table.rb#L54
return unless has_element?(:confirmation_modal, skip_finished_loading_check: true)
click_element(:confirm_ok_button, skip_finished_loading_check: true)
end
end
end
end

View File

@@ -14,6 +14,7 @@ module QA
include MembersFilter
include RemoveMemberModal
include RemoveGroupModal
include ConfirmModal
end
base.view 'app/assets/javascripts/members/components/table/members_table.vue' do
@@ -53,6 +54,8 @@ module QA
click_element :access_level_dropdown
click_element :access_level_link, text: access_level
end
click_confirmation_ok_button_if_present
end
def remove_member(username)

View File

@@ -562,6 +562,10 @@ module QA
ENV.fetch('QA_CONTAINER_REGISTRY_HOST', 'registry.gitlab.com')
end
def runner_container_image
ENV.fetch('QA_RUNNER_CONTAINER_IMAGE', 'gitlab-runner:alpine')
end
# ENV variables for authenticating against a private container registry
# These need to be set if using the
# Service::DockerRun::Mixins::ThirdPartyDocker module

View File

@@ -16,7 +16,7 @@ module QA
MSG
def initialize(name)
@image = "#{QA::Runtime::Env.container_registry_host}/gitlab-org/gitlab-runner:alpine"
@image = "#{QA::Runtime::Env.container_registry_host}/gitlab-org/#{QA::Runtime::Env.runner_container_image}"
@name = name || "qa-runner-#{SecureRandom.hex(4)}"
@run_untagged = true
@executor = :shell

View File

@@ -23,7 +23,8 @@ RSpec.describe Sidebars::Projects::SuperSidebarMenus::AnalyzeMenu, feature_categ
:code_review,
:merge_request_analytics,
:issues,
:insights
:insights,
:model_experiments
])
end
end

View File

@@ -18,8 +18,7 @@ RSpec.describe Sidebars::Projects::SuperSidebarMenus::DeployMenu, feature_catego
:releases,
:feature_flags,
:packages_registry,
:container_registry,
:model_experiments
:container_registry
])
end
end