Add latest changes from gitlab-org/gitlab@master

This commit is contained in:
GitLab Bot 2024-01-03 18:12:53 +00:00
parent 5429e3d4e0
commit b87af16bf2
48 changed files with 894 additions and 644 deletions

View File

@ -1,6 +1,6 @@
include:
- project: gitlab-org/quality/pipeline-common
ref: 8.3.0
ref: 8.3.1
file:
- /ci/danger-review.yml

View File

@ -5,7 +5,7 @@ workflow:
name: $PIPELINE_NAME
include:
- component: "gitlab.com/gitlab-org/quality/pipeline-common/allure-report@8.2.0"
- component: "gitlab.com/gitlab-org/quality/pipeline-common/allure-report@8.3.0"
inputs:
job_name: "e2e-test-report"
job_stage: "report"
@ -15,7 +15,7 @@ include:
gitlab_auth_token_variable_name: "PROJECT_TOKEN_FOR_CI_SCRIPTS_API_USAGE"
allure_job_name: "${QA_RUN_TYPE}"
- project: gitlab-org/quality/pipeline-common
ref: 8.2.0
ref: 8.3.0
file:
- /ci/base.gitlab-ci.yml
- /ci/knapsack-report.yml

View File

@ -123,7 +123,7 @@
if: '$CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH && $CI_PIPELINE_SOURCE == "schedule" && $SCHEDULE_TYPE == "nightly"'
.if-ruby-branch-schedule-nightly: &if-ruby-branch-schedule-nightly
if: '$CI_COMMIT_BRANCH =~ "^ruby\d+(_\d)*$" && $CI_PIPELINE_SOURCE == "schedule" && $SCHEDULE_TYPE == "nightly"'
if: '$CI_COMMIT_BRANCH =~ /^ruby\d+(_\d)*$/ && $CI_PIPELINE_SOURCE == "schedule" && $SCHEDULE_TYPE == "nightly"'
.if-security-schedule: &if-security-schedule
if: '$CI_PROJECT_NAMESPACE == "gitlab-org/security" && $CI_PIPELINE_SOURCE == "schedule"'
@ -159,7 +159,7 @@
if: '$CI_SERVER_HOST == "gitlab.com" && $CI_PROJECT_NAMESPACE =~ /^gitlab-org($|\/security$)/ && $CI_COMMIT_TAG'
.if-ruby-branch: &if-ruby-branch
if: '$CI_COMMIT_BRANCH =~ "^ruby\d+(_\d)*$" || $CI_MERGE_REQUEST_LABELS =~ /pipeline:run-in-ruby\d+(_\d)*/'
if: '$CI_COMMIT_BRANCH =~ /^ruby\d+(_\d)*$/ || $CI_MERGE_REQUEST_LABELS =~ /pipeline:run-in-ruby\d+(_\d)*/'
####################
# Changes patterns #

View File

@ -80,6 +80,7 @@ PATH
remote: gems/gitlab-secret_detection
specs:
gitlab-secret_detection (0.1.0)
parallel (~> 1.22)
re2 (~> 2.4)
toml-rb (~> 2.2)

View File

@ -1,13 +1,15 @@
<script>
import { GlSprintf, GlTooltipDirective, GlModal } from '@gitlab/ui';
import { __, s__ } from '~/locale';
import { DOCS_URL_IN_EE_DIR } from 'jh_else_ce/lib/utils/url_utility';
import { helpPagePath } from '~/helpers/help_page_helper';
import eventHub from '../event_hub';
import stopEnvironmentMutation from '../graphql/mutations/stop_environment.mutation.graphql';
export default {
yamlDocsLink: `${DOCS_URL_IN_EE_DIR}/ee/ci/yaml/`,
stoppingEnvironmentDocsLink: `${DOCS_URL_IN_EE_DIR}/environments/#stopping-an-environment`,
yamlDocsLink: helpPagePath('ci/yaml/index'),
stoppingEnvironmentDocsLink: helpPagePath('ci/environments/index', {
anchor: 'stopping-an-environment',
}),
id: 'stop-environment-modal',
name: 'StopEnvironmentModal',

View File

@ -24,7 +24,7 @@ const PERSISTENT_USER_CALLOUTS = [
'.js-branch-rules-info-callout',
'.js-new-nav-for-everyone-callout',
'.js-namespace-over-storage-users-combined-alert',
'.js-code-suggestions-ga-non-owner-alert',
'.js-code-suggestions-ga-alert',
];
const initCallouts = () => {

View File

@ -4,11 +4,7 @@ module ViteHelper
def vite_enabled?
# vite is not production ready yet
return false if Rails.env.production?
# Enable vite if explicitly turned on in the GDK
return Gitlab::Utils.to_boolean(ViteRuby.env['VITE_ENABLED'], default: false) if ViteRuby.env.key?('VITE_ENABLED')
# Enable vite the legacy way (in case GDK hasn't been updated)
# This is going to be removed with https://gitlab.com/gitlab-org/gitlab/-/issues/431041
Rails.env.development? ? Feature.enabled?(:vite) : false
Gitlab::Utils.to_boolean(ViteRuby.env['VITE_ENABLED'], default: false)
end
end

View File

@ -79,7 +79,8 @@ module Users
vulnerability_report_grouping: 77, # EE-only
new_nav_for_everyone_callout: 78,
code_suggestions_ga_non_owner_alert: 79, # EE-only
duo_chat_callout: 80 # EE-only
duo_chat_callout: 80, # EE-only
code_suggestions_ga_owner_alert: 81 # EE-only
}
validates :feature_name,

View File

@ -23,5 +23,6 @@
= dispensable_render_if_exists "shared/free_user_cap_alert", source: @group
= dispensable_render_if_exists "shared/unlimited_members_during_trial_alert", resource: @group
= dispensable_render_if_exists "shared/code_suggestions_ga_non_owner_alert", resource: @group
= dispensable_render_if_exists "shared/code_suggestions_ga_owner_alert", resource: @group
= render template: base_layout || "layouts/application"

View File

@ -26,5 +26,6 @@
= dispensable_render_if_exists "projects/free_user_cap_alert", project: @project
= dispensable_render_if_exists 'shared/unlimited_members_during_trial_alert', resource: @project
= dispensable_render_if_exists 'projects/code_suggestions_ga_non_owner_alert', project: @project
= dispensable_render_if_exists 'projects/code_suggestions_ga_owner_alert', project: @project
= render template: "layouts/application"

View File

@ -1,8 +0,0 @@
---
name: inherit_higher_access_levels_no_cross_join
introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/132947
rollout_issue_url: https://gitlab.com/gitlab-org/gitlab/-/issues/427238
milestone: '16.5'
type: development
group: group::authentication
default_enabled: true

View File

@ -1,8 +0,0 @@
---
name: vite
introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/129392
rollout_issue_url:
milestone: '16.4'
type: development
group: group::code review
default_enabled: false

View File

@ -79,11 +79,6 @@ In Kubernetes, you can [use the same domain under `global.hosts.domain` as for t
NOTE:
The feature flag described in this section is planned to be deprecated and removed in a future release. Support for read-only Geo secondary sites is proposed in [issue 366810](https://gitlab.com/gitlab-org/gitlab/-/issues/366810), you can upvote and share your use cases in that issue.
There are minor known issues linked in the
["Geo secondary proxying with separate URLs" epic](https://gitlab.com/groups/gitlab-org/-/epics/6865).
You can also add feedback in the epic about any use-cases that
are not possible anymore with proxying enabled.
If you run into issues, to disable this feature, disable the `geo_secondary_proxy_separate_urls` feature flag.
1. SSH into one node running Rails on your primary Geo site and run:
@ -121,7 +116,7 @@ for details.
To use TLS certificates with Let's Encrypt, you can manually point the domain to one of the Geo sites, generate
the certificate, then copy it to all other sites.
- Using Geo secondary sites to accelerate runners is not officially supported. Support for this functionality is planned and can be tracked in [epic 9779](https://gitlab.com/groups/gitlab-org/-/epics/9779). If a replication lag occurs between the primary and secondary site, and the pipeline ref is not available on the secondary site when the job is executed, the job will fail.
- Using Geo secondary sites to accelerate runners is experimental and is not recommended for production. It can be configured and tested by following the steps in [secondary proxy runners](runners.md). Progress toward general availability can be tracked in [epic 9779](https://gitlab.com/groups/gitlab-org/-/epics/9779).
- When secondary proxying is used together with separate URLs,
[signing in the secondary site using SAML](../replication/single_sign_on.md#saml-with-separate-url-with-proxying-enabled)

View File

@ -0,0 +1,40 @@
---
stage: Systems
group: Geo
info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments
---
# Secondary runners **(PREMIUM SELF EXPERIMENT)**
> [Introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/415179) in GitLab 16.7 [with a flag](../../feature_flags.md) named `geo_proxy_check_pipeline_refs`. Disabled by default.
FLAG:
On self-managed GitLab, by default this feature is not available.
To make it available, an administrator can [enable the feature flag](../../feature_flags.md) named `geo_proxy_check_pipeline_refs`. On GitLab.com, this feature is not available.
With [Geo proxying for secondary sites](index.md), it is possible to register a `gitlab-runner` with a secondary site. This offloads load from the primary instance.
## Enable or disable secondary runners
To enable secondary runners, SSH into a Rails node on the **primary** Geo site and run:
```ruby
sudo gitlab-rails runner 'Feature.enable(:geo_proxy_check_pipeline_refs)'
```
To disable secondary runners, SSH into a Rails node on the **primary** Geo site and run:
```ruby
sudo gitlab-rails runner 'Feature.disable(:geo_proxy_check_pipeline_refs)'
```
## Use secondary runners with a Location Aware public URL (Unified URL)
Using a [Location Aware public URL](location_aware_external_url.md), with the feature flag enabled works with no extra configuration. After you install and register a runner in the same location as a secondary site, it automatically talks to the closest site, and only proxies to the primary if the secondary is out of date.
## Use secondary runners with separate URLs
Using separate secondary URLs, the runners should be:
1. Registered with the secondary external URL.
1. Configured with [`clone_url`](https://docs.gitlab.com/runner/configuration/advanced-configuration.html#how-clone_url-works) set to the `external_url` of the secondary instance.

View File

@ -36,6 +36,22 @@ To activate your instance with an activation code:
The subscription is activated.
### Using one activation code for multiple instances
You can use one activation code or license key for multiple self-managed instances if the users on
these instances are the same or are a subset of your licensed production instance. This means that if
you have a licensed production instance of GitLab, and other instances with the same list of users, the
production activation code applies, even if these users are configured in different groups and projects.
### Uploading licenses for scaled architectures
In a scaled architecture, upload the license file to one application instance only. The license is stored in the
database and is replicated to all your application instances so that you do not need to upload the license to all instances.
### Uploading licenses for GitLab Geo
When using GitLab Geo, you only need to upload the license to your primary Geo instance. The license is stored in the database and is replicated to all instances.
If you have an offline environment,
[activate GitLab EE with a license file or key](license_file.md) instead.

View File

@ -1212,6 +1212,8 @@ TLS connection-based rate limits are enforced using the following:
- `rate_limit_tls_domain`: Set the maximum threshold in number of TLS connections per hosted pages domain per second. Set to 0 to disable this feature.
- `rate_limit_tls_domain_burst`: Sets the maximum threshold of number of TLS connections allowed in an initial outburst of TLS connections per hosted pages domain.
An IPv6 address receives a large prefix in the 128-bit address space. The prefix is typically at least size /64. Because of the large number of possible addresses, if the client's IP address is IPv6, the limit is applied to the IPv6 prefix with a length of 64, rather than the entire IPv6 address.
#### Enable HTTP requests rate limits by source-IP
> [Introduced](https://gitlab.com/gitlab-org/gitlab-pages/-/issues/631) in GitLab 14.5.

View File

@ -563,8 +563,7 @@ gitlab-rake gitlab:db:configure
> **Note**: If you encounter a `rake aborted!` error stating that PgBouncer is failing to connect to PostgreSQL it may be that your PgBouncer node's IP address is missing from
PostgreSQL's `trust_auth_cidr_addresses` in `gitlab.rb` on your database nodes. See
[PgBouncer error `ERROR: pgbouncer cannot connect to server`](#pgbouncer-error-error-pgbouncer-cannot-connect-to-server)
in the Troubleshooting section before proceeding.
[PgBouncer error `ERROR: pgbouncer cannot connect to server`](../../administration/postgresql/replication_and_failover_troubleshooting.md#pgbouncer-error-error-pgbouncer-cannot-connect-to-server) before you proceed.
### Backups
@ -575,8 +574,7 @@ Do not backup or restore GitLab through a PgBouncer connection: this causes a Gi
### Ensure GitLab is running
At this point, your GitLab instance should be up and running. Verify you're able
to sign in, and create issues and merge requests. If you encounter issues, see
the [Troubleshooting section](#troubleshooting).
to sign in, and create issues and merge requests. For more information, see [Troubleshooting replication and failover](../../administration/postgresql/replication_and_failover_troubleshooting.md).
## Example configuration
@ -901,7 +899,7 @@ Stopping or restarting the Patroni service on the leader node triggers an automa
WARNING:
In GitLab 16.5 and earlier, PgBouncer nodes do not automatically fail over alongside
Patroni nodes. PgBouncer services
[must be restarted manually](#pgbouncer-errors-error-running-command-gitlabctlerrorsexecutionerror-and-error-database-gitlabhq_production-is-not-paused)
[must be restarted manually](../../administration/postgresql/replication_and_failover_troubleshooting.md#pgbouncer-errors-error-running-command-gitlabctlerrorsexecutionerror-and-error-database-gitlabhq_production-is-not-paused)
for a successful switchover.
While Patroni supports automatic failover, you also have the ability to perform
@ -1085,8 +1083,7 @@ Considering these, you should carefully plan your PostgreSQL upgrade:
```
If issues are encountered upgrading the replicas,
[there is a troubleshooting section](#postgresql-major-version-upgrade-fails-on-a-patroni-replica)
that might be the solution.
[there is a troubleshooting section](../../administration/postgresql/replication_and_failover_troubleshooting.md#postgresql-major-version-upgrade-fails-on-a-patroni-replica) that might be the solution.
NOTE:
Reverting the PostgreSQL upgrade with `gitlab-ctl revert-pg-upgrade` has the same considerations as
@ -1274,468 +1271,3 @@ After completing these steps, then you can clean up the resources of the old Pat
They are no longer needed. However, before removing the resources, remove the
logical replication subscription on the new leader by running `DROP SUBSCRIPTION patroni_upgrade`
with `gitlab-psql`.
## Troubleshooting
### Consul and PostgreSQL changes not taking effect
Due to the potential impacts, `gitlab-ctl reconfigure` only reloads Consul and PostgreSQL, it does not restart the services. However, not all changes can be activated by reloading.
To restart either service, run `gitlab-ctl restart SERVICE`
For PostgreSQL, it is usually safe to restart the leader node by default. Automatic failover defaults to a 1 minute timeout. Provided the database returns before then, nothing else needs to be done.
On the Consul server nodes, it is important to [restart the Consul service](../consul.md#restart-consul) in a controlled manner.
### PgBouncer error `ERROR: pgbouncer cannot connect to server`
You may get this error when running `gitlab-rake gitlab:db:configure` or you
may see the error in the PgBouncer log file.
```plaintext
PG::ConnectionBad: ERROR: pgbouncer cannot connect to server
```
The problem may be that your PgBouncer node's IP address is not included in the
`trust_auth_cidr_addresses` setting in `/etc/gitlab/gitlab.rb` on the database nodes.
You can confirm that this is the issue by checking the PostgreSQL log on the leader
database node. If you see the following error then `trust_auth_cidr_addresses`
is the problem.
```plaintext
2018-03-29_13:59:12.11776 FATAL: no pg_hba.conf entry for host "123.123.123.123", user "pgbouncer", database "gitlabhq_production", SSL off
```
To fix the problem, add the IP address to `/etc/gitlab/gitlab.rb`.
```ruby
postgresql['trust_auth_cidr_addresses'] = %w(123.123.123.123/32 <other_cidrs>)
```
[Reconfigure GitLab](../restart_gitlab.md#reconfigure-a-linux-package-installation) for the changes to take effect.
### PgBouncer errors `Error running command: GitlabCtl::Errors::ExecutionError` and `ERROR: database gitlabhq_production is not paused`
Due to a [known issue](https://gitlab.com/gitlab-org/omnibus-gitlab/-/issues/8166) that
affects versions of GitLab prior to 16.5.0, the automatic failover of PgBouncer nodes does not
happen after a [Patroni switchover](#manual-failover-procedure-for-patroni). In this
example, GitLab failed to detect a paused database, then attempted to `RESUME` a
not-paused database:
```plaintext
INFO -- : Running: gitlab-ctl pgb-notify --pg-database gitlabhq_production --newhost database7.example.com --user pgbouncer --hostuser gitlab-consul
ERROR -- : STDERR: Error running command: GitlabCtl::Errors::ExecutionError
ERROR -- : STDERR: ERROR: ERROR: database gitlabhq_production is not paused
```
To ensure a [Patroni switchover](#manual-failover-procedure-for-patroni) succeeds,
you must manually restart the PgBouncer service on all PgBouncer nodes with this command:
```shell
gitlab-ctl restart pgbouncer
```
### Reinitialize a replica
If a replica cannot start or rejoin the cluster, or when it lags behind and cannot catch up, it might be necessary to reinitialize the replica:
1. [Check the replication status](#check-replication-status) to confirm which server
needs to be reinitialized. For example:
```plaintext
+ Cluster: postgresql-ha (6970678148837286213) ------+---------+--------------+----+-----------+
| Member | Host | Role | State | TL | Lag in MB |
+-------------------------------------+--------------+---------+--------------+----+-----------+
| gitlab-database-1.example.com | 172.18.0.111 | Replica | running | 55 | 0 |
| gitlab-database-2.example.com | 172.18.0.112 | Replica | start failed | | unknown |
| gitlab-database-3.example.com | 172.18.0.113 | Leader | running | 55 | |
+-------------------------------------+--------------+---------+--------------+----+-----------+
```
1. Sign in to the broken server and reinitialize the database and replication. Patroni shuts
down PostgreSQL on that server, removes the data directory, and reinitializes it from scratch:
```shell
sudo gitlab-ctl patroni reinitialize-replica --member gitlab-database-2.example.com
```
This can be run on any Patroni node, but be aware that `sudo gitlab-ctl patroni
reinitialize-replica` without `--member` restarts the server it is run on.
You should run it locally on the broken server to reduce the risk of
unintended data loss.
1. Monitor the logs:
```shell
sudo gitlab-ctl tail patroni
```
### Reset the Patroni state in Consul
WARNING:
Resetting the Patroni state in Consul is a potentially destructive process. Make sure that you have a healthy database backup first.
As a last resort you can reset the Patroni state in Consul completely.
This may be required if your Patroni cluster is in an unknown or bad state and no node can start:
```plaintext
+ Cluster: postgresql-ha (6970678148837286213) ------+---------+---------+----+-----------+
| Member | Host | Role | State | TL | Lag in MB |
+-------------------------------------+--------------+---------+---------+----+-----------+
| gitlab-database-1.example.com | 172.18.0.111 | Replica | stopped | | unknown |
| gitlab-database-2.example.com | 172.18.0.112 | Replica | stopped | | unknown |
| gitlab-database-3.example.com | 172.18.0.113 | Replica | stopped | | unknown |
+-------------------------------------+--------------+---------+---------+----+-----------+
```
**Before deleting the Patroni state in Consul**,
[try to resolve the `gitlab-ctl` errors](#errors-running-gitlab-ctl) on the Patroni nodes.
This process results in a reinitialized Patroni cluster when
the first Patroni node starts.
To reset the Patroni state in Consul:
1. Take note of the Patroni node that was the leader, or that the application thinks is the current leader,
if the current state shows more than one, or none:
- Look on the PgBouncer nodes in `/var/opt/gitlab/consul/databases.ini`,
which contains the hostname of the current leader.
- Look in the Patroni logs `/var/log/gitlab/patroni/current` (or the older rotated and
compressed logs `/var/log/gitlab/patroni/@40000*`) on **all** database nodes to see
which server was most recently identified as the leader by the cluster:
```plaintext
INFO: no action. I am a secondary (database1.local) and following a leader (database2.local)
```
1. Stop Patroni on all nodes:
```shell
sudo gitlab-ctl stop patroni
```
1. Reset the state in Consul:
```shell
/opt/gitlab/embedded/bin/consul kv delete -recurse /service/postgresql-ha/
```
1. Start one Patroni node, which initializes the Patroni cluster to elect as a leader.
It's highly recommended to start the previous leader (noted in the first step),
so as to not lose existing writes that may have not been replicated because
of the broken cluster state:
```shell
sudo gitlab-ctl start patroni
```
1. Start all other Patroni nodes that join the Patroni cluster as replicas:
```shell
sudo gitlab-ctl start patroni
```
If you are still seeing issues, the next step is restoring the last healthy backup.
### Errors in the Patroni log about a `pg_hba.conf` entry for `127.0.0.1`
The following log entry in the Patroni log indicates the replication is not working
and a configuration change is needed:
```plaintext
FATAL: no pg_hba.conf entry for replication connection from host "127.0.0.1", user "gitlab_replicator"
```
To fix the problem, ensure the loopback interface is included in the CIDR addresses list:
1. Edit `/etc/gitlab/gitlab.rb`:
```ruby
postgresql['trust_auth_cidr_addresses'] = %w(<other_cidrs> 127.0.0.1/32)
```
1. [Reconfigure GitLab](../restart_gitlab.md#reconfigure-a-linux-package-installation) for the changes to take effect.
1. Check that [all the replicas are synchronized](#check-replication-status).
### Errors in Patroni logs: the requested start point is ahead of the Write Ahead Log (WAL) flush position
This error indicates that the database is not replicating:
```plaintext
FATAL: could not receive data from WAL stream: ERROR: requested starting point 0/5000000 is ahead of the WAL flush position of this server 0/4000388
```
This example error is from a replica that was initially misconfigured, and had never replicated.
Fix it [by reinitializing the replica](#reinitialize-a-replica).
### Patroni fails to start with `MemoryError`
Patroni may fail to start, logging an error and stack trace:
```plaintext
MemoryError
Traceback (most recent call last):
File "/opt/gitlab/embedded/bin/patroni", line 8, in <module>
sys.exit(main())
[..]
File "/opt/gitlab/embedded/lib/python3.7/ctypes/__init__.py", line 273, in _reset_cache
CFUNCTYPE(c_int)(lambda: None)
```
If the stack trace ends with `CFUNCTYPE(c_int)(lambda: None)`, this code triggers `MemoryError`
if the Linux server has been hardened for security.
The code causes Python to write temporary executable files, and it fails if it cannot find a file system in which to do this. For example, if `noexec` is set on the `/tmp` file system, it fails with `MemoryError` ([read more in the issue](https://gitlab.com/gitlab-org/omnibus-gitlab/-/issues/6184)).
Workarounds:
- Remove `noexec` from the mount options for filesystems like `/tmp` and `/var/tmp`.
- If set to enforcing, SELinux may also prevent these operations. Verify the issue is fixed by setting
SELinux to permissive.
Patroni first shipped in the Linux package for GitLab 13.1, along with a build of Python 3.7.
The code which causes this was removed in Python 3.8: this fix shipped in
[the Linux package for GitLab 14.3](https://gitlab.com/gitlab-org/omnibus-gitlab/-/merge_requests/5547)
and later, removing the need for a workaround.
### Errors running `gitlab-ctl`
Patroni nodes can get into a state where `gitlab-ctl` commands fail
and `gitlab-ctl reconfigure` cannot fix the node.
If this coincides with a version upgrade of PostgreSQL, [follow a different procedure](#postgresql-major-version-upgrade-fails-on-a-patroni-replica).
One common symptom is that `gitlab-ctl` cannot determine
information it needs about the installation if the database server is failing to start:
```plaintext
Malformed configuration JSON file found at /opt/gitlab/embedded/nodes/<HOSTNAME>.json.
This usually happens when your last run of `gitlab-ctl reconfigure` didn't complete successfully.
```
```plaintext
Error while reinitializing replica on the current node: Attributes not found in
/opt/gitlab/embedded/nodes/<HOSTNAME>.json, has reconfigure been run yet?
```
Similarly, the nodes file (`/opt/gitlab/embedded/nodes/<HOSTNAME>.json`) should contain a lot of information,
but might get created with only:
```json
{
"name": "<HOSTNAME>"
}
```
The following process for fixing this includes reinitializing this replica:
the current state of PostgreSQL on this node is discarded:
1. Shut down the Patroni and (if present) PostgreSQL services:
```shell
sudo gitlab-ctl status
sudo gitlab-ctl stop patroni
sudo gitlab-ctl stop postgresql
```
1. Remove `/var/opt/gitlab/postgresql/data` in case its state prevents
PostgreSQL from starting:
```shell
cd /var/opt/gitlab/postgresql
sudo rm -rf data
```
**Take care with this step to avoid data loss**.
This step can be also achieved by renaming `data/`:
make sure there's enough free disk for a new copy of the primary database,
and remove the extra directory when the replica is fixed.
1. With PostgreSQL not running, the nodes file now gets created successfully:
```shell
sudo gitlab-ctl reconfigure
```
1. Start Patroni:
```shell
sudo gitlab-ctl start patroni
```
1. Monitor the logs and check the cluster state:
```shell
sudo gitlab-ctl tail patroni
sudo gitlab-ctl patroni members
```
1. Re-run `reconfigure` again:
```shell
sudo gitlab-ctl reconfigure
```
1. Reinitialize the replica if `gitlab-ctl patroni members` indicates this is needed:
```shell
sudo gitlab-ctl patroni reinitialize-replica
```
If this procedure doesn't work **and** if the cluster is unable to elect a leader,
[there is another fix](#reset-the-patroni-state-in-consul) which should only be
used as a last resort.
### PostgreSQL major version upgrade fails on a Patroni replica
A Patroni **replica** can get stuck in a loop during `gitlab-ctl pg-upgrade`, and
the upgrade fails.
An example set of symptoms is as follows:
1. A `postgresql` service is defined,
which shouldn't usually be present on a Patroni node. It is present because
`gitlab-ctl pg-upgrade` adds it to create a new empty database:
```plaintext
run: patroni: (pid 1972) 1919s; run: log: (pid 1971) 1919s
down: postgresql: 1s, normally up, want up; run: log: (pid 1973) 1919s
```
1. PostgreSQL generates `PANIC` log entries in
`/var/log/gitlab/postgresql/current` as Patroni is removing
`/var/opt/gitlab/postgresql/data` as part of reinitializing the replica:
```plaintext
DETAIL: Could not open file "pg_xact/0000": No such file or directory.
WARNING: terminating connection because of crash of another server process
LOG: all server processes terminated; reinitializing
PANIC: could not open file "global/pg_control": No such file or directory
```
1. In `/var/log/gitlab/patroni/current`, Patroni logs the following.
The local PostgreSQL version is different from the cluster leader:
```plaintext
INFO: trying to bootstrap from leader 'HOSTNAME'
pg_basebackup: incompatible server version 12.6
pg_basebackup: removing data directory "/var/opt/gitlab/postgresql/data"
ERROR: Error when fetching backup: pg_basebackup exited with code=1
```
**Important**: This workaround applies when the Patroni cluster is in the following state:
- The [leader has been successfully upgraded to the new major version](#upgrading-postgresql-major-version-in-a-patroni-cluster).
- The step to upgrade PostgreSQL on replicas is failing.
This workaround completes the PostgreSQL upgrade on a Patroni replica
by setting the node to use the new PostgreSQL version, and then reinitializing
it as a replica in the new cluster that was created
when the leader was upgraded:
1. Check the cluster status on all nodes to confirm which is the leader
and what state the replicas are in
```shell
sudo gitlab-ctl patroni members
```
1. Replica: check which version of PostgreSQL is active:
```shell
sudo ls -al /opt/gitlab/embedded/bin | grep postgres
```
1. Replica: ensure the nodes file is correct and `gitlab-ctl` can run. This resolves
the [errors running `gitlab-ctl`](#errors-running-gitlab-ctl) issue if the replica
has any of those errors as well:
```shell
sudo gitlab-ctl stop patroni
sudo gitlab-ctl reconfigure
```
1. Replica: relink the PostgreSQL binaries to the required version
to fix the `incompatible server version` error:
1. Edit `/etc/gitlab/gitlab.rb` and specify the required version:
```ruby
postgresql['version'] = 13
```
1. Reconfigure GitLab:
```shell
sudo gitlab-ctl reconfigure
```
1. Check the binaries are relinked. The binaries distributed for
PostgreSQL vary between major releases, it's typical to
have a small number of incorrect symbolic links:
```shell
sudo ls -al /opt/gitlab/embedded/bin | grep postgres
```
1. Replica: ensure PostgreSQL is fully reinitialized for the specified version:
```shell
cd /var/opt/gitlab/postgresql
sudo rm -rf data
sudo gitlab-ctl reconfigure
```
1. Replica: optionally monitor the database in two additional terminal sessions:
- Disk use increases as `pg_basebackup` runs. Track progress of the
replica initialization with:
```shell
cd /var/opt/gitlab/postgresql
watch du -sh data
```
- Monitor the process in the logs:
```shell
sudo gitlab-ctl tail patroni
```
1. Replica: Start Patroni to reinitialize the replica:
```shell
sudo gitlab-ctl start patroni
```
1. Replica: After it completes, remove the hardcoded version from `/etc/gitlab/gitlab.rb`:
1. Edit `/etc/gitlab/gitlab.rb` and remove `postgresql['version']`.
1. Reconfigure GitLab:
```shell
sudo gitlab-ctl reconfigure
```
1. Check the correct binaries are linked:
```shell
sudo ls -al /opt/gitlab/embedded/bin | grep postgres
```
1. Check the cluster status on all nodes:
```shell
sudo gitlab-ctl patroni members
```
Repeat this procedure on the other replica if required.
### Issues with other components
If you're running into an issue with a component not outlined here, be sure to check the troubleshooting section of their specific documentation page:
- [Consul](../consul.md#troubleshooting-consul)
- [PostgreSQL](https://docs.gitlab.com/omnibus/settings/database.html#troubleshooting)

View File

@ -0,0 +1,472 @@
---
stage: Data Stores
group: Database
info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments
---
# Troubleshooting PostgreSQL replication and failover for Linux package installations **(PREMIUM SELF)**
When working with PostgreSQL replication and failover, you might encounter the following issues.
## Consul and PostgreSQL changes not taking effect
Due to the potential impacts, `gitlab-ctl reconfigure` only reloads Consul and PostgreSQL, it does not restart the services. However, not all changes can be activated by reloading.
To restart either service, run `gitlab-ctl restart SERVICE`
For PostgreSQL, it is usually safe to restart the leader node by default. Automatic failover defaults to a 1 minute timeout. Provided the database returns before then, nothing else needs to be done.
On the Consul server nodes, it is important to [restart the Consul service](../consul.md#restart-consul) in a controlled manner.
## PgBouncer error `ERROR: pgbouncer cannot connect to server`
You may get this error when running `gitlab-rake gitlab:db:configure` or you
may see the error in the PgBouncer log file.
```plaintext
PG::ConnectionBad: ERROR: pgbouncer cannot connect to server
```
The problem may be that your PgBouncer node's IP address is not included in the
`trust_auth_cidr_addresses` setting in `/etc/gitlab/gitlab.rb` on the database nodes.
You can confirm that this is the issue by checking the PostgreSQL log on the leader
database node. If you see the following error then `trust_auth_cidr_addresses`
is the problem.
```plaintext
2018-03-29_13:59:12.11776 FATAL: no pg_hba.conf entry for host "123.123.123.123", user "pgbouncer", database "gitlabhq_production", SSL off
```
To fix the problem, add the IP address to `/etc/gitlab/gitlab.rb`.
```ruby
postgresql['trust_auth_cidr_addresses'] = %w(123.123.123.123/32 <other_cidrs>)
```
[Reconfigure GitLab](../restart_gitlab.md#reconfigure-a-linux-package-installation) for the changes to take effect.
## PgBouncer errors `Error running command: GitlabCtl::Errors::ExecutionError` and `ERROR: database gitlabhq_production is not paused`
Due to a [known issue](https://gitlab.com/gitlab-org/omnibus-gitlab/-/issues/8166) that
affects versions of GitLab prior to 16.5.0, the automatic failover of PgBouncer nodes does not
happen after a [Patroni switchover](../../administration/postgresql/replication_and_failover.md#manual-failover-procedure-for-patroni). In this
example, GitLab failed to detect a paused database, then attempted to `RESUME` a
not-paused database:
```plaintext
INFO -- : Running: gitlab-ctl pgb-notify --pg-database gitlabhq_production --newhost database7.example.com --user pgbouncer --hostuser gitlab-consul
ERROR -- : STDERR: Error running command: GitlabCtl::Errors::ExecutionError
ERROR -- : STDERR: ERROR: ERROR: database gitlabhq_production is not paused
```
To ensure a [Patroni switchover](../../administration/postgresql/replication_and_failover.md#manual-failover-procedure-for-patroni) succeeds,
you must manually restart the PgBouncer service on all PgBouncer nodes with this command:
```shell
gitlab-ctl restart pgbouncer
```
## Reinitialize a replica
If a replica cannot start or rejoin the cluster, or when it lags behind and cannot catch up, it might be necessary to reinitialize the replica:
1. [Check the replication status](../../administration/postgresql/replication_and_failover.md#check-replication-status) to confirm which server
needs to be reinitialized. For example:
```plaintext
+ Cluster: postgresql-ha (6970678148837286213) ------+---------+--------------+----+-----------+
| Member | Host | Role | State | TL | Lag in MB |
+-------------------------------------+--------------+---------+--------------+----+-----------+
| gitlab-database-1.example.com | 172.18.0.111 | Replica | running | 55 | 0 |
| gitlab-database-2.example.com | 172.18.0.112 | Replica | start failed | | unknown |
| gitlab-database-3.example.com | 172.18.0.113 | Leader | running | 55 | |
+-------------------------------------+--------------+---------+--------------+----+-----------+
```
1. Sign in to the broken server and reinitialize the database and replication. Patroni shuts
   down PostgreSQL on that server, removes the data directory, and reinitializes it from scratch:
```shell
sudo gitlab-ctl patroni reinitialize-replica --member gitlab-database-2.example.com
```
This can be run on any Patroni node, but be aware that `sudo gitlab-ctl patroni
reinitialize-replica` without `--member` restarts the server it is run on.
You should run it locally on the broken server to reduce the risk of
unintended data loss.
1. Monitor the logs:
```shell
sudo gitlab-ctl tail patroni
```
## Reset the Patroni state in Consul
WARNING:
Resetting the Patroni state in Consul is a potentially destructive process. Make sure that you have a healthy database backup first.
As a last resort you can reset the Patroni state in Consul completely.
This may be required if your Patroni cluster is in an unknown or bad state and no node can start:
```plaintext
+ Cluster: postgresql-ha (6970678148837286213) ------+---------+---------+----+-----------+
| Member | Host | Role | State | TL | Lag in MB |
+-------------------------------------+--------------+---------+---------+----+-----------+
| gitlab-database-1.example.com | 172.18.0.111 | Replica | stopped | | unknown |
| gitlab-database-2.example.com | 172.18.0.112 | Replica | stopped | | unknown |
| gitlab-database-3.example.com | 172.18.0.113 | Replica | stopped | | unknown |
+-------------------------------------+--------------+---------+---------+----+-----------+
```
**Before deleting the Patroni state in Consul**,
[try and resolve the `gitlab-ctl` errors](#errors-running-gitlab-ctl) on the Patroni nodes.
This process results in a reinitialized Patroni cluster when
the first Patroni node starts.
To reset the Patroni state in Consul:
1. Take note of the Patroni node that was the leader, or that the application thinks is the current leader,
if the current state shows more than one, or none:
- Look on the PgBouncer nodes in `/var/opt/gitlab/consul/databases.ini`,
which contains the hostname of the current leader.
- Look in the Patroni logs `/var/log/gitlab/patroni/current` (or the older rotated and
compressed logs `/var/log/gitlab/patroni/@40000*`) on **all** database nodes to see
which server was most recently identified as the leader by the cluster:
```plaintext
INFO: no action. I am a secondary (database1.local) and following a leader (database2.local)
```
1. Stop Patroni on all nodes:
```shell
sudo gitlab-ctl stop patroni
```
1. Reset the state in Consul:
```shell
/opt/gitlab/embedded/bin/consul kv delete -recurse /service/postgresql-ha/
```
1. Start one Patroni node, which initializes the Patroni cluster and is elected as the leader.
It's highly recommended to start the previous leader (noted in the first step),
so as to not lose existing writes that may have not been replicated because
of the broken cluster state:
```shell
sudo gitlab-ctl start patroni
```
1. Start all other Patroni nodes that join the Patroni cluster as replicas:
```shell
sudo gitlab-ctl start patroni
```
If you are still seeing issues, the next step is restoring the last healthy backup.
## Errors in the Patroni log about a `pg_hba.conf` entry for `127.0.0.1`
The following log entry in the Patroni log indicates the replication is not working
and a configuration change is needed:
```plaintext
FATAL: no pg_hba.conf entry for replication connection from host "127.0.0.1", user "gitlab_replicator"
```
To fix the problem, ensure the loopback interface is included in the CIDR addresses list:
1. Edit `/etc/gitlab/gitlab.rb`:
```ruby
postgresql['trust_auth_cidr_addresses'] = %w(<other_cidrs> 127.0.0.1/32)
```
1. [Reconfigure GitLab](../restart_gitlab.md#reconfigure-a-linux-package-installation) for the changes to take effect.
1. Check that [all the replicas are synchronized](../../administration/postgresql/replication_and_failover.md#check-replication-status)
## Errors in Patroni logs: the requested start point is ahead of the Write Ahead Log (WAL) flush position
This error indicates that the database is not replicating:
```plaintext
FATAL: could not receive data from WAL stream: ERROR: requested starting point 0/5000000 is ahead of the WAL flush position of this server 0/4000388
```
This example error is from a replica that was initially misconfigured, and had never replicated.
Fix it [by reinitializing the replica](#reinitialize-a-replica).
## Patroni fails to start with `MemoryError`
Patroni may fail to start, logging an error and stack trace:
```plaintext
MemoryError
Traceback (most recent call last):
File "/opt/gitlab/embedded/bin/patroni", line 8, in <module>
sys.exit(main())
[..]
File "/opt/gitlab/embedded/lib/python3.7/ctypes/__init__.py", line 273, in _reset_cache
CFUNCTYPE(c_int)(lambda: None)
```
If the stack trace ends with `CFUNCTYPE(c_int)(lambda: None)`, this code triggers `MemoryError`
if the Linux server has been hardened for security.
The code causes Python to write temporary executable files. If it cannot find a file system in which to do this (for example, if `noexec` is set on the `/tmp` file system), it fails with `MemoryError` ([read more in the issue](https://gitlab.com/gitlab-org/omnibus-gitlab/-/issues/6184)).
Workarounds:
- Remove `noexec` from the mount options for filesystems like `/tmp` and `/var/tmp`.
- If set to enforcing, SELinux may also prevent these operations. Verify the issue is fixed by setting
SELinux to permissive.
Patroni first shipped in the Linux package for GitLab 13.1, along with a build of Python 3.7.
The code which causes this was removed in Python 3.8: this fix shipped in
[the Linux package for GitLab 14.3](https://gitlab.com/gitlab-org/omnibus-gitlab/-/merge_requests/5547)
and later, removing the need for a workaround.
## Errors running `gitlab-ctl`
Patroni nodes can get into a state where `gitlab-ctl` commands fail
and `gitlab-ctl reconfigure` cannot fix the node.
If this coincides with a version upgrade of PostgreSQL, [follow a different procedure](#postgresql-major-version-upgrade-fails-on-a-patroni-replica).
One common symptom is that `gitlab-ctl` cannot determine
information it needs about the installation if the database server is failing to start:
```plaintext
Malformed configuration JSON file found at /opt/gitlab/embedded/nodes/<HOSTNAME>.json.
This usually happens when your last run of `gitlab-ctl reconfigure` didn't complete successfully.
```
```plaintext
Error while reinitializing replica on the current node: Attributes not found in
/opt/gitlab/embedded/nodes/<HOSTNAME>.json, has reconfigure been run yet?
```
Similarly, the nodes file (`/opt/gitlab/embedded/nodes/<HOSTNAME>.json`) should contain a lot of information,
but might get created with only:
```json
{
"name": "<HOSTNAME>"
}
```
The following process for fixing this includes reinitializing this replica:
the current state of PostgreSQL on this node is discarded:
1. Shut down the Patroni and (if present) PostgreSQL services:
```shell
sudo gitlab-ctl status
sudo gitlab-ctl stop patroni
sudo gitlab-ctl stop postgresql
```
1. Remove `/var/opt/gitlab/postgresql/data` in case its state prevents
PostgreSQL from starting:
```shell
cd /var/opt/gitlab/postgresql
sudo rm -rf data
```
**Take care with this step to avoid data loss**.
This step can be also achieved by renaming `data/`:
make sure there's enough free disk for a new copy of the primary database,
and remove the extra directory when the replica is fixed.
1. With PostgreSQL not running, the nodes file now gets created successfully:
```shell
sudo gitlab-ctl reconfigure
```
1. Start Patroni:
```shell
sudo gitlab-ctl start patroni
```
1. Monitor the logs and check the cluster state:
```shell
sudo gitlab-ctl tail patroni
sudo gitlab-ctl patroni members
```
1. Re-run `reconfigure` again:
```shell
sudo gitlab-ctl reconfigure
```
1. Reinitialize the replica if `gitlab-ctl patroni members` indicates this is needed:
```shell
sudo gitlab-ctl patroni reinitialize-replica
```
If this procedure doesn't work **and** if the cluster is unable to elect a leader,
[there is another fix](#reset-the-patroni-state-in-consul) which should only be
used as a last resort.
## PostgreSQL major version upgrade fails on a Patroni replica
A Patroni **replica** can get stuck in a loop during `gitlab-ctl pg-upgrade`, and
the upgrade fails.
An example set of symptoms is as follows:
1. A `postgresql` service is defined,
which shouldn't usually be present on a Patroni node. It is present because
`gitlab-ctl pg-upgrade` adds it to create a new empty database:
```plaintext
run: patroni: (pid 1972) 1919s; run: log: (pid 1971) 1919s
down: postgresql: 1s, normally up, want up; run: log: (pid 1973) 1919s
```
1. PostgreSQL generates `PANIC` log entries in
`/var/log/gitlab/postgresql/current` as Patroni is removing
`/var/opt/gitlab/postgresql/data` as part of reinitializing the replica:
```plaintext
DETAIL: Could not open file "pg_xact/0000": No such file or directory.
WARNING: terminating connection because of crash of another server process
LOG: all server processes terminated; reinitializing
PANIC: could not open file "global/pg_control": No such file or directory
```
1. In `/var/log/gitlab/patroni/current`, Patroni logs the following.
The local PostgreSQL version is different from the cluster leader:
```plaintext
INFO: trying to bootstrap from leader 'HOSTNAME'
pg_basebackup: incompatible server version 12.6
pg_basebackup: removing data directory "/var/opt/gitlab/postgresql/data"
ERROR: Error when fetching backup: pg_basebackup exited with code=1
```
**Important**: This workaround applies when the Patroni cluster is in the following state:
- The [leader has been successfully upgraded to the new major version](../../administration/postgresql/replication_and_failover.md#upgrading-postgresql-major-version-in-a-patroni-cluster).
- The step to upgrade PostgreSQL on replicas is failing.
This workaround completes the PostgreSQL upgrade on a Patroni replica
by setting the node to use the new PostgreSQL version, and then reinitializing
it as a replica in the new cluster that was created
when the leader was upgraded:
1. Check the cluster status on all nodes to confirm which is the leader
and what state the replicas are in
```shell
sudo gitlab-ctl patroni members
```
1. Replica: check which version of PostgreSQL is active:
```shell
sudo ls -al /opt/gitlab/embedded/bin | grep postgres
```
1. Replica: ensure the nodes file is correct and `gitlab-ctl` can run. This resolves
the [errors running `gitlab-ctl`](#errors-running-gitlab-ctl) issue if the replica
has any of those errors as well:
```shell
sudo gitlab-ctl stop patroni
sudo gitlab-ctl reconfigure
```
1. Replica: relink the PostgreSQL binaries to the required version
to fix the `incompatible server version` error:
1. Edit `/etc/gitlab/gitlab.rb` and specify the required version:
```ruby
postgresql['version'] = 13
```
1. Reconfigure GitLab:
```shell
sudo gitlab-ctl reconfigure
```
1. Check the binaries are relinked. The binaries distributed for
   PostgreSQL vary between major releases, and it's typical to
   have a small number of incorrect symbolic links:
```shell
sudo ls -al /opt/gitlab/embedded/bin | grep postgres
```
1. Replica: ensure PostgreSQL is fully reinitialized for the specified version:
```shell
cd /var/opt/gitlab/postgresql
sudo rm -rf data
sudo gitlab-ctl reconfigure
```
1. Replica: optionally monitor the database in two additional terminal sessions:
- Disk use increases as `pg_basebackup` runs. Track progress of the
replica initialization with:
```shell
cd /var/opt/gitlab/postgresql
watch du -sh data
```
- Monitor the process in the logs:
```shell
sudo gitlab-ctl tail patroni
```
1. Replica: Start Patroni to reinitialize the replica:
```shell
sudo gitlab-ctl start patroni
```
1. Replica: After it completes, remove the hardcoded version from `/etc/gitlab/gitlab.rb`:
1. Edit `/etc/gitlab/gitlab.rb` and remove `postgresql['version']`.
1. Reconfigure GitLab:
```shell
sudo gitlab-ctl reconfigure
```
1. Check the correct binaries are linked:
```shell
sudo ls -al /opt/gitlab/embedded/bin | grep postgres
```
1. Check the cluster status on all nodes:
```shell
sudo gitlab-ctl patroni members
```
Repeat this procedure on the other replica if required.
## Issues with other components
If you're running into an issue with a component not outlined here, be sure to check the troubleshooting section of their specific documentation page:
- [Consul](../consul.md#troubleshooting-consul)
- [PostgreSQL](https://docs.gitlab.com/omnibus/settings/database.html#troubleshooting)

View File

@ -707,7 +707,7 @@ SSH in to any of the Patroni nodes on the **primary site**:
```
If the 'State' column for any node doesn't say "running", check the
[PostgreSQL replication and failover troubleshooting section](../postgresql/replication_and_failover.md#pgbouncer-error-error-pgbouncer-cannot-connect-to-server)
[PostgreSQL replication and failover troubleshooting section](../../administration/postgresql/replication_and_failover_troubleshooting.md#pgbouncer-error-error-pgbouncer-cannot-connect-to-server)
before proceeding.
<div align="right">

View File

@ -715,7 +715,7 @@ SSH in to any of the Patroni nodes on the **primary site**:
```
If the 'State' column for any node doesn't say "running", check the
[PostgreSQL replication and failover troubleshooting section](../postgresql/replication_and_failover.md#pgbouncer-error-error-pgbouncer-cannot-connect-to-server)
[PostgreSQL replication and failover troubleshooting section](../../administration/postgresql/replication_and_failover_troubleshooting.md#pgbouncer-error-error-pgbouncer-cannot-connect-to-server)
before proceeding.
<div align="right">

View File

@ -981,7 +981,7 @@ SSH in to any of the Patroni nodes on the **primary site**:
```
If the 'State' column for any node doesn't say "running", check the
[PostgreSQL replication and failover troubleshooting section](../postgresql/replication_and_failover.md#pgbouncer-error-error-pgbouncer-cannot-connect-to-server)
[PostgreSQL replication and failover troubleshooting section](../../administration/postgresql/replication_and_failover_troubleshooting.md#pgbouncer-error-error-pgbouncer-cannot-connect-to-server)
before proceeding.
<div align="right">

View File

@ -717,7 +717,7 @@ SSH in to any of the Patroni nodes on the **primary site**:
```
If the 'State' column for any node doesn't say "running", check the
[PostgreSQL replication and failover troubleshooting section](../postgresql/replication_and_failover.md#pgbouncer-error-error-pgbouncer-cannot-connect-to-server)
[PostgreSQL replication and failover troubleshooting section](../../administration/postgresql/replication_and_failover_troubleshooting.md#pgbouncer-error-error-pgbouncer-cannot-connect-to-server)
before proceeding.
<div align="right">

View File

@ -980,7 +980,7 @@ SSH in to any of the Patroni nodes on the **primary site**:
```
If the 'State' column for any node doesn't say "running", check the
[PostgreSQL replication and failover troubleshooting section](../postgresql/replication_and_failover.md#pgbouncer-error-error-pgbouncer-cannot-connect-to-server)
[PostgreSQL replication and failover troubleshooting section](../../administration/postgresql/replication_and_failover_troubleshooting.md#pgbouncer-error-error-pgbouncer-cannot-connect-to-server)
before proceeding.
<div align="right">

View File

@ -103,7 +103,7 @@ This section is for links to information elsewhere in the GitLab documentation.
- Managing PostgreSQL versions on Linux package installations [from the development docs](https://docs.gitlab.com/omnibus/development/managing-postgresql-versions.html).
- [PostgreSQL scaling](../postgresql/replication_and_failover.md)
- Including [troubleshooting](../postgresql/replication_and_failover.md#troubleshooting)
- Including [troubleshooting](../../administration/postgresql/replication_and_failover_troubleshooting.md)
`gitlab-ctl patroni check-leader` and PgBouncer errors.
- [Developer database documentation](../../development/feature_development.md#database-guides),

View File

@ -31764,6 +31764,7 @@ Name of the feature that the callout is for.
| <a id="usercalloutfeaturenameenumcloud_licensing_subscription_activation_banner"></a>`CLOUD_LICENSING_SUBSCRIPTION_ACTIVATION_BANNER` | Callout feature name for cloud_licensing_subscription_activation_banner. |
| <a id="usercalloutfeaturenameenumcluster_security_warning"></a>`CLUSTER_SECURITY_WARNING` | Callout feature name for cluster_security_warning. |
| <a id="usercalloutfeaturenameenumcode_suggestions_ga_non_owner_alert"></a>`CODE_SUGGESTIONS_GA_NON_OWNER_ALERT` | Callout feature name for code_suggestions_ga_non_owner_alert. |
| <a id="usercalloutfeaturenameenumcode_suggestions_ga_owner_alert"></a>`CODE_SUGGESTIONS_GA_OWNER_ALERT` | Callout feature name for code_suggestions_ga_owner_alert. |
| <a id="usercalloutfeaturenameenumduo_chat_callout"></a>`DUO_CHAT_CALLOUT` | Callout feature name for duo_chat_callout. |
| <a id="usercalloutfeaturenameenumeoa_bronze_plan_banner"></a>`EOA_BRONZE_PLAN_BANNER` | Callout feature name for eoa_bronze_plan_banner. |
| <a id="usercalloutfeaturenameenumfeature_flags_new_version"></a>`FEATURE_FLAGS_NEW_VERSION` | Callout feature name for feature_flags_new_version. |

View File

@ -221,23 +221,24 @@ To configure the Code Quality job:
For an example, see [Download output in JSON format](#download-output-in-json-format).
### Available CI/CD variables
> In [GitLab 13.4 and later](https://gitlab.com/gitlab-org/gitlab/-/issues/11100), the option to override the Code Quality environment variables was added.
## Available CI/CD variables
Code Quality can be customized by defining available CI/CD variables:
| CI/CD variable | Description |
| --------------------------- | ----------- |
| `SOURCE_CODE` | Path to the source code to scan. |
| `TIMEOUT_SECONDS` | Custom timeout per engine container for the `codeclimate analyze` command, default is 900 seconds (15 minutes). |
| `CODECLIMATE_DEBUG` | Set to enable [Code Climate debug mode](https://github.com/codeclimate/codeclimate#environment-variables) |
| `CODECLIMATE_DEV` | Set to enable `--dev` mode which lets you run engines not known to the CLI. |
| `REPORT_STDOUT` | Set to print the report to `STDOUT` instead of generating the usual report file. |
| `REPORT_FORMAT` | Set to control the format of the generated report file. One of: `json\|html`. |
| `ENGINE_MEMORY_LIMIT_BYTES` | Set the memory limit for engines, default is 1,024,000,000 bytes. |
| `CODE_QUALITY_DISABLED` | Prevents the Code Quality job from running. |
| `CODECLIMATE_PREFIX` | Set a prefix to use with all `docker pull` commands in CodeClimate engines. Useful for [offline scanning](https://github.com/codeclimate/codeclimate/pull/948). |
| CI/CD variable | Description |
|---------------------------------|-------------|
| `CODECLIMATE_DEBUG` | Set to enable [Code Climate debug mode](https://github.com/codeclimate/codeclimate#environment-variables) |
| `CODECLIMATE_DEV` | Set to enable `--dev` mode which lets you run engines not known to the CLI. |
| `CODECLIMATE_PREFIX` | Set a prefix to use with all `docker pull` commands in CodeClimate engines. Useful for [offline scanning](https://github.com/codeclimate/codeclimate/pull/948). For more information, see [Use a private container registry](#use-a-private-container-image-registry). |
| `CODECLIMATE_REGISTRY_USERNAME` | Set to specify the username for the registry domain parsed from `CODECLIMATE_PREFIX`. |
| `CODECLIMATE_REGISTRY_PASSWORD` | Set to specify the password for the registry domain parsed from `CODECLIMATE_PREFIX`. |
| `CODE_QUALITY_DISABLED` | Prevents the Code Quality job from running. |
| `CODE_QUALITY_IMAGE` | Set to a fully prefixed image name. Image must be accessible from your job environment. |
| `ENGINE_MEMORY_LIMIT_BYTES` | Set the memory limit for engines. Default: 1,024,000,000 bytes. |
| `REPORT_STDOUT` | Set to print the report to `STDOUT` instead of generating the usual report file. |
| `REPORT_FORMAT` | Set to control the format of the generated report file. Either `json` or `html`. |
| `SOURCE_CODE` | Path to the source code to scan. |
| `TIMEOUT_SECONDS` | Custom timeout per engine container for the `codeclimate analyze` command. Default: 900 seconds (15 minutes) |
## Output

View File

@ -26,6 +26,11 @@ For example:
While some older sections of the nav are alphabetical, the nav should primarily be workflow-based.
Without a navigation entry:
- The navigation closes when the page is opened, and the reader loses their place.
- The page isn't visible in a group with other pages.
## Choose the right words for your navigation entry
Before you add an item to the left nav, choose the parts of speech you want to use.
@ -41,35 +46,14 @@ as helpful as **Get started with runners**.
## Add a navigation entry
To add a topic to the global nav, edit
[`navigation.yaml`](https://gitlab.com/gitlab-org/gitlab-docs/blob/main/content/_data/navigation.yaml)
and add your item.
**Do not** add items to the global nav without
the consent of one of the technical writers.
Without a navigation entry:
To add a topic to the global navigation:
- The navigation closes when the page is opened, and the reader loses their place.
- The page isn't visible in a group with other pages.
### Pages you don't need to add
Exclude these pages from the global nav:
- Legal notices.
- Pages in the `architecture/blueprints` directory.
- Pages in the `user/application_security/dast/checks/` directory.
The following pages should probably be in the global nav, but the technical writers
do not actively work to add them:
- Pages in the `/development` directory.
- Pages authored by the support team, which are under the `doc/administration/troubleshooting` directory.
Sometimes pages for deprecated features are not in the global nav, depending on how long ago the feature was deprecated.
All other pages should be in the global nav.
The technical writing team runs a report to determine which pages are not in the nav.
The team reviews this list each month.
1. In the [`navigation.yaml`](https://gitlab.com/gitlab-org/gitlab-docs/blob/main/content/_data/navigation.yaml)
file, add the item.
1. Assign the MR to a technical writer for review and merge.
### Where to add
@ -110,7 +94,28 @@ mechanics of what is required is [documented below](#data-file) but, in principl
substitution for **Continuous Integration**.
- Navigation links must follow the rules documented in the [data file](#data-file).
## How it works
### Pages you don't need to add
Exclude these pages from the global nav:
- Legal notices.
- Pages in the `architecture/blueprints` directory.
- Pages in the `user/application_security/dast/checks/` directory.
The following pages should probably be in the global nav, but the technical writers
do not actively work to add them:
- Pages in the `/development` directory.
- Pages authored by the support team, which are under the `doc/administration/troubleshooting` directory.
Sometimes pages for deprecated features are not in the global nav, depending on how long ago the feature was deprecated.
All other pages should be in the global nav.
The technical writing team runs a report to determine which pages are not in the nav.
The team reviews this list each month.
## Navigation structure
The global nav has five levels:
@ -122,9 +127,6 @@ The global nav has five levels:
You can view this structure in [the `navigation.yml` file](https://gitlab.com/gitlab-org/gitlab-docs/-/blob/main/content/_data/navigation.yaml).
**Do not** [add items](#add-a-navigation-entry) to the global nav without
the consent of one of the technical writers.
## Composition
The global nav is built from two files:

View File

@ -1640,13 +1640,37 @@ When names change, it is more complicated to search or grep text that has line b
Tier badges provide information about a feature and are displayed next to the topic title.
You should assign a tier badge:
#### When to add tier badges
- To all H1 topic titles, except the pages under `doc/development/*`.
- To topic titles that don't apply to the same tier as the H1.
Assign tier badges to:
- All H1 topic titles, except the pages under `doc/development/*` and `doc/solutions/*`.
- Topic titles that don't apply to the same tier as the H1.
The H1 tier badge should be the badge that applies to the lowest tier for the features on the page.
#### When not to add tier badges
Do not assign tier badges:
- When a feature does not have one obvious subscription tier or offering.
For example, if a feature applies to one tier for SaaS and a different tier for self-managed.
In this case, do any or all of the following:
- Use a `NOTE` in an alert box to describe the tiers.
- Add tier badges to other topic titles where this information makes more sense.
- Do not add tier badges to the H1.
##### Pages that don't need a tier badge
Some pages won't have a tier badge, because no obvious tier badge applies. For example:
- Tutorials.
- Pages that compare features from different tiers.
- Pages in the `/development` folder. These pages are automatically assigned a `Contribute` badge.
- Pages in the `/solutions` folder. These pages are automatically assigned a `Solutions` badge.
#### Available product tier badges
Tier badges should include two components, in this order: a subscription tier and an offering.
@ -1671,7 +1695,9 @@ You can also add a third component for the feature's status:
For example, `**(FREE ALL EXPERIMENT)**`.
A tier or status can stand alone. An offering should always have a tier.
- A tier or status can stand alone.
- An offering should always have a tier.
- Do not add more than one offering, tier, or status. Multiples do not render properly in the documentation.
#### Add a tier badge
@ -1701,15 +1727,6 @@ Do not add tier badges inline with other text, except for [API attributes](../re
The single source of truth for a feature should be the topic where the
functionality is described.
##### Pages that don't need a tier badge
Some pages won't have a tier badge, because no obvious tier badge applies. For example:
- Tutorials.
- Pages that compare features from different tiers.
- Pages in the `/development` folder. These pages are automatically assigned a `Contribute` badge.
- Pages in the `/solutions` folder. These pages are automatically assigned a `Solutions` badge.
##### Administrator documentation tier badges
Topics that are only for instance administrators should be badged `<TIER> SELF`. Instance

View File

@ -74,11 +74,17 @@ Specific information applies to Linux package installations:
During a package upgrade, the database isn't upgraded to PostgreSQL 14.
If you want to upgrade to PostgreSQL 14, [you must do it manually](https://docs.gitlab.com/omnibus/settings/database.html#upgrade-packaged-postgresql-server).
PostgreSQL 14 isn't supported on Geo deployments and is [planned](https://gitlab.com/groups/gitlab-org/-/epics/9065)
for future releases.
If you want to use PostgreSQL 13, you must set `postgresql['version'] = 13` in `/etc/gitlab/gitlab.rb`.
### Geo installations
- PostgreSQL version 14 is the default for fresh installations of GitLab 16.7 and later. However, due to an [issue](https://gitlab.com/gitlab-org/omnibus-gitlab/-/issues/7768#note_1652076255), existing Geo secondary sites cannot be upgraded to PostgreSQL version 14. All Geo sites must run the same version of PostgreSQL. If you are adding a new Geo secondary site based on GitLab 16.7 you must take one of the following actions based on your configuration:
- You are adding your first Geo secondary site: [Upgrade the Primary site to PostgreSQL 14](https://docs.gitlab.com/omnibus/settings/database.html#upgrade-packaged-postgresql-server) before setting up the new Geo secondary site. No special action is required if your primary site is already running PostgreSQL 14.
- You are adding a new Geo secondary site to a deployment that already has one or more Geo secondaries:
- All sites are running PostgreSQL 13: Install the new Geo secondary site with [pinned PostgreSQL version 13](https://docs.gitlab.com/omnibus/settings/database.html#pin-the-packaged-postgresql-version-fresh-installs-only).
- All sites are running PostgreSQL 14: No special action is required.
## 16.6.0
- Old [CI Environment destroy jobs may be spawned](https://gitlab.com/gitlab-org/gitlab/-/issues/433264#) after upgrading to GitLab 16.6.

View File

@ -137,7 +137,7 @@ By default, the Helm installation command generated by GitLab:
- Creates a `Secret` resource for the agent's access token. To instead bring your own secret with a token, omit the token (`--set token=...`) and instead use `--set config.secretName=<your secret name>`.
- Creates a `Deployment` resource for the `agentk` pod.
To see the full list of customizations available, see the Helm chart's [default values file](https://gitlab.com/gitlab-org/charts/gitlab-agent/-/blob/main/values.yaml).
To see the full list of customizations available, see the Helm chart's [README](https://gitlab.com/gitlab-org/charts/gitlab-agent/-/blob/main/README.md#values).
##### Use the agent when KAS is behind a self-signed certificate

View File

@ -248,6 +248,11 @@ For GitLab.com, alternatively, when users need to [link SAML to their existing G
### Users receive a 404 **(PREMIUM SAAS)**
If the user receives a `404` after signing in successfully, check if you have IP restrictions configured. IP restriction settings are configured:
- On GitLab.com, [at the group level](../../../user/group/access_and_permissions.md#restrict-group-access-by-ip-address).
- For GitLab self-managed, [at the instance level](../../../administration/reporting/ip_addr_restrictions.md).
Because SAML SSO for groups is a paid feature, your subscription expiring can result in a `404` error when you're signing in using SAML SSO on GitLab.com.
If all users are receiving a `404` when attempting to sign in using SAML, confirm
[there is an active subscription](../../../subscriptions/gitlab_com/index.md#view-your-gitlab-saas-subscription) being used in this SAML SSO namespace.

View File

@ -4,7 +4,7 @@ group: Code Creation
info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments
---
# Repository X-Ray **(ULTIMATE)**
# Repository X-Ray **(PREMIUM)**
Repository X-Ray enhances GitLab Duo Code Suggestions by providing additional context to improve the accuracy and relevance of code recommendations.

View File

@ -118,20 +118,20 @@ Some validation examples:
- Branches must start with `JIRA-`.
```plaintext
`^JIRA-`
^JIRA-
```
- Branches must end with `-JIRA`.
```plaintext
`-JIRA$`
-JIRA$
```
- Branches must be between `4` and `15` characters long,
accepting only lowercase letters, numbers and dashes.
```plaintext
`^[a-z0-9\\-]{4,15}$`
^[a-z0-9\\-]{4,15}$
```
## Prevent unintended consequences

View File

@ -156,7 +156,7 @@ To revoke an SSH key:
1. On the left sidebar, select your avatar.
1. Select **Edit profile**.
1. On the left sidebar, select (**{key}**) **SSH Keys**.
1. On the left sidebar, select **SSH Keys** (**{key}**).
1. Select **Revoke** next to the SSH key you want to delete.
## Related topics

View File

@ -2,6 +2,7 @@ PATH
remote: .
specs:
gitlab-secret_detection (0.1.0)
parallel (~> 1.22)
re2 (~> 2.4)
toml-rb (~> 2.2)
@ -47,7 +48,7 @@ GEM
mini_portile2 (2.8.5)
minitest (5.20.0)
mutex_m (0.2.0)
parallel (1.23.0)
parallel (1.24.0)
parser (3.2.2.4)
ast (~> 2.4.1)
racc
@ -136,6 +137,7 @@ PLATFORMS
ruby
DEPENDENCIES
benchmark-malloc (~> 0.2)
gitlab-secret_detection!
gitlab-styles (~> 11.0)
rspec (~> 3.0)

View File

@ -24,9 +24,11 @@ Gem::Specification.new do |spec|
spec.files = Dir['lib/**/*.rb']
spec.require_paths = ["lib"]
spec.add_runtime_dependency "parallel", "~> 1.22"
spec.add_runtime_dependency "re2", "~> 2.4"
spec.add_runtime_dependency "toml-rb", "~> 2.2"
spec.add_development_dependency "benchmark-malloc", "~> 0.2"
spec.add_development_dependency "gitlab-styles", "~> 11.0"
spec.add_development_dependency "rspec", "~> 3.0"
spec.add_development_dependency "rspec-benchmark", "~> 0.6.0"

View File

@ -4,6 +4,7 @@ require 'toml-rb'
require 're2'
require 'logger'
require 'timeout'
require 'parallel'
module Gitlab
module SecretDetection
@ -23,12 +24,16 @@ module Gitlab
DEFAULT_BLOB_TIMEOUT_SECS = 5
# file path where the secrets ruleset file is located
RULESET_FILE_PATH = File.expand_path('../../gitleaks.toml', __dir__)
# ignore the scanning of a line which ends with the following keyword
GITLEAKS_KEYWORD_IGNORE = 'gitleaks:allow'
# Maximum number of child processes to spawn per request
# ref: https://gitlab.com/gitlab-org/gitlab/-/issues/430160
MAX_PROCS_PER_REQUEST = 5
# Minimum cumulative size of the blobs required to spawn and
# run the scan within a new subprocess.
MIN_CHUNK_SIZE_PER_PROC_BYTES = 2_097_152 # 2MiB
# Initializes the instance with logger along with following operations:
# 1. Parse ruleset for the given +ruleset_path+(default: +RULESET_FILE_PATH+). Raises +RulesetParseError+
# incase the operation fails.
# in case the operation fails.
# 2. Extract keywords from the parsed ruleset to use it for matching keywords before regex operation.
# 3. Build and Compile rule regex patterns obtained from the ruleset. Raises +RulesetCompilationError+
# in case the compilation fails.
@ -46,13 +51,31 @@ module Gitlab
# +timeout+:: Number of seconds (accepts floating point for smaller time values) to limit the total scan duration
# +blob_timeout+:: Number of seconds (accepts floating point for smaller time values) to limit
# the scan duration on each blob
# +subprocess+:: If passed true, the scan is performed within subprocess instead of main process.
# To avoid over-consuming memory by running the scan on multiple large blobs within a single subprocess,
# it instead groups the blobs into smaller arrays, where each array contains blobs with a cumulative size of
# at least +MIN_CHUNK_SIZE_PER_PROC_BYTES+ bytes, and each group runs in a separate sub-process. Default value
# is true.
#
# NOTE:
# Running the scan in fork mode primarily focuses on reducing the memory consumption of the scan by
# offloading regex operations on large blobs to sub-processes. However, it does not assure the improvement
# in the overall latency of the scan, specifically in the case of smaller blob sizes, where the overhead of
# forking a new process adds to the overall latency of the scan instead. More reference on Subprocess-based
# execution is found here: https://gitlab.com/gitlab-org/gitlab/-/issues/430160.
#
# Returns an instance of SecretDetection::Response by following below structure:
# {
# status: One of the SecretDetection::Status values
# results: [SecretDetection::Finding]
# }
def secrets_scan(blobs, timeout: DEFAULT_SCAN_TIMEOUT_SECS, blob_timeout: DEFAULT_BLOB_TIMEOUT_SECS)
#
def secrets_scan(
blobs,
timeout: DEFAULT_SCAN_TIMEOUT_SECS,
blob_timeout: DEFAULT_BLOB_TIMEOUT_SECS,
subprocess: true
)
return SecretDetection::Response.new(SecretDetection::Status::INPUT_ERROR) unless validate_scan_input(blobs)
Timeout.timeout(timeout) do
@ -60,7 +83,11 @@ module Gitlab
next SecretDetection::Response.new(SecretDetection::Status::NOT_FOUND) if matched_blobs.empty?
secrets = find_secrets_bulk(matched_blobs, blob_timeout)
secrets = if subprocess
run_scan_within_subprocess(blobs, blob_timeout)
else
run_scan(blobs, blob_timeout)
end
scan_status = overall_scan_status(secrets)
@ -114,7 +141,7 @@ module Gitlab
secrets_keywords.flatten.compact.to_set
end
# returns only those blobs that contain atleast one of the keywords
# returns only those blobs that contain at least one of the keywords
# from the keywords list
def filter_by_keywords(blobs)
matched_blobs = []
@ -126,22 +153,43 @@ module Gitlab
matched_blobs.freeze
end
# finds secrets in the given list of blobs
def find_secrets_bulk(blobs, blob_timeout)
found_secrets = []
blobs.each do |blob|
found_secrets << Timeout.timeout(blob_timeout) { find_secrets(blob) }
def run_scan(blobs, blob_timeout)
found_secrets = blobs.flat_map do |blob|
Timeout.timeout(blob_timeout) do
find_secrets(blob)
end
rescue Timeout::Error => e
logger.error "Secret detection scan timed out on the blob(id:#{blob.id}): #{e}"
found_secrets << SecretDetection::Finding.new(
blob.id,
SecretDetection::Status::BLOB_TIMEOUT
)
logger.error "Secret Detection scan timed out on the blob(id:#{blob.id}): #{e}"
SecretDetection::Finding.new(blob.id,
SecretDetection::Status::BLOB_TIMEOUT)
end
found_secrets.flatten.freeze
found_secrets.freeze
end
# Runs the scan inside sub-processes to limit memory growth in the main
# process. Blobs are partitioned into groups of cumulative size of at least
# +MIN_CHUNK_SIZE_PER_PROC_BYTES+ (via +group_by_chunk_size+), and each group
# is scanned in its own sub-process via the Parallel gem.
def run_scan_within_subprocess(blobs, blob_timeout)
  sizes = blobs.map(&:size)
  index_groups = group_by_chunk_size(sizes)
  blob_groups = index_groups.map { |indexes| indexes.map { |index| blobs[index] } }

  found_secrets = Parallel.flat_map(
    blob_groups,
    in_processes: MAX_PROCS_PER_REQUEST,
    isolation: true # spawn a fresh sub-process per group; do not reuse workers
  ) do |blob_group|
    blob_group.flat_map do |blob|
      Timeout.timeout(blob_timeout) { find_secrets(blob) }
    rescue Timeout::Error => e
      logger.error "Secret Detection scan timed out on the blob(id:#{blob.id}): #{e}"
      SecretDetection::Finding.new(blob.id,
        SecretDetection::Status::BLOB_TIMEOUT)
    end
  end

  found_secrets.freeze
end
# finds secrets in the given blob with a timeout circuit breaker
@ -149,10 +197,8 @@ module Gitlab
secrets = []
blob.data.each_line.with_index do |line, index|
# ignore the line scan if it is suffixed with '#gitleaks:allow'
next if line.end_with?(GITLEAKS_KEYWORD_IGNORE)
patterns = pattern_matcher.match(line, exception: false)
next unless patterns.any?
line_number = index + 1
@ -172,7 +218,7 @@ module Gitlab
secrets
rescue StandardError => e
logger.error "Secret detection scan failed on the blob(id:#{blob.id}): #{e}"
logger.error "Secret Detection scan failed on the blob(id:#{blob.id}): #{e}"
SecretDetection::Finding.new(blob.id, SecretDetection::Status::SCAN_ERROR)
end
@ -201,6 +247,35 @@ module Gitlab
SecretDetection::Status::FOUND_WITH_ERRORS
end
end
# Groups blob indices into chunks by cumulative size.
#
# Accepts an array of blob sizes (in bytes) and returns an array-of-arrays
# structure where each element is a group of indices of the input array
# whose cumulative blob sizes reach at least +chunk_size+ bytes. A final
# group collects leftover indices whose sizes did not reach the threshold.
#
# +blob_size_arr+:: Array of Integer blob sizes, in bytes.
# +chunk_size+::    Cumulative size threshold per group, in bytes
#                   (default: +MIN_CHUNK_SIZE_PER_PROC_BYTES+). Passing it
#                   explicitly makes the grouping reusable and testable.
#
# Returns Array<Array<Integer>> of index groups.
def group_by_chunk_size(blob_size_arr, chunk_size = MIN_CHUNK_SIZE_PER_PROC_BYTES)
  cumulative_size = 0
  chunk_indexes = []
  chunk_idx_start = 0

  blob_size_arr.each_with_index do |size, index|
    cumulative_size += size
    next unless cumulative_size >= chunk_size

    chunk_indexes << (chunk_idx_start..index).to_a
    chunk_idx_start = index + 1
    cumulative_size = 0
  end

  # Collect the leftover tail that never reached the threshold. When the
  # remaining sizes sum to zero (all-empty blobs) the tail is dropped, which
  # preserves the original behavior. The exclusive range also covers the
  # single-index case, so no special branch is needed.
  if cumulative_size.positive? && chunk_idx_start < blob_size_arr.length
    chunk_indexes << (chunk_idx_start...blob_size_arr.length).to_a
  end

  chunk_indexes
end
end
end
end

View File

@ -16,28 +16,28 @@ RSpec.describe Gitlab::SecretDetection::Scan, feature_category: :secret_detectio
{
"id" => "gitlab_personal_access_token",
"description" => "GitLab Personal Access Token",
"regex" => "glpat-[0-9a-zA-Z_\\-]{20}",
"regex" => "\bglpat-[0-9a-zA-Z_-]{20}\b",
"tags" => %w[gitlab revocation_type],
"keywords" => ["glpat"]
},
{
"id" => "gitlab_pipeline_trigger_token",
"description" => "GitLab Pipeline Trigger Token",
"regex" => "glptt-[0-9a-zA-Z_\\-]{40}",
"regex" => "\bglptt-[0-9a-zA-Z_-]{40}\b",
"tags" => ["gitlab"],
"keywords" => ["glptt"]
},
{
"id" => "gitlab_runner_registration_token",
"description" => "GitLab Runner Registration Token",
"regex" => "GR1348941[0-9a-zA-Z_-]{20}",
"regex" => "\bGR1348941[0-9a-zA-Z_-]{20}\b",
"tags" => ["gitlab"],
"keywords" => ["GR1348941"]
},
{
"id" => "gitlab_feed_token_v2",
"description" => "GitLab Feed Token",
"regex" => "glft-[0-9a-zA-Z_-]{20}",
"regex" => "\bglft-[0-9a-zA-Z_-]{20}\b",
"tags" => ["gitlab"],
"keywords" => ["glft"]
}
@ -98,12 +98,16 @@ RSpec.describe Gitlab::SecretDetection::Scan, feature_category: :secret_detectio
new_blob(id: 111, data: "glpat-12312312312312312312"), # gitleaks:allow
new_blob(id: 222, data: "\n\nglptt-1231231231231231231212312312312312312312"), # gitleaks:allow
new_blob(id: 333, data: "data with no secret"),
new_blob(id: 444, data: "GR134894112312312312312312312\nglft-12312312312312312312") # gitleaks:allow
new_blob(id: 444,
data: "GR134894112312312312312312312\nglft-12312312312312312312"), # gitleaks:allow
new_blob(id: 555, data: "data with no secret"),
new_blob(id: 666, data: "data with no secret"),
new_blob(id: 777, data: "\nglptt-1231231231231231231212312312312312312312") # gitleaks:allow
]
end
it "matches different types of rules" do
expected_response = Gitlab::SecretDetection::Response.new(
let(:expected_response) do
Gitlab::SecretDetection::Response.new(
Gitlab::SecretDetection::Status::FOUND,
[
Gitlab::SecretDetection::Finding.new(
@ -133,11 +137,66 @@ RSpec.describe Gitlab::SecretDetection::Scan, feature_category: :secret_detectio
2,
ruleset['rules'][3]['id'],
ruleset['rules'][3]['description']
),
Gitlab::SecretDetection::Finding.new(
blobs[6].id,
Gitlab::SecretDetection::Status::FOUND,
2,
ruleset['rules'][1]['id'],
ruleset['rules'][1]['description']
)
]
)
end
expect(scan.secrets_scan(blobs)).to eq(expected_response)
it "matches multiple rules when running in main process" do
expect(scan.secrets_scan(blobs, subprocess: false)).to eq(expected_response)
end
context "in subprocess" do
let(:dummy_lines) do
10_000
end
let(:large_blobs) do
dummy_data = "\nrandom data" * dummy_lines
[
new_blob(id: 111, data: "glpat-12312312312312312312#{dummy_data}"), # gitleaks:allow
new_blob(id: 222, data: "\n\nglptt-1231231231231231231212312312312312312312#{dummy_data}"), # gitleaks:allow
new_blob(id: 333, data: "data with no secret#{dummy_data}"),
new_blob(id: 444,
data: "GR134894112312312312312312312\nglft-12312312312312312312#{dummy_data}"), # gitleaks:allow
new_blob(id: 555, data: "data with no secret#{dummy_data}"),
new_blob(id: 666, data: "data with no secret#{dummy_data}"),
new_blob(id: 777, data: "#{dummy_data}\nglptt-1231231231231231231212312312312312312312") # gitleaks:allow
]
end
it "matches multiple rules" do
expect(scan.secrets_scan(blobs, subprocess: true)).to eq(expected_response)
end
it "takes at least same time to run as running in main process" do
expect { scan.secrets_scan(large_blobs, subprocess: true) }.to perform_faster_than {
scan.secrets_scan(large_blobs,
subprocess: false)
}.once
end
it "allocates less memory than when running in main process" do
forked_stats = Benchmark::Malloc.new.run { scan.secrets_scan(large_blobs, subprocess: true) }
non_forked_stats = Benchmark::Malloc.new.run { scan.secrets_scan(large_blobs, subprocess: false) }
max_processes = Gitlab::SecretDetection::Scan::MAX_PROCS_PER_REQUEST
forked_memory = forked_stats.allocated.total_memory
non_forked_memory = non_forked_stats.allocated.total_memory
forked_obj_allocs = forked_stats.allocated.total_objects
non_forked_obj_allocs = non_forked_stats.allocated.total_objects
expect(non_forked_memory).to be >= forked_memory * max_processes
expect(non_forked_obj_allocs).to be >= forked_obj_allocs * max_processes
end
end
end

View File

@ -2,6 +2,8 @@
require 'gitlab/secret_detection'
require 'rspec-parameterized'
require 'rspec-benchmark'
require 'benchmark-malloc'
RSpec.configure do |config|
# Enable flags like --only-failures and --next-failure
@ -10,9 +12,17 @@ RSpec.configure do |config|
# Disable RSpec exposing methods globally on `Module` and `main`
config.disable_monkey_patching!
config.include RSpec::Benchmark::Matchers
Dir['./spec/support/**/*.rb'].each { |f| require f }
config.expect_with :rspec do |c|
c.syntax = :expect
end
# configure benchmark factors
RSpec::Benchmark.configure do |cfg|
# to avoid retention of allocated memory by the perf tests in the main process
cfg.run_in_subprocess = true
end
end

View File

@ -12039,6 +12039,18 @@ msgstr ""
msgid "CodeSuggestionsGAAlert| (Code Suggestions transitions to a paid feature on %{date}.)"
msgstr ""
msgid "CodeSuggestionsGAAlert|Contact Sales"
msgstr ""
msgid "CodeSuggestionsGAAlert|Continue accelerating your development with Code Suggestions. Starting February 15, 2024, a paid subscription will be required for access. Upgrade now to lock in the introductory price of $9 per user."
msgstr ""
msgid "CodeSuggestionsGAAlert|Dismiss Code Suggestions banner"
msgstr ""
msgid "CodeSuggestionsGAAlert|GitLab Duo Code Suggestions free access is ending soon"
msgstr ""
msgid "CodeSuggestionsGAAlert|Learn more"
msgstr ""

View File

@ -316,10 +316,10 @@ module QA
raise "Merge did not appear to be successful" unless merged?
end
def merge_when_pipeline_succeeds!
def set_to_auto_merge!
wait_until_ready_to_merge
click_element('merge-button', text: 'Merge when pipeline succeeds')
click_element('merge-button', text: 'Set to auto-merge')
end
def merged?

View File

@ -2,17 +2,11 @@
module QA
RSpec.describe 'Create', :runner, product_group: :code_review do
describe 'Merge requests' do
shared_examples 'merge when pipeline succeeds' do |repeat: 1|
describe 'Merge request' do
shared_examples 'set to auto-merge' do |repeat: 1|
let(:runner_name) { "qa-runner-#{Faker::Alphanumeric.alphanumeric(number: 8)}" }
let(:project) { create(:project, :with_readme, name: 'merge-when-pipeline-succeeds') }
let!(:runner) do
Resource::ProjectRunner.fabricate! do |runner|
runner.project = project
runner.name = runner_name
runner.tags = [runner_name]
end
end
let(:project) { create(:project, :with_readme, name: 'set-to-auto-merge') }
let!(:runner) { create(:project_runner, project: project, name: runner_name, tags: [runner_name]) }
let!(:ci_file) do
create(:commit, project: project, commit_message: 'Add .gitlab-ci.yml', actions: [
@ -48,7 +42,7 @@ module QA
QA::Runtime::Logger.info("Transient bug test - Trial #{i + 1}") if transient_test
# Create a merge request to trigger pipeline
merge_request = create(:project,
merge_request = create(:merge_request,
project: project,
description: Faker::Lorem.sentence,
target_new_branch: false,
@ -64,22 +58,26 @@ module QA
# possible components will be relevant, so it would be inefficient for this test to check for each of
# them. Instead, we fail on anything but the expected state.
#
# The following method allows us to handle and ignore states (as we find them) that users could safely ignore.
# The following method allows us to handle and ignore states (as we find them) that users could safely
# ignore.
mr.wait_until_ready_to_merge(transient_test: transient_test)
mr.retry_until(reload: true, message: 'Wait until ready to click MWPS') do
# Click the MWPS button if we can
break mr.merge_when_pipeline_succeeds! if mr.has_element?('merge-button', text: 'Merge when pipeline succeeds')
mr.retry_until(reload: true, message: 'Wait until ready to click Set to auto-merge') do
# Click the Set to auto-merge button if we can
break mr.set_to_auto_merge! if mr.has_element?('merge-button', text: 'Set to auto-merge')
# But fail if the button is missing because the pipeline is complete
raise "The pipeline already finished before we could click MWPS" if mr.wait_until { project.pipelines.first }[:status] == 'success'
raise "The pipeline already finished before we could set to auto-merge" if mr.wait_until do
project.pipelines.first
end[:status] == 'success'
# Otherwise, if this is not a transient test reload the page and retry
next false unless transient_test
end
aggregate_failures do
expect { mr.merged? }.to eventually_be_truthy.within(max_duration: 120), "Expected content 'The changes were merged' but it did not appear."
expect { mr.merged? }.to eventually_be_truthy.within(max_duration: 120),
"Expected content 'The changes were merged' but it did not appear."
expect(merge_request.reload!.merge_when_pipeline_succeeds).to be_truthy
expect(merge_request.state).to eq('merged')
expect(project.pipelines.last[:status]).to eq('success')
@ -90,11 +88,12 @@ module QA
end
context 'when merging once', testcase: 'https://gitlab.com/gitlab-org/gitlab/-/quality/test_cases/347686' do
it_behaves_like 'merge when pipeline succeeds'
it_behaves_like 'set to auto-merge'
end
context 'when merging several times', :transient, testcase: 'https://gitlab.com/gitlab-org/gitlab/-/quality/test_cases/347641' do
it_behaves_like 'merge when pipeline succeeds', repeat: Runtime::Env.transient_trials
context 'when merging several times', :transient,
testcase: 'https://gitlab.com/gitlab-org/gitlab/-/quality/test_cases/347641' do
it_behaves_like 'set to auto-merge', repeat: Runtime::Env.transient_trials
end
end
end

View File

@ -228,21 +228,30 @@ tests = [
expected: ['spec/finders/members_finder_spec.rb', 'spec/graphql/types/project_member_relation_enum_spec.rb']
},
{
explanation: 'New CI templates should run CI template tests: https://gitlab.com/gitlab-org/quality/engineering-productivity/master-broken-incidents/-/issues/4440#note_1675547256',
changed_file: 'lib/gitlab/ci/templates/Diffblue-Cover.gitlab-ci.yml',
expected: ['spec/lib/gitlab/ci/templates/templates_spec.rb', 'ee/spec/lib/ee/gitlab/ci/templates/templates_spec.rb']
},
{
explanation: 'Map FOSS rake tasks',
changed_file: 'lib/tasks/import.rake',
expected: ['spec/tasks/import_rake_spec.rb']
},
{
explanation: 'Map EE rake tasks',
changed_file: 'ee/lib/tasks/geo.rake',
expected: ['ee/spec/tasks/geo_rake_spec.rb']
},
{
explanation: 'Map controllers to request specs',
changed_file: 'app/controllers/admin/abuse_reports_controller.rb',
expected: ['spec/requests/admin/abuse_reports_controller_spec.rb']
},
{
explanation: 'Map EE controllers to controller and request specs',
changed_file: 'ee/app/controllers/users_controller.rb',

View File

@ -93,7 +93,8 @@ RSpec.describe 'Projects > Members > Manage members', :js, feature_category: :on
end
end
context 'uses ProjectMember valid_access_level_roles for the invite members modal options', :aggregate_failures do
context 'uses ProjectMember valid_access_level_roles for the invite members modal options', :aggregate_failures,
quarantine: 'https://gitlab.com/gitlab-org/gitlab/-/issues/436958' do
before do
sign_in(current_user)
@ -118,7 +119,7 @@ RSpec.describe 'Projects > Members > Manage members', :js, feature_category: :on
context 'when maintainer' do
let(:current_user) { project_maintainer }
it 'does not show the Owner option', quarantine: 'https://gitlab.com/gitlab-org/gitlab/-/issues/436958' do
it 'does not show the Owner option' do
within_modal do
toggle_listbox
expect_listbox_items(%w[Guest Reporter Developer Maintainer])

View File

@ -6,6 +6,10 @@ RSpec.describe WebpackHelper do
let(:source) { 'foo.js' }
let(:asset_path) { "/assets/webpack/#{source}" }
before do
allow(helper).to receive(:vite_enabled?).and_return(false)
end
describe '#prefetch_link_tag' do
it 'returns prefetch link tag' do
expect(helper.prefetch_link_tag(source)).to eq("<link rel=\"prefetch\" href=\"/#{source}\">")
@ -40,7 +44,6 @@ RSpec.describe WebpackHelper do
before do
stub_rails_env('development')
stub_feature_flags(vite: true)
allow(helper).to receive(:vite_javascript_tag).and_return('vite')
allow(helper).to receive(:vite_enabled?).and_return(true)

View File

@ -320,9 +320,6 @@ RSpec.configure do |config|
# Postgres is the primary data source, and ClickHouse only when enabled in certain cases.
stub_feature_flags(clickhouse_data_collection: false)
# This is going to be removed with https://gitlab.com/gitlab-org/gitlab/-/issues/431041
stub_feature_flags(vite: false)
else
unstub_all_feature_flags
end

View File

@ -53,7 +53,7 @@ RSpec.shared_examples 'Base action controller' do
skip: 'https://gitlab.com/gitlab-org/gitlab/-/issues/424334' do
before do
stub_rails_env('development')
stub_feature_flags(vite: true)
allow(ViteHelper).to receive(:vite_enabled?).and_return(true)
end
it 'adds vite csp' do
@ -65,7 +65,7 @@ RSpec.shared_examples 'Base action controller' do
context 'when vite disabled' do
before do
stub_feature_flags(vite: false)
allow(ViteHelper).to receive(:vite_enabled?).and_return(false)
end
it "doesn't add vite csp" do

View File

@ -118,3 +118,9 @@ mapping:
test: 'spec/graphql/types/project_member_relation_enum_spec.rb'
- source: 'app/finders/group_members_finder\.rb'
test: 'spec/graphql/types/group_member_relation_enum_spec.rb'
# See https://gitlab.com/gitlab-org/quality/engineering-productivity/master-broken-incidents/-/issues/4440#note_1675547256
- source: lib/gitlab/ci/templates/.*\.gitlab-ci\.yml
test:
- spec/lib/gitlab/ci/templates/templates_spec.rb
- ee/spec/lib/ee/gitlab/ci/templates/templates_spec.rb