Add latest changes from gitlab-org/gitlab@master
parent c0f79a5e14
commit e08403843b

@@ -46,7 +46,6 @@ Fips/SHA1:
- 'lib/gitlab/git.rb'
- 'lib/gitlab/git/branch.rb'
- 'lib/gitlab/git/tag.rb'
- 'qa/qa/specs/features/browser_ui/6_release/deploy_key/clone_using_deploy_key_spec.rb'
- 'spec/components/diffs/stats_component_spec.rb'
- 'spec/controllers/projects/blob_controller_spec.rb'
- 'spec/factories/ci/reports/security/finding_keys.rb'

@@ -1 +1 @@
-4a457ba8e5451a39cb5a0d67bd8c06fed023d8fc
+7b5e739ea26a4d484d2986595626caf7cf02b002

@@ -2,7 +2,7 @@
 - project = local_assigns.fetch(:project)
-= render Pajamas::CardComponent.new(card_options: { data: { testid: 'export-project-content' } }) do |c|
+= render Pajamas::CardComponent.new(card_options: { id: 'export-project', data: { testid: 'export-project-content' } }) do |c|
 - c.with_header do
 .gl-flex.gl-grow
 %h4.gl-text-base.gl-leading-24.gl-m-0= _('Export project')

@@ -8,8 +8,8 @@
 - c.with_body do
 = render Pajamas::AlertComponent.new(variant: :danger, alert_options: { class: 'gl-mb-5' }, dismissible: false) do |c|
 - c.with_body do
-- link_start = '<a href="%{url}" target="_blank" rel="noopener noreferrer">'.html_safe
-- docs_link_start = link_start % { url: help_page_path('user/project/settings/import_export.md') }
+- link_start = '<a href="%{url}">'.html_safe
+- docs_link_start = link_start % { url: edit_project_path(@project, anchor: 'export-project') }
 - link_end = '</a>'.html_safe
 = s_('ProjectMaintenance|To ensure that a full backup is available in case changes need to be restored, you should make an %{docs_link_start}export of the project%{docs_link_end}.').html_safe % { docs_link_start: docs_link_start, docs_link_end: link_end }

@@ -23,7 +23,7 @@ To create the archive file, the backup script:
 To back up the database, the `db` sub-task:
-1. Uses `pg_dump` to create an [SQL dump](https://www.postgresql.org/docs/14/backup-dump.html).
+1. Uses `pg_dump` to create an [SQL dump](https://www.postgresql.org/docs/16/backup-dump.html).
 1. Pipes the output of `pg_dump` through `gzip` and creates a compressed SQL file.
 1. Saves the file to the [backup staging directory](#backup-staging-directory).

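In shell terms, the `db` sub-task described in the hunk above amounts to a pipeline like the following. This is an illustrative sketch only: the database name, paths, and connection details are assumptions, and the real Rake task also handles multiple databases and error cases.

```shell
# Illustrative only: dump the GitLab database, compress it on the fly, and
# write it into the backup staging directory (paths are assumptions).
pg_dump --clean --if-exists gitlabhq_production \
  | gzip -c > /var/opt/gitlab/backups/db/database.sql.gz
```
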
@@ -119,7 +119,7 @@ backups
 └── uploads.tar.gz
 ```
-The `db` directory is used to back up the GitLab PostgreSQL database using `pg_dump` to create [an SQL dump](https://www.postgresql.org/docs/14/backup-dump.html). The output of `pg_dump` is piped through `gzip` in order to create a compressed SQL file.
+The `db` directory is used to back up the GitLab PostgreSQL database using `pg_dump` to create [an SQL dump](https://www.postgresql.org/docs/16/backup-dump.html). The output of `pg_dump` is piped through `gzip` in order to create a compressed SQL file.
 The `repositories` directory is used to back up Git repositories, as found in the GitLab database.

@@ -74,7 +74,7 @@ However, merge request diffs can also be configured to be offloaded to the file
 Gitaly Cluster's Praefect service uses a PostgreSQL database as a single source of truth to manage its Gitaly nodes.
-A common PostgreSQL utility, [`pg_dump`](https://www.postgresql.org/docs/current/app-pgdump.html), produces a backup file which can be used to restore a PostgreSQL database. The [backup command](#backup-command) uses this utility under the hood.
+A common PostgreSQL utility, [`pg_dump`](https://www.postgresql.org/docs/16/app-pgdump.html), produces a backup file which can be used to restore a PostgreSQL database. The [backup command](#backup-command) uses this utility under the hood.
 Unfortunately, the larger the database, the longer it takes `pg_dump` to execute. Depending on your situation, the duration becomes impractical at some point (days, for example). If your database is over 100 GB, `pg_dump`, and by extension the [backup command](#backup-command), is likely not usable. For more information, see [alternative backup strategies](#alternative-backup-strategies).

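Because the practicality of `pg_dump` depends on database size, it helps to measure the database before relying on the backup command. A minimal check from a GitLab database console (standard PostgreSQL functions, shown here as a sketch):

```shell
# Print the on-disk size of the connected GitLab database.
sudo gitlab-psql -c "SELECT pg_size_pretty(pg_database_size(current_database()));"
```
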
@@ -1490,7 +1490,7 @@ leaving the `PG*` names as is:
 sudo GITLAB_BACKUP_MAIN_PGHOST=192.168.1.10 GITLAB_BACKUP_CI_PGHOST=192.168.1.12 /opt/gitlab/bin/gitlab-backup create
 ```
-See the [PostgreSQL documentation](https://www.postgresql.org/docs/12/libpq-envars.html)
+See the [PostgreSQL documentation](https://www.postgresql.org/docs/16/libpq-envars.html)
 for more details on what these parameters do.
 #### `gitaly-backup` for repository backup and restore

@@ -155,7 +155,7 @@ In this diagram:
 - There is the **primary** site and the details of one **secondary** site.
 - Writes to the database can only be performed on the **primary** site. A **secondary** site receives database
-updates by using [PostgreSQL streaming replication](https://www.postgresql.org/docs/current/warm-standby.html#STREAMING-REPLICATION).
+updates by using [PostgreSQL streaming replication](https://www.postgresql.org/docs/16/warm-standby.html#STREAMING-REPLICATION).
 - If present, the [LDAP server](#ldap) should be configured to replicate for [Disaster Recovery](disaster_recovery/_index.md) scenarios.
 - A **secondary** site performs different type of synchronizations against the **primary** site, using a special
 authorization protected by JWT:

@@ -197,8 +197,8 @@ The following are required to run Geo:
 Geo sites. If using different operating system versions between Geo sites, you
 **must** [check OS locale data compatibility](replication/troubleshooting/common.md#check-os-locale-data-compatibility)
 across Geo sites to avoid silent corruption of database indexes.
-- [Supported PostgreSQL versions](https://handbook.gitlab.com/handbook/engineering/infrastructure-platforms/data-access/database-framework/postgresql-upgrade-cadence/) for your GitLab releases with [Streaming Replication](https://www.postgresql.org/docs/current/warm-standby.html#STREAMING-REPLICATION).
-- [PostgreSQL Logical replication](https://www.postgresql.org/docs/current/logical-replication.html) is not supported.
+- [Supported PostgreSQL versions](https://handbook.gitlab.com/handbook/engineering/infrastructure-platforms/data-access/database-framework/postgresql-upgrade-cadence/) for your GitLab releases with [Streaming Replication](https://www.postgresql.org/docs/16/warm-standby.html#STREAMING-REPLICATION).
+- [PostgreSQL Logical replication](https://www.postgresql.org/docs/16/logical-replication.html) is not supported.
 - All sites must run [the same PostgreSQL versions](setup/database.md#postgresql-replication).
 - Git 2.9 or later
 - Git-lfs 2.4.2 or later on the user side when using LFS

@@ -132,7 +132,7 @@ where some queries never complete due to being canceled on every replication.
 These long-running queries are
 [planned to be removed in the future](https://gitlab.com/gitlab-org/gitlab/-/issues/34269),
 but as a workaround, we recommend enabling
-[`hot_standby_feedback`](https://www.postgresql.org/docs/10/hot-standby.html#HOT-STANDBY-CONFLICT).
+[`hot_standby_feedback`](https://www.postgresql.org/docs/16/hot-standby.html#HOT-STANDBY-CONFLICT).
 This increases the likelihood of bloat on the **primary** site as it prevents
 `VACUUM` from removing recently-dead rows. However, it has been used
 successfully in production on GitLab.com.

@@ -221,7 +221,7 @@ The workaround is to increase the memory available to the secondary site's Postg
 If the output of `sudo gitlab-rake geo:status` shows that `Database replication lag` remains significantly high over time, the primary node in database replication can be checked to determine the status of lag for
 different parts of the database replication process. These values are known as `write_lag`, `flush_lag`, and `replay_lag`. For more information, see
-[the official PostgreSQL documentation](https://www.postgresql.org/docs/current/monitoring-stats.html#MONITORING-PG-STAT-REPLICATION-VIEW).
+[the official PostgreSQL documentation](https://www.postgresql.org/docs/16/monitoring-stats.html#MONITORING-PG-STAT-REPLICATION-VIEW).
 Run the following command from the primary Geo node's database to provide relevant output:

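The command itself is outside this hunk. For illustration, the `write_lag`, `flush_lag`, and `replay_lag` values come from the standard `pg_stat_replication` view on the primary, so a query along these lines surfaces them (a sketch, not necessarily the exact command the page shows):

```shell
sudo gitlab-psql -c "SELECT client_addr, state, write_lag, flush_lag, replay_lag FROM pg_stat_replication;"
```
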
@@ -61,7 +61,7 @@ recover. See below for more details.
 The following guide assumes that:
 - You are using the Linux package (so are using PostgreSQL 12 or later),
-which includes the [`pg_basebackup` tool](https://www.postgresql.org/docs/12/app-pgbasebackup.html).
+which includes the [`pg_basebackup` tool](https://www.postgresql.org/docs/16/app-pgbasebackup.html).
 - You have a **primary** site already set up (the GitLab server you are
 replicating from), running PostgreSQL (or equivalent version) managed by your Linux package installation, and
 you have a new **secondary** site set up with the same

@@ -132,7 +132,7 @@ There is an [issue where support is being discussed](https://gitlab.com/gitlab-o
 gitlab_rails['db_password'] = '<your_db_password_here>'
 ```
-1. Define a password for the database [replication user](https://www.postgresql.org/docs/current/warm-standby.html#STREAMING-REPLICATION).
+1. Define a password for the database [replication user](https://www.postgresql.org/docs/16/warm-standby.html#STREAMING-REPLICATION).
 Use the username defined in `/etc/gitlab/gitlab.rb` under the `postgresql['sql_replication_user']`
 setting. The default value is `gitlab_replicator`. If you changed the username to something else, adapt

@@ -216,7 +216,7 @@ There is an [issue where support is being discussed](https://gitlab.com/gitlab-o
 `postgresql['md5_auth_cidr_addresses']` and `postgresql['listen_address']`.
 The `listen_address` option opens PostgreSQL up to network connections with the interface
-corresponding to the given address. See [the PostgreSQL documentation](https://www.postgresql.org/docs/12/runtime-config-connection.html)
+corresponding to the given address. See [the PostgreSQL documentation](https://www.postgresql.org/docs/16/runtime-config-connection.html)
 for more details.
 {{< alert type="note" >}}

@@ -272,7 +272,7 @@ There is an [issue where support is being discussed](https://gitlab.com/gitlab-o
 ```
 You may also want to edit the `wal_keep_segments` and `max_wal_senders` to match your
-database replication requirements. Consult the [PostgreSQL - Replication documentation](https://www.postgresql.org/docs/12/runtime-config-replication.html)
+database replication requirements. Consult the [PostgreSQL - Replication documentation](https://www.postgresql.org/docs/16/runtime-config-replication.html)
 for more information.
 1. Save the file and reconfigure GitLab for the database listen changes and

@@ -506,7 +506,7 @@ data before running `pg_basebackup`.
 sudo -i
 ```
-1. Choose a [database-friendly name](https://www.postgresql.org/docs/13/warm-standby.html#STREAMING-REPLICATION-SLOTS-MANIPULATION)
+1. Choose a [database-friendly name](https://www.postgresql.org/docs/16/warm-standby.html#STREAMING-REPLICATION-SLOTS-MANIPULATION)
 to use for your **secondary** site to
 use as the replication slot name. For example, if your domain is
 `secondary.geo.example.com`, use `secondary_example` as the slot

@@ -568,7 +568,7 @@ data before running `pg_basebackup`.
 (for example, you know the network path is secure, or you are using a site-to-site
 VPN). It is **not** safe over the public Internet!
 - You can read more details about each `sslmode` in the
-[PostgreSQL documentation](https://www.postgresql.org/docs/12/libpq-ssl.html#LIBPQ-SSL-PROTECTION).
+[PostgreSQL documentation](https://www.postgresql.org/docs/16/libpq-ssl.html#LIBPQ-SSL-PROTECTION).
 The instructions above are carefully written to ensure protection against
 both passive eavesdroppers and active "man-in-the-middle" attackers.
 - If you're repurposing an old site into a Geo **secondary** site, you must

@@ -598,7 +598,7 @@ see [the relevant documentation](../../postgresql/replication_and_failover.md).
 ### Changing the replication password
-To change the password for the [replication user](https://www.postgresql.org/docs/current/warm-standby.html#STREAMING-REPLICATION)
+To change the password for the [replication user](https://www.postgresql.org/docs/16/warm-standby.html#STREAMING-REPLICATION)
 when using PostgreSQL instances managed by a Linux package installation:
 On the GitLab Geo **primary** site:

@@ -73,7 +73,7 @@ We cannot guarantee compatibility with other external databases.
 To set up an external database, you can either:
-- Set up [streaming replication](https://www.postgresql.org/docs/12/warm-standby.html#STREAMING-REPLICATION-SLOTS) yourself (for example Amazon RDS, or bare metal not managed by the Linux package).
+- Set up [streaming replication](https://www.postgresql.org/docs/16/warm-standby.html#STREAMING-REPLICATION-SLOTS) yourself (for example Amazon RDS, or bare metal not managed by the Linux package).
 - Manually perform the configuration of your Linux package installations as follows.
 #### Leverage your cloud provider's tools to replicate the primary database

@@ -29,7 +29,7 @@ Prerequisites:
 Prerequisites:
 - You use PostgreSQL 12 or later,
-which includes the [`pg_basebackup` tool](https://www.postgresql.org/docs/12/app-pgbasebackup.html).
+which includes the [`pg_basebackup` tool](https://www.postgresql.org/docs/16/app-pgbasebackup.html).
 ### Configure the primary site

@@ -67,7 +67,7 @@ Prerequisites:
 To set up an external database, you can either:
-- Set up [streaming replication](https://www.postgresql.org/docs/12/warm-standby.html#STREAMING-REPLICATION-SLOTS) yourself (for example Amazon RDS, or bare metal not managed by the Linux package).
+- Set up [streaming replication](https://www.postgresql.org/docs/16/warm-standby.html#STREAMING-REPLICATION-SLOTS) yourself (for example Amazon RDS, or bare metal not managed by the Linux package).
 - Manually perform the configuration of your Linux package installations as follows.
 #### Leverage your cloud provider's tools to replicate the primary database

@@ -29,7 +29,7 @@ Prerequisites:
 Prerequisites:
 - You use PostgreSQL 12 or later,
-which includes the [`pg_basebackup` tool](https://www.postgresql.org/docs/12/app-pgbasebackup.html).
+which includes the [`pg_basebackup` tool](https://www.postgresql.org/docs/16/app-pgbasebackup.html).
 ### Configure the primary site

@@ -96,7 +96,7 @@ Prerequisites:
 gitlab_rails['db_password'] = '<your_db_password_here>'
 ```
-1. Define a password for the database [replication user](https://www.postgresql.org/docs/current/warm-standby.html#STREAMING-REPLICATION).
+1. Define a password for the database [replication user](https://www.postgresql.org/docs/16/warm-standby.html#STREAMING-REPLICATION).
 Use the username defined in `/etc/gitlab/gitlab.rb` under the `postgresql['sql_replication_user']`
 setting. The default value is `gitlab_replicator`.

@@ -152,7 +152,7 @@ You can restore a backup only to **the exact same version and type** (Community
 Backups of our production databases are taken hourly through
 [disk snapshots](https://cloud.google.com/compute/docs/disks/snapshots) and every
 24 hours through [wal-g base backups](https://github.com/wal-g/wal-g), with
-[continuous archiving or WAL transaction log files](https://www.postgresql.org/docs/current/continuous-archiving.html)
+[continuous archiving or WAL transaction log files](https://www.postgresql.org/docs/16/continuous-archiving.html)
 streamed into GCS for point-in-time recovery.
 All backups are encrypted. After 90 days, backups are deleted.

@@ -352,7 +352,7 @@ across NFS. The GitLab support team is not able to assist on performance issues
 this configuration.
 Additionally, this configuration is specifically warned against in the
-[PostgreSQL Documentation](https://www.postgresql.org/docs/current/creating-cluster.html#CREATING-CLUSTER-NFS):
+[PostgreSQL Documentation](https://www.postgresql.org/docs/16/creating-cluster.html#CREATING-CLUSTER-NFS):
 >PostgreSQL does nothing special for NFS file systems, meaning it assumes NFS behaves exactly like
 >locally-connected drives. If the client or server NFS implementation does not provide standard file

@@ -72,14 +72,6 @@ These versions of Oracle Linux are supported.
 | Oracle Linux 8 | GitLab CE / GitLab EE 12.8.1 | `x86_64` | [Use AlmaLinux installation documentation](https://about.gitlab.com/install/#almalinux) | July 2029 | [Oracle Linux details](https://www.oracle.com/a/ocom/docs/elsp-lifetime-069338.pdf) |
 | Oracle Linux 9 | GitLab CE / GitLab EE 16.2.0 | `x86_64` | [Use AlmaLinux installation documentation](https://about.gitlab.com/install/#almalinux) | June 2032 | [Oracle Linux details](https://www.oracle.com/a/ocom/docs/elsp-lifetime-069338.pdf) |
-## Raspberry Pi OS
-These versions of Raspberry Pi OS are supported.
-| Operating system version | First supported GitLab version | Architecture | Installation documentation | Operating system EOL | Details |
-|:-------------------------------------------------------------|:-------------------------------|:-------------|:---------------------------------------------------------------------------------------------|:---------------------|:--------|
-| Raspberry Pi OS (Bullseye) | GitLab CE 15.5.0 | `armhf` | [Raspberry Pi installation documentation](https://about.gitlab.com/install/#raspberry-pi-os) | 2026 | [Raspberry Pi details](https://www.raspberrypi.com/news/raspberry-pi-os-debian-bullseye/) |
 ## Red Hat Enterprise Linux
 These versions of Red Hat Enterprise Linux are supported.

@@ -138,7 +130,14 @@ above.
 GitLab provides Linux packages for operating systems only until their
 end-of-life (EOL) date. After the EOL date, GitLab stops releasing
-official packages. The list of deprecated operating systems and the final GitLab
+official packages.
+However, sometimes, we don't deprecate an OS even after it's EOL,
+because we can't provide packages for a newer version to the users yet.
+The most common reason for this is that our package repository provider, PackageCloud,
+not supporting newer versions, and hence we can't upload packages to it.
+The list of deprecated operating systems and the final GitLab
 release for them can be found below:
 | OS version | End of life | Last supported GitLab version |

@@ -171,11 +170,12 @@ release for them can be found below:
 | Ubuntu 16.04 | [April 2021](https://ubuntu.com/info/release-end-of-life) | [GitLab CE](https://packages.gitlab.com/app/gitlab/gitlab-ce/search?q=gitlab-ce_13.12&dist=ubuntu%2Fxenial) / [GitLab EE](https://packages.gitlab.com/app/gitlab/gitlab-ee/search?q=gitlab-ee_13.12&dist=ubuntu%2Fxenial) 13.12 |
 | Ubuntu 18.04 | [June 2023](https://ubuntu.com/info/release-end-of-life) | [GitLab CE](https://packages.gitlab.com/app/gitlab/gitlab-ce/search?q=gitlab-ce_16.11&dist=ubuntu%2Fbionic) / [GitLab EE](https://packages.gitlab.com/app/gitlab/gitlab-ee/search?q=ggitlab-ee_16.11&dist=ubuntu%2Fbionic) 16.11 |
-{{< alert type="note" >}}
+### Raspberry Pi OS (32-bit - Raspbian)
-An exception to this deprecation policy is when we are unable to provide
-packages for the next version of the operating system. The most common reason
-for this our package repository provider, PackageCloud, not supporting newer
-versions and hence we can't upload packages to it.
+GitLab dropped support for Raspberry Pi OS (32 bit - Raspbian) with GitLab
+17.11 being the last version available for the 32-bit platform. Starting with
+GitLab 18.0, users should move to Raspberry Pi OS (64 bit) and use the
+[Debian arm64 package](https://about.gitlab.com/install/#debian).
-{{< /alert >}}
+For information on backing up data on a 32-bit OS and restoring it to a 64-bit
+OS, see [Upgrading operating systems for PostgreSQL](../postgresql/upgrading_os.md).

@@ -39,7 +39,7 @@ in the Helm Charts documentation.
 ## Enhancements
-The metadata database architecture supports performance improvements, bug fixes, and new features
+The metadata database architecture supports performance improvements, bug fixes, and new features
 that are not available with the object storage metadata architecture. These enhancements include:
 - Automatic [online garbage collection](../../user/packages/container_registry/delete_container_registry_images.md#garbage-collection)

@@ -52,8 +52,8 @@ that are not available with the object storage metadata architecture. These enha
 - Tracking and displaying tag publish timestamps (see [issue 290949](https://gitlab.com/gitlab-org/gitlab/-/issues/290949))
 - Sorting repository tags by additional attributes beyond name
-Due to technical constraints of the object storage metadata architecture, new features are only
-implemented for the metadata database version. Non-security bug fixes might be limited to the
+Due to technical constraints of the object storage metadata architecture, new features are only
+implemented for the metadata database version. Non-security bug fixes might be limited to the
 metadata database version.
 ## Known limitations

@@ -118,7 +118,7 @@ To enable the database:
 'user' => '<registry_database_username_placeholder_change_me>',
 'password' => '<registry_database_placeholder_change_me>',
 'dbname' => '<registry_database_name_placeholder_change_me>',
-'sslmode' => 'require', # See the PostgreSQL documentation for additional information https://www.postgresql.org/docs/current/libpq-ssl.html.
+'sslmode' => 'require', # See the PostgreSQL documentation for additional information https://www.postgresql.org/docs/16/libpq-ssl.html.
 'sslcert' => '</path/to/cert.pem>',
 'sslkey' => '</path/to/private.key>',
 'sslrootcert' => '</path/to/ca.pem>'

@@ -137,7 +137,7 @@ To enable the database:
 'user' => '<registry_database_username_placeholder_change_me>',
 'password' => '<registry_database_placeholder_change_me>',
 'dbname' => '<registry_database_name_placeholder_change_me>',
-'sslmode' => 'require', # See the PostgreSQL documentation for additional information https://www.postgresql.org/docs/current/libpq-ssl.html.
+'sslmode' => 'require', # See the PostgreSQL documentation for additional information https://www.postgresql.org/docs/16/libpq-ssl.html.
 'sslcert' => '</path/to/cert.pem>',
 'sslkey' => '</path/to/private.key>',
 'sslrootcert' => '</path/to/ca.pem>'

@@ -189,7 +189,7 @@ and your registry contains a relatively small amount of data.
 'user' => '<registry_database_username_placeholder_change_me>',
 'password' => '<registry_database_placeholder_change_me>',
 'dbname' => '<registry_database_name_placeholder_change_me>',
-'sslmode' => 'require', # See the PostgreSQL documentation for additional information https://www.postgresql.org/docs/current/libpq-ssl.html.
+'sslmode' => 'require', # See the PostgreSQL documentation for additional information https://www.postgresql.org/docs/16/libpq-ssl.html.
 'sslcert' => '</path/to/cert.pem>',
 'sslkey' => '</path/to/private.key>',
 'sslrootcert' => '</path/to/ca.pem>'

@@ -237,7 +237,7 @@ and your registry contains a relatively small amount of data.
 'user' => '<registry_database_username_placeholder_change_me>',
 'password' => '<registry_database_placeholder_change_me>',
 'dbname' => '<registry_database_name_placeholder_change_me>',
-'sslmode' => 'require', # See the PostgreSQL documentation for additional information https://www.postgresql.org/docs/current/libpq-ssl.html.
+'sslmode' => 'require', # See the PostgreSQL documentation for additional information https://www.postgresql.org/docs/16/libpq-ssl.html.
 'sslcert' => '</path/to/cert.pem>',
 'sslkey' => '</path/to/private.key>',
 'sslrootcert' => '</path/to/ca.pem>'

@@ -298,7 +298,7 @@ If you must halt the operation, you have to restart this step.
 'user' => '<registry_database_username_placeholder_change_me>',
 'password' => '<registry_database_placeholder_change_me>',
 'dbname' => '<registry_database_name_placeholder_change_me>',
-'sslmode' => 'require', # See the PostgreSQL documentation for additional information https://www.postgresql.org/docs/current/libpq-ssl.html.
+'sslmode' => 'require', # See the PostgreSQL documentation for additional information https://www.postgresql.org/docs/16/libpq-ssl.html.
 'sslcert' => '</path/to/cert.pem>',
 'sslkey' => '</path/to/private.key>',
 'sslrootcert' => '</path/to/ca.pem>'

@@ -367,7 +367,7 @@ Allow enough time for downtime while step two is being executed.
 'user' => '<registry_database_username_placeholder_change_me>',
 'password' => '<registry_database_placeholder_change_me>',
 'dbname' => '<registry_database_name_placeholder_change_me>',
-'sslmode' => 'require', # See the PostgreSQL documentation for additional information https://www.postgresql.org/docs/current/libpq-ssl.html.
+'sslmode' => 'require', # See the PostgreSQL documentation for additional information https://www.postgresql.org/docs/16/libpq-ssl.html.
 'sslcert' => '</path/to/cert.pem>',
 'sslkey' => '</path/to/private.key>',
 'sslrootcert' => '</path/to/ca.pem>'

@@ -756,7 +756,7 @@ You must truncate the table manually on your PostgreSQL instance:
 'user' => '<registry_database_username_placeholder_change_me>',
 'password' => '<registry_database_placeholder_change_me>',
 'dbname' => '<registry_database_name_placeholder_change_me>',
-'sslmode' => 'require', # See the PostgreSQL documentation for additional information https://www.postgresql.org/docs/current/libpq-ssl.html.
+'sslmode' => 'require', # See the PostgreSQL documentation for additional information https://www.postgresql.org/docs/16/libpq-ssl.html.
 'sslcert' => '</path/to/cert.pem>',
 'sslkey' => '</path/to/private.key>',
 'sslrootcert' => '</path/to/ca.pem>'

@@ -12,13 +12,13 @@ External PostgreSQL database systems have different logging options for monitori
 You should enable the following logging settings:
 - `log_statement=ddl`: log changes of database model definition (DDL), such as `CREATE`, `ALTER` or `DROP` of objects. This helps track recent model changes that could be causing performance issues and identify security breaches and human errors.
-- `log_lock_waits=on`: log of processes holding [locks](https://www.postgresql.org/docs/current/explicit-locking.html) for long periods, a common cause of poor query performance.
+- `log_lock_waits=on`: log of processes holding [locks](https://www.postgresql.org/docs/16/explicit-locking.html) for long periods, a common cause of poor query performance.
 - `log_temp_files=0`: log usage of intense and unusual temporary files that can indicate poor query performance.
 - `log_autovacuum_min_duration=0`: log all autovacuum executions. Autovacuum is a key component for overall PostgreSQL engine performance. Essential for troubleshooting and tuning if dead tuples are not being removed from tables.
 - `log_min_duration_statement=1000`: log slow queries (slower than 1 second).
 The full description of the above parameter settings can be found in
-[PostgreSQL error reporting and logging documentation](https://www.postgresql.org/docs/current/runtime-config-logging.html#RUNTIME-CONFIG-LOGGING-WHAT).
+[PostgreSQL error reporting and logging documentation](https://www.postgresql.org/docs/16/runtime-config-logging.html#RUNTIME-CONFIG-LOGGING-WHAT).

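On a self-managed external instance these map to standard PostgreSQL parameters; managed services expose them through parameter groups instead. A sketch using `ALTER SYSTEM` (the parameter names are standard PostgreSQL, the connection details are assumptions):

```shell
# Illustrative only: apply the recommended logging settings and reload.
psql -U postgres -c "ALTER SYSTEM SET log_statement = 'ddl';"
psql -U postgres -c "ALTER SYSTEM SET log_lock_waits = on;"
psql -U postgres -c "ALTER SYSTEM SET log_temp_files = 0;"
psql -U postgres -c "ALTER SYSTEM SET log_autovacuum_min_duration = 0;"
psql -U postgres -c "ALTER SYSTEM SET log_min_duration_statement = 1000;"
psql -U postgres -c "SELECT pg_reload_conf();"
```
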
 ## Amazon RDS

@@ -17,7 +17,7 @@ kinds of upgrades exist for PostgreSQL databases:
 - **Major version upgrades**: These change the internal storage format and the database
 catalog. As a result, object statistics used by the query optimizer
-[are not transferred to the new version](https://www.postgresql.org/docs/current/pgupgrade.html)
+[are not transferred to the new version](https://www.postgresql.org/docs/16/pgupgrade.html)
 and must be rebuilt with `ANALYZE`.
 Not following the documented major version upgrade process often results in

@@ -32,16 +32,16 @@ Read carefully the major version upgrade steps of your external database platfor
 - [Amazon RDS for PostgreSQL](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_UpgradeDBInstance.PostgreSQL.html#USER_UpgradeDBInstance.PostgreSQL.MajorVersion.Process)
 - [Azure Database for PostgreSQL Flexible Server](https://learn.microsoft.com/en-us/azure/postgresql/flexible-server/concepts-major-version-upgrade)
 - [Google Cloud SQL for PostgreSQL](https://cloud.google.com/sql/docs/postgres/upgrade-major-db-version-inplace)
-- [PostgreSQL community `pg_upgrade`](https://www.postgresql.org/docs/current/pgupgrade.html)
+- [PostgreSQL community `pg_upgrade`](https://www.postgresql.org/docs/16/pgupgrade.html)
 ## Always `ANALYZE` your database after a major version upgrade
-It is mandatory to run the [`ANALYZE` operation](https://www.postgresql.org/docs/current/sql-analyze.html)
+It is mandatory to run the [`ANALYZE` operation](https://www.postgresql.org/docs/16/sql-analyze.html)
 to refresh the `pg_statistic` table after a major version upgrade, because optimizer statistics
-[are not transferred by `pg_upgrade`](https://www.postgresql.org/docs/current/pgupgrade.html).
+[are not transferred by `pg_upgrade`](https://www.postgresql.org/docs/16/pgupgrade.html).
 This should be done for all databases on the upgraded PostgreSQL service/instance/cluster.
 To speed up the `ANALYZE` operation, use the
-[`vacuumdb` utility](https://www.postgresql.org/docs/current/app-vacuumdb.html),
+[`vacuumdb` utility](https://www.postgresql.org/docs/16/app-vacuumdb.html),
 with `--analyze-only --jobs=njobs` to execute the `ANALYZE` command in parallel by
 running `njobs` commands simultaneously.

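As a concrete sketch of that invocation (host, user, and job count are placeholders to adapt, not values prescribed by the page):

```shell
# Rebuild optimizer statistics for all databases on the upgraded instance,
# running four ANALYZE jobs in parallel.
vacuumdb --all --analyze-only --jobs=4 --host=<database_host> --username=<superuser>
```
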
@@ -46,9 +46,9 @@ To move databases from one instance to another:
 In rare occasions, you might notice database performance issues after you perform
 a `pg_dump` and restore. This can happen because `pg_dump` does not contain the statistics
-[used by the optimizer to make query planning decisions](https://www.postgresql.org/docs/14/app-pgdump.html).
+[used by the optimizer to make query planning decisions](https://www.postgresql.org/docs/16/app-pgdump.html).
 If performance degrades after a restore, fix the problem by finding the problematic query,
-then running ANALYZE on the tables used by the query.
+then running ANALYZE on the tables used by the query.
 {{< /alert >}}

@@ -802,7 +802,7 @@ If a replica is not in sync, `gitlab-ctl patroni members` indicates the volume
 of missing data, and the `lag` fields indicate the elapsed time.
 Read more about the data returned by the leader
-[in the PostgreSQL documentation](https://www.postgresql.org/docs/12/monitoring-stats.html#PG-STAT-REPLICATION-VIEW),
+[in the PostgreSQL documentation](https://www.postgresql.org/docs/16/monitoring-stats.html#PG-STAT-REPLICATION-VIEW),
 including other values for the `state` field.
 The replicas should return:

@@ -826,7 +826,7 @@ conninfo | user=gitlab_replicator host=172.18.0.113 port=5432 appli
 ```
 Read more about the data returned by the replica
-[in the PostgreSQL documentation](https://www.postgresql.org/docs/12/monitoring-stats.html#PG-STAT-WAL-RECEIVER-VIEW).
+[in the PostgreSQL documentation](https://www.postgresql.org/docs/16/monitoring-stats.html#PG-STAT-WAL-RECEIVER-VIEW).
 ### Selecting the appropriate Patroni replication method

@@ -841,7 +841,7 @@ Replication is not a backup strategy! There is no replacement for a well-conside
 {{< /alert >}}
-Linux package installations default [`synchronous_commit`](https://www.postgresql.org/docs/11/runtime-config-wal.html#GUC-SYNCHRONOUS-COMMIT) to `on`.
+Linux package installations default [`synchronous_commit`](https://www.postgresql.org/docs/16/runtime-config-wal.html#GUC-SYNCHRONOUS-COMMIT) to `on`.
 ```ruby
 postgresql['synchronous_commit'] = 'on'

@@ -1104,9 +1104,9 @@ cluster.
 #### Preflight check
-We rely on PostgreSQL [logical replication](https://www.postgresql.org/docs/current/logical-replication.html)
+We rely on PostgreSQL [logical replication](https://www.postgresql.org/docs/16/logical-replication.html)
 to support near-zero-downtime upgrades of Patroni clusters. The of
-[logical replication requirements](https://www.postgresql.org/docs/current/logical-replication-restrictions.html)
+[logical replication requirements](https://www.postgresql.org/docs/16/logical-replication-restrictions.html)
 must be met. In particular, `wal_level` must be `logical`. To check the `wal_level`,
 run the following command with `gitlab-psql` on any node of the existing cluster:

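The command itself falls outside this hunk; checking the setting is a one-liner (sketch):

```shell
sudo gitlab-psql -c "SHOW wal_level;"
# Logical replication requires the output to be: logical
```
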
@@ -1204,7 +1204,7 @@ CREATE SUBSCRIPTION patroni_upgrade
 In this statement, `EXISTING_CLUSTER_LEADER` is the host address of the leader node
 of the existing cluster. You can also use
-[other parameters](https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-PARAMKEYWORDS)
+[other parameters](https://www.postgresql.org/docs/16/libpq-connect.html#LIBPQ-PARAMKEYWORDS)
 to change the connection string. For example, you can pass the authentication password.
 To check the status of replication, run these queries:

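The queries are not included in this hunk. A typical status check looks at the subscription on the new cluster and at the replication sender on the existing leader, using standard PostgreSQL views (illustrative sketch):

```shell
# On the new cluster: confirm the subscription is attached and receiving WAL.
sudo gitlab-psql -c "SELECT subname, received_lsn, latest_end_lsn FROM pg_stat_subscription;"

# On the existing leader: confirm a logical replication sender is active.
sudo gitlab-psql -c "SELECT application_name, state, sent_lsn, replay_lsn FROM pg_stat_replication;"
```
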
@@ -90,7 +90,7 @@ Backup and restore recreates the entire database, including the indexes.
 ## Rebuild all indexes
-[Rebuild all indexes](https://www.postgresql.org/docs/14/sql-reindex.html).
+[Rebuild all indexes](https://www.postgresql.org/docs/16/sql-reindex.html).
 1. Take a scheduled downtime window. In all nodes, stop unnecessary GitLab services:

@@ -559,9 +559,9 @@ The compressed folder contains a large number of files.
 ### Enable optional query statistics data
 The `gitlab:db:sos` Rake task can also gather data for troubleshooting slow queries using the
-[`pg_stat_statements` extension](https://www.postgresql.org/docs/current/pgstatstatements.html).
+[`pg_stat_statements` extension](https://www.postgresql.org/docs/16/pgstatstatements.html).
-Enabling this extension is optional, and requires restarting PostgreSQL and GitLab. This data is
+Enabling this extension is optional, and requires restarting PostgreSQL and GitLab. This data is
 likely required for troubleshooting GitLab performance issues caused by slow database queries.
 Prerequisites:

@@ -1325,7 +1325,7 @@ Praefect's database is recommended if aiming for full High Availability.
 There are many third-party solutions for PostgreSQL HA. The solution selected must have the following to work with Praefect:
 - A static IP for all connections that doesn't change on failover.
-- [`LISTEN`](https://www.postgresql.org/docs/12/sql-listen.html) SQL functionality must be supported.
+- [`LISTEN`](https://www.postgresql.org/docs/16/sql-listen.html) SQL functionality must be supported.
 {{< alert type="note" >}}

@@ -1333,7 +1333,7 @@ Praefect's database is recommended if aiming for full High Availability.
 There are many third-party solutions for PostgreSQL HA. The solution selected must have the following to work with Praefect:
 - A static IP for all connections that doesn't change on failover.
-- [`LISTEN`](https://www.postgresql.org/docs/12/sql-listen.html) SQL functionality must be supported.
+- [`LISTEN`](https://www.postgresql.org/docs/16/sql-listen.html) SQL functionality must be supported.
 {{< alert type="note" >}}

@@ -1160,7 +1160,7 @@ Praefect's database is recommended if aiming for full High Availability.
 There are many third-party solutions for PostgreSQL HA. The solution selected must have the following to work with Praefect:
 - A static IP for all connections that doesn't change on failover.
-- [`LISTEN`](https://www.postgresql.org/docs/12/sql-listen.html) SQL functionality must be supported.
+- [`LISTEN`](https://www.postgresql.org/docs/16/sql-listen.html) SQL functionality must be supported.
 {{< alert type="note" >}}

@@ -1340,7 +1340,7 @@ Praefect's database is recommended if aiming for full High Availability.
 There are many third-party solutions for PostgreSQL HA. The solution selected must have the following to work with Praefect:
 - A static IP for all connections that doesn't change on failover.
-- [`LISTEN`](https://www.postgresql.org/docs/12/sql-listen.html) SQL functionality must be supported.
+- [`LISTEN`](https://www.postgresql.org/docs/16/sql-listen.html) SQL functionality must be supported.
 {{< alert type="note" >}}

@@ -1164,7 +1164,7 @@ Praefect's database is recommended if aiming for full High Availability.
 There are many third-party solutions for PostgreSQL HA. The solution selected must have the following to work with Praefect:
 - A static IP for all connections that doesn't change on failover.
-- [`LISTEN`](https://www.postgresql.org/docs/12/sql-listen.html) SQL functionality must be supported.
+- [`LISTEN`](https://www.postgresql.org/docs/16/sql-listen.html) SQL functionality must be supported.
 {{< alert type="note" >}}

@@ -263,7 +263,7 @@ Prerequisites:
 To observe a `CREATE INDEX` or `REINDEX` operation:
-- Use the built-in [`pg_stat_progress_create_index` view](https://www.postgresql.org/docs/current/progress-reporting.html#CREATE-INDEX-PROGRESS-REPORTING).
+- Use the built-in [`pg_stat_progress_create_index` view](https://www.postgresql.org/docs/16/progress-reporting.html#CREATE-INDEX-PROGRESS-REPORTING).
 For example, from a database console session, run the following command:

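The exact command sits outside this hunk; a minimal query against that progress view looks like this (illustrative):

```shell
sudo gitlab-psql -c "SELECT pid, phase, blocks_done, blocks_total FROM pg_stat_progress_create_index;"
```
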
@@ -273,7 +273,7 @@ ORDER BY pg_relation_size(indexrelname::regclass) desc;
 This query outputs a list containing all indexes that have not been used since the stats were last reset and sorts
 them by index size in descending order. More information on the meaning of the various columns can be found at
-<https://www.postgresql.org/docs/current/monitoring-stats.html>.
+<https://www.postgresql.org/docs/16/monitoring-stats.html>.
 For GitLab.com, you can check the latest generated [production reports](https://console.postgres.ai/gitlab/reports/)
 on postgres.ai and inspect the `H002 Unused Indexes` file.

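The query referenced above is not part of this hunk. A common formulation based on the `pg_stat_user_indexes` statistics view looks like this (a sketch, not necessarily the exact query on the page):

```shell
sudo gitlab-psql -c "
  SELECT relname, indexrelname,
         pg_size_pretty(pg_relation_size(indexrelname::regclass)) AS index_size
  FROM pg_stat_user_indexes
  WHERE idx_scan = 0
  ORDER BY pg_relation_size(indexrelname::regclass) DESC;"
```
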
@@ -516,7 +516,7 @@ It is commonly done by creating two [post deployment migrations](post_deployment
 In most cases, no additional work is needed. The new index is created and is used
 as expected when queuing and executing the batched background migration.
-[Expression indexes](https://www.postgresql.org/docs/current/indexes-expressional.html),
+[Expression indexes](https://www.postgresql.org/docs/16/indexes-expressional.html),
 however, do not generate statistics for the new index on creation. Autovacuum
 eventually runs `ANALYZE`, and updates the statistics so the new index is used.
 Run `ANALYZE` explicitly only if it is needed right after the index

@@ -23,7 +23,7 @@ Milestone 1:
 1. Add a new database index (not unique) to the table via post-migration (if not present already).
 1. Add model-level uniqueness validation to reduce the likelihood of duplicates (if not present already).
-1. Add a transaction-level [advisory lock](https://www.postgresql.org/docs/current/explicit-locking.html#ADVISORY-LOCKS) to prevent creating duplicate records.
+1. Add a transaction-level [advisory lock](https://www.postgresql.org/docs/16/explicit-locking.html#ADVISORY-LOCKS) to prevent creating duplicate records.
 The second step on its own will not prevent duplicate records, see the [Rails guides](https://guides.rubyonrails.org/active_record_validations.html#uniqueness) for more information.

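As a sketch of what the advisory-lock step amounts to at the SQL level (the lock key below is a made-up example; in application code the lock is taken through the Rails connection inside the same transaction as the insert):

```shell
sudo gitlab-psql <<'SQL'
BEGIN;
-- Serialize concurrent inserts for the same uniqueness scope; the lock is
-- released automatically at COMMIT or ROLLBACK.
SELECT pg_advisory_xact_lock(hashtext('my_table-user_id-42'));
-- Check for an existing row here and INSERT only if it is absent.
COMMIT;
SQL
```
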
@@ -150,7 +150,7 @@ Milestone 1:
 1. Add a new database index (not unique) to the table via post migration.
 1. Add model-level uniqueness validation to reduce the likelihood of duplicates (if not present already).
-1. Add a transaction-level [advisory lock](https://www.postgresql.org/docs/current/explicit-locking.html#ADVISORY-LOCKS) to prevent creating duplicate records.
+1. Add a transaction-level [advisory lock](https://www.postgresql.org/docs/16/explicit-locking.html#ADVISORY-LOCKS) to prevent creating duplicate records.
 Milestone 2:

@@ -107,7 +107,7 @@ and `epics`: `issues.label_ids` would be an array column of label IDs, and
 `issues.label_titles` would be an array of label titles.
 These array columns can be complemented with
-[GIN indexes](https://www.postgresql.org/docs/11/gin-intro.html) to improve
+[GIN indexes](https://www.postgresql.org/docs/16/gin-intro.html) to improve
 matching.
 ### Attempt B1: store label IDs for each object

@@ -16,9 +16,9 @@ that all decomposed databases have **the same structure** (for example, schema),
 Depending on the used constructs, we can classify migrations to be either:
-1. Modifying structure ([DDL - Data Definition Language](https://www.postgresql.org/docs/current/ddl.html)) (for example, `ALTER TABLE`).
-1. Modifying data ([DML - Data Manipulation Language](https://www.postgresql.org/docs/current/dml.html)) (for example, `UPDATE`).
-1. Performing [other queries](https://www.postgresql.org/docs/current/queries.html) (for example, `SELECT`) that are treated as **DML** for the purposes of our migrations.
+1. Modifying structure ([DDL - Data Definition Language](https://www.postgresql.org/docs/16/ddl.html)) (for example, `ALTER TABLE`).
+1. Modifying data ([DML - Data Manipulation Language](https://www.postgresql.org/docs/16/dml.html)) (for example, `UPDATE`).
+1. Performing [other queries](https://www.postgresql.org/docs/16/queries.html) (for example, `SELECT`) that are treated as **DML** for the purposes of our migrations.
 **The usage of `Gitlab::Database::Migration[2.0]` requires migrations to always be of a single purpose**.
 Migrations cannot mix **DDL** and **DML** changes as the application requires the structure

@@ -19,7 +19,7 @@ For properly discovering allowed patterns between different databases
 the GitLab application implements the [database dictionary](database_dictionary.md).
 The database dictionary provides a virtual classification of tables into a `gitlab_schema`
-which conceptually is similar to [PostgreSQL Schema](https://www.postgresql.org/docs/current/ddl-schemas.html).
+which conceptually is similar to [PostgreSQL Schema](https://www.postgresql.org/docs/16/ddl-schemas.html).
 We decided as part of [using database schemas to better isolated CI decomposed features](https://gitlab.com/gitlab-org/gitlab/-/issues/333415)
 that we cannot use PostgreSQL schema due to complex migration procedures. Instead we implemented
 the concept of application-level classification.

@@ -43,7 +43,7 @@ alternative method.
 ### Attempt A: PostgreSQL materialized view
-Model can be updated through a refresh strategy based on a project routes SQL and a [materialized view](https://www.postgresql.org/docs/11/rules-materializedviews.html):
+Model can be updated through a refresh strategy based on a project routes SQL and a [materialized view](https://www.postgresql.org/docs/16/rules-materializedviews.html):
 ```sql
 SELECT split_part("rs".path, '/', 1) as root_path,

@@ -55,7 +55,7 @@ We are using `integer` in the examples to showcase a more realistic reordering s
 ## Type Sizes
-While the [PostgreSQL documentation](https://www.postgresql.org/docs/current/datatype.html) contains plenty
+While the [PostgreSQL documentation](https://www.postgresql.org/docs/16/datatype.html) contains plenty
 of information we list the sizes of common types here so it's easier to
 look them up. Here "word" refers to the word size, which is 4 bytes for a 32
 bits platform and 8 bytes for a 64 bits platform.

@@ -126,7 +126,7 @@ such as CI pipelines that failed more than 6 months ago.
 This is the acceptable best practice for addressing time-decay data from a pure database perspective.
 You can find more information on table partitioning for PostgreSQL in the
-[documentation page for table partitioning](https://www.postgresql.org/docs/12/ddl-partitioning.html).
+[documentation page for table partitioning](https://www.postgresql.org/docs/16/ddl-partitioning.html).
 Partitioning by date intervals (for example, month, year) allows us to create much smaller tables
 (partitions) for each date interval and only access the most recent partitions for any

@@ -166,7 +166,7 @@ we can safely keep the 7 latest partitions at all times (current month and 6 mon
 That means that we can have a worker dropping the 8th oldest partition at the start of each month.
 Moving partitions to cheaper storage inside the same database is relatively simple in PostgreSQL
-through the use of [tablespaces](https://www.postgresql.org/docs/12/manage-ag-tablespaces.html).
+through the use of [tablespaces](https://www.postgresql.org/docs/16/manage-ag-tablespaces.html).
 It is possible to specify a tablespace and storage parameters for each partition separately, so the
 approach in this case would be to:

@@ -10,10 +10,10 @@ When adding new columns to store strings or other textual information:
 1. We always use the `text` data type instead of the `string` data type.
 1. `text` columns should always have a limit set, either by using the `create_table` with
 the `#text ... limit: 100` helper (see below) when creating a table, or by using the `add_text_limit`
-when altering an existing table. Without a limit, the longest possible [character string is about 1 GB](https://www.postgresql.org/docs/current/datatype-character.html).
+when altering an existing table. Without a limit, the longest possible [character string is about 1 GB](https://www.postgresql.org/docs/16/datatype-character.html).
 The standard Rails `text` column type cannot be defined with a limit, but we extend `create_table` to
-add a `limit: 255` option. Outside of `create_table`, `add_text_limit` can be used to add a [check constraint](https://www.postgresql.org/docs/11/ddl-constraints.html)
+add a `limit: 255` option. Outside of `create_table`, `add_text_limit` can be used to add a [check constraint](https://www.postgresql.org/docs/16/ddl-constraints.html)
 to an already existing column.
 ## Background information

@@ -7,7 +7,7 @@ title: Transaction guidelines
 This document gives a few examples of the usage of database transactions in application code.
-For further reference, check PostgreSQL documentation about [transactions](https://www.postgresql.org/docs/current/tutorial-transactions.html).
+For further reference, check PostgreSQL documentation about [transactions](https://www.postgresql.org/docs/16/tutorial-transactions.html).
 ## Database decomposition and sharding

@@ -86,8 +86,8 @@ Execution time: 3428.596 ms
 ```
 For more information, refer to the official
-[`EXPLAIN` documentation](https://www.postgresql.org/docs/current/sql-explain.html)
-and [using `EXPLAIN` guide](https://www.postgresql.org/docs/current/using-explain.html).
+[`EXPLAIN` documentation](https://www.postgresql.org/docs/16/sql-explain.html)
+and [using `EXPLAIN` guide](https://www.postgresql.org/docs/16/using-explain.html).
 ## Nodes

@@ -78,5 +78,5 @@ end
 ## Further Reading
-- [PostgreSQL System Catalogs](https://www.postgresql.org/docs/current/catalogs.html)
-- [PostgreSQL Views](https://www.postgresql.org/docs/current/sql-createview.html)
+- [PostgreSQL System Catalogs](https://www.postgresql.org/docs/16/catalogs.html)
+- [PostgreSQL Views](https://www.postgresql.org/docs/16/sql-createview.html)

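To explore these directly from a GitLab database console, the system catalogs and views can be queried like any other relation (illustrative):

```shell
# List non-system views visible in the GitLab database.
sudo gitlab-psql -c "SELECT schemaname, viewname FROM pg_views WHERE schemaname NOT IN ('pg_catalog', 'information_schema') LIMIT 10;"
```
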
@@ -157,6 +157,56 @@ import { initSimpleApp } from '~/helpers/init_simple_app_helper'
 initSimpleApp('#js-my-element', MyComponent, { name: 'MyAppRoot' })
 ```
+###### Passing values as `provide`/`inject` instead of props
+To use `initSimpleApp` to pass values as `provide`/`inject` instead of props:
+1. Include an HTML element in the page with an ID or unique class.
+1. Add a `data-provide` attribute containing a JSON object.
+1. Import the desired Vue component, and pass it along with a valid CSS selector string
+that selects the HTML element to `initSimpleApp`. This string mounts the component
+at the specified location.
+`initSimpleApp` automatically retrieves the content of the data-provide attribute as a JSON object and passes it as inject to the mounted Vue component. This can be used to pre-populate the component with data.
+Example:
+```vue
+//my_component.vue
+<template>
+  <div>
+    <p>Inject1: {{ inject1 }}</p>
+    <p>Inject2: {{ inject2 }}</p>
+  </div>
+</template>
+<script>
+export default {
+  name: 'MyComponent',
+  inject: {
+    inject1: {
+      default: '',
+    },
+    inject2: {
+      default: 0
+    }
+  },
+}
+</script>
+```
+```html
+<div id="js-my-element" data-provide='{"inject1": "my object", "inject2": 42 }'></div>
+```
+```javascript
+//index.js
+import MyComponent from './my_component.vue'
+import { initSimpleApp } from '~/helpers/init_simple_app_helper'
+initSimpleApp('#js-my-element', MyComponent, { name: 'MyAppRoot' })
+```
 ##### `provide` and `inject`
 Vue supports dependency injection through [`provide` and `inject`](https://v2.vuejs.org/v2/api/#provide-inject).

@@ -374,7 +374,7 @@ You should always read `disable_ddl_transaction!` as meaning:
 Even if you don't use an explicit PostgreSQL transaction `.transaction` (or `BEGIN; COMMIT;`),
 every SQL statement is still executed as a transaction.
-See [the PostgreSQL documentation on transactions](https://www.postgresql.org/docs/current/tutorial-transactions.html).
+See [the PostgreSQL documentation on transactions](https://www.postgresql.org/docs/16/tutorial-transactions.html).
 {{< /alert >}}

@@ -444,7 +444,7 @@ Custom index and constraint names should follow the [constraint naming conventio
 ### Truncate long index names
-PostgreSQL [limits the length of identifiers](https://www.postgresql.org/docs/current/limits.html),
+PostgreSQL [limits the length of identifiers](https://www.postgresql.org/docs/16/limits.html),
 like column or index names. Column names are not usually a problem, but index names tend
 to be longer. Some methods for shortening a name that's too long:

@@ -551,7 +551,7 @@ is concurrently accessed and modified by other processes, acquiring the lock may
 a while. The lock request is waiting in a queue and it may also block other queries
 on the `users` table once it has been enqueued.
-More information about PostgreSQL locks: [Explicit Locking](https://www.postgresql.org/docs/current/explicit-locking.html)
+More information about PostgreSQL locks: [Explicit Locking](https://www.postgresql.org/docs/16/explicit-locking.html)
 For stability reasons, GitLab.com has a short `statement_timeout`
 set. When the migration is invoked, any database query has

@@ -618,7 +618,7 @@ end
 #### Creating a new table when we have two foreign keys
-Only one foreign key should be created per transaction. This is because [the addition of a foreign key constraint requires a `SHARE ROW EXCLUSIVE` lock on the referenced table](https://www.postgresql.org/docs/12/sql-createtable.html#:~:text=The%20addition%20of%20a%20foreign%20key%20constraint%20requires%20a%20SHARE%20ROW%20EXCLUSIVE%20lock%20on%20the%20referenced%20table), and locking multiple tables in the same transaction should be avoided.
+Only one foreign key should be created per transaction. This is because [the addition of a foreign key constraint requires a `SHARE ROW EXCLUSIVE` lock on the referenced table](https://www.postgresql.org/docs/16/sql-createtable.html#:~:text=The%20addition%20of%20a%20foreign%20key%20constraint%20requires%20a%20SHARE%20ROW%20EXCLUSIVE%20lock%20on%20the%20referenced%20table), and locking multiple tables in the same transaction should be avoided.
 For this, we need three migrations:

@@ -79,7 +79,7 @@ USING GIN(column_name gin_trgm_ops);
 ```
 The key here is the `GIN(column_name gin_trgm_ops)` part. This creates a
-[GIN index](https://www.postgresql.org/docs/current/gin.html)
+[GIN index](https://www.postgresql.org/docs/16/gin.html)
 with the operator class set to `gin_trgm_ops`. These indexes
 _can_ be used by `ILIKE` / `LIKE` and can lead to greatly improved performance.
 One downside of these indexes is that they can easily get quite large (depending

@@ -1391,7 +1391,7 @@ It uses the [Elasticsearch Refresh API](https://www.elastic.co/guide/en/elastics
 to make sure all operations performed on an index since the last refresh are available for search. This method is typically
 called after loading data into PostgreSQL to ensure the data is indexed and searchable.
-Helper methods from `ElasticsearchHelpers` are automatically included when using any of the Elasticsearch metadata. You
+Helper methods from `ElasticsearchHelpers` are automatically included when using any of the Elasticsearch metadata. You
 can include them directly with the `:elastic_helpers` metadata.
 You can use the `SEARCH_SPEC_BENCHMARK` environment variable to benchmark test setup steps:

@ -1594,7 +1594,7 @@ Time returned from a database can differ in precision from time objects
in Ruby, so we need flexible tolerances when comparing in specs.

The PostgreSQL time and timestamp types
have [the resolution of 1 microsecond](https://www.postgresql.org/docs/current/datatype-datetime.html).
have [the resolution of 1 microsecond](https://www.postgresql.org/docs/16/datatype-datetime.html).
However, the precision of Ruby `Time` can vary [depending on the OS.](https://blog.paulswartz.net/post/142749676062/ruby-time-precision-os-x-vs-linux)

Consider the following snippet:
@ -74,7 +74,7 @@ It can take a few hours to validate a certificate provisioned through ACM. To av
Below is a diagram of the recommended architecture.




## AWS costs
@ -160,7 +160,7 @@ We now create a VPC, a virtual networking environment that you control:
`10.0.0.0/16`. If you don't require dedicated hardware, you can leave
"Tenancy" as default. Select **Create VPC** when ready.




1. Select the VPC, select **Actions**, select **Edit VPC Settings** and check **Enable DNS resolution**. Select **Save** when done.
@ -179,7 +179,7 @@ RDS instances as well:
for example `gitlab-public-10.0.0.0`, select the VPC we created previously, select an availability zone (we use `us-west-2a`),
and at the IPv4 CIDR block let's give it a 24 subnet `10.0.0.0/24`:




1. Follow the same steps to create all subnets:
@ -204,7 +204,7 @@ create a new one:
1. Select it from the table, and then under the **Actions** dropdown list choose
"Attach to VPC".




1. Choose `gitlab-vpc` from the list and hit **Attach**.
@ -423,7 +423,7 @@ persistence and is used to store session data, temporary cache information, and
Select the VPC we created earlier (`gitlab-vpc`) and ensure the selected subnets table only contains the [private subnets](#subnets).
1. Select **Create** when ready.




### Create the Redis Cluster
@ -447,7 +447,8 @@ persistence and is used to store session data, temporary cache information, and
1. Manually select the preferred availability zones, and under "Replica 2"
choose a different zone than the other two.




1. Select **Next**.
1. In the security settings, edit the security groups and choose the
`gitlab-redis-sec-group` we had previously created. Select **Next**.
@ -843,13 +844,13 @@ From the EC2 dashboard:
1. **Add** `1` capacity unit when `CPUUtilization` is greater than or equal to 60%.
1. Set the **Scaling policy name** to `Scale Up Policy`.




1. Create a [scale down policy](https://docs.aws.amazon.com/autoscaling/ec2/userguide/as-scaling-simple-step.html#step-scaling-create-scale-in-policy) using the following conditions:
1. **Remove** `1` capacity unit when `CPUUtilization` is less than or equal to 45%.
1. Set the **Scaling policy name** to `Scale Down Policy`.




1. Assign the new dynamic scaling policy to the auto scaling group we created earlier.
@ -56,7 +56,7 @@ If the highest number stable branch is unclear, check the [GitLab blog](https://
| [Go](#4-go) | `1.22.x` | In GitLab 17.1 and later, Go 1.22 or later is required. |
| [Git](#git) | `2.47.x` | In GitLab 17.7 and later, Git 2.47.x and later is required. You should use the [Git version provided by Gitaly](#git). |
| [Node.js](#5-node) | `20.13.x` | In GitLab 17.0 and later, Node.js 20.13 or later is required. |
| [PostgreSQL](#7-database) | `14.x` | In GitLab 17.0 and later, PostgreSQL 14 or later is required. |
| [PostgreSQL](#7-database) | `16.x` | In GitLab 18.0 and later, PostgreSQL 16 or later is required. |

## GitLab directory structure
@ -303,7 +303,7 @@ sudo adduser --disabled-login --gecos 'GitLab' git
{{< alert type="note" >}}

Only PostgreSQL is supported.
In GitLab 17.0 and later, we [require PostgreSQL 14+](requirements.md#postgresql).
In GitLab 18.0 and later, we [require PostgreSQL 16+](requirements.md#postgresql).

{{< /alert >}}
@ -322,7 +322,7 @@ In GitLab 17.0 and later, we [require PostgreSQL 14+](requirements.md#postgresql
sudo sh -c 'echo "deb https://apt.postgresql.org/pub/repos/apt $(lsb_release -cs)-pgdg main" > /etc/apt/sources.list.d/pgdg.list'
wget --quiet -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc | sudo apt-key add -
sudo apt-get update
sudo apt-get -y install postgresql-14
sudo apt-get -y install postgresql-16
```

1. Verify the PostgreSQL version you have is supported by the version of GitLab you're
@ -45,7 +45,7 @@ To install a PostgreSQL extension, this procedure should be followed:
sudo gitlab-psql -d gitlabhq_production
```

1. Install the extension (`btree_gist` in this example) using [`CREATE EXTENSION`](https://www.postgresql.org/docs/11/sql-createextension.html):
1. Install the extension (`btree_gist` in this example) using [`CREATE EXTENSION`](https://www.postgresql.org/docs/16/sql-createextension.html):

```sql
CREATE EXTENSION IF NOT EXISTS btree_gist
@ -66,7 +66,7 @@ For the following versions of GitLab, use these PostgreSQL versions:
| GitLab version | Minimum PostgreSQL version | Maximum PostgreSQL version |
| -------------- | -------------------------- | -------------------------- |
| 18.x | 16.x | To be determined |
| 18.x | 16.x | To be determined |
| 17.x | 14.x | 16.x ([tested against GitLab 16.10 and later](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/145298)) |
| 16.x | 13.6 | 15.x ([tested against GitLab 16.1 and later](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/119344)) |
| 15.x | 12.10 | 14.x ([tested against GitLab 15.11 only](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/114624)), 13.x |
@ -15,7 +15,7 @@ title: Proxying assets
A possible security concern when managing a public-facing GitLab instance is
the ability to steal a user's IP address by referencing images in issues and comments.

For example, adding `` to
For example, adding `` to
an issue description causes the image to be loaded from the external
server to be displayed. However, this also allows the external server
to log the IP address of the user.
@ -71,7 +71,7 @@ references an external source are proxied to the Camo server.
For example, the following is a link to an image in Markdown:

```markdown


```

The following is an example of a source link that could result:
@ -248,7 +248,7 @@ version of GitLab. If you [check the status](#check-the-status-of-batched-backgr
of batched background migrations, some migrations might display in the **Failed** tab
with a **failed** status:




To determine why the batched background migration failed,
view the failure error logs
@ -167,11 +167,11 @@ extensions. For more information, see the
{{< alert type="warning" >}}

GitLab 17.0 requires PostgreSQL 14. GitLab 17.5 is compatible with up to PostgreSQL 16.
GitLab 18.0 requires PostgreSQL 16.

{{< /alert >}}

To upgrade PostgreSQL, refer to its [documentation](https://www.postgresql.org/docs/11/upgrading.html).
To upgrade PostgreSQL, refer to its [documentation](https://www.postgresql.org/docs/16/upgrading.html).
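Before and after the upgrade, it can help to confirm which server version GitLab is actually connected to. A minimal check, not part of the upstream instructions, run from a `psql` session against the GitLab database:

```sql
-- Both report the running PostgreSQL server version:
SELECT version();
SHOW server_version;
```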
### Update the GitLab codebase
@ -761,7 +761,7 @@ To filter the list issues for text in a title or description:
1. In the dropdown list that appears, select **Search within**, and then either **Titles** or **Descriptions**.
1. Press <kbd>Enter</kbd> or select the search icon ({{< icon name="search" >}}).

Filtering issues uses [PostgreSQL full text search](https://www.postgresql.org/docs/current/textsearch-intro.html)
Filtering issues uses [PostgreSQL full text search](https://www.postgresql.org/docs/16/textsearch-intro.html)
to match meaningful and significant words to answer a query.
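Under the hood this boils down to matching normalized lexemes rather than raw substrings. A rough illustration in SQL, simplified and not the exact query GitLab runs:

```sql
-- Both sides are normalized to lexemes ('securing' -> 'secur'), so the query matches:
SELECT to_tsvector('english', 'I am securing information for M&A')
       @@ to_tsquery('english', 'securing & information') AS matches;
```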
For example, if you search for `I am securing information for M&A`,
@ -157,6 +157,13 @@ Prerequisites:
- A fork of a public upstream project.
- A public upstream project with downstream forks.

{{< alert type="note" >}}

To ensure successful blob removal, consider temporarily restricting repository access during the
process. New commits pushed during blob removal can cause the operation to fail.

{{< /alert >}}

To remove blobs from your repository:

1. On the left sidebar, select **Search or go to** and find your project.
@ -248,6 +255,31 @@ If you've completed a repository cleanup process but the storage usage remains u
- After two weeks, these objects are automatically pruned, which updates storage usage statistics.
- To expedite this process, ask an administrator to run the ['Prune Unreachable Objects' housekeeping task](../../../administration/housekeeping.md).

### Blobs are not removed

When blobs are successfully removed, GitLab adds an entry in the project audit logs and sends an
email notification to the person who initiated the action.

If the blob removal fails, GitLab sends an email notification to the initiator with the subject
`<project_name> | Project history rewrite failure`. The email body contains the full error message.

Possible errors and solutions:

- `validating object ID: invalid object ID`:

  The object ID list contains a syntax error or an incorrect object ID. To resolve this:

  1. Regenerate the [object IDs list](#get-a-list-of-object-ids).
  1. Re-run the [blob removal steps](#remove-blobs).

- `source repository checksum altered`:

  This occurs when someone pushes a commit during the blob removal process. To resolve this:

  1. Temporarily block all pushes to the repository.
  1. Re-run the [blob removal steps](#remove-blobs).
  1. Re-enable pushes after the process completes successfully.

### Repository size limit reached

If you've reached the repository size limit:
@ -1,7 +1,7 @@
# frozen_string_literal: true

module QA
  RSpec.describe 'Configure' do
  RSpec.describe 'Deploy' do
    describe 'AutoDevOps Templates', only: { pipeline: %i[staging staging-canary] }, product_group: :environments,
      quarantine: {
        issue: 'https://gitlab.com/gitlab-org/gitlab/-/issues/432409',
@ -1,7 +1,7 @@
# frozen_string_literal: true

module QA
  RSpec.describe 'Configure',
  RSpec.describe 'Deploy',
    only: { pipeline: %i[staging staging-canary canary production] }, product_group: :environments do
    describe 'Auto DevOps with a Kubernetes Agent' do
      let!(:app_project) { create(:project, :auto_devops, name: 'autodevops-app-project', template_name: 'express') }
@ -1,7 +1,7 @@
# frozen_string_literal: true

module QA
  RSpec.describe 'Release', product_group: :environments do
  RSpec.describe 'Deploy', product_group: :environments do
    describe 'Deploy key creation' do
      it 'user adds a deploy key', :smoke,
        testcase: 'https://gitlab.com/gitlab-org/gitlab/-/quality/test_cases/348023' do
@ -3,7 +3,7 @@
require 'digest/sha1'

module QA
  RSpec.describe 'Release', product_group: :environments do
  RSpec.describe 'Deploy', product_group: :environments do
    describe 'Git clone using a deploy key' do
      let(:runner_name) { "qa-runner-#{SecureRandom.hex(4)}" }
      let(:repository_location) { project.repository_ssh_location }
@ -70,7 +70,7 @@ module QA
  resource.new_branch = true
end

sha1sum = Digest::SHA1.hexdigest(gitlab_ci)
sha1sum = Digest::SHA1.hexdigest(gitlab_ci) # rubocop:disable Fips/SHA1 -- Test is for sha1

Flow::Pipeline.visit_latest_pipeline
Page::Project::Pipeline::Show.perform(&:click_on_first_job)
@ -1,14 +1,14 @@
# frozen_string_literal: true

module QA
  RSpec.describe 'Release', product_group: :environments do
  RSpec.describe 'Deploy', product_group: :environments do
    describe 'Deploy token creation' do
      it 'user adds a deploy token',
        testcase: 'https://gitlab.com/gitlab-org/gitlab/-/quality/test_cases/348028' do
        Flow::Login.sign_in

        deploy_token_name = 'deploy token name'
        one_week_from_now = Date.today + 7
        one_week_from_now = Date.today + 7 # rubocop:disable Rails/Date -- E2E tests run outside of Rails environment

        deploy_token = create(:project_deploy_token,
          name: deploy_token_name,