Add latest changes from gitlab-org/gitlab@master
parent 00871a937d
commit a6db6a23d1
@@ -18,11 +18,17 @@ import { n__, s__ } from '~/locale';
 import Tracking from '~/tracking';
 import SidebarParticipant from '~/sidebar/components/assignees/sidebar_participant.vue';
 import { DEFAULT_DEBOUNCE_AND_THROTTLE_MS } from '~/lib/utils/constants';
-import localUpdateWorkItemMutation from '../graphql/local_update_work_item.mutation.graphql';
+import updateWorkItemMutation from '../graphql/update_work_item.mutation.graphql';
 import { i18n, TRACKING_CATEGORY_SHOW } from '../constants';

 function isTokenSelectorElement(el) {
-  return el?.classList.contains('gl-token-close') || el?.classList.contains('dropdown-item');
+  return (
+    el?.classList.contains('gl-token-close') ||
+    el?.classList.contains('dropdown-item') ||
+    // TODO: replace this logic when we have a class added to clear-all button in GitLab UI
+    (el?.classList.contains('gl-button') &&
+      el?.closest('.form-control')?.classList.contains('gl-token-selector'))
+  );
 }

 function addClass(el) {

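For context, the widened guard keeps the assignees editor open when focus lands on the clear-all control that `GlTokenSelector` renders. A minimal standalone sketch of what the new check matches; the DOM fragment below is an assumption meant to mirror the selector's markup, not code taken from the component or its tests:

```javascript
// Standalone sketch; the markup is illustrative, not GitLab UI's exact output.
function isTokenSelectorElement(el) {
  return (
    el?.classList.contains('gl-token-close') ||
    el?.classList.contains('dropdown-item') ||
    // Clear-all is a plain `.gl-button` rendered inside the `.gl-token-selector` wrapper.
    (el?.classList.contains('gl-button') &&
      el?.closest('.form-control')?.classList.contains('gl-token-selector'))
  );
}

document.body.innerHTML = `
  <div class="form-control gl-token-selector">
    <button class="gl-button" type="button">Clear all</button>
  </div>`;

// A blur whose relatedTarget is the clear-all button no longer ends editing.
console.log(isTokenSelectorElement(document.querySelector('.gl-button'))); // true
console.log(isTokenSelectorElement(document.body)); // false
```
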
@@ -130,7 +136,7 @@ export default {
       if (this.searchUsers.some((user) => user.username === this.currentUser.username)) {
         return this.moveCurrentUserToStart(this.searchUsers);
       }
-      return [this.currentUser, ...this.searchUsers];
+      return [addClass(this.currentUser), ...this.searchUsers];
     }
     return this.searchUsers;
   },

@@ -142,12 +148,18 @@ export default {
         ? s__('WorkItem|Add assignees')
         : s__('WorkItem|Add assignee');
     },
+    assigneeIds() {
+      return this.localAssignees.map(({ id }) => id);
+    },
   },
   watch: {
-    assignees(newVal) {
-      if (!this.isEditing) {
-        this.localAssignees = newVal.map(addClass);
-      }
+    assignees: {
+      handler(newVal) {
+        if (!this.isEditing) {
+          this.localAssignees = newVal.map(addClass);
+        }
+      },
+      deep: true,
     },
   },
   created() {

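The switch to the object form of the watcher means that changes inside individual assignee objects, not just replacement of the array itself, refresh `localAssignees`. A minimal Vue 2 sketch of the same pattern; the data and the inline `addClass` stand-in are illustrative, not copied from the merge request:

```javascript
import Vue from 'vue';

// Illustrative stand-in for the component's helper of the same name.
const addClass = (user) => ({ ...user, class: 'gl-text-gray-900' });

new Vue({
  data: () => ({
    assignees: [{ id: 1, username: 'alice' }],
    localAssignees: [],
    isEditing: false,
  }),
  watch: {
    assignees: {
      handler(newVal) {
        if (!this.isEditing) {
          this.localAssignees = newVal.map(addClass);
        }
      },
      // Without `deep: true`, mutating a nested property of an assignee
      // (for example its username) would not re-run the handler.
      deep: true,
    },
  },
});
```
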
@@ -169,19 +181,33 @@ export default {
     handleBlur(e) {
       if (isTokenSelectorElement(e.relatedTarget) || !this.isEditing) return;
       this.isEditing = false;
-      this.setAssignees(this.localAssignees);
+      this.setAssignees(this.assigneeIds);
     },
-    setAssignees(assignees) {
-      this.$apollo.mutate({
-        mutation: localUpdateWorkItemMutation,
-        variables: {
-          input: {
-            id: this.workItemId,
-            assignees,
-          },
-        },
-      });
-      this.track('updated_assignees');
-    },
+    async setAssignees(assigneeIds) {
+      try {
+        const {
+          data: {
+            workItemUpdate: { errors },
+          },
+        } = await this.$apollo.mutate({
+          mutation: updateWorkItemMutation,
+          variables: {
+            input: {
+              id: this.workItemId,
+              assigneesWidget: {
+                assigneeIds,
+              },
+            },
+          },
+        });
+        if (errors.length > 0) {
+          this.throwUpdateError();
+          return;
+        }
+        this.track('updated_assignees');
+      } catch {
+        this.throwUpdateError();
+      }
+    },
     handleFocus() {
       this.isEditing = true;

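The new `setAssignees` follows the errors-as-data convention: domain validation failures come back in `workItemUpdate.errors`, while transport failures surface as rejected promises. A reduced sketch of the same flow against a generic Apollo client; the mutation document shape is assumed from the `assigneesWidget` input above rather than copied from the repository:

```javascript
import { gql } from 'graphql-tag';

// Assumed shape, derived from the variables used in the hunk above.
const updateWorkItemMutation = gql`
  mutation workItemUpdate($input: WorkItemUpdateInput!) {
    workItemUpdate(input: $input) {
      errors
    }
  }
`;

async function setAssignees(apolloClient, workItemId, assigneeIds, onError) {
  try {
    const {
      data: {
        workItemUpdate: { errors },
      },
    } = await apolloClient.mutate({
      mutation: updateWorkItemMutation,
      variables: { input: { id: workItemId, assigneesWidget: { assigneeIds } } },
    });
    if (errors.length > 0) {
      onError(errors); // domain-level errors returned as data
      return;
    }
  } catch (error) {
    onError([error.message]); // network or unexpected server errors
  }
}
```
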
@@ -205,13 +231,25 @@ export default {
     },
     moveCurrentUserToStart(users = []) {
       if (this.currentUser) {
-        return [this.currentUser, ...users.filter((user) => user.id !== this.currentUser.id)];
+        return [
+          addClass(this.currentUser),
+          ...users.filter((user) => user.id !== this.currentUser.id),
+        ];
       }
       return users;
     },
     closeDropdown() {
       this.$refs.tokenSelector.closeDropdown();
     },
+    assignToCurrentUser() {
+      this.setAssignees([this.currentUser.id]);
+      this.localAssignees = [addClass(this.currentUser)];
+    },
+    throwUpdateError() {
+      this.$emit('error', i18n.updateError);
+      // If mutation is rejected, we're rolling back to initial state
+      this.localAssignees = this.assignees.map(addClass);
+    },
   },
 };
 </script>

@@ -227,11 +265,12 @@ export default {
       ref="tokenSelector"
       :selected-tokens="localAssignees"
       :container-class="containerClass"
-      class="assignees-selector gl-flex-grow-1 gl-border gl-border-white gl-rounded-base col-9 gl-align-self-start gl-px-0!"
       :class="{ 'gl-hover-border-gray-200': canUpdate }"
       :dropdown-items="dropdownItems"
       :loading="isLoadingUsers"
       :view-only="!canUpdate"
+      :allow-clear-all="isEditing"
+      class="assignees-selector gl-flex-grow-1 gl-border gl-border-white gl-rounded-base col-9 gl-align-self-start gl-px-0!"
       @input="handleAssigneesInput"
       @text-input="debouncedSearchKeyUpdate"
       @focus="handleFocus"

@@ -251,7 +290,7 @@ export default {
           size="small"
           class="assign-myself"
           data-testid="assign-self"
-          @click.stop="setAssignees([currentUser])"
+          @click.stop="assignToCurrentUser"
           >{{ __('Assign myself') }}</gl-button
         >
       </div>

@@ -2,7 +2,7 @@ import produce from 'immer';
 import Vue from 'vue';
 import VueApollo from 'vue-apollo';
 import createDefaultClient from '~/lib/graphql';
-import { WIDGET_TYPE_ASSIGNEES, WIDGET_TYPE_LABELS } from '../constants';
+import { WIDGET_TYPE_LABELS } from '../constants';
 import typeDefs from './typedefs.graphql';
 import workItemQuery from './work_item.query.graphql';

@@ -29,6 +29,11 @@ export const temporaryConfig = {
           );
         },
       },
+      widgets: {
+        merge(_, incoming) {
+          return incoming;
+        },
+      },
     },
   },
 },

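The `widgets` field policy above tells the Apollo cache to take the incoming widget array wholesale instead of attempting a positional merge. A sketch of the same policy on a bare `InMemoryCache`; the parent type name is an assumption, since the hunk does not show which type the policy is nested under:

```javascript
import { InMemoryCache } from '@apollo/client/core';

const cache = new InMemoryCache({
  typePolicies: {
    // Hypothetical parent type, for illustration only.
    WorkItem: {
      fields: {
        widgets: {
          // Replace the cached array with whatever the server returns,
          // which avoids Apollo's "Cache data may be lost" warning for
          // arrays of unnormalized objects.
          merge(_, incoming) {
            return incoming;
          },
        },
      },
    },
  },
});
```
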
@@ -44,13 +49,6 @@ export const resolvers = {
       });

       const data = produce(sourceData, (draftData) => {
-        if (input.assignees) {
-          const assigneesWidget = draftData.workItem.widgets.find(
-            (widget) => widget.type === WIDGET_TYPE_ASSIGNEES,
-          );
-          assigneesWidget.assignees.nodes = [...input.assignees];
-        }
-
         if (input.labels) {
           const labelsWidget = draftData.workItem.mockWidgets.find(
             (widget) => widget.type === WIDGET_TYPE_LABELS,

@@ -603,7 +603,7 @@ module Ci

       cancel_jobs(cancelable_statuses, retries: retries, auto_canceled_by_pipeline_id: auto_canceled_by_pipeline_id)

-      if cascade_to_children && project.cascade_cancel_pipelines_enabled?
+      if cascade_to_children
         # cancel any bridges that could spin up new child pipelines
         cancel_jobs(bridges_in_self_and_descendants.cancelable, retries: retries, auto_canceled_by_pipeline_id: auto_canceled_by_pipeline_id)
         cancel_children(auto_canceled_by_pipeline_id: auto_canceled_by_pipeline_id, execute_async: execute_async)

@@ -1044,12 +1044,6 @@ class Project < ApplicationRecord
     !emails_disabled?
   end

-  def cascade_cancel_pipelines_enabled?
-    strong_memoize(:cascade_cancel_pipelines_enabled) do
-      Feature.enabled?(:ci_parent_pipeline_cancels_children, self)
-    end
-  end
-
   override :lfs_enabled?
   def lfs_enabled?
     return namespace.lfs_enabled? if self[:lfs_enabled].nil?

@@ -1,8 +0,0 @@
----
-name: ci_parent_pipeline_cancels_children
-introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/82149
-rollout_issue_url: https://gitlab.com/gitlab-org/gitlab/-/issues/354528
-milestone: '15.3'
-type: development
-group: group::pipeline execution
-default_enabled: false

@@ -1,8 +0,0 @@
----
-name: use_redis_hll_instrumentation_classes
-introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/90237
-rollout_issue_url: https://gitlab.com/gitlab-org/gitlab/-/issues/365332
-milestone: '15.1'
-type: development
-group: group::product intelligence
-default_enabled: false

@@ -210,8 +210,8 @@ This shows you which user has this email address. One of two steps must be taken
 remove this email as a secondary email and make it a primary one so GitLab
 associates this profile to the LDAP identity.

-The user can do either of these steps [in their
-profile](../../../user/profile/index.md#access-your-user-profile) or an administrator can do it.
+The user can do either of these steps
+[in their profile](../../../user/profile/index.md#access-your-user-profile) or an administrator can do it.

 #### Projects limit errors

@@ -426,13 +426,12 @@ Rails.logger.level = Logger::DEBUG
 LdapAllGroupsSyncWorker.new.perform
 ```

-Next, [learn how to read the
-output](#example-console-output-after-a-group-sync).
+Next, [learn how to read the output](#example-console-output-after-a-group-sync).

 ##### Example console output after a group sync

-Like the output from the user sync, the output from the [manual group
-sync](#sync-all-groups) is also very verbose. However, it contains lots
+Like the output from the user sync, the output from the
+[manual group sync](#sync-all-groups) is also very verbose. However, it contains lots
 of helpful information.

 Indicates the point where syncing actually begins:

@@ -743,8 +742,7 @@ For instructions about how to use the rails console, refer to this
 This provides debug output that shows what GitLab is doing and with what.
 This value is not persisted, and is only enabled for this session in the Rails console.

-To enable debug output in the rails console, [enter the rails
-console](#rails-console) and run:
+To enable debug output in the rails console, [enter the rails console](#rails-console) and run:

 ```ruby
 Rails.logger.level = Logger::DEBUG

@ -250,8 +250,8 @@ but `LocalAccounts` works for authenticating against local, Active Directory acc
|
|||
<OutputClaim ClaimTypeReferenceId="signInNames.emailAddress" PartnerClaimType="email" />
|
||||
```
|
||||
|
||||
1. For OIDC discovery to work with B2C, the policy must be configured with an issuer compatible with the [OIDC
|
||||
specification](https://openid.net/specs/openid-connect-discovery-1_0.html#rfc.section.4.3).
|
||||
1. For OIDC discovery to work with B2C, the policy must be configured with an issuer compatible with the
|
||||
[OIDC specification](https://openid.net/specs/openid-connect-discovery-1_0.html#rfc.section.4.3).
|
||||
See the [token compatibility settings](https://docs.microsoft.com/en-us/azure/active-directory-b2c/configure-tokens?pivots=b2c-custom-policy#token-compatibility-settings).
|
||||
In `TrustFrameworkBase.xml` under `JwtIssuer`, set `IssuanceClaimPattern` to `AuthorityWithTfp`:
|
||||
|
||||
|
|
@ -529,8 +529,7 @@ If you're having trouble, here are some tips:
|
|||
|
||||
1. Check your system clock to ensure the time is synchronized properly.
|
||||
|
||||
1. As mentioned in [the
|
||||
documentation](https://github.com/m0n9oose/omniauth_openid_connect),
|
||||
1. As mentioned in [the documentation](https://github.com/m0n9oose/omniauth_openid_connect),
|
||||
make sure `issuer` corresponds to the base URL of the Discovery URL. For
|
||||
example, `https://accounts.google.com` is used for the URL
|
||||
`https://accounts.google.com/.well-known/openid-configuration`.
|
||||
|
|
@ -540,5 +539,4 @@ If you're having trouble, here are some tips:
|
|||
If you are seeing 401 errors upon retrieving the `userinfo` endpoint, you may
|
||||
want to check your OpenID Web server configuration. For example, for
|
||||
[`oauth2-server-php`](https://github.com/bshaffer/oauth2-server-php), you
|
||||
may need to [add a configuration parameter to
|
||||
Apache](https://github.com/bshaffer/oauth2-server-php/issues/926#issuecomment-387502778).
|
||||
may need to [add a configuration parameter to Apache](https://github.com/bshaffer/oauth2-server-php/issues/926#issuecomment-387502778).
|
||||
|
|
|
|||
|
|
@ -41,8 +41,8 @@ To bring the former **primary** site up to date:
|
|||
|
||||
NOTE:
|
||||
If you [changed the DNS records](index.md#step-4-optional-updating-the-primary-domain-dns-record)
|
||||
for this site during disaster recovery procedure you may need to [block
|
||||
all the writes to this site](planned_failover.md#prevent-updates-to-the-primary-site)
|
||||
for this site during disaster recovery procedure you may need to
|
||||
[block all the writes to this site](planned_failover.md#prevent-updates-to-the-primary-site)
|
||||
during this procedure.
|
||||
|
||||
1. [Set up database replication](../setup/database.md). In this case, the **secondary** site
|
||||
|
|
|
|||
|
|
@ -331,8 +331,7 @@ Be sure to restart PostgreSQL for this to take effect. See the
|
|||
This occurs when PostgreSQL does not have a replication slot for the
|
||||
**secondary** node by that name.
|
||||
|
||||
You may want to rerun the [replication
|
||||
process](../setup/database.md) on the **secondary** node .
|
||||
You may want to rerun the [replication process](../setup/database.md) on the **secondary** node .
|
||||
|
||||
### Message: "Command exceeded allowed execution time" when setting up replication?
|
||||
|
||||
|
|
@ -869,9 +868,8 @@ or `gitlab-ctl promote-to-primary-node`, either:
|
|||
```
|
||||
|
||||
- Upgrade to GitLab 12.6.3 or later if it is safe to do so. For example,
|
||||
if the failover was just a test. A [caching-related
|
||||
bug](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/22021) was
|
||||
fixed.
|
||||
if the failover was just a test. A
|
||||
[caching-related bug](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/22021) was fixed.
|
||||
|
||||
### Message: `ActiveRecord::RecordInvalid: Validation failed: Enabled Geo primary node cannot be disabled`
|
||||
|
||||
|
|
|
|||
|
|
@ -243,8 +243,8 @@ the recommended procedure, see the
|
|||
## Upgrading to GitLab 12.9
|
||||
|
||||
WARNING:
|
||||
GitLab 12.9.0 through GitLab 12.9.3 are affected by [a bug that stops
|
||||
repository verification](https://gitlab.com/gitlab-org/gitlab/-/issues/213523).
|
||||
GitLab 12.9.0 through GitLab 12.9.3 are affected by
|
||||
[a bug that stops repository verification](https://gitlab.com/gitlab-org/gitlab/-/issues/213523).
|
||||
The issue is fixed in GitLab 12.9.4. Upgrade to GitLab 12.9.4 or later.
|
||||
|
||||
By default, GitLab 12.9 attempts to upgrade the embedded PostgreSQL server
|
||||
|
|
@ -397,6 +397,6 @@ For the recommended procedure, see the
|
|||
## Upgrading to GitLab 12.0
|
||||
|
||||
WARNING:
|
||||
This version is affected by a [bug that results in new LFS objects not being
|
||||
replicated to Geo secondary sites](https://gitlab.com/gitlab-org/gitlab/-/issues/32696).
|
||||
This version is affected by a
|
||||
[bug that results in new LFS objects not being replicated to Geo secondary sites](https://gitlab.com/gitlab-org/gitlab/-/issues/32696).
|
||||
The issue is fixed in GitLab 12.1. Be sure to upgrade to GitLab 12.1 or later.
|
||||
|
|
|
|||
|
|
@ -112,8 +112,9 @@ gitlab:
|
|||
|
||||
Since GitLab 15.1, Geo secondary proxying is enabled by default for separate URLs also.
|
||||
|
||||
There are minor known issues linked in the ["Geo secondary proxying with separate URLs"
|
||||
epic](https://gitlab.com/groups/gitlab-org/-/epics/6865). You can also add feedback in the epic about any use-cases that
|
||||
There are minor known issues linked in the
|
||||
["Geo secondary proxying with separate URLs" epic](https://gitlab.com/groups/gitlab-org/-/epics/6865).
|
||||
You can also add feedback in the epic about any use-cases that
|
||||
are not possible anymore with proxying enabled.
|
||||
|
||||
If you run into issues, to disable this feature, disable the `geo_secondary_proxy_separate_urls` feature flag.
|
||||
|
|
|
|||
|
|
@ -1143,8 +1143,7 @@ gitaly['pack_objects_cache_enabled'] = true
|
|||
#### `enabled` defaults to `false`
|
||||
|
||||
The cache is disabled by default. This is because in some cases, it
|
||||
can create an [extreme
|
||||
increase](https://gitlab.com/gitlab-com/gl-infra/production/-/issues/4010#note_534564684)
|
||||
can create an [extreme increase](https://gitlab.com/gitlab-com/gl-infra/production/-/issues/4010#note_534564684)
|
||||
in the number of bytes written to disk. On GitLab.com, we have verified
|
||||
that our repository storage disks can handle this extra workload, but
|
||||
we felt we cannot assume this is true everywhere.
|
||||
|
|
|
|||
|
|
@ -97,8 +97,8 @@ If you [installed](https://about.gitlab.com/install/) GitLab using the Omnibus G
|
|||
|
||||
### Preparation
|
||||
|
||||
Before beginning, you should already have a working GitLab instance. [Learn how
|
||||
to install GitLab](https://about.gitlab.com/install/).
|
||||
Before beginning, you should already have a working GitLab instance.
|
||||
[Learn how to install GitLab](https://about.gitlab.com/install/).
|
||||
|
||||
Provision a PostgreSQL server. We recommend using the PostgreSQL that is shipped
|
||||
with Omnibus GitLab and use it to configure the PostgreSQL database. You can use an
|
||||
|
|
@ -331,8 +331,8 @@ To configure the additional connection, you must either:
|
|||
#### Configure a new PgBouncer database with `pool_mode = session`
|
||||
|
||||
We recommend using PgBouncer with `session` pool mode. You can use the
|
||||
[bundled PgBouncer](../postgresql/pgbouncer.md) or use an external PgBouncer and [configure it
|
||||
manually](https://www.pgbouncer.org/config.html).
|
||||
[bundled PgBouncer](../postgresql/pgbouncer.md) or use an external PgBouncer and
|
||||
[configure it manually](https://www.pgbouncer.org/config.html).
|
||||
|
||||
The following example uses the bundled PgBouncer and sets up two separate connection pools on PostgreSQL host,
|
||||
one in `session` pool mode and the other in `transaction` pool mode. For this example to work,
|
||||
|
|
@ -620,8 +620,8 @@ Updates to example must be made at:
|
|||
gitlab-ctl reconfigure
|
||||
```
|
||||
|
||||
1. To ensure that Praefect [has updated its Prometheus listen
|
||||
address](https://gitlab.com/gitlab-org/gitaly/-/issues/2734),
|
||||
1. To ensure that Praefect
|
||||
[has updated its Prometheus listen address](https://gitlab.com/gitlab-org/gitaly/-/issues/2734),
|
||||
[restart Praefect](../restart_gitlab.md#omnibus-gitlab-restart):
|
||||
|
||||
```shell
|
||||
|
|
@ -928,8 +928,8 @@ For more information on Gitaly server configuration, see our
|
|||
gitlab-ctl reconfigure
|
||||
```
|
||||
|
||||
1. To ensure that Gitaly [has updated its Prometheus listen
|
||||
address](https://gitlab.com/gitlab-org/gitaly/-/issues/2734),
|
||||
1. To ensure that Gitaly
|
||||
[has updated its Prometheus listen address](https://gitlab.com/gitlab-org/gitaly/-/issues/2734),
|
||||
[restart Gitaly](../restart_gitlab.md#omnibus-gitlab-restart):
|
||||
|
||||
```shell
|
||||
|
|
@ -1149,8 +1149,7 @@ running multiple Gitaly storages.
|
|||
### Grafana
|
||||
|
||||
Grafana is included with GitLab, and can be used to monitor your Praefect
|
||||
cluster. See [Grafana Dashboard
|
||||
Service](https://docs.gitlab.com/omnibus/settings/grafana.html)
|
||||
cluster. See [Grafana Dashboard Service](https://docs.gitlab.com/omnibus/settings/grafana.html)
|
||||
for detailed documentation.
|
||||
|
||||
To get started quickly:
|
||||
|
|
|
|||
|
|
@ -65,8 +65,7 @@ Read:
|
|||
## Known kernel version incompatibilities
|
||||
|
||||
RedHat Enterprise Linux (RHEL) and CentOS v7.7 and v7.8 ship with kernel
|
||||
version `3.10.0-1127`, which [contains a
|
||||
bug](https://bugzilla.redhat.com/show_bug.cgi?id=1783554) that causes
|
||||
version `3.10.0-1127`, which [contains a bug](https://bugzilla.redhat.com/show_bug.cgi?id=1783554) that causes
|
||||
[uploads to fail to copy over NFS](https://gitlab.com/gitlab-org/gitlab/-/issues/218999). The
|
||||
following GitLab versions include a fix to work properly with that
|
||||
kernel version:
|
||||
|
|
|
|||
|
|
@ -26,8 +26,8 @@ GitLab has been tested by vendors and customers on a number of object storage pr
|
|||
|
||||
### Known compatibility issues
|
||||
|
||||
- Dell EMC ECS: Prior to GitLab 13.3, there is a [known bug in GitLab Workhorse that prevents
|
||||
HTTP Range Requests from working with CI job artifacts](https://gitlab.com/gitlab-org/gitlab/-/issues/223806).
|
||||
- Dell EMC ECS: Prior to GitLab 13.3, there is a
|
||||
[known bug in GitLab Workhorse that prevents HTTP Range Requests from working with CI job artifacts](https://gitlab.com/gitlab-org/gitlab/-/issues/223806).
|
||||
Be sure to upgrade to GitLab 13.3.0 or above if you use S3 storage with this hardware.
|
||||
|
||||
- Ceph S3 prior to [Kraken 11.0.2](https://ceph.com/releases/kraken-11-0-2-released/) does not support the [Upload Copy Part API](https://gitlab.com/gitlab-org/gitlab/-/issues/300604). You may need to [disable multi-threaded copying](#multi-threaded-copying).
|
||||
|
|
@ -578,9 +578,8 @@ real bucket into multiple virtual buckets. If your object storage
|
|||
bucket is called `my-gitlab-objects` you can configure uploads to go
|
||||
into `my-gitlab-objects/uploads`, artifacts into
|
||||
`my-gitlab-objects/artifacts`, etc. The application will act as if
|
||||
these are separate buckets. Note that use of bucket prefixes [may not
|
||||
work correctly with Helm
|
||||
backups](https://gitlab.com/gitlab-org/charts/gitlab/-/issues/3376).
|
||||
these are separate buckets. Note that use of bucket prefixes
|
||||
[may not work correctly with Helm backups](https://gitlab.com/gitlab-org/charts/gitlab/-/issues/3376).
|
||||
|
||||
Helm-based installs require separate buckets to
|
||||
[handle backup restorations](https://docs.gitlab.com/charts/advanced/external-object-storage/#lfs-artifacts-uploads-packages-external-diffs-terraform-state-dependency-proxy).
|
||||
|
|
@ -693,18 +692,17 @@ configuration.
|
|||
|
||||
When configured either with an instance profile or with the consolidated
|
||||
object configuration, GitLab Workhorse properly uploads files to S3
|
||||
buckets that have [SSE-S3 or SSE-KMS encryption enabled by
|
||||
default](https://docs.aws.amazon.com/kms/latest/developerguide/services-s3.html).
|
||||
Customer master keys (CMKs) and SSE-C encryption are [not
|
||||
supported since this requires sending the encryption keys in every request](https://gitlab.com/gitlab-org/gitlab/-/issues/226006).
|
||||
buckets that have [SSE-S3 or SSE-KMS encryption enabled by default](https://docs.aws.amazon.com/kms/latest/developerguide/services-s3.html).
|
||||
Customer master keys (CMKs) and SSE-C encryption are
|
||||
[not supported since this requires sending the encryption keys in every request](https://gitlab.com/gitlab-org/gitlab/-/issues/226006).
|
||||
|
||||
##### Server-side encryption headers
|
||||
|
||||
> [Introduced](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/38240) in GitLab 13.3.
|
||||
|
||||
Setting a default encryption on an S3 bucket is the easiest way to
|
||||
enable encryption, but you may want to [set a bucket policy to ensure
|
||||
only encrypted objects are uploaded](https://aws.amazon.com/premiumsupport/knowledge-center/s3-bucket-store-kms-encrypted-objects/).
|
||||
enable encryption, but you may want to
|
||||
[set a bucket policy to ensure only encrypted objects are uploaded](https://aws.amazon.com/premiumsupport/knowledge-center/s3-bucket-store-kms-encrypted-objects/).
|
||||
To do this, you must configure GitLab to send the proper encryption headers
|
||||
in the `storage_options` configuration section:
|
||||
|
||||
|
|
|
|||
|
|
@ -126,8 +126,8 @@ you list. In this example, we exclude all import-related jobs from a Sidekiq nod
|
|||
> - [Renamed from `experimental_queue_selector` to `queue_selector`](https://gitlab.com/gitlab-com/gl-infra/scalability/-/issues/147) in GitLab 13.6.
|
||||
|
||||
In addition to selecting queues by name, as above, the `queue_selector` option
|
||||
allows queue groups to be selected in a more general way using a [worker matching
|
||||
query](extra_sidekiq_routing.md#worker-matching-query). After `queue_selector`
|
||||
allows queue groups to be selected in a more general way using a
|
||||
[worker matching query](extra_sidekiq_routing.md#worker-matching-query). After `queue_selector`
|
||||
is set, all `queue_groups` must follow the aforementioned syntax.
|
||||
|
||||
In `/etc/gitlab/gitlab.rb`:
|
||||
|
|
@ -201,8 +201,8 @@ have the concurrency tuned according to:
|
|||
- The throughput achieved.
|
||||
|
||||
Each thread requires a Redis connection, so adding threads may increase Redis
|
||||
latency and potentially cause client timeouts. See the [Sidekiq documentation
|
||||
about Redis](https://github.com/mperham/sidekiq/wiki/Using-Redis) for more
|
||||
latency and potentially cause client timeouts. See the
|
||||
[Sidekiq documentation about Redis](https://github.com/mperham/sidekiq/wiki/Using-Redis) for more
|
||||
details.
|
||||
|
||||
#### When running Sidekiq cluster (default)
|
||||
|
|
|
|||
|
|
@ -76,9 +76,9 @@ workers.
|
|||
## Worker matching query
|
||||
|
||||
GitLab provides a query syntax to match a worker based on its
|
||||
attributes. This query syntax is employed by both [Queue routing
|
||||
rules](#queue-routing-rules) and [Queue
|
||||
selector](extra_sidekiq_processes.md#queue-selector). A query includes two
|
||||
attributes. This query syntax is employed by both
|
||||
[Queue routing rules](#queue-routing-rules) and
|
||||
[Queue selector](extra_sidekiq_processes.md#queue-selector). A query includes two
|
||||
components:
|
||||
|
||||
- Attributes that can be selected.
|
||||
|
|
@ -92,8 +92,8 @@ Queue matching query works upon the worker attributes, described in
|
|||
[Sidekiq style guide](../../development/sidekiq/index.md). We support querying
|
||||
based on a subset of worker attributes:
|
||||
|
||||
- `feature_category` - the [GitLab feature
|
||||
category](https://about.gitlab.com/direction/maturity/#category-maturity) the
|
||||
- `feature_category` - the
|
||||
[GitLab feature category](https://about.gitlab.com/direction/maturity/#category-maturity) the
|
||||
queue belongs to. For example, the `merge` queue belongs to the
|
||||
`source_code_management` category.
|
||||
- `has_external_dependencies` - whether or not the queue connects to external
|
||||
|
|
@ -122,10 +122,10 @@ that have tags `a`, `b`, or both. `tags!=a,b` selects queues that have
|
|||
neither of those tags.
|
||||
|
||||
The attributes of each worker are hard-coded in the source code. For
|
||||
convenience, we generate a [list of all available attributes in
|
||||
GitLab Community Edition](https://gitlab.com/gitlab-org/gitlab/-/blob/master/app/workers/all_queues.yml)
|
||||
and a [list of all available attributes in
|
||||
GitLab Enterprise Edition](https://gitlab.com/gitlab-org/gitlab/-/blob/master/ee/app/workers/all_queues.yml).
|
||||
convenience, we generate a
|
||||
[list of all available attributes in GitLab Community Edition](https://gitlab.com/gitlab-org/gitlab/-/blob/master/app/workers/all_queues.yml)
|
||||
and a
|
||||
[list of all available attributes in GitLab Enterprise Edition](https://gitlab.com/gitlab-org/gitlab/-/blob/master/ee/app/workers/all_queues.yml).
|
||||
|
||||
### Available operators
|
||||
|
||||
|
|
@ -160,8 +160,7 @@ entire queue group selects all queues.
|
|||
After the Sidekiq routing rules are changed, administrators must take care
|
||||
with the migration to avoid losing jobs entirely, especially in a system with
|
||||
long queues of jobs. The migration can be done by following the migration steps
|
||||
mentioned in [Sidekiq job
|
||||
migration](../../raketasks/sidekiq_job_migration.md)
|
||||
mentioned in [Sidekiq job migration](../../raketasks/sidekiq_job_migration.md)
|
||||
|
||||
### Workers that cannot be migrated
|
||||
|
||||
|
|
@ -177,5 +176,5 @@ sidekiq['routing_rules'] = [
|
|||
]
|
||||
```
|
||||
|
||||
These queues must also be included in at least one [Sidekiq
|
||||
queue group](extra_sidekiq_processes.md#start-multiple-processes).
|
||||
These queues must also be included in at least one
|
||||
[Sidekiq queue group](extra_sidekiq_processes.md#start-multiple-processes).
|
||||
|
|
|
|||
|
|
@ -18,10 +18,9 @@ Keep your GitLab instance up and running smoothly.
|
|||
- [Multiple Sidekiq processes](extra_sidekiq_processes.md): Configure multiple Sidekiq processes to ensure certain queues always have dedicated workers, no matter the number of jobs that must be processed. **(FREE SELF)**
|
||||
- [Sidekiq routing rules](extra_sidekiq_routing.md): Configure the routing rules to route a job from a worker to a desirable queue. **(FREE SELF)**
|
||||
- [Puma](puma.md): Understand Puma and puma-worker-killer.
|
||||
- Speed up SSH operations by [Authorizing SSH users via a fast,
|
||||
indexed lookup to the GitLab database](fast_ssh_key_lookup.md), and/or
|
||||
by [doing away with user SSH keys stored on GitLab entirely in favor
|
||||
of SSH certificates](ssh_certificates.md).
|
||||
- Speed up SSH operations by
|
||||
[Authorizing SSH users via a fast, indexed lookup to the GitLab database](fast_ssh_key_lookup.md), and/or
|
||||
by [doing away with user SSH keys stored on GitLab entirely in favor of SSH certificates](ssh_certificates.md).
|
||||
- [File System Performance Benchmarking](filesystem_benchmarking.md): File system
|
||||
performance can have a big impact on GitLab performance, especially for actions
|
||||
that read or write Git repositories. This information helps benchmark
|
||||
|
|
|
|||
|
|
@ -6,8 +6,8 @@ info: To determine the technical writer assigned to the Stage/Group associated w
|
|||
|
||||
# Rails console **(FREE SELF)**
|
||||
|
||||
At the heart of GitLab is a web application [built using the Ruby on Rails
|
||||
framework](https://about.gitlab.com/blog/2018/10/29/why-we-use-rails-to-build-gitlab/).
|
||||
At the heart of GitLab is a web application
|
||||
[built using the Ruby on Rails framework](https://about.gitlab.com/blog/2018/10/29/why-we-use-rails-to-build-gitlab/).
|
||||
The [Rails console](https://guides.rubyonrails.org/command_line.html#rails-console).
|
||||
provides a way to interact with your GitLab instance from the command line, and also grants access to the amazing tools built right into Rails.
|
||||
|
||||
|
|
@ -19,8 +19,8 @@ with no consequences, you are strongly advised to do so in a test environment.
|
|||
|
||||
The Rails console is for GitLab system administrators who are troubleshooting
|
||||
a problem or need to retrieve some data that can only be done through direct
|
||||
access of the GitLab application. Basic knowledge of Ruby is needed (try [this
|
||||
30-minute tutorial](https://try.ruby-lang.org/) for a quick introduction).
|
||||
access of the GitLab application. Basic knowledge of Ruby is needed (try
|
||||
[this 30-minute tutorial](https://try.ruby-lang.org/) for a quick introduction).
|
||||
Rails experience is useful but not required.
|
||||
|
||||
## Starting a Rails console session
|
||||
|
|
@ -136,8 +136,8 @@ root
|
|||
1
|
||||
```
|
||||
|
||||
Some basic knowledge of Ruby will be very useful. Try [this
|
||||
30-minute tutorial](https://try.ruby-lang.org/) for a quick introduction.
|
||||
Some basic knowledge of Ruby will be very useful. Try
|
||||
[this 30-minute tutorial](https://try.ruby-lang.org/) for a quick introduction.
|
||||
Rails experience is helpful but not essential.
|
||||
|
||||
### Troubleshooting Rails Runner
|
||||
|
|
|
|||
|
|
@ -57,8 +57,7 @@ The MemoryKiller is controlled using environment variables.
|
|||
the restart is aborted.
|
||||
|
||||
The default value for Omnibus packages is set
|
||||
[in the Omnibus GitLab
|
||||
repository](https://gitlab.com/gitlab-org/omnibus-gitlab/blob/master/files/gitlab-cookbooks/gitlab/attributes/default.rb).
|
||||
[in the Omnibus GitLab repository](https://gitlab.com/gitlab-org/omnibus-gitlab/blob/master/files/gitlab-cookbooks/gitlab/attributes/default.rb).
|
||||
|
||||
- `SIDEKIQ_MEMORY_KILLER_HARD_LIMIT_RSS` (KB): is used by _daemon_ mode. If the Sidekiq
|
||||
process RSS (expressed in kilobytes) exceeds `SIDEKIQ_MEMORY_KILLER_HARD_LIMIT_RSS`,
|
||||
|
|
|
|||
|
|
@ -35,10 +35,10 @@ uploading user SSH keys to GitLab entirely.
|
|||
## Setting up SSH certificate lookup via GitLab Shell
|
||||
|
||||
How to fully set up SSH certificates is outside the scope of this
|
||||
document. See [OpenSSH's
|
||||
`PROTOCOL.certkeys`](https://cvsweb.openbsd.org/cgi-bin/cvsweb/src/usr.bin/ssh/PROTOCOL.certkeys?annotate=HEAD)
|
||||
for how it works, for example [RedHat's documentation about
|
||||
it](https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/6/html/deployment_guide/sec-using_openssh_certificate_authentication).
|
||||
document. See
|
||||
[OpenSSH's`PROTOCOL.certkeys`](https://cvsweb.openbsd.org/cgi-bin/cvsweb/src/usr.bin/ssh/PROTOCOL.certkeys?annotate=HEAD)
|
||||
for how it works, for example
|
||||
[RedHat's documentation about it](https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/6/html/deployment_guide/sec-using_openssh_certificate_authentication).
|
||||
|
||||
We assume that you already have SSH certificates set up, and have
|
||||
added the `TrustedUserCAKeys` of your CA to your `sshd_config`, for example:
|
||||
|
|
@ -159,8 +159,8 @@ users (especially if they're renewed) than you have deploy keys.
|
|||
Users can still bypass SSH certificate authentication by manually
|
||||
uploading an SSH public key to their profile, relying on the
|
||||
`~/.ssh/authorized_keys` fallback to authenticate it. There's
|
||||
currently no feature to prevent this, [but there's an open request for
|
||||
adding it](https://gitlab.com/gitlab-org/gitlab/-/issues/23260).
|
||||
currently no feature to prevent this,
|
||||
[but there's an open request for adding it](https://gitlab.com/gitlab-org/gitlab/-/issues/23260).
|
||||
|
||||
Such a restriction can currently be hacked in by, for example, providing a
|
||||
custom `AuthorizedKeysCommand` which checks if the discovered key-ID
|
||||
|
|
|
|||
|
|
@ -20,8 +20,7 @@ For example:
|
|||
- Omnibus 12.7.6 shipped with PostgreSQL 9.6.14 and 10.9.
|
||||
- Omnibus 12.7.7 shipped with PostgreSQL 9.6.17 and 10.12.
|
||||
|
||||
[Find out which versions of PostgreSQL (and other components) ship with
|
||||
each Omnibus GitLab release](https://gitlab-org.gitlab.io/omnibus-gitlab/licenses.html).
|
||||
[Find out which versions of PostgreSQL (and other components) ship with each Omnibus GitLab release](https://gitlab-org.gitlab.io/omnibus-gitlab/licenses.html).
|
||||
|
||||
The lowest supported PostgreSQL versions are listed in the
|
||||
[installation requirements](../../install/requirements.md#postgresql-requirements).
|
||||
|
|
|
|||
|
|
@ -10,8 +10,8 @@ GitLab officially supports LTS versions of operating systems. While OSs like
|
|||
Ubuntu have a clear distinction between LTS and non-LTS versions, there are
|
||||
other OSs, openSUSE for example, that don't follow the LTS concept. Hence to
|
||||
avoid confusion, the official policy is that at any point of time, all the
|
||||
operating systems supported by GitLab are listed in the [installation
|
||||
page](https://about.gitlab.com/install/).
|
||||
operating systems supported by GitLab are listed in the
|
||||
[installation page](https://about.gitlab.com/install/).
|
||||
|
||||
The following lists the currently supported OSs and their possible EOL dates.
|
||||
|
||||
|
|
|
|||
|
|
@ -343,8 +343,8 @@ NOTE:
|
|||
If you are using an external Redis Sentinel instance, be sure
|
||||
to exclude the `requirepass` parameter from the Sentinel
|
||||
configuration. This parameter causes clients to report `NOAUTH
|
||||
Authentication required.`. [Redis Sentinel 3.2.x does not support
|
||||
password authentication](https://github.com/antirez/redis/issues/3279).
|
||||
Authentication required.`.
|
||||
[Redis Sentinel 3.2.x does not support password authentication](https://github.com/antirez/redis/issues/3279).
|
||||
|
||||
Now that the Redis servers are all set up, let's configure the Sentinel
|
||||
servers.
|
||||
|
|
|
|||
|
|
@ -102,8 +102,8 @@ depend on those files.
|
|||
|
||||
## Installations from source
|
||||
|
||||
If you have followed the official installation guide to [install GitLab from
|
||||
source](../install/installation.md), run the following command to restart GitLab:
|
||||
If you have followed the official installation guide to
|
||||
[install GitLab from source](../install/installation.md), run the following command to restart GitLab:
|
||||
|
||||
```shell
|
||||
# For systems running systemd
|
||||
|
|
|
|||
|
|
@ -78,9 +78,9 @@ Terraform state files are stored locally, follow the steps below.
|
|||
|
||||
## Using object storage **(FREE SELF)**
|
||||
|
||||
Instead of storing Terraform state files on disk, we recommend the use of [one of the supported object
|
||||
storage options](object_storage.md#options). This configuration relies on valid credentials to
|
||||
be configured already.
|
||||
Instead of storing Terraform state files on disk, we recommend the use of
|
||||
[one of the supported object storage options](object_storage.md#options).
|
||||
This configuration relies on valid credentials to be configured already.
|
||||
|
||||
[Read more about using object storage with GitLab](object_storage.md).
|
||||
|
||||
|
|
|
|||
|
|
@ -261,8 +261,8 @@ Check the [RFC spec](https://tools.ietf.org/html/rfc6749#section-4.3) for a
|
|||
detailed flow description.
|
||||
|
||||
NOTE:
|
||||
The Resource Owner Password Credentials is disabled for users with [two-factor
|
||||
authentication](../user/profile/account/two_factor_authentication.md) turned on.
|
||||
The Resource Owner Password Credentials is disabled for users with
|
||||
[two-factor authentication](../user/profile/account/two_factor_authentication.md) turned on.
|
||||
These users can access the API using [personal access tokens](../user/profile/personal_access_tokens.md)
|
||||
instead.
|
||||
|
||||
|
|
|
|||
|
|
@ -48,8 +48,8 @@ PostgreSQL database running on GitLab.com.
|
|||
This volume contributes to significant performance problems, development
|
||||
challenges and is often related to production incidents.
|
||||
|
||||
We also expect a [significant growth in the number of builds executed on
|
||||
GitLab.com](../ci_scale/index.md) in the upcoming years.
|
||||
We also expect a [significant growth in the number of builds executed on GitLab.com](../ci_scale/index.md)
|
||||
in the upcoming years.
|
||||
|
||||
## Opportunity
|
||||
|
||||
|
|
@ -61,8 +61,8 @@ pipelines that are older than a few months might help us to move this data out
|
|||
of the primary database, to a different storage, that is more performant and
|
||||
cost effective.
|
||||
|
||||
It is already possible to prevent processing builds [that have been
|
||||
archived](../../../user/admin_area/settings/continuous_integration.md#archive-jobs).
|
||||
It is already possible to prevent processing builds
|
||||
[that have been archived](../../../user/admin_area/settings/continuous_integration.md#archive-jobs).
|
||||
When a build gets archived it will not be possible to retry it, but we still do
|
||||
keep all the processing metadata in the database, and it consumes resources
|
||||
that are scarce in the primary database.
|
||||
|
|
|
|||
|
|
@ -89,8 +89,8 @@ environment.
|
|||
|
||||
We also expect a significant, exponential growth in the upcoming years.
|
||||
|
||||
One of the forecasts done using [Facebook's
|
||||
Prophet](https://facebook.github.io/prophet/) shows that in the first half of
|
||||
One of the forecasts done using [Facebook's Prophet](https://facebook.github.io/prophet/)
|
||||
shows that in the first half of
|
||||
2024 we expect seeing 20M builds created on GitLab.com each day. In comparison
|
||||
to around 2M we see created today, this is 10x growth our product might need to
|
||||
sustain in upcoming years.
|
||||
|
|
@ -115,17 +115,14 @@ of the CI/CD Apdex score, and sometimes even causes a significant performance
|
|||
degradation in the production environment.
|
||||
|
||||
There are multiple other strategies that can improve performance and
|
||||
reliability. We can use [Redis
|
||||
queuing](https://gitlab.com/gitlab-org/gitlab/-/issues/322972), or [a separate
|
||||
table that will accelerate SQL queries used to build
|
||||
queues](https://gitlab.com/gitlab-org/gitlab/-/issues/322766) and we want to
|
||||
explore them.
|
||||
reliability. We can use [Redis queuing](https://gitlab.com/gitlab-org/gitlab/-/issues/322972), or
|
||||
[a separate table that will accelerate SQL queries used to build queues](https://gitlab.com/gitlab-org/gitlab/-/issues/322766)
|
||||
and we want to explore them.
|
||||
|
||||
**Status**: As of October 2021 the new architecture [has been implemented on
|
||||
GitLab.com](https://gitlab.com/groups/gitlab-org/-/epics/5909#note_680407908).
|
||||
The following epic tracks making it generally available: [Make the new pending
|
||||
builds architecture generally available](
|
||||
https://gitlab.com/groups/gitlab-org/-/epics/6954).
|
||||
**Status**: As of October 2021 the new architecture
|
||||
[has been implemented on GitLab.com](https://gitlab.com/groups/gitlab-org/-/epics/5909#note_680407908).
|
||||
The following epic tracks making it generally available:
|
||||
[Make the new pending builds architecture generally available](https://gitlab.com/groups/gitlab-org/-/epics/6954).
|
||||
|
||||
### Moving big amounts of data is challenging
|
||||
|
||||
|
|
|
|||
|
|
@ -12,8 +12,8 @@ Cloud native and the adoption of Kubernetes has been recognised by GitLab to be
|
|||
one of the top two biggest tailwinds that are helping us grow faster as a
|
||||
company behind the project.
|
||||
|
||||
This effort is described in a more details [in the infrastructure team
|
||||
handbook](https://about.gitlab.com/handbook/engineering/infrastructure/production/kubernetes/gitlab-com/).
|
||||
This effort is described in a more details
|
||||
[in the infrastructure team handbook](https://about.gitlab.com/handbook/engineering/infrastructure/production/kubernetes/gitlab-com/).
|
||||
|
||||
## Traditional build logs
|
||||
|
||||
|
|
@ -88,9 +88,8 @@ even tried to replace NFS with
|
|||
|
||||
Since that time it has become apparent that the cost of operations and
|
||||
maintenance of a NFS cluster is significant and that if we ever decide to
|
||||
migrate to Kubernetes [we need to decouple GitLab from a shared local storage
|
||||
and
|
||||
NFS](https://gitlab.com/gitlab-org/gitlab-pages/-/issues/426#note_375646396).
|
||||
migrate to Kubernetes
|
||||
[we need to decouple GitLab from a shared local storage and NFS](https://gitlab.com/gitlab-org/gitlab-pages/-/issues/426#note_375646396).
|
||||
|
||||
1. NFS might be a single point of failure
|
||||
1. NFS can only be reliably scaled vertically
|
||||
|
|
@ -112,12 +111,10 @@ of complexity, maintenance cost and enormous, negative impact on availability.
|
|||
1. ✓ Rollout the feature into production environment incrementally
|
||||
|
||||
The work needed to make the new architecture production ready and enabled on
|
||||
GitLab.com had been tracked in [Cloud Native Build Logs on
|
||||
GitLab.com](https://gitlab.com/groups/gitlab-org/-/epics/4275) epic.
|
||||
GitLab.com had been tracked in [Cloud Native Build Logs on GitLab.com](https://gitlab.com/groups/gitlab-org/-/epics/4275) epic.
|
||||
|
||||
Enabling this feature on GitLab.com is a subtask of [making the new
|
||||
architecture generally
|
||||
available](https://gitlab.com/groups/gitlab-org/-/epics/3791) for everyone.
|
||||
Enabling this feature on GitLab.com is a subtask of
|
||||
[making the new architecture generally available](https://gitlab.com/groups/gitlab-org/-/epics/3791) for everyone.
|
||||
|
||||
## Status
|
||||
|
||||
|
|
|
|||
|
|
@ -17,8 +17,8 @@ Cloud Native and the adoption of Kubernetes has been recognised by GitLab to be
|
|||
one of the top two biggest tailwinds that are helping us grow faster as a
|
||||
company behind the project.
|
||||
|
||||
This effort is described in more detail [in the infrastructure team handbook
|
||||
page](https://about.gitlab.com/handbook/engineering/infrastructure/production/kubernetes/gitlab-com/).
|
||||
This effort is described in more detail
|
||||
[in the infrastructure team handbook page](https://about.gitlab.com/handbook/engineering/infrastructure/production/kubernetes/gitlab-com/).
|
||||
|
||||
GitLab Pages is tightly coupled with NFS and in order to unblock Kubernetes
|
||||
migration a significant change to GitLab Pages' architecture is required. This
|
||||
|
|
@ -55,9 +55,8 @@ even tried to replace NFS with
|
|||
|
||||
Since that time it has become apparent that the cost of operations and
|
||||
maintenance of a NFS cluster is significant and that if we ever decide to
|
||||
migrate to Kubernetes [we need to decouple GitLab from a shared local storage
|
||||
and
|
||||
NFS](https://gitlab.com/gitlab-org/gitlab-pages/-/issues/426#note_375646396).
|
||||
migrate to Kubernetes
|
||||
[we need to decouple GitLab from a shared local storage and NFS](https://gitlab.com/gitlab-org/gitlab-pages/-/issues/426#note_375646396).
|
||||
|
||||
1. NFS might be a single point of failure
|
||||
1. NFS can only be reliably scaled vertically
|
||||
|
|
@ -84,8 +83,8 @@ graph TD
|
|||
C -- Serves static content --> E(Visitors)
|
||||
```
|
||||
|
||||
This new architecture has been briefly described in [the blog
|
||||
post](https://about.gitlab.com/blog/2020/08/03/how-gitlab-pages-uses-the-gitlab-api-to-serve-content/)
|
||||
This new architecture has been briefly described in
|
||||
[the blog post](https://about.gitlab.com/blog/2020/08/03/how-gitlab-pages-uses-the-gitlab-api-to-serve-content/)
|
||||
too.
|
||||
|
||||
## Iterations
|
||||
|
|
|
|||
|
|
@ -115,9 +115,9 @@ These are reason why these changes are needed:
|
|||
|
||||
## Iterations
|
||||
|
||||
This work is being done as part of dedicated epic: [Improve internal usage of
|
||||
Feature Flags](https://gitlab.com/groups/gitlab-org/-/epics/3551). This epic
|
||||
describes a meta reasons for making these changes.
|
||||
This work is being done as part of dedicated epic:
|
||||
[Improve internal usage of Feature Flags](https://gitlab.com/groups/gitlab-org/-/epics/3551).
|
||||
This epic describes a meta reasons for making these changes.
|
||||
|
||||
## Who
|
||||
|
||||
|
|
|
|||
|
|
@ -44,12 +44,12 @@ It is an opportunity to learn from our experience in evolving the REST API, for
|
|||
the scale, and to apply this knowledge onto the GraphQL development efforts. We
|
||||
can do that by building query-to-feature correlation mechanisms, adding
|
||||
scalable state synchronization support and aligning GraphQL with other
|
||||
architectural initiatives being executed in parallel, like [the support for
|
||||
direct uploads](https://gitlab.com/gitlab-org/gitlab/-/issues/280819).
|
||||
architectural initiatives being executed in parallel, like
|
||||
[the support for direct uploads](https://gitlab.com/gitlab-org/gitlab/-/issues/280819).
|
||||
|
||||
GraphQL should be secure by default. We can avoid common security mistakes by
|
||||
building mechanisms that will help us to enforce [OWASP GraphQL
|
||||
recommendations](https://cheatsheetseries.owasp.org/cheatsheets/GraphQL_Cheat_Sheet.html)
|
||||
building mechanisms that will help us to enforce
|
||||
[OWASP GraphQL recommendations](https://cheatsheetseries.owasp.org/cheatsheets/GraphQL_Cheat_Sheet.html)
|
||||
that are relevant to us.
|
||||
|
||||
Understanding what are the needs of the wider community will also allow us to
|
||||
|
|
|
|||
|
|
@ -31,9 +31,9 @@ underlying implementation for shared, distributed, highly-available
|
|||
(HA) file storage.
|
||||
|
||||
Over time, we have built support for object storage across the
|
||||
application, solving specific problems in a [multitude of
|
||||
iterations](https://about.gitlab.com/company/team/structure/working-groups/object-storage/#company-efforts-on-uploads). This
|
||||
has led to increased complexity across the board, from development
|
||||
application, solving specific problems in a
|
||||
[multitude of iterations](https://about.gitlab.com/company/team/structure/working-groups/object-storage/#company-efforts-on-uploads).
|
||||
This has led to increased complexity across the board, from development
|
||||
(new features and bug fixes) to installation:
|
||||
|
||||
- New GitLab installations require the creation and configuration of
|
||||
|
|
@ -67,10 +67,8 @@ has led to increased complexity across the board, from development
|
|||
The following is a brief description of the main directions we can take to
|
||||
remove the pain points affecting our object storage implementation.
|
||||
|
||||
This is also available as [a YouTube
|
||||
video](https://youtu.be/X9V_w8hsM8E) recorded for the [Object Storage
|
||||
Working
|
||||
Group](https://about.gitlab.com/company/team/structure/working-groups/object-storage/).
|
||||
This is also available as [a YouTube video](https://youtu.be/X9V_w8hsM8E) recorded for the
|
||||
[Object Storage Working Group](https://about.gitlab.com/company/team/structure/working-groups/object-storage/).
|
||||
|
||||
### Simplify GitLab architecture by shipping MinIO
|
||||
|
||||
|
|
@ -80,8 +78,8 @@ local storage and object storage.
|
|||
|
||||
With local storage, there is the assumption of a shared storage
|
||||
between components. This can be achieved by having a single box
|
||||
installation, without HA, or with a NFS, which [we no longer
|
||||
recommend](../../../administration/nfs.md).
|
||||
installation, without HA, or with a NFS, which
|
||||
[we no longer recommend](../../../administration/nfs.md).
|
||||
|
||||
We have a testing gap on object storage. It also requires Workhorse
|
||||
and MinIO, which are not present in our pipelines, so too much is
|
||||
|
|
@ -136,8 +134,8 @@ access to new features without infrastructure chores.
|
|||
|
||||
Our implementation is built on top of a 3rd-party framework where
|
||||
every object storage client is a 3rd-party library. Unfortunately some
|
||||
of them are unmaintained. [We have customers who cannot push 5GB Git
|
||||
LFS objects](https://gitlab.com/gitlab-org/gitlab/-/issues/216442),
|
||||
of them are unmaintained.
|
||||
[We have customers who cannot push 5GB Git LFS objects](https://gitlab.com/gitlab-org/gitlab/-/issues/216442),
|
||||
but with such a vital feature implemented in 3rd-party libraries we
|
||||
are slowed down in fixing it, and we also rely on external maintainers
|
||||
to merge and release fixes.
|
||||
|
|
@ -147,8 +145,7 @@ Before the introduction of direct upload, using the
|
|||
library, _"a gem that provides a simple and extremely flexible way to
|
||||
upload files from Ruby applications."_, was the boring solution.
|
||||
However this is no longer our use-case, as we upload files from
|
||||
Workhorse, and we had to [patch CarrierWave's
|
||||
internals](https://gitlab.com/gitlab-org/gitlab/-/issues/285597#note_452696638)
|
||||
Workhorse, and we had to [patch CarrierWave's internals](https://gitlab.com/gitlab-org/gitlab/-/issues/285597#note_452696638)
|
||||
to support direct upload.
|
||||
|
||||
A brief proposal covering CarrierWave removal and a new streamlined
|
||||
|
|
@ -217,7 +214,7 @@ Proposal:
|
|||
|
||||
DRIs:
|
||||
|
||||
The DRI for this blueprint is the [Object Storage Working
|
||||
Group](https://about.gitlab.com/company/team/structure/working-groups/object-storage/).
|
||||
The DRI for this blueprint is the
|
||||
[Object Storage Working Group](https://about.gitlab.com/company/team/structure/working-groups/object-storage/).
|
||||
|
||||
<!-- vale gitlab.Spelling = YES -->
|
||||
|
|
|
|||
|
|
@ -33,8 +33,8 @@ This design choice was crucial for the GitLab Runner success. Since that time
|
|||
the auto-scaling feature has been used by many users and customers and enabled
|
||||
rapid growth of CI/CD adoption on GitLab.com.
|
||||
|
||||
We can not, however, continue using Docker Machine. Work on that project [was
|
||||
paused in July 2018](https://github.com/docker/machine/issues/4537) and there
|
||||
We can not, however, continue using Docker Machine. Work on that project
|
||||
[was paused in July 2018](https://github.com/docker/machine/issues/4537) and there
|
||||
was no development made since that time (except for some highly important
|
||||
security fixes). In 2018, after Docker Machine entered the "maintenance mode",
|
||||
we decided to create [our own fork](https://gitlab.com/gitlab-org/ci-cd/docker-machine)
|
||||
|
|
@ -76,8 +76,8 @@ mechanism with a reliable and flexible mechanism. We might be unable to build a
|
|||
drop-in replacement for Docker Machine, as there are presumably many reasons
|
||||
why it has been deprecated. It is very difficult to maintain compatibility with
|
||||
so many cloud providers, and it seems that Docker Machine has been deprecated
|
||||
in favor of Docker Desktop, which is not a viable replacement for us. [This
|
||||
issue](https://github.com/docker/roadmap/issues/245) contains a discussion
|
||||
in favor of Docker Desktop, which is not a viable replacement for us.
|
||||
[This issue](https://github.com/docker/roadmap/issues/245) contains a discussion
|
||||
about how people are using Docker Machine right now, and it seems that GitLab
|
||||
CI is one of the most frequent reasons for people to keep using Docker Machine.
|
||||
|
||||
|
|
|
|||
|
|
@ -233,6 +233,7 @@ The following job, when run for the default branch, is able to read secrets unde
|
|||
|
||||
```yaml
|
||||
read_secrets:
|
||||
image: vault:latest
|
||||
script:
|
||||
# Check job's ref name
|
||||
- echo $CI_COMMIT_REF_NAME
|
||||
|
|
@ -261,6 +262,7 @@ The following job is able to authenticate using the `myproject-production` role
|
|||
|
||||
```yaml
|
||||
read_secrets:
|
||||
image: vault:latest
|
||||
script:
|
||||
# Check job's ref name
|
||||
- echo $CI_COMMIT_REF_NAME
|
||||
|
|
|
|||
|
|
@ -18,8 +18,8 @@ taken to protect the users.
|
|||
|
||||
NOTE:
|
||||
[Shared runners on GitLab.com](../runners/index.md) do not
|
||||
provide an interactive web terminal. Follow [this
|
||||
issue](https://gitlab.com/gitlab-org/gitlab/-/issues/24674) for progress on
|
||||
provide an interactive web terminal. Follow
|
||||
[this issue](https://gitlab.com/gitlab-org/gitlab/-/issues/24674) for progress on
|
||||
adding support. For groups and projects hosted on GitLab.com, interactive web
|
||||
terminals are available when using your own group or project runner.
|
||||
|
||||
|
|
@ -27,8 +27,8 @@ terminals are available when using your own group or project runner.
|
|||
|
||||
Two things need to be configured for the interactive web terminal to work:
|
||||
|
||||
- The runner needs to have [`[session_server]` configured
|
||||
properly](https://docs.gitlab.com/runner/configuration/advanced-configuration.html#the-session_server-section)
|
||||
- The runner needs to have
|
||||
[`[session_server]` configured properly](https://docs.gitlab.com/runner/configuration/advanced-configuration.html#the-session_server-section)
|
||||
- If you are using a reverse proxy with your GitLab instance, web terminals need to be
|
||||
[enabled](../../administration/integration/terminal.md#enabling-and-disabling-terminal-support)
|
||||
|
||||
|
|
@ -54,8 +54,8 @@ Not all executors are
|
|||
NOTE:
|
||||
The `docker` executor does not keep running
|
||||
after the build script is finished. At that point, the terminal automatically
|
||||
disconnects and does not wait for the user to finish. Please follow [this
|
||||
issue](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/3605) for updates on
|
||||
disconnects and does not wait for the user to finish. Please follow
|
||||
[this issue](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/3605) for updates on
|
||||
improving this behavior.
|
||||
|
||||
Sometimes, when a job is running, things don't go as you would expect, and it
|
||||
|
|
@ -63,8 +63,7 @@ would be helpful if one can have a shell to aid debugging. When a job is
|
|||
running, on the right panel you can see a button `debug` that opens the terminal
|
||||
for the current job.
|
||||
|
||||

|
||||

|
||||
|
||||
When clicked, a new tab opens to the terminal page where you can access
|
||||
the terminal and type commands like a normal shell.
|
||||
|
|
|
|||
|
|
@ -116,8 +116,7 @@ you could add [`--no-tags`](https://git-scm.com/docs/git-fetch#Documentation/git
|
|||
to the extra flags to make your fetches faster and more compact.
|
||||
|
||||
Also in the case where your repository does _not_ contain a lot of
|
||||
tags, `--no-tags` can [make a big difference in some
|
||||
cases](https://gitlab.com/gitlab-com/gl-infra/scalability/-/issues/746).
|
||||
tags, `--no-tags` can [make a big difference in some cases](https://gitlab.com/gitlab-com/gl-infra/scalability/-/issues/746).
|
||||
If your CI builds do not depend on Git tags it is worth trying.
|
||||
|
||||
See the [`GIT_FETCH_EXTRA_FLAGS` documentation](../runners/configure_runners.md#git-fetch-extra-flags)
|
||||
|
|
|
|||
|
|
@ -28,8 +28,8 @@ If you are using a self-managed instance of GitLab:
|
|||
going to your project's **Settings > CI/CD**, expanding the **Runners** section,
|
||||
and clicking **Show runner installation instructions**.
|
||||
These instructions are also available [in the documentation](https://docs.gitlab.com/runner/install/index.html).
|
||||
- The administrator can also configure a maximum number of shared runner [CI/CD minutes for
|
||||
each group](../pipelines/cicd_minutes.md#set-the-quota-of-cicd-minutes-for-a-specific-namespace).
|
||||
- The administrator can also configure a maximum number of shared runner
|
||||
[CI/CD minutes for each group](../pipelines/cicd_minutes.md#set-the-quota-of-cicd-minutes-for-a-specific-namespace).
|
||||
|
||||
If you are using GitLab.com:
|
||||
|
||||
|
|
|
|||
|
|
@ -126,8 +126,7 @@ test:
|
|||
|
||||
## Limitations and known issues
|
||||
|
||||
- All the limitations mentioned in our [beta
|
||||
definition](../../../policy/alpha-beta-support.md#beta-features).
|
||||
- All the limitations mentioned in our [beta definition](../../../policy/alpha-beta-support.md#beta-features).
|
||||
- The average provisioning time for a new Windows VM is 5 minutes.
|
||||
This means that you may notice slower build start times
|
||||
on the Windows runner fleet during the beta. In a future
|
||||
|
|
|
|||
|
|
@ -246,8 +246,7 @@ end
|
|||
NOTE:
|
||||
For specifics on implementation, see [Pagination implementation](#pagination-implementation).
|
||||
|
||||
GraphQL uses [cursor based
|
||||
pagination](https://graphql.org/learn/pagination/#pagination-and-edges)
|
||||
GraphQL uses [cursor based pagination](https://graphql.org/learn/pagination/#pagination-and-edges)
|
||||
to expose collections of items. This provides the clients with a lot
|
||||
of flexibility while also allowing the backend to use different
|
||||
pagination models.
|
||||
|
|
@ -1608,8 +1607,8 @@ correctly rendered to the clients.

### Errors in mutations

We encourage following the practice of [errors as
data](https://graphql-ruby.org/mutations/mutation_errors) for mutations, which
We encourage following the practice of
[errors as data](https://graphql-ruby.org/mutations/mutation_errors) for mutations, which
distinguishes errors by who they are relevant to, defined by who can deal with
them.
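
In graphql-ruby this usually means resolving to a payload that carries an `errors` field instead of raising. A minimal sketch, assuming a hypothetical `UpdateThing` mutation (the class names, field names, and `Types::ThingType` are illustrative, not taken from the GitLab codebase):

```ruby
# Hedged sketch of the "errors as data" pattern with graphql-ruby.
module Mutations
  class UpdateThing < BaseMutation
    graphql_name 'UpdateThing'

    argument :id, GraphQL::Types::ID, required: true
    argument :name, GraphQL::Types::String, required: true

    field :thing, Types::ThingType, null: true
    field :errors, [GraphQL::Types::String], null: false

    def resolve(id:, name:)
      thing = GlobalID::Locator.locate(id)

      if thing.update(name: name)
        # Success: return the object and an empty errors array.
        { thing: thing, errors: [] }
      else
        # User-facing validation failures are returned as data,
        # not raised as top-level GraphQL errors.
        { thing: nil, errors: thing.errors.full_messages }
      end
    end
  end
end
```
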
@ -110,15 +110,15 @@ Model.create(foo: params[:foo])

With Grape v1.3+, Array types must be defined with a `coerce_with`
block, or the parameter fails to validate when passed a string from an
API request. See the [Grape upgrading
documentation](https://github.com/ruby-grape/grape/blob/master/UPGRADING.md#ensure-that-array-types-have-explicit-coercions)
API request. See the
[Grape upgrading documentation](https://github.com/ruby-grape/grape/blob/master/UPGRADING.md#ensure-that-array-types-have-explicit-coercions)
for more details.
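
As a rough illustration of the explicit coercion the paragraph above describes (the endpoint and parameter names are invented, not taken from this page):

```ruby
require 'grape'

# Hypothetical Grape API showing an Array type with an explicit coercion.
class ThingsAPI < Grape::API
  params do
    # Accepts "1,2,3" from the query string and coerces it to [1, 2, 3];
    # without `coerce_with`, Grape v1.3+ rejects the string form.
    requires :ids, type: Array[Integer],
                   coerce_with: ->(val) { val.split(',').map(&:to_i) }
  end
  get :things do
    { ids: params[:ids] }
  end
end
```
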
### Automatic coercion of nil inputs

Prior to Grape v1.3.3, Array parameters with `nil` values would
automatically be coerced to an empty Array. However, due to [this pull
request in v1.3.3](https://github.com/ruby-grape/grape/pull/2040), this
automatically be coerced to an empty Array. However, due to
[this pull request in v1.3.3](https://github.com/ruby-grape/grape/pull/2040), this
is no longer the case. For example, suppose you define a PUT `/test`
request that has an optional parameter:
@ -259,8 +259,8 @@ In situations where the same model has multiple entities in the API
discretion with applying this scope. It may be that you optimize for the
most basic entity, with successive entities building upon that scope.

The `with_api_entity_associations` scope also [automatically preloads
data](https://gitlab.com/gitlab-org/gitlab/-/blob/19f74903240e209736c7668132e6a5a735954e7c/app%2Fmodels%2Ftodo.rb#L34)
The `with_api_entity_associations` scope also
[automatically preloads data](https://gitlab.com/gitlab-org/gitlab/-/blob/19f74903240e209736c7668132e6a5a735954e7c/app%2Fmodels%2Ftodo.rb#L34)
for `Todo` _targets_ when returned in the [to-dos API](../api/todos.md).
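
For reference, such a scope is typically just an eager-loading helper on the model; a hedged sketch with invented model and association names:

```ruby
# Hypothetical model-level scope that preloads everything the API entity
# renders, so serializing a page of records does not trigger N+1 queries.
class Widget < ApplicationRecord
  belongs_to :author
  belongs_to :project

  scope :with_api_entity_associations, -> do
    preload(:author, project: [:route, :namespace])
  end
end

# Usage in an API endpoint (illustrative):
#   present Widget.with_api_entity_associations.limit(20), with: Entities::Widget
```
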
For more context and discussion about preloading see
@ -15,8 +15,7 @@ First of all, you have to gather information and decide which are the different
|
|||
limits that are set for the different GitLab tiers. Coordinate with others to [document](../administration/instance_limits.md)
|
||||
and communicate those limits.
|
||||
|
||||
There is a guide about [introducing application
|
||||
limits](https://about.gitlab.com/handbook/product/product-processes/#introducing-application-limits).
|
||||
There is a guide about [introducing application limits](https://about.gitlab.com/handbook/product/product-processes/#introducing-application-limits).
|
||||
|
||||
## Implement plan limits
|
||||
|
||||
|
|
|
|||
|
|
@ -20,8 +20,8 @@ based on your project contents. When Auto DevOps is enabled for a
|
|||
project, the user does not need to explicitly include any pipeline configuration
|
||||
through a [`.gitlab-ci.yml` file](../ci/yaml/index.md).
|
||||
|
||||
In the absence of a `.gitlab-ci.yml` file, the [Auto DevOps CI
|
||||
template](https://gitlab.com/gitlab-org/gitlab/-/blob/master/lib/gitlab/ci/templates/Auto-DevOps.gitlab-ci.yml)
|
||||
In the absence of a `.gitlab-ci.yml` file, the
|
||||
[Auto DevOps CI/CD template](https://gitlab.com/gitlab-org/gitlab/-/blob/master/lib/gitlab/ci/templates/Auto-DevOps.gitlab-ci.yml)
|
||||
is used implicitly to configure the pipeline for the project. This
|
||||
template is a top-level template that includes other sub-templates,
|
||||
which then defines jobs.
|
||||
|
|
|
|||
|
|
@ -13,8 +13,8 @@ pipeline that can be used to trigger a pipeline in the Omnibus GitLab repository
|
|||
that will create:
|
||||
|
||||
- A deb package for Ubuntu 16.04, available as a build artifact, and
|
||||
- A Docker image, which is pushed to the [Omnibus GitLab container
|
||||
registry](https://gitlab.com/gitlab-org/omnibus-gitlab/container_registry)
|
||||
- A Docker image, which is pushed to the
|
||||
[Omnibus GitLab container registry](https://gitlab.com/gitlab-org/omnibus-gitlab/container_registry)
|
||||
(images titled `gitlab-ce` and `gitlab-ee` respectively and image tag is the
|
||||
commit which triggered the pipeline).
|
||||
|
||||
|
|
|
|||
|
|
@ -190,8 +190,8 @@ editor. Once closed, Git presents you with a new text editor instance to edit
|
|||
the commit message of commit B. Add the trailer, then save and quit the editor.
|
||||
If all went well, commit B is now updated.
|
||||
|
||||
For more information about interactive rebases, take a look at [the Git
|
||||
documentation](https://git-scm.com/book/en/v2/Git-Tools-Rewriting-History).
|
||||
For more information about interactive rebases, take a look at
|
||||
[the Git documentation](https://git-scm.com/book/en/v2/Git-Tools-Rewriting-History).
|
||||
|
||||
---
|
||||
|
||||
|
|
|
|||
|
|
@ -487,16 +487,14 @@ Before taking the decision to merge:
|
|||
- If the MR contains both Quality and non-Quality-related changes, the MR should be merged by the relevant maintainer for user-facing changes (backend, frontend, or database) after the Quality related changes are approved by a Software Engineer in Test.
|
||||
|
||||
If a merge request is fundamentally ready, but needs only trivial fixes (such as
|
||||
typos), consider demonstrating a [bias for
|
||||
action](https://about.gitlab.com/handbook/values/#bias-for-action) by making
|
||||
those changes directly without going back to the author. You can do this by
|
||||
typos), consider demonstrating a [bias for action](https://about.gitlab.com/handbook/values/#bias-for-action)
|
||||
by making those changes directly without going back to the author. You can do this by
|
||||
using the [suggest changes](../user/project/merge_requests/reviews/suggestions.md) feature to apply
|
||||
your own suggestions to the merge request. Note that:
|
||||
|
||||
- If the changes are not straightforward, please prefer allowing the author to make the change.
|
||||
- **Before applying suggestions**, edit the merge request to make sure
|
||||
[squash and
|
||||
merge](../user/project/merge_requests/squash_and_merge.md#squash-and-merge)
|
||||
[squash and merge](../user/project/merge_requests/squash_and_merge.md#squash-and-merge)
|
||||
is enabled, otherwise, the pipeline's Danger job fails.
|
||||
- If a merge request does not have squash and merge enabled, and it
|
||||
has more than one commit, then see the note below about rewriting
|
||||
|
|
@ -511,8 +509,7 @@ When ready to merge:
|
|||
WARNING:
|
||||
**If the merge request is from a fork, also check the [additional guidelines for community contributions](#community-contributions).**
|
||||
|
||||
- Consider using the [Squash and
|
||||
merge](../user/project/merge_requests/squash_and_merge.md#squash-and-merge)
|
||||
- Consider using the [Squash and merge](../user/project/merge_requests/squash_and_merge.md#squash-and-merge)
|
||||
feature when the merge request has a lot of commits.
|
||||
When merging code, a maintainer should only use the squash feature if the
|
||||
author has already set this option, or if the merge request clearly contains a
|
||||
|
|
@ -532,8 +529,7 @@ WARNING:
|
|||
enough to `main`.
|
||||
- When you set the MR to "Merge When Pipeline Succeeds", you should take over
|
||||
subsequent revisions for anything that would be spotted after that.
|
||||
- For merge requests that have had [Squash and
|
||||
merge](../user/project/merge_requests/squash_and_merge.md#squash-and-merge) set,
|
||||
- For merge requests that have had [Squash and merge](../user/project/merge_requests/squash_and_merge.md#squash-and-merge) set,
|
||||
the squashed commit's default commit message is taken from the merge request title.
|
||||
You're encouraged to [select a commit with a more informative commit message](../user/project/merge_requests/squash_and_merge.md) before merging.
|
||||
|
||||
|
|
|
|||
|
|
@ -319,8 +319,8 @@ request:
|
|||
We allow engineering time to fix small problems (with or without an
|
||||
issue) that are incremental improvements, such as:
|
||||
|
||||
1. Unprioritized bug fixes (for example, [Banner alerting of project move is
|
||||
showing up everywhere](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/18985))
|
||||
1. Unprioritized bug fixes (for example,
|
||||
[Banner alerting of project move is showing up everywhere](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/18985))
|
||||
1. Documentation improvements
|
||||
1. RuboCop or Code Quality improvements
|
||||
|
||||
|
|
|
|||
|
|
@ -7,9 +7,8 @@ info: To determine the technical writer assigned to the Stage/Group associated w
|
|||
# Multiple Databases
|
||||
|
||||
To allow GitLab to scale further we
|
||||
[decomposed the GitLab application database into multiple
|
||||
databases](https://gitlab.com/groups/gitlab-org/-/epics/6168). The two databases
|
||||
are `main` and `ci`. GitLab supports being run with either one database or two databases.
|
||||
[decomposed the GitLab application database into multiple databases](https://gitlab.com/groups/gitlab-org/-/epics/6168).
|
||||
The two databases are `main` and `ci`. GitLab supports being run with either one database or two databases.
|
||||
On GitLab.com we are using two separate databases.
|
||||
|
||||
## GitLab Schema
|
||||
|
|
|
|||
|
|
@ -269,5 +269,5 @@ For more information, read about the [monthly release process](https://gitlab.co
|
|||
|
||||
## Review Apps for documentation merge requests
|
||||
|
||||
If you are contributing to GitLab docs read how to [create a Review App with each
|
||||
merge request](../index.md#previewing-the-changes-live).
|
||||
If you are contributing to GitLab docs read how to
|
||||
[create a Review App with each merge request](../index.md#previewing-the-changes-live).
|
||||
|
|
|
|||
|
|
@ -281,8 +281,8 @@ There are a few gotchas with it:
|
|||
overriding the method, because we can't know when the overridden method
|
||||
(that is, calling `super` in the overriding method) would want to stop early.
|
||||
In this case, we shouldn't just override it, but update the original method
|
||||
to make it call the other method we want to extend, like a [template method
|
||||
pattern](https://en.wikipedia.org/wiki/Template_method_pattern).
|
||||
to make it call the other method we want to extend, like a
|
||||
[template method pattern](https://en.wikipedia.org/wiki/Template_method_pattern).
|
||||
For example, given this base:
|
||||
|
||||
```ruby
|
||||
|
|
|
|||
|
|
@ -277,8 +277,8 @@ These Advanced Search migrations, like any other GitLab changes, need to support
|
|||
|
||||
Depending on the order of deployment, it's possible that the migration
|
||||
has started or finished and there's still a server running the application code from before the
|
||||
migration. We need to take this into consideration until we can [ensure all Advanced Search migrations
|
||||
start after the deployment has finished](https://gitlab.com/gitlab-org/gitlab/-/issues/321619).
|
||||
migration. We need to take this into consideration until we can
|
||||
[ensure all Advanced Search migrations start after the deployment has finished](https://gitlab.com/gitlab-org/gitlab/-/issues/321619).
|
||||
|
||||
### Reverting a migration
|
||||
|
||||
|
|
@ -317,9 +317,8 @@ safely can.
|
|||
|
||||
We choose to use GitLab major version upgrades as a safe time to remove
|
||||
backwards compatibility for indices that have not been fully migrated. We
|
||||
[document this in our upgrade
|
||||
documentation](../update/index.md#upgrading-to-a-new-major-version). We also
|
||||
choose to replace the migration code with the halted migration
|
||||
[document this in our upgrade documentation](../update/index.md#upgrading-to-a-new-major-version).
|
||||
We also choose to replace the migration code with the halted migration
|
||||
and remove tests so that:
|
||||
|
||||
- We don't need to maintain any code that is called from our Advanced Search
|
||||
|
|
@ -400,16 +399,15 @@ that may contain information to help diagnose performance issues.
|
|||
|
||||
### Performance Bar
|
||||
|
||||
Elasticsearch requests will be displayed in the [`Performance
|
||||
Bar`](../administration/monitoring/performance/performance_bar.md), which can
|
||||
Elasticsearch requests will be displayed in the
|
||||
[`Performance Bar`](../administration/monitoring/performance/performance_bar.md), which can
|
||||
be used both locally in development and on any deployed GitLab instance to
|
||||
diagnose poor search performance. This will show the exact queries being made,
|
||||
which is useful to diagnose why a search might be slow.
|
||||
|
||||
### Correlation ID and `X-Opaque-Id`
|
||||
|
||||
Our [correlation
|
||||
ID](distributed_tracing.md#developer-guidelines-for-working-with-correlation-ids)
|
||||
Our [correlation ID](distributed_tracing.md#developer-guidelines-for-working-with-correlation-ids)
|
||||
is forwarded by all requests from Rails to Elasticsearch as the
|
||||
[`X-Opaque-Id`](https://www.elastic.co/guide/en/elasticsearch/reference/current/tasks.html#_identifying_running_tasks)
|
||||
header which allows us to track any
|
||||
|
|
@ -497,8 +495,8 @@ theoretically be used to figure out what needs to be replayed are:
|
|||
These updates can be replayed by triggering another
|
||||
`ElasticDeleteProjectWorker`.
|
||||
|
||||
With the above methods and taking regular [Elasticsearch
|
||||
snapshots](https://www.elastic.co/guide/en/elasticsearch/reference/current/snapshot-restore.html)
|
||||
With the above methods and taking regular
|
||||
[Elasticsearch snapshots](https://www.elastic.co/guide/en/elasticsearch/reference/current/snapshot-restore.html)
|
||||
we should be able to recover from different kinds of data loss issues in a
|
||||
relatively short period of time compared to indexing everything from
|
||||
scratch.
|
||||
|
|
|
|||
|
|
@ -160,9 +160,10 @@ and Helm Chart configuration (see [example merge request](https://gitlab.com/git
|
|||
#### Rationale
|
||||
|
||||
This was done because to avoid [thread deadlocks](https://github.com/ruby/net-imap/issues/14), `MailRoom` needs
|
||||
an updated version of the `net-imap` gem. However, this [version of the net-imap cannot be installed by an unprivileged
|
||||
user](https://github.com/ruby/net-imap/issues/14) due to [an error installing the digest
|
||||
gem](https://github.com/ruby/digest/issues/14). [This bug in the Ruby interpreter](https://bugs.ruby-lang.org/issues/17761) was fixed in Ruby
|
||||
an updated version of the `net-imap` gem. However, this
|
||||
[version of the net-imap cannot be installed by an unprivileged user](https://github.com/ruby/net-imap/issues/14) due to
|
||||
[an error installing the digest gem](https://github.com/ruby/digest/issues/14).
|
||||
[This bug in the Ruby interpreter](https://bugs.ruby-lang.org/issues/17761) was fixed in Ruby
|
||||
3.0.2.
|
||||
|
||||
Updating the gem directly in the GitLab Rails `Gemfile` caused a [production incident](https://gitlab.com/gitlab-com/gl-infra/production/-/issues/4053)
|
||||
|
|
|
|||
|
|
@ -729,8 +729,9 @@ In this case, we can either:
|
|||
- Skip passing a cursor.
|
||||
- Pass `null` explicitly to `after`.
|
||||
|
||||
After data is fetched, we can use the `update`-hook as an opportunity [to customize
|
||||
the data that is set in the Vue component property](https://apollo.vuejs.org/api/smart-query.html#options). This allows us to get a hold of the `pageInfo` object among other data.
|
||||
After data is fetched, we can use the `update`-hook as an opportunity
|
||||
[to customize the data that is set in the Vue component property](https://apollo.vuejs.org/api/smart-query.html#options).
|
||||
This allows us to get a hold of the `pageInfo` object among other data.
|
||||
|
||||
In the `result`-hook, we can inspect the `pageInfo` object to see if we need to fetch
|
||||
the next page. Note that we also keep a `requestCount` to ensure that the application
|
||||
|
|
|
|||
|
|
@ -77,9 +77,9 @@ performance.getEntriesByType('mark');
|
|||
performance.getEntriesByType('measure');
|
||||
```
|
||||
|
||||
Using `getEntriesByName()` or `getEntriesByType()` returns an Array of [the PerformanceMeasure
|
||||
objects](https://developer.mozilla.org/en-US/docs/Web/API/PerformanceMeasure) which contain
|
||||
information about the measurement's start time and duration.
|
||||
Using `getEntriesByName()` or `getEntriesByType()` returns an Array of
|
||||
[the PerformanceMeasure objects](https://developer.mozilla.org/en-US/docs/Web/API/PerformanceMeasure)
|
||||
which contain information about the measurement's start time and duration.
|
||||
|
||||
### User Timing API utility
|
||||
|
||||
|
|
|
|||
|
|
@ -364,8 +364,8 @@ export default initialState => ({
|
|||
|
||||
We made the conscious decision to avoid this pattern to improve the ability to
|
||||
discover and search our frontend codebase. The same applies
|
||||
when [providing data to a Vue app](vue.md#providing-data-from-haml-to-javascript). The reasoning for this is described in [this
|
||||
discussion](https://gitlab.com/gitlab-org/frontend/rfcs/-/issues/56#note_302514865):
|
||||
when [providing data to a Vue app](vue.md#providing-data-from-haml-to-javascript). The reasoning for this is described in
|
||||
[this discussion](https://gitlab.com/gitlab-org/frontend/rfcs/-/issues/56#note_302514865):
|
||||
|
||||
> Consider a `someStateKey` is being used in the store state. You _may_ not be
|
||||
> able to grep for it directly if it was provided only by `el.dataset`. Instead,
|
||||
|
|
|
|||
|
|
@ -10,8 +10,7 @@ info: To determine the technical writer assigned to the Stage/Group associated w

Each Sidekiq worker, controller action, or API endpoint
must declare a `feature_category` attribute. This attribute maps each
of these to a [feature
category](https://about.gitlab.com/handbook/product/categories/). This
of these to a [feature category](https://about.gitlab.com/handbook/product/categories/). This
is done for error budgeting, alert routing, and team attribution.

The list of feature categories can be found in the file `config/feature_categories.yml`.
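
For example, a worker might declare its category as follows (a sketch; the class name and category value are illustrative, not taken from this page):

```ruby
# Hypothetical Sidekiq worker declaring its feature category.
class ExampleCleanupWorker
  include ApplicationWorker

  # Used for error budgeting, alert routing, and team attribution.
  feature_category :continuous_integration

  def perform(project_id)
    # ...
  end
end

# Controllers can scope the attribute to specific actions, for example:
#   feature_category :source_code_management, [:show, :index]
```
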
@ -29,8 +28,7 @@ product categories. When this occurs, you can automatically update
|
|||
and generate a new version of the file, which needs to be committed to
|
||||
the repository.
|
||||
|
||||
The [Scalability
|
||||
team](https://about.gitlab.com/handbook/engineering/infrastructure/team/scalability/)
|
||||
The [Scalability team](https://about.gitlab.com/handbook/engineering/infrastructure/team/scalability/)
|
||||
currently maintains the `feature_categories.yml` file. They will automatically be
|
||||
notified on Slack when the file becomes outdated.
|
||||
|
||||
|
|
|
|||
|
|
@ -61,8 +61,7 @@ to a gem, go through these steps:
|
|||
1. Follow the [instructions for new projects](https://about.gitlab.com/handbook/engineering/gitlab-repositories/#creating-a-new-project).
|
||||
1. Follow the instructions for setting up a [CI/CD configuration](https://about.gitlab.com/handbook/engineering/gitlab-repositories/#cicd-configuration).
|
||||
1. Follow the instructions for [publishing a project](https://about.gitlab.com/handbook/engineering/gitlab-repositories/#publishing-a-project).
|
||||
- See [issue
|
||||
#325463](https://gitlab.com/gitlab-org/gitlab/-/issues/325463)
|
||||
- See [issue #325463](https://gitlab.com/gitlab-org/gitlab/-/issues/325463)
|
||||
for an example.
|
||||
- In some cases we may want to move a gem to its own namespace. Some
|
||||
examples might be that it will naturally have more than one project
|
||||
|
|
@ -74,8 +73,8 @@ to a gem, go through these steps:
|
|||
apply if someone who currently works at GitLab wants to maintain
|
||||
the gem beyond their time working at GitLab.
|
||||
|
||||
When publishing a gem to RubyGems.org, also note the section on [gem
|
||||
owners](https://about.gitlab.com/handbook/developer-onboarding/#ruby-gems)
|
||||
When publishing a gem to RubyGems.org, also note the section on
|
||||
[gem owners](https://about.gitlab.com/handbook/developer-onboarding/#ruby-gems)
|
||||
in the handbook.
|
||||
|
||||
## Upgrade Rails
|
||||
|
|
@ -113,8 +112,7 @@ gem 'thor', '>= 1.1.1'
```

Here we're using the operator `>=` (greater than or equal to) rather
than `~>` ([pessimistic
operator](https://thoughtbot.com/blog/rubys-pessimistic-operator))
than `~>` ([pessimistic operator](https://thoughtbot.com/blog/rubys-pessimistic-operator))
making it possible to upgrade `license_finder` or any other gem to a
version that depends on `thor 1.2`.
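
To make the contrast concrete, here is a small, hypothetical Gemfile snippet showing the two operators side by side (the gem names only mirror the example above):

```ruby
# Gemfile (illustrative comparison -- you would keep only one of these lines)

# Pessimistic pin: allows thor 1.1.x only, which blocks an upgrade of a
# dependent gem that needs thor 1.2.
# gem 'thor', '~> 1.1.1'

# Lower bound only: any thor >= 1.1.1 satisfies the requirement, so
# `license_finder` or any other gem can move to a version needing thor 1.2.
gem 'thor', '>= 1.1.1'
```
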
@ -134,15 +132,14 @@ that also relied on `thor` but had its version pinned to a vulnerable
|
|||
one. These changes are easy to miss in the `Gemfile.lock`. Pinning the
|
||||
version would result in a conflict that would need to be solved.
|
||||
|
||||
To avoid upgrading indirect dependencies, we can use [`bundle update
|
||||
--conservative`](https://bundler.io/man/bundle-update.1.html#OPTIONS).
|
||||
To avoid upgrading indirect dependencies, we can use
|
||||
[`bundle update --conservative`](https://bundler.io/man/bundle-update.1.html#OPTIONS).
|
||||
|
||||
When submitting a merge request including a dependency update,
|
||||
include a link to the Gem diff between the 2 versions in the merge request
|
||||
description. You can find this link on `rubygems.org`, select
|
||||
**Review Changes** to generate a comparison
|
||||
between the versions on `diffend.io`. For example, this is the gem
|
||||
diff for [`thor` 1.0.0 vs
|
||||
1.0.1](https://my.diffend.io/gems/thor/1.0.0/1.0.1). Use the
|
||||
diff for [`thor` 1.0.0 vs 1.0.1](https://my.diffend.io/gems/thor/1.0.0/1.0.1). Use the
|
||||
links directly generated from RubyGems, since the links from GitLab or other code-hosting
|
||||
platforms might not reflect the code that's actually published.
|
||||
|
|
|
|||
|
|
@ -128,8 +128,8 @@ Secondary-->>Client: admin/geo/replication/projects logged in response (session
|
|||
|
||||
## Git pull
|
||||
|
||||
For historical reasons, the `push_from_secondary` path is used to forward a Git pull. There is [an issue proposing to
|
||||
rename this route](https://gitlab.com/gitlab-org/gitlab/-/issues/292690) to avoid confusion.
|
||||
For historical reasons, the `push_from_secondary` path is used to forward a Git pull. There is
|
||||
[an issue proposing to rename this route](https://gitlab.com/gitlab-org/gitlab/-/issues/292690) to avoid confusion.
|
||||
|
||||
### Git pull over HTTP(s)
|
||||
|
||||
|
|
|
|||
|
|
@ -18,8 +18,8 @@ GitLab implements Git object deduplication.
|
|||
|
||||
### Understanding Git alternates
|
||||
|
||||
At the Git level, we achieve deduplication by using [Git
|
||||
alternates](https://git-scm.com/docs/gitrepository-layout#gitrepository-layout-objects).
|
||||
At the Git level, we achieve deduplication by using
|
||||
[Git alternates](https://git-scm.com/docs/gitrepository-layout#gitrepository-layout-objects).
|
||||
Git alternates is a mechanism that lets a repository borrow objects from
|
||||
another repository on the same machine.
|
||||
|
||||
|
|
@ -44,8 +44,8 @@ reliable decide if an object is no longer needed.
|
|||
|
||||
### Git alternates in GitLab: pool repositories
|
||||
|
||||
GitLab organizes this object borrowing by [creating special **pool
|
||||
repositories**](../administration/repository_storage_types.md) which are hidden from the user. We then use Git
|
||||
GitLab organizes this object borrowing by [creating special **pool repositories**](../administration/repository_storage_types.md)
|
||||
which are hidden from the user. We then use Git
|
||||
alternates to let a collection of project repositories borrow from a
|
||||
single pool repository. We call such a collection of project
|
||||
repositories a pool. Pools form star-shaped networks of repositories
|
||||
|
|
@ -99,9 +99,8 @@ are as follows:
|
|||
|
||||
### Assumptions
|
||||
|
||||
- All repositories in a pool must use [hashed
|
||||
storage](../administration/repository_storage_types.md). This is so
|
||||
that we don't have to ever worry about updating paths in
|
||||
- All repositories in a pool must use [hashed storage](../administration/repository_storage_types.md).
|
||||
This is so that we don't have to ever worry about updating paths in
|
||||
`object/info/alternates` files.
|
||||
- All repositories in a pool must be on the same Gitaly storage shard.
|
||||
The Git alternates mechanism relies on direct disk access across
|
||||
|
|
|
|||
|
|
@ -79,8 +79,7 @@ During RSpec tests, the Gitaly instance writes logs to `gitlab/log/gitaly-test.l
|
|||
While Gitaly can handle all Git access, many of GitLab customers still
|
||||
run Gitaly atop NFS. The legacy Rugged implementation for Git calls may
|
||||
be faster than the Gitaly RPC due to N+1 Gitaly calls and other
|
||||
reasons. See [the
|
||||
issue](https://gitlab.com/gitlab-org/gitlab-foss/-/issues/57317) for more
|
||||
reasons. See [the issue](https://gitlab.com/gitlab-org/gitlab-foss/-/issues/57317) for more
|
||||
details.
|
||||
|
||||
Until GitLab has eliminated most of these inefficiencies or the use of
|
||||
|
|
|
|||
|
|
@ -71,8 +71,8 @@ This worker imports all pull requests. For every pull request a job for the
|
|||
|
||||
### 5. Stage::ImportPullRequestsMergedByWorker
|
||||
|
||||
This worker imports the pull requests' _merged-by_ user information. The [_List pull
|
||||
requests_](https://docs.github.com/en/rest/pulls#list-pull-requests)
|
||||
This worker imports the pull requests' _merged-by_ user information. The
|
||||
[_List pull requests_](https://docs.github.com/en/rest/pulls#list-pull-requests)
|
||||
API doesn't provide this information. Therefore, this stage must fetch each merged pull request
|
||||
individually to import this information. A
|
||||
`Gitlab::GithubImport::ImportPullRequestMergedByWorker` job is scheduled for each fetched pull
|
||||
|
|
|
|||
|
|
@ -44,9 +44,9 @@ end with a timestamp and the first 12 characters of the commit identifier:
|
|||
|
||||
If a VCS tag matches one of these patterns, it is ignored.
|
||||
|
||||
For a complete understanding of Go modules and versioning, see [this series of
|
||||
blog posts](https://go.dev/blog/using-go-modules) on the official Go
|
||||
website.
|
||||
For a complete understanding of Go modules and versioning, see
|
||||
[this series of blog posts](https://go.dev/blog/using-go-modules)
|
||||
on the official Go website.
|
||||
|
||||
## 'Module' vs 'Package'
|
||||
|
||||
|
|
|
|||
|
|
@ -145,18 +145,16 @@ Go GitLab linter plugins are maintained in the [`gitlab-org/language-tools/go/li
|
|||
## Dependencies
|
||||
|
||||
Dependencies should be kept to the minimum. The introduction of a new
|
||||
dependency should be argued in the merge request, as per our [Approval
|
||||
Guidelines](../code_review.md#approval-guidelines). Both [License
|
||||
Scanning](../../user/compliance/license_compliance/index.md)
|
||||
**(ULTIMATE)** and [Dependency
|
||||
Scanning](../../user/application_security/dependency_scanning/index.md)
|
||||
**(ULTIMATE)** should be activated on all projects to ensure new dependencies
|
||||
dependency should be argued in the merge request, as per our [Approval Guidelines](../code_review.md#approval-guidelines).
|
||||
Both [License Scanning](../../user/compliance/license_compliance/index.md)
|
||||
and [Dependency Scanning](../../user/application_security/dependency_scanning/index.md)
|
||||
should be activated on all projects to ensure new dependencies
|
||||
security status and license compatibility.
|
||||
|
||||
### Modules
|
||||
|
||||
In Go 1.11 and later, a standard dependency system is available behind the name [Go
|
||||
Modules](https://github.com/golang/go/wiki/Modules). It provides a way to
|
||||
In Go 1.11 and later, a standard dependency system is available behind the name
|
||||
[Go Modules](https://github.com/golang/go/wiki/Modules). It provides a way to
|
||||
define and lock dependencies for reproducible builds. It should be used
|
||||
whenever possible.
|
||||
|
||||
|
|
@ -168,8 +166,8 @@ projects, and makes merge requests easier to review.
|
|||
In some cases, such as building a Go project for it to act as a dependency of a
|
||||
CI run for another project, removing the `vendor/` directory means the code must
|
||||
be downloaded repeatedly, which can lead to intermittent problems due to rate
|
||||
limiting or network failures. In these circumstances, you should [cache the
|
||||
downloaded code between](../../ci/caching/index.md#cache-go-dependencies).
|
||||
limiting or network failures. In these circumstances, you should
|
||||
[cache the downloaded code between](../../ci/caching/index.md#cache-go-dependencies).
|
||||
|
||||
There was a
|
||||
[bug on modules checksums](https://github.com/golang/go/issues/29278) in Go versions earlier than v1.11.4, so make
|
||||
|
|
@ -330,18 +328,15 @@ A few things to keep in mind when adding context:
|
|||
### References for working with errors
|
||||
|
||||
- [Go 1.13 errors](https://go.dev/blog/go1.13-errors).
|
||||
- [Programing with
|
||||
errors](https://peter.bourgon.org/blog/2019/09/11/programming-with-errors.html).
|
||||
- [Don't just check errors, handle them
|
||||
gracefully](https://dave.cheney.net/2016/04/27/dont-just-check-errors-handle-them-gracefully).
|
||||
- [Programing with errors](https://peter.bourgon.org/blog/2019/09/11/programming-with-errors.html).
|
||||
- [Don't just check errors, handle them gracefully](https://dave.cheney.net/2016/04/27/dont-just-check-errors-handle-them-gracefully).
|
||||
|
||||
## CLIs
|
||||
|
||||
Every Go program is launched from the command line.
|
||||
[`cli`](https://github.com/urfave/cli) is a convenient package to create command
|
||||
line apps. It should be used whether the project is a daemon or a simple CLI
|
||||
tool. Flags can be mapped to [environment
|
||||
variables](https://github.com/urfave/cli#values-from-the-environment) directly,
|
||||
tool. Flags can be mapped to [environment variables](https://github.com/urfave/cli#values-from-the-environment) directly,
|
||||
which documents and centralizes at the same time all the possible command line
|
||||
interactions with the program. Don't use `os.GetEnv`, it hides variables deep
|
||||
in the code.
|
||||
|
|
@ -362,8 +357,7 @@ Every binary ideally must have structured (JSON) logging in place as it helps
|
|||
with searching and filtering the logs. At GitLab we use structured logging in
|
||||
JSON format, as all our infrastructure assumes that. When using
|
||||
[Logrus](https://github.com/sirupsen/logrus) you can turn on structured
|
||||
logging simply by using the build in [JSON
|
||||
formatter](https://github.com/sirupsen/logrus#formatters). This follows the
|
||||
logging simply by using the built-in [JSON formatter](https://github.com/sirupsen/logrus#formatters). This follows the
|
||||
same logging type we use in our [Ruby applications](../logging.md#use-structured-json-logging).
|
||||
|
||||
#### How to use Logrus
|
||||
|
|
@ -414,8 +408,7 @@ should be used in functions that can block and passed as the first parameter.
|
|||
Every project should have a `Dockerfile` at the root of their repository, to
|
||||
build and run the project. Since Go program are static binaries, they should
|
||||
not require any external dependency, and shells in the final image are useless.
|
||||
We encourage [Multistage
|
||||
builds](https://docs.docker.com/develop/develop-images/multistage-build/):
|
||||
We encourage [Multistage builds](https://docs.docker.com/develop/develop-images/multistage-build/):
|
||||
|
||||
- They let the user build the project with the right Go version and
|
||||
dependencies.
|
||||
|
|
|
|||
|
|
@ -200,8 +200,7 @@ refresh_service.execute(oldrev, newrev, ref)
|
|||
|
||||
See ["Why is it bad style to `rescue Exception => e` in Ruby?"](https://stackoverflow.com/questions/10048173/why-is-it-bad-style-to-rescue-exception-e-in-ruby).
|
||||
|
||||
This rule is [enforced automatically by
|
||||
RuboCop](https://gitlab.com/gitlab-org/gitlab-foss/blob/8-4-stable/.rubocop.yml#L911-914)._
|
||||
This rule is [enforced automatically by RuboCop](https://gitlab.com/gitlab-org/gitlab-foss/blob/8-4-stable/.rubocop.yml#L911-914)._
|
||||
|
||||
## Do not use inline JavaScript in views
|
||||
|
||||
|
|
|
|||
|
|
@ -509,8 +509,8 @@ which is shared by some of the analyzers that GitLab maintains. You can [contrib
|
|||
new generic identifiers to if needed. Analyzers may also produce vendor-specific or product-specific
|
||||
identifiers, which don't belong in the [common library](https://gitlab.com/gitlab-org/security-products/analyzers/common).
|
||||
|
||||
The first item of the `identifiers` array is called the [primary
|
||||
identifier](../../user/application_security/terminology/#primary-identifier).
|
||||
The first item of the `identifiers` array is called the
|
||||
[primary identifier](../../user/application_security/terminology/index.md#primary-identifier).
|
||||
The primary identifier is particularly important, because it is used to
|
||||
[track vulnerabilities](#tracking-and-merging-vulnerabilities) as new commits are pushed to the repository.
|
||||
Identifiers are also used to [merge duplicate vulnerabilities](#tracking-and-merging-vulnerabilities)
|
||||
|
|
|
|||
|
|
@ -394,8 +394,8 @@ query for every mention of `@alice`.
Caching data per transaction can be done using
[RequestStore](https://github.com/steveklabnik/request_store) (use
`Gitlab::SafeRequestStore` to avoid having to remember to check
`RequestStore.active?`). Caching data in Redis can be done using [Rails' caching
system](https://guides.rubyonrails.org/caching_with_rails.html).
`RequestStore.active?`). Caching data in Redis can be done using
[Rails' caching system](https://guides.rubyonrails.org/caching_with_rails.html).
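
A rough sketch of the two approaches (the method names and cache keys are invented for illustration):

```ruby
# Per-request memoization: Gitlab::SafeRequestStore degrades gracefully
# when no request store is active (for example, in a console or Sidekiq).
def participant_usernames(noteable)
  Gitlab::SafeRequestStore[:"participant_usernames_#{noteable.id}"] ||=
    noteable.participants.map(&:username)
end

# Cross-request caching in Redis through Rails' cache store.
def participant_usernames_cached(noteable)
  Rails.cache.fetch(['participant_usernames', noteable.id], expires_in: 10.minutes) do
    noteable.participants.map(&:username)
  end
end
```
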
## Pagination
@ -414,8 +414,7 @@ The main styles of pagination are:
|
|||
|
||||
The ultimately scalable solution for pagination is to use Keyset-based pagination.
|
||||
However, we don't have support for that at GitLab at that moment. You
|
||||
can follow the progress looking at [API: Keyset Pagination
|
||||
](https://gitlab.com/groups/gitlab-org/-/epics/2039).
|
||||
can follow the progress looking at [API: Keyset Pagination](https://gitlab.com/groups/gitlab-org/-/epics/2039).
|
||||
|
||||
Take into consideration the following when choosing a pagination strategy:
|
||||
|
||||
|
|
|
|||
|
|
@ -1197,8 +1197,8 @@ If using a model in the migrations, you should first
[clear the column cache](https://api.rubyonrails.org/classes/ActiveRecord/ModelSchema/ClassMethods.html#method-i-reset_column_information)
using `reset_column_information`.

If using a model that leverages single table inheritance (STI), there are [special
considerations](database/single_table_inheritance.md#in-migrations).
If using a model that leverages single table inheritance (STI), there are
[special considerations](database/single_table_inheritance.md#in-migrations).

This avoids problems where a column that you are using was altered and cached
in a previous migration.
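
A minimal sketch of the pattern (table, column, and class names are invented): define a migration-local model, reset its column cache, then use it.

```ruby
# Hypothetical migration using an isolated, migration-local model.
class BackfillWidgetCounts < ActiveRecord::Migration[6.1]
  # A throwaway model scoped to this migration, so later changes to the
  # application model cannot break it.
  class Widget < ActiveRecord::Base
    self.table_name = 'widgets'
  end

  def up
    # Drop any column information cached by an earlier migration
    # in the same run before querying the table.
    Widget.reset_column_information

    Widget.where(count: nil).in_batches.update_all(count: 0)
  end

  def down
    # No-op: the backfill is not reversible.
  end
end
```
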
@ -35,12 +35,10 @@ one of the variables. Everything could touch anything.
|
|||
People are saying multiple inheritance is bad. Mixing multiple modules with
|
||||
multiple instance variables scattering everywhere suffer from the same issue.
|
||||
The same applies to `ActiveSupport::Concern`. See:
|
||||
[Consider replacing concerns with dedicated classes & composition](
|
||||
https://gitlab.com/gitlab-org/gitlab/-/issues/16270)
|
||||
[Consider replacing concerns with dedicated classes & composition](https://gitlab.com/gitlab-org/gitlab/-/issues/16270)
|
||||
|
||||
There's also a similar idea:
|
||||
[Use decorators and interface segregation to solve overgrowing models problem](
|
||||
https://gitlab.com/gitlab-org/gitlab/-/issues/14235)
|
||||
[Use decorators and interface segregation to solve overgrowing models problem](https://gitlab.com/gitlab-org/gitlab/-/issues/14235)
|
||||
|
||||
Note that `included` doesn't solve the whole issue. They define the
|
||||
dependencies, but they still allow each modules to talk implicitly via the
|
||||
|
|
|
|||
|
|
@ -927,8 +927,7 @@ SOME_CONSTANT = 'bar'
|
|||
|
||||
You might want millions of project rows in your local database, for example,
|
||||
in order to compare relative query performance, or to reproduce a bug. You could
|
||||
do this by hand with SQL commands or using [Mass Inserting Rails
|
||||
Models](mass_insert.md) functionality.
|
||||
do this by hand with SQL commands or using [Mass Inserting Rails Models](mass_insert.md) functionality.
|
||||
|
||||
Assuming you are working with ActiveRecord models, you might also find these links helpful:
|
||||
|
||||
|
|
|
|||
|
|
@ -74,8 +74,7 @@ Do not use boolean operators such as `&&` and `||` within the rule DSL,
|
|||
as conditions within rule blocks are objects, not booleans. The same
|
||||
applies for ternary operators (`condition ? ... : ...`), and `if`
|
||||
blocks. These operators cannot be overridden, and are hence banned via a
|
||||
[custom
|
||||
cop](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/49771).
|
||||
[custom cop](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/49771).
|
||||
|
||||
## Scores, Order, Performance
|
||||
|
||||
|
|
|
|||
|
|
@ -27,8 +27,8 @@ We strive to run GitLab using the latest Rails releases to benefit from performa
|
|||
1. Run `yarn patch-package @rails/ujs` after updating this to ensure our local patch file version matches.
|
||||
1. Create an MR with the `pipeline:run-all-rspec` label and see if pipeline breaks.
|
||||
1. To resolve and debug spec failures use `git bisect` against the rails repository. See the [debugging section](#git-bisect-against-rails) below.
|
||||
1. Include links to the Gem diffs between the two versions in the merge request description. For example, this is the gem diff for [`activesupport` 6.1.3.2 to
|
||||
6.1.4.1](https://my.diffend.io/gems/activerecord/6.1.3.2/6.1.4.1).
|
||||
1. Include links to the Gem diffs between the two versions in the merge request description. For example, this is the gem diff for
|
||||
[`activesupport` 6.1.3.2 to 6.1.4.1](https://my.diffend.io/gems/activerecord/6.1.3.2/6.1.4.1).
|
||||
|
||||
### Prepare an MR for Gitaly
|
||||
|
||||
|
|
|
|||
|
|
@ -265,7 +265,7 @@ instances to cope without this functional partition.
|
|||
If we decide to keep the migration code:
|
||||
|
||||
- We should document the migration steps.
|
||||
- If we used a feature flag, we should ensure it's an [ops type feature
|
||||
flag](../feature_flags/index.md#ops-type), as these are long-lived flags.
|
||||
- If we used a feature flag, we should ensure it's an
|
||||
[ops type feature flag](../feature_flags/index.md#ops-type), as these are long-lived flags.
|
||||
|
||||
Otherwise, we can remove the flags and conclude the project.
|
||||
|
|
|
|||
|
|
@ -6,8 +6,8 @@ info: To determine the technical writer assigned to the Stage/Group associated w
|
|||
|
||||
# Routing
|
||||
|
||||
The GitLab backend is written primarily with Rails so it uses [Rails
|
||||
routing](https://guides.rubyonrails.org/routing.html). Beside Rails best
|
||||
The GitLab backend is written primarily with Rails so it uses
|
||||
[Rails routing](https://guides.rubyonrails.org/routing.html). Beside Rails best
|
||||
practices, there are few rules unique to the GitLab application. To
|
||||
support subgroups, GitLab project and group routes use the wildcard
|
||||
character to match project and group routes. For example, we might have
|
||||
|
|
|
|||
|
|
@ -35,8 +35,8 @@ The application has a tight coupling to the database schema. When the
|
|||
application starts, Rails queries the database schema, caching the tables and
|
||||
column types for the data requested. Because of this schema cache, dropping a
|
||||
column or table while the application is running can produce 500 errors to the
|
||||
user. This is why we have a [process for dropping columns and other
|
||||
no-downtime changes](database/avoiding_downtime_in_migrations.md).
|
||||
user. This is why we have a
|
||||
[process for dropping columns and other no-downtime changes](database/avoiding_downtime_in_migrations.md).
|
||||
|
||||
#### Multi-tenancy
|
||||
|
||||
|
|
@ -61,11 +61,11 @@ There are two ways to deal with this:
|
|||
- Sharding. Distribute data across multiple databases.
|
||||
|
||||
Partitioning is a built-in PostgreSQL feature and requires minimal changes
|
||||
in the application. However, it [requires PostgreSQL
|
||||
11](https://www.2ndquadrant.com/en/blog/partitioning-evolution-postgresql-11/).
|
||||
in the application. However, it
|
||||
[requires PostgreSQL 11](https://www.2ndquadrant.com/en/blog/partitioning-evolution-postgresql-11/).
|
||||
|
||||
For example, a natural way to partition is to [partition tables by
|
||||
dates](https://gitlab.com/groups/gitlab-org/-/epics/2023). For example,
|
||||
For example, a natural way to partition is to
|
||||
[partition tables by dates](https://gitlab.com/groups/gitlab-org/-/epics/2023). For example,
|
||||
the `events` and `audit_events` table are natural candidates for this
|
||||
kind of partitioning.
|
||||
|
||||
|
|
@ -77,10 +77,10 @@ to abstract data access into API calls that abstract the database from
|
|||
the application, but this is a significant amount of work.
|
||||
|
||||
There are solutions that may help abstract the sharding to some extent
|
||||
from the application. For example, we want to look at [Citus
|
||||
Data](https://www.citusdata.com/product/community) closely. Citus Data
|
||||
provides a Rails plugin that adds a [tenant ID to ActiveRecord
|
||||
models](https://www.citusdata.com/blog/2017/01/05/easily-scale-out-multi-tenant-apps/).
|
||||
from the application. For example, we want to look at
|
||||
[Citus Data](https://www.citusdata.com/product/community) closely. Citus Data
|
||||
provides a Rails plugin that adds a
|
||||
[tenant ID to ActiveRecord models](https://www.citusdata.com/blog/2017/01/05/easily-scale-out-multi-tenant-apps/).
|
||||
|
||||
Sharding can also be done based on feature verticals. This is the
|
||||
microservice approach to sharding, where each service represents a
|
||||
|
|
@ -97,12 +97,12 @@ systems.
|
|||
|
||||
#### Database size
|
||||
|
||||
A recent [database checkup shows a breakdown of the table sizes on
|
||||
GitLab.com](https://gitlab.com/gitlab-com/gl-infra/reliability/-/issues/8022#master-1022016101-8).
|
||||
A recent
|
||||
[database checkup shows a breakdown of the table sizes on GitLab.com](https://gitlab.com/gitlab-com/gl-infra/reliability/-/issues/8022#master-1022016101-8).
|
||||
Since `merge_request_diff_files` contains over 1 TB of data, we want to
|
||||
reduce/eliminate this table first. GitLab has support for [storing diffs in
|
||||
object storage](../administration/merge_request_diffs.md), which we [want to do on
|
||||
GitLab.com](https://gitlab.com/gitlab-com/gl-infra/reliability/-/issues/7356).
|
||||
reduce/eliminate this table first. GitLab has support for
|
||||
[storing diffs in object storage](../administration/merge_request_diffs.md), which we
|
||||
[want to do on GitLab.com](https://gitlab.com/gitlab-com/gl-infra/reliability/-/issues/7356).
|
||||
|
||||
#### High availability
|
||||
|
||||
|
|
@ -128,8 +128,7 @@ some actions that aren't traditionally available in standard load balancers. For
|
|||
example, the application considers a replica only if its replication lag is low
|
||||
(for example, WAL data behind by less than 100 MB).
|
||||
|
||||
More [details are in a blog
|
||||
post](https://about.gitlab.com/blog/2017/10/02/scaling-the-gitlab-database/).
|
||||
More [details are in a blog post](https://about.gitlab.com/blog/2017/10/02/scaling-the-gitlab-database/).
|
||||
|
||||
### PgBouncer
|
||||
|
||||
|
|
@ -150,8 +149,8 @@ limitation:
|
|||
- Use a multi-threaded connection pooler (for example,
|
||||
[Odyssey](https://gitlab.com/gitlab-com/gl-infra/reliability/-/issues/7776).
|
||||
|
||||
On some Linux systems, it's possible to run [multiple PgBouncer instances on
|
||||
the same port](https://gitlab.com/gitlab-org/omnibus-gitlab/-/issues/4796).
|
||||
On some Linux systems, it's possible to run
|
||||
[multiple PgBouncer instances on the same port](https://gitlab.com/gitlab-org/omnibus-gitlab/-/issues/4796).
|
||||
|
||||
On GitLab.com, we run multiple PgBouncer instances on different ports to
|
||||
avoid saturating a single core.
|
||||
|
|
|
|||
|
|
@ -18,15 +18,14 @@ several possible situations:
|
|||
|
||||
## Adding new workers
|
||||
|
||||
On GitLab.com, we [do not currently have a Sidekiq deployment in the
|
||||
canary stage](https://gitlab.com/gitlab-org/gitlab/-/issues/19239). This
|
||||
means that a new worker that can be scheduled from an HTTP endpoint may
|
||||
On GitLab.com, we
|
||||
[do not currently have a Sidekiq deployment in the canary stage](https://gitlab.com/gitlab-org/gitlab/-/issues/19239).
|
||||
This means that a new worker that can be scheduled from an HTTP endpoint may
|
||||
be scheduled from canary but not run on Sidekiq until the full
|
||||
production deployment is complete. This can be several hours later than
|
||||
scheduling the job. For some workers, this will not be a problem. For
|
||||
others - particularly [latency-sensitive
|
||||
jobs](worker_attributes.md#latency-sensitive-jobs) - this will result in a poor user
|
||||
experience.
|
||||
others - particularly [latency-sensitive jobs](worker_attributes.md#latency-sensitive-jobs) -
|
||||
this will result in a poor user experience.
|
||||
|
||||
This only applies to new worker classes when they are first introduced.
|
||||
As we recommend [using feature flags](../feature_flags/) as a general
|
||||
|
|
|
|||
|
|
@ -78,9 +78,8 @@ GitLab supports two deduplication strategies:
- `until_executing`, which is the default strategy
- `until_executed`

More [deduplication strategies have been
suggested](https://gitlab.com/gitlab-com/gl-infra/scalability/-/issues/195). If
you are implementing a worker that could benefit from a different
More [deduplication strategies have been suggested](https://gitlab.com/gitlab-com/gl-infra/scalability/-/issues/195).
If you are implementing a worker that could benefit from a different
strategy, please comment in the issue.
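
A worker typically opts into the non-default strategy with the `deduplicate` class method; the sketch below is illustrative rather than copied from this page:

```ruby
# Hypothetical idempotent worker using the non-default strategy.
class ExpensiveSyncWorker
  include ApplicationWorker

  idempotent!
  # Keep deduplicating until the job has actually finished executing,
  # also considering jobs scheduled for the future.
  deduplicate :until_executed, including_scheduled: true

  def perform(project_id)
    # ...
  end
end
```
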
#### Until Executing
|
||||
|
|
|
|||
|
|
@ -31,8 +31,7 @@ the [routing rules](../../administration/operations/extra_sidekiq_routing.md#que

## Retries

Sidekiq defaults to using [25
retries](https://github.com/mperham/sidekiq/wiki/Error-Handling#automatic-job-retry),
Sidekiq defaults to using [25 retries](https://github.com/mperham/sidekiq/wiki/Error-Handling#automatic-job-retry),
with back-off between each retry. 25 retries means that the last retry
would happen around three weeks after the first attempt (assuming all 24
prior retries failed).
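
When three weeks of retries make no sense for a job, the count can be overridden per worker. A hedged sketch (the worker name and retry value are illustrative):

```ruby
# Hypothetical worker that fails fast instead of retrying for weeks.
class NotifyExternalServiceWorker
  include ApplicationWorker

  # Give up after a handful of attempts; the payload is stale after that.
  sidekiq_options retry: 3

  def perform(payload)
    # ...
  end
end
```
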
@ -179,8 +178,7 @@ in the default execution mode - using
|
|||
[`sidekiq-cluster`](../../administration/operations/extra_sidekiq_processes.md)
|
||||
does not account for weights.
|
||||
|
||||
As we are [moving towards using `sidekiq-cluster` in
|
||||
Free](https://gitlab.com/gitlab-org/gitlab/-/issues/34396), newly-added
|
||||
As we are [moving towards using `sidekiq-cluster` in Free](https://gitlab.com/gitlab-org/gitlab/-/issues/34396), newly-added
|
||||
workers do not need to have weights specified. They can use the
|
||||
default weight, which is 1.
|
||||
|
||||
|
|
|
|||
|
|
@ -11,8 +11,7 @@ info: To determine the technical writer assigned to the Stage/Group associated w
> [Introduced](https://gitlab.com/gitlab-com/gl-infra/scalability/-/issues/9) in GitLab 12.8.

To have some more information about workers in the logs, we add
[metadata to the jobs in the form of an
`ApplicationContext`](../logging.md#logging-context-metadata-through-rails-or-grape-requests).
[metadata to the jobs in the form of an `ApplicationContext`](../logging.md#logging-context-metadata-through-rails-or-grape-requests).
In most cases, when scheduling a job from a request, this context is already
deduced from the request and added to the scheduled job.
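
When a job is scheduled outside of a request (for example, from a Rake task or another worker), the context can be provided explicitly. A hedged sketch, assuming `project`, `user`, and the worker class are already defined:

```ruby
# Hypothetical example of attaching context when no HTTP request is involved.
Gitlab::ApplicationContext.with_context(project: project, user: user) do
  ExpensiveSyncWorker.perform_async(project.id)
end
```
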
@ -86,13 +86,11 @@ but that always reduces work.
|
|||
To do this, we want to calculate the expected increase in total execution time
|
||||
and RPS (throughput) for the new shard. We can get these values from:
|
||||
|
||||
- The [Queue Detail
|
||||
dashboard](https://dashboards.gitlab.net/d/sidekiq-queue-detail/sidekiq-queue-detail)
|
||||
- The [Queue Detail dashboard](https://dashboards.gitlab.net/d/sidekiq-queue-detail/sidekiq-queue-detail)
|
||||
has values for the queue itself. For a new queue, we can look for
|
||||
queues that have similar patterns or are scheduled in similar
|
||||
circumstances.
|
||||
- The [Shard Detail
|
||||
dashboard](https://dashboards.gitlab.net/d/sidekiq-shard-detail/sidekiq-shard-detail)
|
||||
- The [Shard Detail dashboard](https://dashboards.gitlab.net/d/sidekiq-shard-detail/sidekiq-shard-detail)
|
||||
has Total Execution Time and Throughput (RPS). The Shard Utilization
|
||||
panel displays if there is currently any excess capacity for this
|
||||
shard.
|
||||
|
|
|
|||
|
|
@ -422,8 +422,8 @@ Use the coverage reports to ensure your tests cover 100% of your code.
|
|||
### System / Feature tests
|
||||
|
||||
NOTE:
|
||||
Before writing a new system test, [please consider **not**
|
||||
writing one](testing_levels.md#consider-not-writing-a-system-test)!
|
||||
Before writing a new system test,
|
||||
[please consider **not** writing one](testing_levels.md#consider-not-writing-a-system-test)!
|
||||
|
||||
- Feature specs should be named `ROLE_ACTION_spec.rb`, such as
|
||||
`user_changes_password_spec.rb`.
|
||||
|
|
@ -909,8 +909,8 @@ By default, Sidekiq jobs are enqueued into a jobs array and aren't processed.
|
|||
If a test queues Sidekiq jobs and need them to be processed, the
|
||||
`:sidekiq_inline` trait can be used.
|
||||
|
||||
The `:sidekiq_might_not_need_inline` trait was added when [Sidekiq inline mode was
|
||||
changed to fake mode](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/15479)
|
||||
The `:sidekiq_might_not_need_inline` trait was added when
|
||||
[Sidekiq inline mode was changed to fake mode](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/15479)
|
||||
to all the tests that needed Sidekiq to actually process jobs. Tests with
|
||||
this trait should be either fixed to not rely on Sidekiq processing jobs, or their
|
||||
`:sidekiq_might_not_need_inline` trait should be updated to `:sidekiq_inline` if
|
||||
|
|
@ -1239,8 +1239,7 @@ The `match_schema` matcher allows validating that the subject matches a
|
|||
a JSON string or a JSON-compatible data structure.
|
||||
|
||||
`match_response_schema` is a convenience matcher for using with a
|
||||
response object from a [request
|
||||
spec](testing_levels.md#integration-tests).
|
||||
response object from a [request spec](testing_levels.md#integration-tests).
|
||||
|
||||
Examples:
|
||||
|
||||
|
|
|
|||
|
|
@ -415,8 +415,8 @@ except(page).to have_no_text('hidden')
|
|||
Unfortunately, that's not automatically the case for the predicate methods that we add to our
|
||||
[page objects](page_objects.md). We need to [create our own negatable matchers](https://relishapp.com/rspec/rspec-expectations/v/3-9/docs/custom-matchers/define-a-custom-matcher#matcher-with-separate-logic-for-expect().to-and-expect().not-to).
|
||||
|
||||
The initial example uses the `have_job` matcher which is derived from the [`has_job?` predicate
|
||||
method of the `Page::Project::Pipeline::Show` page object](https://gitlab.com/gitlab-org/gitlab/-/blob/87864b3047c23b4308f59c27a3757045944af447/qa/qa/page/project/pipeline/show.rb#L53).
|
||||
The initial example uses the `have_job` matcher which is derived from the
|
||||
[`has_job?` predicate method of the `Page::Project::Pipeline::Show` page object](https://gitlab.com/gitlab-org/gitlab/-/blob/87864b3047c23b4308f59c27a3757045944af447/qa/qa/page/project/pipeline/show.rb#L53).
|
||||
To create a negatable matcher, we use `has_no_job?` for the negative case:
|
||||
|
||||
```ruby
|
||||
|
|
|
|||
|
|
@ -217,8 +217,8 @@ If enabling the feature flag results in E2E test failures, you can browse the ar

If an end-to-end test enables a feature flag, the end-to-end test suite can be used to test changes in a merge request
by running the `package-and-qa` job in the merge request pipeline. If the feature flag and relevant changes have already been merged, you can confirm that the tests
pass on the default branch. The end-to-end tests run on the default branch every two hours, and the results are posted to a [Test
Session Report, which is available in the testcase-sessions project](https://gitlab.com/gitlab-org/quality/testcase-sessions/-/issues?label_name%5B%5D=found%3Amain).
pass on the default branch. The end-to-end tests run on the default branch every two hours, and the results are posted to a
[Test Session Report, which is available in the testcase-sessions project](https://gitlab.com/gitlab-org/quality/testcase-sessions/-/issues?label_name%5B%5D=found%3Amain).

If the relevant tests do not enable the feature flag themselves, you can check if the tests will need to be updated by opening
a draft merge request that enables the flag by default via a [feature flag definition file](../../feature_flags/index.md#feature-flag-definition-and-validation).

@ -140,8 +140,8 @@ a flaky test we first want to make sure that it's no longer flaky.
We can do that using the `ce:custom-parallel` and `ee:custom-parallel` jobs.
Both are manual jobs that you can configure using custom variables.
When clicking the name (not the play icon) of one of the parallel jobs,
you are prompted to enter variables. You can use any of [the variables
that can be used with `gitlab-qa`](https://gitlab.com/gitlab-org/gitlab-qa/blob/master/docs/what_tests_can_be_run.md#supported-gitlab-environment-variables)
you are prompted to enter variables. You can use any of
[the variables that can be used with `gitlab-qa`](https://gitlab.com/gitlab-org/gitlab-qa/blob/master/docs/what_tests_can_be_run.md#supported-gitlab-environment-variables)
as well as these:

| Variable | Description |

@ -150,8 +150,9 @@ as well as these:
| `QA_TESTS` | The tests to run (no default, which means run all the tests in the scenario). Use file paths as you would when running tests via RSpec, for example, `qa/specs/features/ee/browser_ui` would include all the `EE` UI tests. |
| `QA_RSPEC_TAGS` | The RSpec tags to add (no default) |

For now, [manual jobs with custom variables don't use the same variable
when retried](https://gitlab.com/gitlab-org/gitlab/-/issues/31367), so if you want to run the same tests multiple times,
For now,
[manual jobs with custom variables don't use the same variable when retried](https://gitlab.com/gitlab-org/gitlab/-/issues/31367),
so if you want to run the same tests multiple times,
specify the same variables in each `custom-parallel` job (up to as
many of the 10 available jobs as you want to run).

@ -164,8 +165,8 @@ automatically started: it runs the QA smoke suite against the
You can also manually start the `review-qa-all`: it runs the full QA suite
against the [Review App](../review_apps.md).

**This runs end-to-end tests against a Review App based on [the official GitLab
Helm chart](https://gitlab.com/gitlab-org/charts/gitlab/), itself deployed with custom
**This runs end-to-end tests against a Review App based on
[the official GitLab Helm chart](https://gitlab.com/gitlab-org/charts/gitlab/), itself deployed with custom
[Cloud Native components](https://gitlab.com/gitlab-org/build/CNG) built from your merge request's changes.**

See [Review Apps](../review_apps.md) for more details about Review Apps.

@ -243,8 +244,8 @@ Each type of scheduled pipeline generates a static link for the latest test repo
If you are not [testing code in a merge request](#testing-code-in-merge-requests),
there are two main options for running the tests. If you want to run
the existing tests against a live GitLab instance or against a pre-built Docker image,
use the [GitLab QA orchestrator](https://gitlab.com/gitlab-org/gitlab-qa/tree/master/README.md). See also [examples
of the test scenarios you can run via the orchestrator](https://gitlab.com/gitlab-org/gitlab-qa/blob/master/docs/what_tests_can_be_run.md#examples).
use the [GitLab QA orchestrator](https://gitlab.com/gitlab-org/gitlab-qa/tree/master/README.md). See also
[examples of the test scenarios you can run via the orchestrator](https://gitlab.com/gitlab-org/gitlab-qa/blob/master/docs/what_tests_can_be_run.md#examples).

On the other hand, if you would like to run against a local development GitLab
environment, you can use the [GitLab Development Kit (GDK)](https://gitlab.com/gitlab-org/gitlab-development-kit/).

@ -262,8 +263,8 @@ architecture. See the [documentation about it](https://gitlab.com/gitlab-org/git

Once you've decided where to put [test environment orchestration scenarios](https://gitlab.com/gitlab-org/gitlab-qa/tree/master/lib/gitlab/qa/scenario) and
[instance-level scenarios](https://gitlab.com/gitlab-org/gitlab-foss/tree/master/qa/qa/specs/features), take a look at the [GitLab QA README](https://gitlab.com/gitlab-org/gitlab/-/tree/master/qa/README.md),
the [GitLab QA orchestrator README](https://gitlab.com/gitlab-org/gitlab-qa/tree/master/README.md), and [the already existing
instance-level scenarios](https://gitlab.com/gitlab-org/gitlab-foss/tree/master/qa/qa/specs/features).
the [GitLab QA orchestrator README](https://gitlab.com/gitlab-org/gitlab-qa/tree/master/README.md),
and [the already existing instance-level scenarios](https://gitlab.com/gitlab-org/gitlab-foss/tree/master/qa/qa/specs/features).

### Consider **not** writing an end-to-end test

@ -9,8 +9,8 @@ info: To determine the technical writer assigned to the Stage/Group associated w
This document describes various guidelines and best practices for automated
testing of the GitLab project.

It is meant to be an _extension_ of the [Thoughtbot testing
style guide](https://github.com/thoughtbot/guides/tree/master/testing-rspec). If
It is meant to be an _extension_ of the
[Thoughtbot testing style guide](https://github.com/thoughtbot/guides/tree/master/testing-rspec). If
this guide defines a rule that contradicts the Thoughtbot guide, this guide
takes precedence. Some guidelines may be repeated verbatim to stress their
importance.

@ -317,8 +317,8 @@ To test these you usually have to:
- Verify that the expected jobs were scheduled, with the correct set
  of records, the correct batch size, interval, etc.

The behavior of the background migration itself needs to be verified in a [separate
test for the background migration class](#example-background-migration-test).
The behavior of the background migration itself needs to be verified in a
[separate test for the background migration class](#example-background-migration-test).

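For orientation only, a hedged sketch of such a scheduling assertion; it is not the spec referenced below, and helper names such as `migrate!` and the use of `BackgroundMigrationWorker`'s fake jobs array are assumptions about the test harness.

```ruby
# Hedged sketch: assert that running the migration schedules the expected
# jobs without executing them (the migration class name is illustrative).
require 'spec_helper'

RSpec.describe BackfillIssuesUpvotesCount do
  it 'schedules one background job per batch of records' do
    Sidekiq::Testing.fake! do
      migrate! # runs the migration's #up

      # The background migration's own behavior is covered elsewhere;
      # here we only check that the expected jobs were enqueued.
      expect(BackgroundMigrationWorker.jobs.size).to eq(2)
    end
  end
end
```
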
This spec tests the
[`db/post_migrate/20210701111909_backfill_issues_upvotes_count.rb`](https://gitlab.com/gitlab-org/gitlab/-/blob/v14.1.0-ee/db/post_migrate/20210701111909_backfill_issues_upvotes_count.rb)

@ -37,8 +37,7 @@ A list of software preinstalled on the Windows images is available at: [Preinsta

## GCP Windows image for development

The [shared Windows GitLab
runners](https://about.gitlab.com/releases/2020/01/22/gitlab-12-7-released/#windows-shared-runners-on-gitlabcom-beta)
The [shared Windows GitLab runners](https://about.gitlab.com/releases/2020/01/22/gitlab-12-7-released/#windows-shared-runners-on-gitlabcom-beta)
are built with [Packer](https://www.packer.io/).

The Infrastructure as Code repository for building the Google Cloud images is available at:

@ -233,8 +233,8 @@ The first thing that appears is the sign-in page. GitLab creates an administrato
The credentials are:

- Username: `root`
- Password: the password is automatically created, and there are [two ways to
  find it](https://docs.bitnami.com/azure/faq/get-started/find-credentials/).
- Password: the password is automatically created, and there are
  [two ways to find it](https://docs.bitnami.com/azure/faq/get-started/find-credentials/).

After signing in, be sure to immediately [change the password](../../user/profile/index.md#change-your-password).

@ -129,8 +129,8 @@ sudo apt-get install libkrb5-dev

### Git

From GitLab 13.6, we recommend you use the [Git version provided by
Gitaly](https://gitlab.com/gitlab-org/gitaly/-/issues/2729)
From GitLab 13.6, we recommend you use the
[Git version provided by Gitaly](https://gitlab.com/gitlab-org/gitaly/-/issues/2729)
that:

- Is always at the version required by GitLab.

@ -239,8 +239,8 @@ sudo make install

GitLab has several daemons written in Go. To install
GitLab we need a Go compiler. The instructions below assume you use 64-bit
Linux. You can find downloads for other platforms at the [Go download
page](https://go.dev/dl).
Linux. You can find downloads for other platforms at the
[Go download page](https://go.dev/dl).

```shell
# Remove former Go installation folder

@ -466,8 +466,7 @@ Before doing a major version GitLab upgrade, you should have completed all
migrations that exist up until the latest minor version before that major
version. If you have halted migrations, these need to be resolved and
[retried](#retry-a-halted-migration) before proceeding with a major version
upgrade. Read more about [upgrading to a new major
version](../../update/index.md#upgrading-to-a-new-major-version).
upgrade. Read more about [upgrading to a new major version](../../update/index.md#upgrading-to-a-new-major-version).

## GitLab Advanced Search Rake tasks

@ -577,9 +576,9 @@ due to large volumes of data being indexed.

WARNING:
Indexing a large instance generates a lot of Sidekiq jobs.
Make sure to prepare for this task by having a [Scalable and Highly Available
Setup](../../administration/reference_architectures/index.md) or creating [extra
Sidekiq processes](../../administration/operations/extra_sidekiq_processes.md).
Make sure to prepare for this task by having a
[Scalable and Highly Available Setup](../../administration/reference_architectures/index.md) or creating
[extra Sidekiq processes](../../administration/operations/extra_sidekiq_processes.md).

1. [Configure your Elasticsearch host and port](#enable-advanced-search).
1. Create empty indices:

@ -447,8 +447,7 @@ mattermost['env'] = {
}
```

Refer to the [Mattermost Configuration Settings
documentation](https://docs.mattermost.com/administration/config-settings.html)
Refer to the [Mattermost Configuration Settings documentation](https://docs.mattermost.com/administration/config-settings.html)
for details about categories and configuration values.

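As a hedged sketch of such an override in `/etc/gitlab/gitlab.rb`, using the `mattermost['env']` hash shown above; the variable names are examples following Mattermost's `MM_<SECTION>_<SETTING>` naming, not values taken from the surrounding text.

```ruby
# Illustrative values only -- consult the linked Mattermost documentation
# for the real setting names and categories.
mattermost['env'] = {
  'MM_SERVICESETTINGS_SITEURL' => 'https://mattermost.example.com',
  'MM_LOGSETTINGS_CONSOLELEVEL' => 'INFO'
}
```
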
There are a few exceptions to this rule:

@ -939,8 +939,8 @@ Make sure this information is provided.

Another issue that can result in this error is when the correct information is being sent by
the IdP, but the attributes don't match the names in the OmniAuth `info` hash. In this case,
you must set `attribute_statements` in the SAML configuration to [map the attribute names in
your SAML Response to the corresponding OmniAuth `info` hash names](#attribute_statements).
you must set `attribute_statements` in the SAML configuration to
[map the attribute names in your SAML Response to the corresponding OmniAuth `info` hash names](#attribute_statements).

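As a hedged sketch, an `attribute_statements` mapping of this kind sits inside the SAML provider entry in `gitlab.rb`; the IdP attribute names on the right are placeholders and must match what your IdP actually sends.

```ruby
# Illustrative sketch -- most SAML args omitted or shortened.
gitlab_rails['omniauth_providers'] = [
  {
    name: 'saml',
    args: {
      idp_sso_target_url: 'https://idp.example.com/sso',
      issuer: 'https://gitlab.example.com',
      name_identifier_format: 'urn:oasis:names:tc:SAML:2.0:nameid-format:persistent',
      # Map IdP attribute names to the OmniAuth `info` hash keys.
      attribute_statements: {
        email: ['EmailAddress'],
        first_name: ['FirstName'],
        last_name: ['LastName']
      }
    }
  }
]
```
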
### Key validation error, Digest mismatch or Fingerprint mismatch

@ -16,8 +16,8 @@ Our current policy is:
- Backporting security fixes **to the previous two monthly releases in addition to the current stable release**. (See [security releases](#security-releases).)

In rare cases, release managers may make an exception and backport to more than
the last two monthly releases. See [Backporting to older
releases](#backporting-to-older-releases) for more information.
the last two monthly releases. See
[Backporting to older releases](#backporting-to-older-releases) for more information.

## Versioning

@ -452,8 +452,9 @@ gitlab_rails['backup_upload_storage_options'] = {

##### SSE-KMS

To enable SSE-KMS, you'll need the [KMS key via its Amazon Resource Name (ARN)
in the `arn:aws:kms:region:acct-id:key/key-id` format](https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingKMSEncryption.html). Under the `backup_upload_storage_options` configuration setting, set:
To enable SSE-KMS, you'll need the
[KMS key via its Amazon Resource Name (ARN) in the `arn:aws:kms:region:acct-id:key/key-id` format](https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingKMSEncryption.html).
Under the `backup_upload_storage_options` configuration setting, set:

- `server_side_encryption` to `aws:kms`.
- `server_side_encryption_kms_key_id` to the ARN of the key.

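Put together, the two settings above look roughly like this in `/etc/gitlab/gitlab.rb` (the ARN is a placeholder in the documented format):

```ruby
# Sketch assembled from the two bullets above; replace the ARN with your key's.
gitlab_rails['backup_upload_storage_options'] = {
  'server_side_encryption' => 'aws:kms',
  'server_side_encryption_kms_key_id' => 'arn:aws:kms:us-east-1:123456789012:key/EXAMPLE-KEY-ID'
}
```
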
@ -12,8 +12,8 @@ An application data backup creates an archive file that contains the database,
all repositories and all attachments.

You can only restore a backup to **exactly the same version and type (CE/EE)**
of GitLab on which it was created. The best way to [migrate your projects
from one server to another](#migrate-to-a-new-server) is through a backup and restore.
of GitLab on which it was created. The best way to
[migrate your projects from one server to another](#migrate-to-a-new-server) is through a backup and restore.

WARNING:
GitLab doesn't back up items that aren't stored on the file system. If you're

@ -190,8 +190,8 @@ tables will [be logged by PostgreSQL](../administration/logs/index.md#postgresql
ERROR: relation "tablename" does not exist at character 123
```

This happens because the task uses `pg_dump`, which [sets a null search
path and explicitly includes the schema in every SQL query](https://gitlab.com/gitlab-org/gitlab/-/issues/23211)
This happens because the task uses `pg_dump`, which
[sets a null search path and explicitly includes the schema in every SQL query](https://gitlab.com/gitlab-org/gitlab/-/issues/23211)
to address [CVE-2018-1058](https://www.postgresql.org/about/news/postgresql-103-968-9512-9417-and-9322-released-1834/).

Since connections are reused with PgBouncer in transaction pooling mode,

@ -24,8 +24,8 @@ limitation.
You can take steps to prevent unintentional sharing and information
destruction. This limitation is the reason why only certain people are allowed
to [add users to a project](../user/project/members/index.md)
and why only a GitLab administrator can [force push a protected
branch](../user/project/protected_branches.md).
and why only a GitLab administrator can
[force push a protected branch](../user/project/protected_branches.md).

<!-- ## Troubleshooting

@ -299,8 +299,8 @@ for your personal or group namespace. CI/CD minutes are a **one-time purchase**,
NOTE:
Free namespaces are subject to a 5 GB storage and 10 GB transfer [soft limit](https://about.gitlab.com/pricing). Once all storage is available to view in the usage quota workflow, GitLab will automatically enforce the namespace storage limit and the project limit will be removed. This change will be announced separately. The storage and transfer add-on can be purchased to increase the limits.

Projects have a free storage quota of 10 GB. To exceed this quota you must first [purchase one or
more storage subscription units](#purchase-more-storage-and-transfer). Each unit provides 10 GB of additional
Projects have a free storage quota of 10 GB. To exceed this quota you must first
[purchase one or more storage subscription units](#purchase-more-storage-and-transfer). Each unit provides 10 GB of additional
storage per namespace. A storage subscription is renewed annually. For more details, see
[Usage Quotas](../../user/usage_quotas.md).

@ -109,8 +109,8 @@ Purchases in the Customers Portal require a credit card on record as a payment m
multiple credit cards to your account, so that purchases for different products are charged to the
correct card.

If you would like to use an alternative method to pay, please [contact our Sales
team](https://about.gitlab.com/sales/).
If you would like to use an alternative method to pay, please
[contact our Sales team](https://about.gitlab.com/sales/).

To change your payment method:

@ -458,8 +458,8 @@ To use Auto Deploy on a Kubernetes 1.16+ cluster:
```

1. If you have an in-cluster PostgreSQL database installed with
   `AUTO_DEVOPS_POSTGRES_CHANNEL` set to `1`, follow the [guide to upgrade
   PostgreSQL](upgrading_postgresql.md).
   `AUTO_DEVOPS_POSTGRES_CHANNEL` set to `1`, follow the
   [guide to upgrade PostgreSQL](upgrading_postgresql.md).

1. If you are deploying your application for the first time and are using
   GitLab 12.9 or 12.10, set `AUTO_DEVOPS_POSTGRES_CHANNEL` to `2`.