Add latest changes from gitlab-org/gitlab@master

This commit is contained in:
parent 0f554877c4
commit 8ee978407a
@@ -139,15 +139,6 @@ workflow:
       variables:
         <<: *next-ruby-variables
         PIPELINE_NAME: 'Scheduled Ruby $RUBY_VERSION $CI_COMMIT_BRANCH branch'
-    # To create mappings with crystalball oneshot coverage strategy, simplecov needs to be disabled
-    # Collect these mappings in nightly schedule only for now
-    - if: '$CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH && $CI_PIPELINE_SOURCE == "schedule" && $SCHEDULE_TYPE == "nightly"'
-      variables:
-        <<: [*default-ruby-variables, *default-branch-pipeline-failure-variables]
-        CRYSTALBALL: "true"
-        CRYSTALBALL_COVERAGE_STRATEGY: "true"
-        SIMPLECOV: 0
-        PIPELINE_NAME: 'Scheduled Ruby $RUBY_VERSION $CI_COMMIT_BRANCH branch'
     - if: '$CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH && $CI_PIPELINE_SOURCE == "schedule"'
       variables:
         <<: [*default-ruby-variables, *default-branch-pipeline-failure-variables]
Gemfile
@@ -76,7 +76,7 @@ gem 'rugged', '~> 1.6', feature_category: :gitaly
 gem 'faraday', '~> 2', feature_category: :shared
 gem 'faraday-retry', '~> 2', feature_category: :shared
 # Logger is a dependency of Faraday, but Logger 1.6.0 does not work with Chef.
-gem 'logger', '~> 1.6.0', feature_category: :shared
+gem 'logger', '~> 1.7.0', feature_category: :shared

 gem 'marginalia', '~> 1.11.1', feature_category: :database

@@ -534,7 +534,7 @@ group :development, :test do
   gem 'influxdb-client', '~> 3.1', require: false, feature_category: :tooling

   gem 'knapsack', '~> 4.0.0', feature_category: :tooling
-  gem 'gitlab-crystalball', '~> 0.8.0', require: false, feature_category: :tooling
+  gem 'gitlab-crystalball', '~> 1.0.0', require: false, feature_category: :tooling
   gem 'test_file_finder', '~> 0.3.1', feature_category: :tooling

   gem 'simple_po_parser', '~> 1.1.6', require: false, feature_category: :shared
@@ -219,7 +219,7 @@
 {"name":"gitlab","version":"4.19.0","platform":"ruby","checksum":"3f645e3e195dbc24f0834fbf83e8ccfb2056d8e9712b01a640aad418a6949679"},
 {"name":"gitlab-chronic","version":"0.10.6","platform":"ruby","checksum":"a244d11a1396d2aac6ae9b2f326adf1605ec1ad20c29f06e8b672047d415a9ac"},
 {"name":"gitlab-cloud-connector","version":"1.13.0","platform":"ruby","checksum":"c2b0bcd1bc775208825a119ad0c8b8e2f6eac6c762e9514f8026d63ace9086e8"},
-{"name":"gitlab-crystalball","version":"0.8.2","platform":"ruby","checksum":"9b116b1040feba72dd53bdbf5264b4849ee719ca54e5cc17afb256fdc429bf00"},
+{"name":"gitlab-crystalball","version":"1.0.0","platform":"ruby","checksum":"74f56646345a5bc130da64ee5c2a90fad1bd70b26b551928676030fddaf76201"},
 {"name":"gitlab-dangerfiles","version":"4.9.2","platform":"ruby","checksum":"d5c050f685d8720f6e70191a7d1216854d860dbdea5b455f87abe7542e005798"},
 {"name":"gitlab-experiment","version":"0.9.1","platform":"ruby","checksum":"f230ee742154805a755d5f2539dc44d93cdff08c5bbbb7656018d61f93d01f48"},
 {"name":"gitlab-fog-azure-rm","version":"2.2.0","platform":"ruby","checksum":"31aa7c2170f57874053144e7f716ec9e15f32e71ffbd2c56753dce46e2e78ba9"},

@@ -375,7 +375,7 @@
 {"name":"llhttp-ffi","version":"0.4.0","platform":"ruby","checksum":"e5f7327db3cf8007e648342ef76347d6e0ae545a8402e519cca9c886eb37b001"},
 {"name":"locale","version":"2.1.4","platform":"ruby","checksum":"522f9973ef3eee64aac9bca06d21db2fba675fa3d2cf61d21f42d1ca18a9f780"},
 {"name":"lockbox","version":"1.3.0","platform":"ruby","checksum":"ca8e5806e4e0c56d1d762ac5cf401940ff53fc37554ef623d3313c7a6331a3ea"},
-{"name":"logger","version":"1.6.6","platform":"ruby","checksum":"dd618d24e637715472732e7eed02e33cfbdf56deaad225edd0f1f89d38024017"},
+{"name":"logger","version":"1.7.0","platform":"ruby","checksum":"196edec7cc44b66cfb40f9755ce11b392f21f7967696af15d274dde7edff0203"},
 {"name":"lograge","version":"0.11.2","platform":"ruby","checksum":"4cbd1554b86f545d795eff15a0c24fd25057d2ac4e1caa5fc186168b3da932ef"},
 {"name":"loofah","version":"2.24.1","platform":"ruby","checksum":"655a30842b70ec476410b347ab1cd2a5b92da46a19044357bbd9f401b009a337"},
 {"name":"lookbook","version":"2.3.4","platform":"ruby","checksum":"16484c9eb514ac0c23c4b59cfd5a52697141d35056e3a9c2a22b314c1b887605"},
@@ -747,7 +747,7 @@ GEM
     gitlab-cloud-connector (1.13.0)
       activesupport (~> 7.0)
       jwt (~> 2.9.3)
-    gitlab-crystalball (0.8.2)
+    gitlab-crystalball (1.0.0)
       git (< 4)
       ostruct (< 1)
     gitlab-dangerfiles (4.9.2)

@@ -1145,7 +1145,7 @@ GEM
       rake (~> 13.0)
     locale (2.1.4)
     lockbox (1.3.0)
-    logger (1.6.6)
+    logger (1.7.0)
     lograge (0.11.2)
       actionpack (>= 4)
       activesupport (>= 4)
@@ -2159,7 +2159,7 @@ DEPENDENCIES
   gitlab-backup-cli!
   gitlab-chronic (~> 0.10.5)
   gitlab-cloud-connector (~> 1.11)
-  gitlab-crystalball (~> 0.8.0)
+  gitlab-crystalball (~> 1.0.0)
   gitlab-dangerfiles (~> 4.9.0)
   gitlab-duo-workflow-service-client (~> 0.2)!
   gitlab-experiment (~> 0.9.1)

@@ -2246,7 +2246,7 @@ DEPENDENCIES
   licensee (~> 9.16)
   listen (~> 3.7)
   lockbox (~> 1.3.0)
-  logger (~> 1.6.0)
+  logger (~> 1.7.0)
   lograge (~> 0.5)
   loofah (~> 2.24.0)
   lookbook (~> 2.3)
@@ -219,7 +219,7 @@
 {"name":"gitlab","version":"4.19.0","platform":"ruby","checksum":"3f645e3e195dbc24f0834fbf83e8ccfb2056d8e9712b01a640aad418a6949679"},
 {"name":"gitlab-chronic","version":"0.10.6","platform":"ruby","checksum":"a244d11a1396d2aac6ae9b2f326adf1605ec1ad20c29f06e8b672047d415a9ac"},
 {"name":"gitlab-cloud-connector","version":"1.13.0","platform":"ruby","checksum":"c2b0bcd1bc775208825a119ad0c8b8e2f6eac6c762e9514f8026d63ace9086e8"},
-{"name":"gitlab-crystalball","version":"0.8.2","platform":"ruby","checksum":"9b116b1040feba72dd53bdbf5264b4849ee719ca54e5cc17afb256fdc429bf00"},
+{"name":"gitlab-crystalball","version":"1.0.0","platform":"ruby","checksum":"74f56646345a5bc130da64ee5c2a90fad1bd70b26b551928676030fddaf76201"},
 {"name":"gitlab-dangerfiles","version":"4.9.2","platform":"ruby","checksum":"d5c050f685d8720f6e70191a7d1216854d860dbdea5b455f87abe7542e005798"},
 {"name":"gitlab-experiment","version":"0.9.1","platform":"ruby","checksum":"f230ee742154805a755d5f2539dc44d93cdff08c5bbbb7656018d61f93d01f48"},
 {"name":"gitlab-fog-azure-rm","version":"2.2.0","platform":"ruby","checksum":"31aa7c2170f57874053144e7f716ec9e15f32e71ffbd2c56753dce46e2e78ba9"},

@@ -375,7 +375,7 @@
 {"name":"llhttp-ffi","version":"0.4.0","platform":"ruby","checksum":"e5f7327db3cf8007e648342ef76347d6e0ae545a8402e519cca9c886eb37b001"},
 {"name":"locale","version":"2.1.4","platform":"ruby","checksum":"522f9973ef3eee64aac9bca06d21db2fba675fa3d2cf61d21f42d1ca18a9f780"},
 {"name":"lockbox","version":"1.3.0","platform":"ruby","checksum":"ca8e5806e4e0c56d1d762ac5cf401940ff53fc37554ef623d3313c7a6331a3ea"},
-{"name":"logger","version":"1.6.6","platform":"ruby","checksum":"dd618d24e637715472732e7eed02e33cfbdf56deaad225edd0f1f89d38024017"},
+{"name":"logger","version":"1.7.0","platform":"ruby","checksum":"196edec7cc44b66cfb40f9755ce11b392f21f7967696af15d274dde7edff0203"},
 {"name":"lograge","version":"0.11.2","platform":"ruby","checksum":"4cbd1554b86f545d795eff15a0c24fd25057d2ac4e1caa5fc186168b3da932ef"},
 {"name":"loofah","version":"2.24.1","platform":"ruby","checksum":"655a30842b70ec476410b347ab1cd2a5b92da46a19044357bbd9f401b009a337"},
 {"name":"lookbook","version":"2.3.4","platform":"ruby","checksum":"16484c9eb514ac0c23c4b59cfd5a52697141d35056e3a9c2a22b314c1b887605"},
@@ -741,7 +741,7 @@ GEM
     gitlab-cloud-connector (1.13.0)
       activesupport (~> 7.0)
       jwt (~> 2.9.3)
-    gitlab-crystalball (0.8.2)
+    gitlab-crystalball (1.0.0)
       git (< 4)
       ostruct (< 1)
     gitlab-dangerfiles (4.9.2)

@@ -1139,7 +1139,7 @@ GEM
       rake (~> 13.0)
     locale (2.1.4)
     lockbox (1.3.0)
-    logger (1.6.6)
+    logger (1.7.0)
     lograge (0.11.2)
       actionpack (>= 4)
       activesupport (>= 4)
@@ -2154,7 +2154,7 @@ DEPENDENCIES
   gitlab-backup-cli!
   gitlab-chronic (~> 0.10.5)
   gitlab-cloud-connector (~> 1.11)
-  gitlab-crystalball (~> 0.8.0)
+  gitlab-crystalball (~> 1.0.0)
   gitlab-dangerfiles (~> 4.9.0)
   gitlab-duo-workflow-service-client (~> 0.2)!
   gitlab-experiment (~> 0.9.1)

@@ -2241,7 +2241,7 @@ DEPENDENCIES
   licensee (~> 9.16)
   listen (~> 3.7)
   lockbox (~> 1.3.0)
-  logger (~> 1.6.0)
+  logger (~> 1.7.0)
   lograge (~> 0.5)
   loofah (~> 2.24.0)
   lookbook (~> 2.3)
@@ -72,6 +72,7 @@ export default {
   data() {
     return {
       copyTooltipText: this.$options.i18n.copyTooltipText,
+      isWaitingForMutation: false,
     };
   },
   computed: {
@@ -181,10 +182,17 @@ export default {
        url: setUrlParams({ [DETAIL_VIEW_QUERY_PARAM_NAME]: params }),
      });
    },
-    handleClose(isClickedOutside) {
+    handleClose(isClickedOutside, bypassPendingRequests = false) {
+      const { queryManager } = this.$apollo.provider.clients.defaultClient;
+      // We only need this check when the user is on a board and the mutation is pending.
+      this.isWaitingForMutation =
+        this.isBoard &&
+        window.pendingApolloRequests - queryManager.inFlightLinkObservables.size > 0;
+
      /* Do not close when a modal is open, or when the user is focused in an editor/input.
      */
      if (
+        (this.isWaitingForMutation && !bypassPendingRequests) ||
        document.body.classList.contains('modal-open') ||
        document.activeElement?.closest('.js-editor') != null ||
        document.activeElement.classList.contains('gl-form-input')
@@ -206,7 +214,7 @@ export default {

      this.$emit('close');
    },
-    handleClickOutside(event) {
+    async handleClickOutside(event) {
      for (const selector of this.$options.defaultExcludedSelectors) {
        const excludedElements = document.querySelectorAll(selector);
        for (const parent of excludedElements) {
@@ -225,11 +233,27 @@ export default {
          }
        }
      }
+      // If on board, wait for all tasks to be resolved before closing the drawer.
+      if (this.isBoard) {
+        await this.$nextTick();
+      }
+
      this.handleClose(true);
    },
+    focusOnHeaderLink() {
+      this.$refs?.workItemUrl?.$el?.focus();
+    },
    handleWorkItemUpdated(e) {
      this.$emit('work-item-updated', e);
+
+      // Force to close the drawer after 100ms even if requests are still pending
+      // to not let UI hanging.
+      if (this.isWaitingForMutation) {
+        setTimeout(() => {
+          this.handleClose(false, true);
+        }, 100);
+      }
    },
  },
  i18n: {
    copyTooltipText: __('Copy item URL'),
@@ -320,6 +344,7 @@ export default {
    is-drawer
    class="work-item-drawer !gl-pt-0 xl:!gl-px-6"
    @deleteWorkItem="deleteWorkItem"
+    @work-item-updated="handleWorkItemUpdated"
    @workItemTypeChanged="$emit('workItemTypeChanged', $event)"
    v-on="$listeners"
  />
@@ -156,6 +156,27 @@
      ],
      "pattern": "^<?[0-9]+\\.[0-9]+$"
    },
+    "status": {
+      "type": [
+        "string"
+      ],
+      "enum": [
+        "active",
+        "removed"
+      ]
+    },
+    "milestone_removed": {
+      "type": [
+        "string"
+      ],
+      "pattern": "^<?[0-9]+\\.[0-9]+$"
+    },
+    "removed_by_url": {
+      "type": [
+        "string"
+      ],
+      "format": "uri"
+    },
    "classification": {
      "type": [
        "string",
@@ -182,5 +203,10 @@
        "type": "string"
      }
    }
-  }
+  },
+  "allOf": [
+    {
+      "$ref": "schema/status.json"
+    }
+  ]
}
@@ -0,0 +1,44 @@
+{
+  "allOf": [
+    {
+      "if": {
+        "properties": {
+          "status": {
+            "const": "removed"
+          }
+        },
+        "required": [
+          "status"
+        ]
+      },
+      "then": {
+        "required": [
+          "removed_by_url",
+          "milestone_removed"
+        ]
+      }
+    },
+    {
+      "if": {
+        "properties": {
+          "status": {
+            "const": "active"
+          }
+        },
+        "required": [
+          "status"
+        ]
+      },
+      "then": {
+        "properties": {
+          "removed_by_url": {
+            "type": "null"
+          },
+          "milestone_removed": {
+            "type": "null"
+          }
+        }
+      }
+    }
+  ]
+}
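In plain terms, the new `schema/status.json` makes `removed_by_url` and `milestone_removed` mandatory when an event's status is `removed`, and forces them to be null when it is `active`. A minimal sketch of exercising the conditional, assuming the `json_schemer` gem (an assumption for illustration; the repo's own schema tooling may differ):

```ruby
require 'json_schemer'

# Load the conditional schema added above.
schemer = JSONSchemer.schema(Pathname.new('schema/status.json'))

removed_event = {
  'status' => 'removed',
  'milestone_removed' => '18.1', # illustrative values
  'removed_by_url' => 'https://gitlab.com/example/-/merge_requests/1'
}

schemer.valid?(removed_event)         # => true
schemer.valid?('status' => 'removed') # => false: both removal fields are required
schemer.valid?('status' => 'active')  # => true: removal fields must stay null
```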
@@ -0,0 +1,10 @@
+---
+name: workhorse_circuit_breaker
+description:
+feature_issue_url: https://gitlab.com/gitlab-org/gitlab/-/issues/514018
+introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/190323
+rollout_issue_url: https://gitlab.com/gitlab-org/gitlab/-/issues/541105
+milestone: '18.0'
+group: group::source code
+type: beta
+default_enabled: false
@@ -1,10 +0,0 @@
----
-name: reduce_lock_usage_during_partition_creation
-description: Reduce the lock contention used by the partition manager to create new partitions
-feature_issue_url: https://gitlab.com/gitlab-org/gitlab/-/issues/538988
-introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/190634
-rollout_issue_url: https://gitlab.com/gitlab-org/gitlab/-/issues/541890
-milestone: '18.1'
-group: group::database frameworks
-type: gitlab_com_derisk
-default_enabled: false
@@ -24,6 +24,17 @@ pages from within your instance.

 {{< /alert >}}

+## Container registry URL
+
+The URL to the container image you want depends on the version of the GitLab Docs you need. See the following table
+as a guide for the URL to use in the following sections.
+
+| GitLab version | Container registry | Container image URL |
+|:---------------|:---------------------------------------------------------------------------------------------|:--------------------|
+| 17.8 and later | <https://gitlab.com/gitlab-org/technical-writing/docs-gitlab-com/container_registry/8244403> | `registry.gitlab.com/gitlab-org/technical-writing/docs-gitlab-com/archives:<version>` |
+| 15.5 - 17.7 | <https://gitlab.com/gitlab-org/gitlab-docs/container_registry/3631228> | `registry.gitlab.com/gitlab-org/gitlab-docs/archives:<version>` |
+| 10.3 - 15.4 | <https://gitlab.com/gitlab-org/gitlab-docs/container_registry/631635> | `registry.gitlab.com/gitlab-org/gitlab-docs:<version>` |
+
 ## Documentation self-hosting options

 To host the GitLab product documentation, you can use:
@@ -285,23 +296,8 @@ GitLab Docs are using.

 ### The Docker image is not found

-If you're using version 15.5 and earlier, you should drop `/archives` from the
-Docker image name.
-
-For example:
-
-```plaintext
-registry.gitlab.com/gitlab-org/gitlab-docs:15.5
-```
-
-If you are using version 17.8 or earlier, Docker images are in the
-`gitlab-docs` project.
-
-For example:
-
-```plaintext
-registry.gitlab.com/gitlab-org/gitlab-docs:17.2
-```
+If you get an error that the Docker image is not found, check if you're using
+the [correct registry URL](#container-registry-url).

 ### Docker-hosted documentation site fails to redirect

@@ -66,7 +66,7 @@ gitaly['configuration'] = {
 Gitaly.

 This limits the number of in-flight RPC calls for the given RPCs. The limit is applied per
-repository. In the example above:
+repository. In the previous example:

 - Each repository served by the Gitaly server can have at most 20 simultaneous `PostUploadPackWithSidechannel` and
   `SSHUploadPackWithSidechannel` RPC calls in flight.

@@ -123,7 +123,7 @@ gitaly['pack_objects_limiting'] = {
 - `max_queue_length` is the maximum size the concurrency queue (per key) can grow to before requests are rejected by Gitaly.
 - `max_queue_wait` is the maximum amount of time a request can wait in the concurrency queue to be picked up by Gitaly.

-In the example above:
+In the previous example:

 - Each remote IP can have at most 15 simultaneous pack-object processes in flight on a Gitaly node.
 - If another request comes in from an IP that has used up its 15 slots, that request gets queued.
@@ -55,7 +55,7 @@ tokens.
 ## Run Gitaly on its own server

 By default, Gitaly is run on the same server as Gitaly clients and is
-configured as above. Single-server installations are best served by
+configured as described previously. Single-server installations are best served by
 this default configuration used by:

 - [Linux package installations](https://docs.gitlab.com/omnibus/).
@@ -27,7 +27,8 @@ If your availability needs to span multiple zones or multiple locations, read ab

 ## Scenarios for failure modes and available mitigation paths

-The following table outlines failure modes and mitigation paths for the product offerings detailed in the tables above. Note - Gitaly Cluster install assumes an odd number replication factor of 3 or greater
+The following table outlines failure modes and mitigation paths for the product offerings detailed in the previous tables.
+Gitaly Cluster install assumes an odd number replication factor of 3 or greater.

 | Gitaly Mode | Loss of Single Gitaly Node | Application / Data Corruption | Regional Outage (Loss of Instance) | Notes |
 | ----------- | -------------------------- | ----------------------------- | ---------------------------------- | ----- |
@@ -462,7 +462,8 @@ praefect['configuration'] = {
   # ...
   dbname: 'praefect_production_direct',
   # There is no need to repeat the following. Parameters of direct
-  # database connection will fall back to the values above.
+  # database connection will fall back to the values specified in the
+  # database block.
   #
   # host: PGBOUNCER_HOST,
   # port: 6432,

@@ -1044,7 +1045,7 @@ Prerequisites:
   ```

1. Save the file and [reconfigure GitLab](../restart_gitlab.md#reconfigure-a-linux-package-installation).
-1. Repeat the above steps on each Praefect server to use with
+1. Repeat the previous steps on each Praefect server to use with
   service discovery.
1. On the Praefect clients (except Gitaly servers), edit `gitlab_rails['repositories_storages']` in
   `/etc/gitlab/gitlab.rb` as follows. Replace `CONSUL_SERVER` with the IP or
@@ -1061,7 +1062,7 @@ Prerequisites:

1. Use `dig` from the Praefect clients to confirm that each IP address has been registered to
   `praefect.service.consul` with `dig A praefect.service.consul @CONSUL_SERVER -p 8600`.
-   Replace `CONSUL_SERVER` with the value configured above and all Praefect node IP addresses
+   Replace `CONSUL_SERVER` with the value configured previously and all Praefect node IP addresses
   should be present in the output.
1. Save the file and [reconfigure GitLab](../restart_gitlab.md#reconfigure-a-linux-package-installation).

@@ -1222,7 +1223,7 @@ For more information on Gitaly server configuration, see our

 {{< alert type="note" >}}

-The steps above must be completed for each Gitaly node!
+The previous steps must be completed for each Gitaly node!

 {{< /alert >}}

@@ -70,8 +70,8 @@ current assignments: gitaly-1, gitaly-2, gitaly-3

 This ensures that the repository is replicated to the new node and the `repository_assignments` table gets updated with the name of new Gitaly node.

-If the [default replication factor](praefect.md#configure-replication-factor) is set, new nodes are not automatically included in replication. You must follow the steps
-described above.
+If the [default replication factor](praefect.md#configure-replication-factor) is set, new nodes are not automatically included in replication.
+You must follow the steps described previously.

 After you [verify](#check-for-data-loss) that repository is successfully replicated to the new node:

@@ -17,14 +17,14 @@ You should enable the following logging settings:
 - `log_autovacuum_min_duration=0`: log all autovacuum executions. Autovacuum is a key component for overall PostgreSQL engine performance. Essential for troubleshooting and tuning if dead tuples are not being removed from tables.
 - `log_min_duration_statement=1000`: log slow queries (slower than 1 second).

-The full description of the above parameter settings can be found in
+The full description of these parameter settings can be found in
 [PostgreSQL error reporting and logging documentation](https://www.postgresql.org/docs/16/runtime-config-logging.html#RUNTIME-CONFIG-LOGGING-WHAT).

 ## Amazon RDS

 The Amazon Relational Database Service (RDS) provides a large number of [monitoring metrics](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_Monitoring.html) and [logging interfaces](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_Monitor_Logs_Events.html). Here are a few you should configure:

-- Change all above [recommended PostgreSQL Logging settings](#recommended-postgresql-logging-settings) through [RDS Parameter Groups](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_WorkingWithDBInstanceParamGroups.html).
+- Change all [recommended PostgreSQL Logging settings](#recommended-postgresql-logging-settings) through [RDS Parameter Groups](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_WorkingWithDBInstanceParamGroups.html).
 - As the recommended logging parameters are [dynamic in RDS](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Appendix.PostgreSQL.CommonDBATasks.Parameters.html) you don't require a reboot after changing these settings.
 - The PostgreSQL logs can be observed through the [RDS console](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/logs-events-streams-console.html).
 - Enable [RDS performance insight](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_PerfInsights.html) allows you to visualise your database load with many important performance metrics of a PostgreSQL database engine.
@@ -255,7 +255,7 @@ the database. Each of the listed services below use the following formula to def
 - `headroom` can be configured via `DB_POOL_HEADROOM` environment variable (default to `10`)

 To calculate the `default_pool_size`, multiply the number of instances of `puma`, `sidekiq` and `geo-logcursor` by the
-number of connections each can consume as per listed above. The total is the suggested `default_pool_size`.
+number of connections each can consume as per listed previously. The total is the suggested `default_pool_size`.

 If you are using more than one PgBouncer with an internal Load Balancer, you may be able to divide the
 `default_pool_size` by the number of instances to guarantee an evenly distributed load between them.
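As a worked example of the formula in this hunk, a minimal sketch — all instance and per-service connection counts below are assumed placeholders, not values from this page:

```ruby
# Default headroom, overridable via DB_POOL_HEADROOM as documented above.
headroom = Integer(ENV.fetch('DB_POOL_HEADROOM', 10))

# Assumed deployment shape, for illustration only.
services = {
  puma:          { instances: 3, connections: 10 + headroom },
  sidekiq:       { instances: 2, connections: 20 + headroom },
  geo_logcursor: { instances: 1, connections: 1 + headroom }
}

# Suggested default_pool_size: sum of instances * connections per service.
default_pool_size = services.values.sum { |s| s[:instances] * s[:connections] }

# With two PgBouncers behind an internal load balancer, the docs suggest the
# pool may be divided per instance:
per_pgbouncer_pool = default_pool_size / 2
```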
@@ -733,7 +733,7 @@ After deploying the configuration follow these steps:

 Patroni is an opinionated solution for PostgreSQL high-availability. It takes the control of PostgreSQL, overrides its configuration, and manages its lifecycle (start, stop, restart). Patroni is the only option for PostgreSQL 12+ clustering and for cascading replication for Geo deployments.

-The fundamental [architecture](#example-recommended-setup-manual-steps) (mentioned above) does not change for Patroni.
+The fundamental [architecture](#example-recommended-setup-manual-steps) does not change for Patroni.
 You do not need any special consideration for Patroni while provisioning your database nodes. Patroni heavily relies on Consul to store the state of the cluster and elect a leader. Any failure in Consul cluster and its leader election propagates to the Patroni cluster as well.

 Patroni monitors the cluster and handles any failover. When the primary node fails, it works with Consul to notify PgBouncer. On failure, Patroni handles the transitioning of the old primary to a replica and rejoins it to the cluster automatically.
@@ -333,7 +333,7 @@ reads against a replica with different locale data.

 ## Additional Geo variations

-The above upgrade procedures are not set in stone. With Geo there are potentially more options,
+The upgrade procedures documented previously are not set in stone. With Geo there are potentially more options,
 because there exists redundant infrastructure. You could consider modifications to suit your use-case,
 but be sure to weigh it against the added complexity. Here are some examples:

@@ -31051,6 +31051,7 @@ Represents an instance-level LDAP link.
 | ---- | ---- | ----------- |
 | <a id="ldapadminrolelinkadminmemberrole"></a>`adminMemberRole` | [`AdminMemberRole!`](#adminmemberrole) | Custom admin member role. |
 | <a id="ldapadminrolelinkcn"></a>`cn` | [`String`](#string) | Common Name (CN) of the LDAP group. |
+| <a id="ldapadminrolelinkcreatedat"></a>`createdAt` | [`Time!`](#time) | Timestamp of when the role link was created. |
 | <a id="ldapadminrolelinkfilter"></a>`filter` | [`String`](#string) | Search filter for the LDAP group. |
 | <a id="ldapadminrolelinkid"></a>`id` | [`ID!`](#id) | ID of the LDAP link. |
 | <a id="ldapadminrolelinklastsuccessfulsyncat"></a>`lastSuccessfulSyncAt` | [`Time`](#time) | Timestamp of the last successful sync. |
@@ -22,6 +22,13 @@ All event definitions are stored in the following directories:
 - [`config/events`](https://gitlab.com/gitlab-org/gitlab/-/tree/master/config/events)
 - [`ee/config/events`](https://gitlab.com/gitlab-org/gitlab/-/tree/master/ee/config/events)

+Removed events are stored in the `/removed` subfolders:
+
+- [`config/events/removed`](https://gitlab.com/gitlab-org/gitlab/-/tree/master/config/events/removed)
+- [`ee/config/events/removed`](https://gitlab.com/gitlab-org/gitlab/-/tree/master/ee/config/events/removed)
+
+See the [event lifecycle](event_lifecycle.md) guide for more details.
+
 Each event is defined in a separate YAML file consisting of the following fields:

 | Field | Required | Additional information |

@@ -33,6 +40,9 @@ Each event is defined in a separate YAML file consisting of the following fields
 | `identifiers` | no | A list of identifiers sent with the event. Can be set to one or more of `project`, `user`, `namespace` or `feature_enabled_by_namespace_ids` |
 | `product_group` | yes | The [group](https://gitlab.com/gitlab-com/www-gitlab-com/blob/master/data/stages.yml) that owns the event. |
 | `milestone` | no | The milestone when the event is introduced. |
+| `status` | no | The status of the event. Can be set to one of `active`, `removed`, or `null`. |
+| `milestone_removed` | no | The milestone when the event is removed. |
+| `removed_by_url` | no | The URL to the merge request that removed the event. |
 | `introduced_by_url` | no | The URL to the merge request that introduced the event. |
 | `tiers` | yes | The [tiers](https://handbook.gitlab.com/handbook/marketing/brand-and-product-marketing/product-and-solution-marketing/tiers/) where the tracked feature is available. Can be set to one or more of `free`, `premium`, or `ultimate`. |
 | `additional_properties` | no | A list of additional properties that are sent with the event. Each additional property must have a record entry with a `description` field. It is required to add all the additional properties that would be sent with the event in the event definition file. Built-in properties are: `label` (string), `property` (string) and `value` (numeric). [Custom](quick_start.md#additional-properties) properties can be added if the built-in options are not sufficient. |
@@ -0,0 +1,23 @@
+---
+stage: Monitor
+group: Analytics Instrumentation
+info: Any user with at least the Maintainer role can merge updates to this content. For details, see https://docs.gitlab.com/ee/development/development_processes.html#development-guidelines-review.
+title: Event lifecycle
+---
+
+The following guidelines explain the steps to follow at each stage of an event's lifecycle.
+
+## Add an event
+
+See the [event definition guide](event_definition_guide.md) for more details.
+
+## Remove an event
+
+To remove an event:
+
+1. Move the event definition file to the `/removed` subfolder.
+1. Update the event definition file to set the `status` field to `removed`.
+1. Update the event definition file to set the `milestone_removed` field to the milestone when the event was removed.
+1. Update the event definition file to set the `removed_by_url` field to the URL of the merge request that removed the event.
+1. Remove the event tracking from the codebase.
+1. Remove the event tracking tests.
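To make steps 2–4 concrete, here is a hedged sketch of the attribute set a removed event ends up with — the same shape the event definition validator specs in this commit exercise; every value is illustrative:

```ruby
# Illustrative attributes for an event that went through the removal steps
# above; URLs and milestones are placeholders, not real references.
removed_event_attributes = {
  description: 'Created issues',
  category: 'issues',
  action: 'create',
  internal_events: true,
  status: 'removed',
  milestone_removed: '18.1',
  removed_by_url: 'https://gitlab.com/example/-/merge_requests/123',
  tiers: %w[free]
}
# The validator accepts this combination; dropping either milestone_removed
# or removed_by_url while status is 'removed' produces a validation error.
```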
@@ -689,12 +689,14 @@ The limit varies depending on your plan and the number of seats in your subscrip

 ### Security policy limits

-| Policy type | Default limit |
-|:--------------------------------------------------------------------|:-----------------------|
-| Merge request approval policy | 5 per security policy project |
-| Scan execution policy | 5 per security policy project |
-| Pipeline execution policy | 5 per security policy project |
-| Vulnerability management policy | 5 per security policy project |
+The maximum number of policies that you can add to a security policy project. These limits apply to each policy type individually. For example, you can have five merge request approval policies and five scan execution policies in the same security policy project.
+
+| Policy type | Default limit |
+|--------------------------------------------------------|-------------------------------------------|
+| Merge request approval policies | Five policies per security policy project |
+| Scan execution policies | Five policies per security policy project |
+| Pipeline execution policies | Five policies per security policy project |
+| Vulnerability management policies | Five policies per security policy project |

 ### Other limits

@@ -6,6 +6,8 @@ module API
    class Base < ::API::Base
      include Gitlab::RackLoadBalancingHelpers

+      WORKHORSE_CIRCUIT_BREAKER_HEADER = 'Enable-Workhorse-Circuit-Breaker'
+
      before { authenticate_by_gitlab_shell_token! }

      before do

@@ -147,6 +149,12 @@ module API
        def two_factor_push_otp_check
          { success: false, message: 'Feature is not available' }
        end
+
+        def add_workhorse_circuit_breaker_header(params)
+          return unless params[:protocol] == 'ssh' && request.headers['Gitlab-Shell-Api-Request'].present?
+
+          header(WORKHORSE_CIRCUIT_BREAKER_HEADER, "true")
+        end
      end

      namespace 'internal' do

@@ -170,6 +178,7 @@ module API
        post "/allowed", feature_category: :source_code_management do
          # It was moved to a separate method so that EE can alter its behaviour more
          # easily.
+          add_workhorse_circuit_breaker_header(params) if Feature.enabled?(:workhorse_circuit_breaker, project)
          check_allowed(params)
        end

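Taken together, these hunks mean `POST /internal/allowed` advertises the Workhorse circuit breaker only for SSH requests originating from GitLab Shell, and only behind the feature flag. A hedged sketch of that decision as a standalone predicate — the hash-based `request_headers` argument is an assumption for illustration; the real code reads the Grape request object:

```ruby
# Mirrors add_workhorse_circuit_breaker_header plus its feature-flag guard.
def circuit_breaker_header?(params, request_headers, feature_enabled:)
  feature_enabled &&
    params[:protocol] == 'ssh' &&
    !request_headers['Gitlab-Shell-Api-Request'].to_s.empty?
end

circuit_breaker_header?({ protocol: 'ssh' },
  { 'Gitlab-Shell-Api-Request' => '1' }, feature_enabled: true)
# => true; the response then carries Enable-Workhorse-Circuit-Breaker: true
```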
@@ -217,7 +217,7 @@ module API
            redirect_url += "?#{query_parameters_except_order_by.compact.to_param}"
          end

-          redirect redirect_url
+          redirect expose_path(redirect_url)
        end

        desc 'Create a release' do

@@ -373,6 +373,7 @@ module API
          end
        end

+      helpers ::API::Helpers::RelatedResourcesHelpers
        helpers do
          def authorize_read_group_releases!
            authorize! :read_release, user_group
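Why `expose_path` matters here: when GitLab runs under a relative URL root, a bare `redirect` would drop the prefix. A hedged sketch of the intended effect — the spec addition near the end of this commit asserts the same path, and the example configuration value below is borrowed from it:

```ruby
# Illustration only: expose_path comes from API::Helpers::RelatedResourcesHelpers
# and prefixes paths with the configured relative URL root.
# With relative_url_root = '/gitlab-relative-url' (assumed configuration):
#
#   redirect redirect_url
#   # => Location: /api/v4/projects/1/releases/v1.0   (prefix lost)
#
#   redirect expose_path(redirect_url)
#   # => Location: /gitlab-relative-url/api/v4/projects/1/releases/v1.0
```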
@@ -50,17 +50,6 @@ module Gitlab
        SQL
      end

-      def to_sql
-        from_sql = conn.quote(from)
-        to_sql = conn.quote(to)
-
-        <<~SQL
-          CREATE TABLE IF NOT EXISTS #{fully_qualified_partition}
-          PARTITION OF #{conn.quote_table_name(table)}
-          FOR VALUES FROM (#{from_sql}) TO (#{to_sql})
-        SQL
-      end
-
      def ==(other)
        table == other.table && partition_name == other.partition_name && from == other.from && to == other.to
      end
@@ -48,14 +48,6 @@ module Gitlab
        SQL
      end

-      def to_sql
-        <<~SQL.squish
-          CREATE TABLE IF NOT EXISTS #{fully_qualified_partition}
-          PARTITION OF #{quote_table_name(table)}
-          FOR VALUES IN (#{quoted_values})
-        SQL
-      end
-
      def to_detach_sql
        <<~SQL.squish
          ALTER TABLE #{quote_table_name(table)}
@@ -111,20 +111,8 @@ module Gitlab
          # with_lock_retries starts a requires_new transaction most of the time, but not on the last iteration
          with_lock_retries do
            connection.transaction(requires_new: false) do # so we open a transaction here if not already in progress
-              if Feature.enabled?(:reduce_lock_usage_during_partition_creation)
-                create_partition_tables(partitions)
-                attach_partition_tables(partitions)
-              else
-                # Partitions might not get created (IF NOT EXISTS) so explicit locking will not happen.
-                # This LOCK TABLE ensures to have exclusive lock as the first step.
-                quoted_table_name = connection.quote_table_name(model.table_name)
-                connection.execute("LOCK TABLE #{quoted_table_name} IN ACCESS EXCLUSIVE MODE")
-
-                partitions.each do |partition|
-                  connection.execute(partition.to_sql)
-                  process_created_partition(partition)
-                end
-              end
+              create_partition_tables(partitions)
+              attach_partition_tables(partitions)

              model.partitioning_strategy.after_adding_partitions
            end
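With the feature flag removed, partition creation is now always the two-step create-then-attach flow. A sketch of the SQL pair for a hypothetical `table_10` list partition, matching the expectations in the partitioning specs later in this commit:

```ruby
# Hypothetical partition "table_10" of "table"; the two statements replace the
# old single CREATE TABLE ... PARTITION OF under an ACCESS EXCLUSIVE lock.
to_create_sql = <<~SQL
  CREATE TABLE IF NOT EXISTS "gitlab_partitions_dynamic"."table_10"
  (LIKE "table" INCLUDING ALL)
SQL

to_attach_sql = <<~SQL
  ALTER TABLE "table"
  ATTACH PARTITION "gitlab_partitions_dynamic"."table_10"
  FOR VALUES IN (10)
SQL

# create_partition_tables executes the first statement for each pending
# partition; attach_partition_tables then executes the second.
```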
@@ -48,14 +48,6 @@ module Gitlab
        SQL
      end

-      def to_sql
-        <<~SQL
-          CREATE TABLE IF NOT EXISTS #{fully_qualified_partition}
-          PARTITION OF #{quote_table_name(table)}
-          FOR VALUES IN (#{quote(value)})
-        SQL
-      end
-
      def to_detach_sql
        <<~SQL
          ALTER TABLE #{quote_table_name(table)}
@@ -48,17 +48,6 @@ module Gitlab
        SQL
      end

-      def to_sql
-        from_sql = from ? conn.quote(from.to_date.iso8601) : 'MINVALUE'
-        to_sql = conn.quote(to.to_date.iso8601)
-
-        <<~SQL
-          CREATE TABLE IF NOT EXISTS #{fully_qualified_partition}
-          PARTITION OF #{conn.quote_table_name(table)}
-          FOR VALUES FROM (#{from_sql}) TO (#{to_sql})
-        SQL
-      end
-
      def to_detach_sql
        <<~SQL
          ALTER TABLE #{conn.quote_table_name(table)}
@@ -35624,6 +35624,9 @@ msgstr ""
 msgid "LDAP uid:"
 msgstr ""

+msgid "LDAP|%{dateTime} (%{timeAgo})"
+msgstr ""
+
 msgid "LDAP|(Inactive because syncing with an LDAP user filter is not included in the current license)"
 msgstr ""

@@ -35651,6 +35654,9 @@ msgstr ""
 msgid "LDAP|Default, minimum permission level for LDAP group members of %{group_name}."
 msgstr ""

+msgid "LDAP|Ended at:"
+msgstr ""
+
 msgid "LDAP|Group cn"
 msgstr ""

@@ -35672,6 +35678,12 @@ msgstr ""
 msgid "LDAP|LDAP user filter"
 msgstr ""

+msgid "LDAP|Last successful sync:"
+msgstr ""
+
+msgid "LDAP|Never synced"
+msgstr ""
+
 msgid "LDAP|No LDAP synchronizations"
 msgstr ""

@@ -35690,6 +35702,15 @@ msgstr ""
 msgid "LDAP|Start typing"
 msgstr ""

+msgid "LDAP|Started at:"
+msgstr ""
+
+msgid "LDAP|Sync created at:"
+msgstr ""
+
+msgid "LDAP|Sync error:"
+msgstr ""
+
 msgid "LDAP|Sync method"
 msgstr ""

@@ -35699,6 +35720,9 @@ msgstr ""
 msgid "LDAP|This query must use valid %{ldap_link_start}LDAP Search Filter Syntax%{ldap_link_end}. Synchronize %{group_name}'s members with this LDAP user filter."
 msgstr ""

+msgid "LDAP|Total runtime:"
+msgstr ""
+
 msgid "LDAP|User filter"
 msgstr ""

@@ -37854,6 +37878,9 @@ msgstr ""
 msgid "MemberRole|Sync scheduled"
 msgstr ""

+msgid "MemberRole|Sync status:"
+msgstr ""
+
 msgid "MemberRole|The CSV report contains a list of users, assigned role and access in all groups, subgroups, and projects. When the export is completed, it will be sent as an attachment to %{email}."
 msgstr ""
@@ -4,6 +4,7 @@ module InternalEventsCli
    NEW_EVENT_FIELDS = [
      :description,
      :internal_events,
+      :status,
      :category,
      :action,
      :value_type,

@@ -19,6 +20,7 @@ module InternalEventsCli

    EVENT_DEFAULTS = {
      internal_events: true,
+      status: 'active',
      product_group: nil,
      introduced_by_url: 'TODO'
    }.freeze
@@ -12,8 +12,7 @@ module CrystalballEnv

    # Primary strategy currently used for predictive testing
    enable_described_strategy
-    # Alternative coverage based strategy currently being evaluated for predictive testing
-    # See: https://gitlab.com/groups/gitlab-org/quality/analytics/-/epics/13
+    # Coverage based strategy. See: https://gitlab.com/groups/gitlab-org/quality/analytics/-/epics/13
    enable_coverage_strategy if ENV['CRYSTALBALL_COVERAGE_STRATEGY'] == 'true'
  end

@@ -21,7 +20,7 @@ module CrystalballEnv
    Crystalball::MapGenerator.start! do |config|
      config.map_storage_path = "crystalball/described/#{map_storage_name}.yml"

-      execution_detector = Crystalball::MapGenerator::ObjectSourcesDetector.new(exclude_prefixes: EXCLUDED_PREFIXES)
+      execution_detector = Crystalball::MapGenerator::ObjectSourcesDetector.new(**excluded_prefixes)
      config.register Crystalball::MapGenerator::DescribedClassStrategy.new(execution_detector: execution_detector)
    end
  end

@@ -29,15 +28,21 @@ module CrystalballEnv
  def enable_coverage_strategy
    Crystalball::MapGenerator.start! do |config|
      config.map_storage_path = "crystalball/coverage/#{map_storage_name}.yml"
+      config.hook_type = :context

-      config.register Crystalball::MapGenerator::OneshotCoverageStrategy.new(exclude_prefixes: EXCLUDED_PREFIXES)
+      execution_detector = Crystalball::MapGenerator::CoverageStrategy::ExecutionDetector.new(**excluded_prefixes)
+      config.register Crystalball::MapGenerator::CoverageStrategy.new(execution_detector: execution_detector)

-      # https://toptal.github.io/crystalball/map_generators/#actionviewstrategy
+      # https://gitlab.com/gitlab-org/ruby/gems/crystalball/-/blob/main/docs/map_generators.md?ref_type=heads#actionviewstrategy
      # require 'crystalball/rails/map_generator/action_view_strategy'
      # config.register Crystalball::Rails::MapGenerator::ActionViewStrategy.new
    end
  end
+
+  def excluded_prefixes
+    { exclude_prefixes: EXCLUDED_PREFIXES }
+  end

  def map_storage_name
    (ENV['CI_JOB_NAME'] || 'crystalball_data').gsub(%r{[/ ]}, '_')
  end
@@ -1,6 +1,7 @@
 ---
 description: Internal Event CLI is opened
 internal_events: true
+status: active
 action: internal_events_cli_opened
 product_group: analytics_instrumentation
 product_categories:

@@ -1,6 +1,7 @@
 ---
 description: Engineer uses Internal Event CLI to define a new event
 internal_events: true
+status: active
 action: internal_events_cli_used
 identifiers:
 - project

@@ -1,6 +1,7 @@
 ---
 description: Engineer uses Internal Event CLI to define a new event
 internal_events: true
+status: active
 action: internal_events_cli_used
 identifiers:
 - project

@@ -1,6 +1,7 @@
 ---
 description: Engineer uses Internal Event CLI to define a new event
 internal_events: true
+status: active
 action: internal_events_cli_used
 identifiers:
 - feature_enabled_by_namespace_ids

@@ -1,6 +1,7 @@
 ---
 description: Engineer uses Internal Event CLI to define a new event
 internal_events: true
+status: active
 action: internal_events_cli_used
 identifiers:
 - project

@@ -1,6 +1,7 @@
 ---
 description: Engineer uses Internal Event CLI to define a new event
 internal_events: true
+status: active
 action: internal_events_cli_used
 identifiers:
 - project

@@ -1,6 +1,7 @@
 ---
 description: Engineer uses Internal Event CLI to define a new event
 internal_events: true
+status: active
 action: internal_events_cli_used
 identifiers:
 - project

@@ -1,6 +1,7 @@
 ---
 description: random event string
 internal_events: true
+status: active
 action: random_name
 identifiers:
 - project

@@ -1,6 +1,7 @@
 ---
 description: Internal Event CLI is opened
 internal_events: true
+status: active
 action: internal_events_cli_opened
 identifiers:
 - project

@@ -1,6 +1,7 @@
 ---
 description: Engineer closes Internal Event CLI
 internal_events: true
+status: active
 action: internal_events_cli_closed
 identifiers:
 - project

@@ -1,6 +1,7 @@
 ---
 description: Engineer uses Internal Event CLI to define a new event
 internal_events: true
+status: active
 action: internal_events_cli_used
 identifiers:
 - project

@@ -1,6 +1,7 @@
 ---
 description: Engineer uses Internal Event CLI to define a new event
 internal_events: true
+status: active
 action: internal_events_cli_used
 identifiers:
 - project
@@ -45,6 +45,7 @@ describe('WorkItemDrawer', () => {

  const createComponent = ({
    open = false,
+    isBoard = false,
    activeItem = { id: '1', iid: '1', webUrl: 'test', fullPath: 'gitlab-org/gitlab' },
    issuableType = TYPE_ISSUE,
    clickOutsideExcludeSelector = undefined,

@@ -62,6 +63,7 @@ describe('WorkItemDrawer', () => {
      open,
      issuableType,
      clickOutsideExcludeSelector,
+      isBoard,
    },
    listeners: {
      customEvent: mockListener,
@@ -421,6 +423,52 @@ describe('WorkItemDrawer', () => {
      await nextTick();

      expect(document.activeElement).toBe(document.getElementById('listItem-gitlab-org/gitlab/1'));
      expect(wrapper.emitted('close')).toHaveLength(1);
    });
  });
+
+  describe('when drawer is opened from a board', () => {
+    let originalWindow;
+
+    beforeEach(() => {
+      originalWindow = global.window;
+      delete global.window;
+
+      global.window = {
+        ...originalWindow,
+        pendingApolloRequests: 2,
+      };
+    });
+
+    afterEach(() => {
+      global.window = originalWindow;
+    });
+
+    it('does not close drawer immediately when `pendingApolloRequests` exist when clicking to close drawer', () => {
+      createComponent({ isBoard: true, open: true });
+
+      findGlDrawer().vm.$emit('close');
+
+      // `close` wasn't called right away, it has a delay
+      expect(wrapper.emitted('close')).toBeUndefined();
+    });
+
+    it('does not close drawer immediately when `pendingApolloRequests` exist when clicking outside', () => {
+      createComponent({ isBoard: true, open: true });
+
+      document.dispatchEvent(new MouseEvent('click'));
+
+      // `close` wasn't called right away, it has a delay
+      expect(wrapper.emitted('close')).toBeUndefined();
+    });
+
+    it('closes drawer when `bypassPendingRequests` is true regardless of pending mutations', () => {
+      createComponent({ isBoard: true, open: true });
+
+      findGlDrawer().vm.$emit('close', false, true);
+
+      // `close` was force called after the timeout by setting `bypassPendingRequests` to true
+      expect(wrapper.emitted('close')).toHaveLength(1);
+    });
+  });
});
@@ -86,17 +86,32 @@ RSpec.describe Gitlab::Database::Partitioning::IntRangePartition, feature_catego
    end
  end

-  describe '#to_sql' do
-    subject(:to_sql) { described_class.new(table, from, to).to_sql }
+  describe '#to_create_sql' do
+    subject(:to_create_sql) { described_class.new(table, from, to).to_create_sql }

    let(:table) { 'foo' }
    let(:from) { '1' }
    let(:to) { '10' }

-    it 'transforms to a CREATE TABLE statement' do
-      expect(to_sql).to eq(<<~SQL)
+    it 'creates a table with LIKE statement' do
+      expect(to_create_sql).to eq(<<~SQL)
        CREATE TABLE IF NOT EXISTS "#{Gitlab::Database::DYNAMIC_PARTITIONS_SCHEMA}"."foo_1"
-        PARTITION OF "foo"
+        (LIKE "foo" INCLUDING ALL)
      SQL
    end
  end
+
+  describe '#to_attach_sql' do
+    subject(:to_attach_sql) { described_class.new(table, from, to).to_attach_sql }
+
+    let(:table) { 'foo' }
+    let(:from) { '1' }
+    let(:to) { '10' }
+
+    it 'creates an ALTER TABLE ATTACH PARTITION statement' do
+      expect(to_attach_sql).to eq(<<~SQL)
+        ALTER TABLE "foo"
+        ATTACH PARTITION "#{Gitlab::Database::DYNAMIC_PARTITIONS_SCHEMA}"."foo_1"
+        FOR VALUES FROM ('1') TO ('10')
+      SQL
+    end
+  end
@@ -93,12 +93,21 @@ RSpec.describe Gitlab::Database::Partitioning::MultipleNumericListPartition, fea
    end
  end

-  describe '#to_sql' do
+  describe '#to_create_sql' do
    subject(:partition) { described_class.new('table', 10) }

-    it 'generates SQL' do
-      sql = 'CREATE TABLE IF NOT EXISTS "gitlab_partitions_dynamic"."table_10" PARTITION OF "table" FOR VALUES IN (10)'
-      expect(partition.to_sql).to eq(sql)
+    it 'generates SQL to create a table using LIKE' do
+      sql = 'CREATE TABLE IF NOT EXISTS "gitlab_partitions_dynamic"."table_10" (LIKE "table" INCLUDING ALL)'
+      expect(partition.to_create_sql).to eq(sql)
    end
  end
+
+  describe '#to_attach_sql' do
+    subject(:partition) { described_class.new('table', 10) }
+
+    it 'generates SQL to attach a partition' do
+      sql = 'ALTER TABLE "table" ATTACH PARTITION "gitlab_partitions_dynamic"."table_10" FOR VALUES IN (10)'
+      expect(partition.to_attach_sql).to eq(sql)
+    end
+  end

@@ -27,13 +27,11 @@ RSpec.describe Gitlab::Database::Partitioning::PartitionManager, feature_categor
      instance_double(Gitlab::Database::Partitioning::TimePartition,
        table: 'bar',
        partition_name: 'foo',
-        to_sql: "SELECT 1",
+        to_create_sql: "CREATE TABLE _partition_1",
+        to_attach_sql: "ALTER TABLE foo ATTACH PARTITION _partition_1"),
      instance_double(Gitlab::Database::Partitioning::TimePartition,
        table: 'bar',
        partition_name: 'foo2',
-        to_sql: "SELECT 2",
+        to_create_sql: "CREATE TABLE _partition_2",
+        to_attach_sql: "ALTER TABLE foo2 ATTACH PARTITION _partition_2")
    ]
@@ -49,42 +47,27 @@ RSpec.describe Gitlab::Database::Partitioning::PartitionManager, feature_categor
      expect(partitioning_strategy).to receive(:validate_and_fix)

      stub_exclusive_lease(described_class::MANAGEMENT_LEASE_KEY % table, timeout: described_class::LEASE_TIMEOUT)
-      stub_feature_flags(reduce_lock_usage_during_partition_creation: false)
    end

-    context 'with reduce_lock_usage_during_partition_creation feature flag enabled' do
-      before do
-        stub_feature_flags(reduce_lock_usage_during_partition_creation: true)
-      end
-
-      it 'creates and attaches the partition in 2 steps', :aggregate_failures do
-        expect(connection).not_to receive(:execute).with("LOCK TABLE \"#{table}\" IN ACCESS EXCLUSIVE MODE")
-        expect(manager).to receive(:create_partition_tables).with(partitions)
-        expect(manager).to receive(:attach_partition_tables).with(partitions)
-
-        sync_partitions
-      end
-    end
-
-    it 'creates the partition' do
-      expect(connection).to receive(:execute).with("LOCK TABLE \"#{table}\" IN ACCESS EXCLUSIVE MODE")
-      expect(connection).to receive(:execute).with(partitions.first.to_sql)
-      expect(connection).to receive(:execute).with(partitions.second.to_sql)
+    it 'creates and attaches the partition in 2 steps', :aggregate_failures do
+      expect(connection).not_to receive(:execute).with("LOCK TABLE \"#{table}\" IN ACCESS EXCLUSIVE MODE")
+      expect(manager).to receive(:create_partition_tables).with(partitions)
+      expect(manager).to receive(:attach_partition_tables).with(partitions)

      sync_partitions
    end

    context 'with explicitly provided connection' do
      let(:connection) { Ci::ApplicationRecord.connection }
      let(:manager) { described_class.new(model, connection: connection) }

-      it 'uses the explicitly provided connection when any' do
+      it 'uses the explicitly provided connection when any', :aggregate_failures do
        skip_if_multiple_databases_not_setup(:ci)

-        expect(connection).to receive(:execute).with("LOCK TABLE \"#{table}\" IN ACCESS EXCLUSIVE MODE")
-        expect(connection).to receive(:execute).with(partitions.first.to_sql)
-        expect(connection).to receive(:execute).with(partitions.second.to_sql)
+        expect(manager).to receive(:create_partition_tables).with(partitions)
+        expect(manager).to receive(:attach_partition_tables).with(partitions)

-        described_class.new(model, connection: connection).sync_partitions
+        sync_partitions
      end
    end

@@ -331,7 +314,8 @@ RSpec.describe Gitlab::Database::Partitioning::PartitionManager, feature_categor

      # Also create all future partitions so that the sync is only trying to detach old partitions
      my_model.partitioning_strategy.missing_partitions.each do |p|
-        connection.execute p.to_sql
+        connection.execute p.to_create_sql
+        connection.execute p.to_attach_sql
      end
    end

@@ -43,8 +43,8 @@ RSpec.describe Gitlab::Database::Partitioning::TimePartition, feature_category:
    end
  end

-  describe '#to_sql' do
-    subject { described_class.new(table, from, to, partition_name: partition_name).to_sql }
+  describe '#to_create_sql' do
+    subject { described_class.new(table, from, to, partition_name: partition_name).to_create_sql }

    let(:table) { 'foo' }
    let(:from) { '2020-04-01 00:00:00' }
@@ -52,10 +52,27 @@ RSpec.describe Gitlab::Database::Partitioning::TimePartition, feature_category:
    let(:suffix) { '202004' }
    let(:partition_name) { "#{table}_#{suffix}" }

-    it 'transforms to a CREATE TABLE statement' do
+    it 'creates a table with LIKE statement' do
      expect(subject).to eq(<<~SQL)
        CREATE TABLE IF NOT EXISTS "#{Gitlab::Database::DYNAMIC_PARTITIONS_SCHEMA}"."foo_202004"
-        PARTITION OF "foo"
+        (LIKE "foo" INCLUDING ALL)
      SQL
    end
  end
+
+  describe '#to_attach_sql' do
+    subject { described_class.new(table, from, to, partition_name: partition_name).to_attach_sql }
+
+    let(:table) { 'foo' }
+    let(:from) { '2020-04-01 00:00:00' }
+    let(:to) { '2020-05-01 00:00:00' }
+    let(:suffix) { '202004' }
+    let(:partition_name) { "#{table}_#{suffix}" }
+
+    it 'creates an ALTER TABLE ATTACH PARTITION statement' do
+      expect(subject).to eq(<<~SQL)
+        ALTER TABLE "foo"
+        ATTACH PARTITION "#{Gitlab::Database::DYNAMIC_PARTITIONS_SCHEMA}"."foo_202004"
+        FOR VALUES FROM ('2020-04-01') TO ('2020-05-01')
+      SQL
+    end
@@ -66,8 +83,8 @@ RSpec.describe Gitlab::Database::Partitioning::TimePartition, feature_category:

    it 'uses MINVALUE instead' do
      expect(subject).to eq(<<~SQL)
-        CREATE TABLE IF NOT EXISTS "#{Gitlab::Database::DYNAMIC_PARTITIONS_SCHEMA}"."foo_000000"
-        PARTITION OF "foo"
+        ALTER TABLE "foo"
+        ATTACH PARTITION "#{Gitlab::Database::DYNAMIC_PARTITIONS_SCHEMA}"."foo_000000"
        FOR VALUES FROM (MINVALUE) TO ('2020-05-01')
      SQL
    end
@@ -3,6 +3,8 @@
 require 'spec_helper'

 RSpec.describe Gitlab::Tracking::EventDefinitionValidator, feature_category: :service_ping do
+  using RSpec::Parameterized::TableSyntax
+
  let(:attributes) do
    {
      description: 'Created issues',

@@ -23,8 +25,6 @@ RSpec.describe Gitlab::Tracking::EventDefinitionValidator, feature_category: :se
  let(:definition) { Gitlab::Tracking::EventDefinition.new(path, attributes) }

  describe '#validate' do
-    using RSpec::Parameterized::TableSyntax
-
    where(:attribute, :value) do
      :description | 1
      :category | nil
@@ -37,6 +37,9 @@ RSpec.describe Gitlab::Tracking::EventDefinitionValidator, feature_category: :se
      :tiers | %(pro)
      :product_categories | 'bad_category'
      :product_categories | ['bad_category']
+      :status | 'destroyed'
+      :removed_by_url | 'non/url'
+      :milestone_removed | 'a.b.c'
    end

    with_them do
@@ -99,5 +102,42 @@ RSpec.describe Gitlab::Tracking::EventDefinitionValidator, feature_category: :se
      it { is_expected.to be(error?) }
    end
  end
+
+  describe 'status' do
+    let(:attributes) do
+      {
+        description: 'Created issues',
+        category: 'issues',
+        action: 'create',
+        internal_events: true,
+        product_group: 'activation',
+        introduced_by_url: "https://gitlab.com/example/-/merge_requests/123",
+        milestone: "1.0",
+        tiers: %w[free]
+      }
+    end
+
+    where(:status, :milestone_removed, :removed_by_url, :error?) do
+      'active'  | nil   | nil                                               | false
+      'removed' | '1.0' | 'https://gitlab.com/example/-/merge_requests/123' | false
+      'removed' | nil   | 'https://gitlab.com/example/-/merge_requests/123' | true
+      'removed' | '1.0' | nil                                               | true
+      'removed' | nil   | nil                                               | true
+      'active'  | '1.0' | nil                                               | true
+      'active'  | nil   | 'https://gitlab.com/example/-/merge_requests/123' | true
+    end
+
+    with_them do
+      before do
+        attributes[:status] = status
+        attributes[:milestone_removed] = milestone_removed if milestone_removed
+        attributes[:removed_by_url] = removed_by_url if removed_by_url
+      end
+
+      subject { described_class.new(definition).validation_errors.any? }
+
+      it { is_expected.to be(error?) }
+    end
+  end
end

@ -1434,6 +1434,65 @@ RSpec.describe API::Internal::Base, feature_category: :system_access do
end
end
end

context 'workhorse circuit breaker header' do
let(:request) do
post(api('/internal/allowed'),
params: {
key_id: key.id,
project: project.full_path,
gl_repository: gl_repository,
protocol: protocol
}, headers: headers
)
end

shared_examples 'a response with no circuit breaker header' do
it 'does not add an Enable-Workhorse-Circuit-Breaker header to the response' do
request

expect(response.headers['Enable-Workhorse-Circuit-Breaker']).to be_nil
end
end

context 'with the feature flag enabled' do
context 'with an ssh protocol and Gitlab-Shell-Api-Request header' do
let(:protocol) { 'ssh' }
let(:headers) { gitlab_shell_internal_api_request_header }

it 'adds an Enable-Workhorse-Circuit-Breaker header to the response' do
request

expect(response.headers['Enable-Workhorse-Circuit-Breaker']).to eq('true')
end
end

context 'without ssh protocol' do
let(:protocol) { 'http' }
let(:headers) { gitlab_shell_internal_api_request_header }

it_behaves_like 'a response with no circuit breaker header'
end

context 'without the Gitlab-Shell-Api-Request header' do
let(:protocol) { 'ssh' }
let(:headers) { {} }

it_behaves_like 'a response with no circuit breaker header'
end
end

context 'with the feature flag disabled' do
let(:protocol) { 'ssh' }
let(:headers) { gitlab_shell_internal_api_request_header }

before do
stub_feature_flags(workhorse_circuit_breaker: false)
end

it_behaves_like 'a response with no circuit breaker header'
end
end
end

describe 'POST /internal/post_receive', :clean_gitlab_redis_shared_state do

@ -804,6 +804,21 @@ RSpec.describe API::Releases, :aggregate_failures, feature_category: :release_or
expect(uri.path).to eq("/api/v4/projects/#{project.id}/releases/#{release_b.tag}/downloads/bin/example.exe")
end
end

context 'when a relative URL is configured' do
before do
stub_config_setting(relative_url_root: '/gitlab-relative-url')
end

it 'includes the relative URL in the redirect' do
get api("/projects/#{project.id}/releases/permalink/latest", maintainer)

uri = URI(response.header["Location"])

expect(response).to have_gitlab_http_status(:redirect)
expect(uri.path).to eq("/gitlab-relative-url/api/v4/projects/#{project.id}/releases/#{release_b.tag}")
end
end
end
end

@ -82,6 +82,7 @@ RSpec.describe Tooling::Danger::StableBranch, feature_category: :delivery do
let(:source_branch) { 'my_bug_branch' }
let(:feature_label_present) { false }
let(:bug_label_present) { true }
let(:tier_3_label_present) { false }
let(:pipeline_expedite_label_present) { false }
let(:flaky_test_label_present) { false }
let(:response_success) { true }

@ -154,6 +155,7 @@ RSpec.describe Tooling::Danger::StableBranch, feature_category: :delivery do
.and_return(pipeline_expedite_label_present)
allow(fake_helper).to receive(:mr_has_labels?).with('failure::flaky-test')
.and_return(flaky_test_label_present)
allow(fake_helper).to receive(:mr_has_labels?).with('pipeline::tier-3').and_return(tier_3_label_present)
allow(fake_helper).to receive(:changes_by_category).and_return(changes_by_category_response)
allow(HTTParty).to receive(:get).with(/page=1/).and_return(version_response)
allow(fake_helper).to receive(:api).and_return(fake_api)

@ -194,111 +196,119 @@ RSpec.describe Tooling::Danger::StableBranch, feature_category: :delivery do
it_behaves_like 'bypassing when flaky test or docs only'
end

context 'when no test-on-omnibus bridge is found' do
let(:pipeline_bridges_response) { nil }

it_behaves_like 'with a failure', described_class::NEEDS_PACKAGE_AND_TEST_MESSAGE
it_behaves_like 'bypassing when flaky test or docs only'
end

context 'when test-on-omnibus bridge is created' do
let(:pipeline_bridge_state) { 'created' }

it_behaves_like 'with a warning', described_class::WARN_PACKAGE_AND_TEST_MESSAGE
it_behaves_like 'bypassing when flaky test or docs only'
end

context 'when test-on-omnibus bridge has been canceled and no downstream pipeline is generated' do
let(:pipeline_bridge_state) { 'canceled' }

let(:pipeline_bridges_response) do
[
{
'name' => 'e2e:test-on-omnibus-ee',
'status' => pipeline_bridge_state,
'downstream_pipeline' => nil
}
]
end

it_behaves_like 'with a failure', described_class::NEEDS_PACKAGE_AND_TEST_MESSAGE
it_behaves_like 'bypassing when flaky test or docs only'
end

context 'when test-on-omnibus job is in a non-successful state' do
let(:package_and_qa_state) { 'running' }

it_behaves_like 'with a warning', described_class::WARN_PACKAGE_AND_TEST_MESSAGE
it_behaves_like 'bypassing when flaky test or docs only'
end

context 'when test-on-omnibus job is in manual state' do
let(:package_and_qa_state) { 'manual' }

it_behaves_like 'with a failure', described_class::NEEDS_PACKAGE_AND_TEST_MESSAGE
it_behaves_like 'bypassing when flaky test or docs only'
end

context 'when test-on-omnibus job is canceled' do
let(:package_and_qa_state) { 'canceled' }

it_behaves_like 'with a failure', described_class::NEEDS_PACKAGE_AND_TEST_MESSAGE
it_behaves_like 'bypassing when flaky test or docs only'
end

context 'when no pipeline is found' do
before do
allow(gitlab_gem_client).to receive(:mr_json).and_return({})
end

it_behaves_like 'with a failure', described_class::NEEDS_PACKAGE_AND_TEST_MESSAGE
it_behaves_like 'bypassing when flaky test or docs only'
end

context 'when not an applicable version' do
let(:target_branch) { '15-0-stable-ee' }

it 'warns about the test-on-omnibus pipeline and the version' do
expect(stable_branch).to receive(:warn).with(described_class::WARN_PACKAGE_AND_TEST_MESSAGE)
expect(stable_branch).to receive(:warn).with(described_class::VERSION_WARNING_MESSAGE)

subject
end
end

context 'with multiple test-on-omnibus pipelines' do
let(:pipeline_bridges_response) do
[
{
'name' => 'e2e:test-on-omnibus-ee',
'status' => 'success',
'downstream_pipeline' => {
'id' => '123',
'status' => package_and_qa_state
}
},
{
'name' => 'follow-up:e2e:test-on-omnibus-ee',
'status' => 'failed',
'downstream_pipeline' => {
'id' => '456',
'status' => 'failed'
}
}
]
end

context 'without a pipeline::tier-3 label' do
it_behaves_like 'without a failure'
end

context 'when the version API request fails' do
let(:response_success) { false }
context 'with a pipeline::tier-3 label' do
let(:tier_3_label_present) { true }

it 'warns about the test-on-omnibus pipeline and the version request' do
expect(stable_branch).to receive(:warn).with(described_class::WARN_PACKAGE_AND_TEST_MESSAGE)
expect(stable_branch).to receive(:warn).with(described_class::FAILED_VERSION_REQUEST_MESSAGE)
context 'when no test-on-omnibus bridge is found' do
let(:pipeline_bridges_response) { nil }

subject
it_behaves_like 'with a failure', described_class::NEEDS_PACKAGE_AND_TEST_MESSAGE
it_behaves_like 'bypassing when flaky test or docs only'
end

context 'when test-on-omnibus bridge is created' do
let(:pipeline_bridge_state) { 'created' }

it_behaves_like 'with a warning', described_class::WARN_PACKAGE_AND_TEST_MESSAGE
it_behaves_like 'bypassing when flaky test or docs only'
end

context 'when test-on-omnibus bridge has been canceled and no downstream pipeline is generated' do
let(:pipeline_bridge_state) { 'canceled' }

let(:pipeline_bridges_response) do
[
{
'name' => 'e2e:test-on-omnibus-ee',
'status' => pipeline_bridge_state,
'downstream_pipeline' => nil
}
]
end

it_behaves_like 'with a failure', described_class::NEEDS_PACKAGE_AND_TEST_MESSAGE
it_behaves_like 'bypassing when flaky test or docs only'
end

context 'when test-on-omnibus job is in a non-successful state' do
let(:package_and_qa_state) { 'running' }

it_behaves_like 'with a warning', described_class::WARN_PACKAGE_AND_TEST_MESSAGE
it_behaves_like 'bypassing when flaky test or docs only'
end

context 'when test-on-omnibus job is in manual state' do
let(:package_and_qa_state) { 'manual' }

it_behaves_like 'with a failure', described_class::NEEDS_PACKAGE_AND_TEST_MESSAGE
it_behaves_like 'bypassing when flaky test or docs only'
end

context 'when test-on-omnibus job is canceled' do
let(:package_and_qa_state) { 'canceled' }

it_behaves_like 'with a failure', described_class::NEEDS_PACKAGE_AND_TEST_MESSAGE
it_behaves_like 'bypassing when flaky test or docs only'
end

context 'when no pipeline is found' do
before do
allow(gitlab_gem_client).to receive(:mr_json).and_return({})
end

it_behaves_like 'with a failure', described_class::NEEDS_PACKAGE_AND_TEST_MESSAGE
it_behaves_like 'bypassing when flaky test or docs only'
end

context 'when not an applicable version' do
let(:target_branch) { '15-0-stable-ee' }

it 'warns about the test-on-omnibus pipeline and the version' do
expect(stable_branch).to receive(:warn).with(described_class::WARN_PACKAGE_AND_TEST_MESSAGE)
expect(stable_branch).to receive(:warn).with(described_class::VERSION_WARNING_MESSAGE)

subject
end
end

context 'with multiple test-on-omnibus pipelines' do
let(:pipeline_bridges_response) do
[
{
'name' => 'e2e:test-on-omnibus-ee',
'status' => 'success',
'downstream_pipeline' => {
'id' => '123',
'status' => package_and_qa_state
}
},
{
'name' => 'follow-up:e2e:test-on-omnibus-ee',
'status' => 'failed',
'downstream_pipeline' => {
'id' => '456',
'status' => 'failed'
}
}
]
end

it_behaves_like 'without a failure'
end

context 'when the version API request fails' do
let(:response_success) { false }

it 'warns about the test-on-omnibus pipeline and the version request' do
expect(stable_branch).to receive(:warn).with(described_class::WARN_PACKAGE_AND_TEST_MESSAGE)
expect(stable_branch).to receive(:warn).with(described_class::FAILED_VERSION_REQUEST_MESSAGE)

subject
end
end
end

@ -78,6 +78,8 @@ module Tooling

fail PIPELINE_EXPEDITED_ERROR_MESSAGE if has_pipeline_expedited_label?

return unless has_tier_3_label?

status = package_and_test_bridge_and_pipeline_status

if status.nil? || FAILING_PACKAGE_AND_TEST_STATUSES.include?(status) # rubocop:disable Style/GuardClause

@ -147,6 +149,10 @@ module Tooling
has_bug_label? || has_only_documentation_changes?
end

def has_tier_3_label?
helper.mr_has_labels?('pipeline::tier-3')
end

def has_only_documentation_changes?
categories_changed = helper.changes_by_category.keys
return false unless categories_changed.size == 1

@ -35,7 +35,7 @@ require (
go.uber.org/goleak v1.3.0
gocloud.dev v0.40.1-0.20241107185025-56954848c3aa
golang.org/x/image v0.20.0
golang.org/x/net v0.39.0
golang.org/x/net v0.40.0
golang.org/x/oauth2 v0.28.0
google.golang.org/grpc v1.71.1
google.golang.org/protobuf v1.36.6

@ -155,11 +155,11 @@ require (
go.opentelemetry.io/otel/sdk/metric v1.34.0 // indirect
go.opentelemetry.io/otel/trace v1.34.0 // indirect
go.uber.org/atomic v1.11.0 // indirect
golang.org/x/crypto v0.37.0 // indirect
golang.org/x/crypto v0.38.0 // indirect
golang.org/x/mod v0.23.0 // indirect
golang.org/x/sync v0.13.0 // indirect
golang.org/x/sys v0.32.0 // indirect
golang.org/x/text v0.24.0 // indirect
golang.org/x/sync v0.14.0 // indirect
golang.org/x/sys v0.33.0 // indirect
golang.org/x/text v0.25.0 // indirect
golang.org/x/time v0.11.0 // indirect
golang.org/x/tools v0.30.0 // indirect
golang.org/x/xerrors v0.0.0-20240716161551-93cc26a95ae9 // indirect

@ -681,8 +681,8 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc=
golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg=
golang.org/x/crypto v0.37.0 h1:kJNSjF/Xp7kU0iB2Z+9viTPMW4EqqsrywMXLJOOsXSE=
golang.org/x/crypto v0.37.0/go.mod h1:vg+k43peMZ0pUMhYmVAWysMK35e6ioLh3wB8ZCAfbVc=
golang.org/x/crypto v0.38.0 h1:jt+WWG8IZlBnVbomuhg2Mdq0+BBQaHbtqHEFEigjUV8=
golang.org/x/crypto v0.38.0/go.mod h1:MvrbAqul58NNYPKnOra203SB9vpuZW0e+RRZV+Ggqjw=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=

@ -772,8 +772,8 @@ golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns=
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk=
golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY=
golang.org/x/net v0.39.0 h1:ZCu7HMWDxpXpaiKdhzIfaltL9Lp31x/3fCP11bc6/fY=
golang.org/x/net v0.39.0/go.mod h1:X7NRbYVEA+ewNkCNyJ513WmMdQ3BineSwVtN2zD/d+E=
golang.org/x/net v0.40.0 h1:79Xs7wF06Gbdcg4kdCCIQArK11Z1hr5POQ6+fIYHNuY=
golang.org/x/net v0.40.0/go.mod h1:y0hY0exeL2Pku80/zKK7tpntoX23cqL3Oa6njdgRtds=
golang.org/x/oauth2 v0.0.0-20170912212905-13449ad91cb2/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=

@ -807,8 +807,8 @@ golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.13.0 h1:AauUjRAJ9OSnvULf/ARrrVywoJDy0YS2AwQ98I37610=
golang.org/x/sync v0.13.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/sync v0.14.0 h1:woo0S4Yywslg6hp4eUFjTVOyKt0RookbpAHG4c1HmhQ=
golang.org/x/sync v0.14.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=

@ -870,8 +870,8 @@ golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.32.0 h1:s77OFDvIQeibCmezSnk/q6iAfkdiQaJi4VzroCFrN20=
golang.org/x/sys v0.32.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw=
golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=

@ -894,8 +894,8 @@ golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/text v0.24.0 h1:dd5Bzh4yt5KYA8f9CJHCP4FB4D51c2c6JvN37xJJkJ0=
golang.org/x/text v0.24.0/go.mod h1:L8rBsPeo2pSS+xqN0d5u2ikmjtmoJbDBT1b7nHvFCdU=
golang.org/x/text v0.25.0 h1:qVyWApTSYLk/drJRO5mDlNYskwQznZmkpV2c8q9zls4=
golang.org/x/text v0.25.0/go.mod h1:WEdwpYrmk1qmdHvhkSTNPm3app7v4rsT8F2UD6+VHIA=
golang.org/x/time v0.0.0-20170424234030-8be79e1e0910/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=

@ -7,7 +7,6 @@ import (
"bytes"
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
"time"

@ -20,10 +19,12 @@
)

const (
Timeout = 60 * time.Second // Timeout is the duration to transition to half-open when open
Interval = 180 * time.Second // Interval is the duration to clear consecutive failures (and other gobreaker.Counts) when closed
MaxRequests = 1 // MaxRequests is the number of failed requests to open the circuit breaker when half-open
ConsecutiveFailures = 5 // ConsecutiveFailures is the number of consecutive failures to open the circuit breaker when closed
enableCircuitBreakerHeader = "Enable-Workhorse-Circuit-Breaker"
errorMsg = "This endpoint has been requested too many times. Try again later."
)
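
The commented constants above map one-to-one onto sony/gobreaker's Settings struct. As a minimal, self-contained sketch of that wiring (not part of this commit; the breaker name and the failing call below are hypothetical placeholders), they would be used roughly like this:

// sketch.go — illustrative only, assuming the sony/gobreaker v1 API
package main

import (
	"errors"
	"fmt"
	"time"

	"github.com/sony/gobreaker"
)

func main() {
	cb := gobreaker.NewCircuitBreaker(gobreaker.Settings{
		Name:        "workhorse-api",   // hypothetical name
		MaxRequests: 1,                 // requests let through while half-open
		Interval:    180 * time.Second, // closed-state counters reset period
		Timeout:     60 * time.Second,  // how long the breaker stays open before half-open
		ReadyToTrip: func(counts gobreaker.Counts) bool {
			return counts.ConsecutiveFailures >= 5 // mirrors ConsecutiveFailures above
		},
	})

	// After five consecutive failures the breaker opens and subsequent
	// calls fail fast with gobreaker.ErrOpenState.
	for i := 0; i < 6; i++ {
		_, err := cb.Execute(func() (interface{}, error) {
			return nil, errors.New("simulated upstream failure")
		})
		fmt.Println(i, err)
	}
}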

type roundTripper struct {

@ -62,10 +63,7 @@ func (r roundTripper) RoundTrip(req *http.Request) (res *http.Response, err erro
return nil, roundTripErr
}

err = roundTripRes.Body.Close()
if err != nil {
return nil, err
}
defer func() { _ = roundTripRes.Body.Close() }()

return roundTripRes, responseToError(roundTripRes)
})

@ -75,7 +73,6 @@ func (r roundTripper) RoundTrip(req *http.Request) (res *http.Response, err erro
}

if errors.Is(executeErr, gobreaker.ErrOpenState) {
errorMsg := "This endpoint has been requested too many times. Try again later."
resp := &http.Response{
StatusCode: http.StatusTooManyRequests,
Body: io.NopCloser(bytes.NewBufferString(errorMsg)),

@ -141,17 +138,9 @@ func getRedisKey(req *http.Request) (string, error) {

// If there was a Too Many Requests error in the http response, return an error to be passed into IsSuccessful()
func responseToError(res *http.Response) error {
if res.StatusCode != http.StatusTooManyRequests {
if res.Header.Get(enableCircuitBreakerHeader) != "true" || res.StatusCode != http.StatusTooManyRequests {
return nil
}

body, err := io.ReadAll(res.Body)
if err != nil {
return fmt.Errorf("failed to read response body: %w", err)
}

defer func() { _ = res.Body.Close() }()
res.Body = io.NopCloser(bytes.NewBuffer(body))

return errors.New(string(body))
return errors.New("rate limited")
}
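
Per the comment above, the sentinel error returned here is what a gobreaker IsSuccessful hook would inspect, so that only 429 responses explicitly flagged with the Enable-Workhorse-Circuit-Breaker header count as breaker failures. A minimal sketch under that assumption (the settings value is illustrative, not taken from this commit):

// sketch only, assuming the sony/gobreaker v1 API
package main

import (
	"errors"

	"github.com/sony/gobreaker"
)

func main() {
	settings := gobreaker.Settings{
		Name: "workhorse-api", // hypothetical name
		IsSuccessful: func(err error) bool {
			// A non-nil error (e.g. the "rate limited" sentinel from a
			// responseToError-style check) counts as a failed request.
			return err == nil
		},
	}
	cb := gobreaker.NewCircuitBreaker(settings)

	_, err := cb.Execute(func() (interface{}, error) {
		return nil, errors.New("rate limited")
	})
	_ = err
}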

@ -49,7 +49,8 @@ func TestRoundTripCircuitBreaker(t *testing.T) {
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
delegateResponseHeader := http.Header{
tc.name: []string{tc.name},
enableCircuitBreakerHeader: []string{"true"},
}
mockRT := &mockRoundTripper{
response: &http.Response{

@ -104,6 +105,64 @@ func TestRoundTripCircuitBreaker(t *testing.T) {
}
}

func TestResponseToErrorHeaderCondition(t *testing.T) {
testCases := []struct {
name string
headerValue string
statusCode int
expectedError bool
expectedErrMsg string
}{
{
name: "Header true with 429 status",
headerValue: "true",
statusCode: http.StatusTooManyRequests,
expectedError: true,
expectedErrMsg: "rate limited",
},
{
name: "Header false with 429 status",
headerValue: "false",
statusCode: http.StatusTooManyRequests,
expectedError: false,
},
{
name: "Missing header with 429 status",
headerValue: "",
statusCode: http.StatusTooManyRequests,
expectedError: false,
},
{
name: "Header true with 200 status",
headerValue: "true",
statusCode: http.StatusOK,
expectedError: false,
},
}

for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
header := http.Header{}
if tc.headerValue != "" {
header.Set(enableCircuitBreakerHeader, tc.headerValue)
}

res := &http.Response{
StatusCode: tc.statusCode,
Header: header,
}

err := responseToError(res)

if tc.expectedError {
require.EqualError(t, err, tc.expectedErrMsg)
} else {
assert.NoError(t, err)
}
})
}
}

func TestRedisConfigErrors(t *testing.T) {
mockRT := &mockRoundTripper{
response: &http.Response{