Add latest changes from gitlab-org/gitlab@master

parent e5f1831403
commit f5987db869
@@ -150,6 +150,7 @@ review-stop:
     GIT_STRATEGY: none
     STORAGE_CREDENTIALS: $QA_ALLURE_REPORT_GCS_CREDENTIALS
     GITLAB_AUTH_TOKEN: $GITLAB_QA_MR_ALLURE_REPORT_TOKEN
+  allow_failure: true
   script:
     - |
      allure-report-publisher upload gcs \
@@ -1125,10 +1125,8 @@
       changes: *ci-review-patterns
     - <<: *if-dot-com-gitlab-org-merge-request
       changes: *frontend-patterns
-      allow_failure: true
     - <<: *if-dot-com-gitlab-org-merge-request
       changes: *code-qa-patterns
-      allow_failure: true
 
 .review:rules:review-qa-smoke-report:
   rules:

@@ -1139,11 +1137,9 @@
       when: always
     - <<: *if-dot-com-gitlab-org-merge-request
       changes: *frontend-patterns
-      allow_failure: true
       when: always
     - <<: *if-dot-com-gitlab-org-merge-request
       changes: *code-qa-patterns
-      allow_failure: true
       when: always
 
 .review:rules:review-qa-all:
@@ -1 +1 @@
-55cb537898bce04e5e44be074a4d3d441e1f62b6
+67e5b2eaf3eb6c3a6068a212f3aac9e3f3f5201d
@@ -68,9 +68,9 @@ GitLab is an open source project and we are very happy to accept community contr
 
 To work on GitLab itself, we recommend setting up your development environment with [the GitLab Development Kit](https://gitlab.com/gitlab-org/gitlab-development-kit).
 If you do not use the GitLab Development Kit you need to install and setup all the dependencies yourself, this is a lot of work and error prone.
-One small thing you also have to do when installing it yourself is to copy the example development Unicorn configuration file:
+One small thing you also have to do when installing it yourself is to copy the example development Puma configuration file:
 
-    cp config/unicorn.rb.example.development config/unicorn.rb
+    cp config/puma.rb.example.development config/puma.rb
 
 Instructions on how to start GitLab and how to run the tests can be found in the [getting started section of the GitLab Development Kit](https://gitlab.com/gitlab-org/gitlab-development-kit#getting-started).
@@ -2,16 +2,13 @@
 import filesQuery from 'shared_queries/repository/files.query.graphql';
 import createFlash from '~/flash';
 import { __ } from '../../locale';
+import { TREE_PAGE_SIZE, TREE_INITIAL_FETCH_COUNT } from '../constants';
 import getRefMixin from '../mixins/get_ref';
 import projectPathQuery from '../queries/project_path.query.graphql';
 import { readmeFile } from '../utils/readme';
 import FilePreview from './preview/index.vue';
 import FileTable from './table/index.vue';
 
-const LIMIT = 1000;
-const PAGE_SIZE = 100;
-export const INITIAL_FETCH_COUNT = LIMIT / PAGE_SIZE;
-
 export default {
   components: {
     FileTable,
@@ -47,7 +44,7 @@ export default {
       isLoadingFiles: false,
       isOverLimit: false,
       clickedShowMore: false,
-      pageSize: PAGE_SIZE,
+      pageSize: TREE_PAGE_SIZE,
       fetchCounter: 0,
     };
   },
@@ -56,7 +53,7 @@ export default {
       return readmeFile(this.entries.blobs);
     },
     hasShowMore() {
-      return !this.clickedShowMore && this.fetchCounter === INITIAL_FETCH_COUNT;
+      return !this.clickedShowMore && this.fetchCounter === TREE_INITIAL_FETCH_COUNT;
     },
   },
 
@@ -107,7 +104,7 @@ export default {
       if (pageInfo?.hasNextPage) {
         this.nextPageCursor = pageInfo.endCursor;
         this.fetchCounter += 1;
-        if (this.fetchCounter < INITIAL_FETCH_COUNT || this.clickedShowMore) {
+        if (this.fetchCounter < TREE_INITIAL_FETCH_COUNT || this.clickedShowMore) {
           this.fetchFiles();
           this.clickedShowMore = false;
         }
@@ -0,0 +1,4 @@
+const TREE_PAGE_LIMIT = 1000; // the maximum amount of items per page
+
+export const TREE_PAGE_SIZE = 100; // the amount of items to be fetched per (batch) request
+export const TREE_INITIAL_FETCH_COUNT = TREE_PAGE_LIMIT / TREE_PAGE_SIZE; // the amount of (batch) requests to make
@@ -49,7 +49,7 @@ module Projects
 
     def has_prometheus?(environment_scope)
       finders_for_scope(environment_scope).any? do |finder|
-        finder.cluster.application_prometheus_available?
+        finder.cluster.integration_prometheus_available?
       end
     end
 
@@ -74,7 +74,6 @@ module EnvironmentsHelper
       'metrics_dashboard_base_path' => metrics_dashboard_base_path(environment, project),
       'current_environment_name' => environment.name,
       'has_metrics' => "#{environment.has_metrics?}",
-      'prometheus_status' => "#{environment.prometheus_status}",
       'environment_state' => "#{environment.state}"
     }
   end
@@ -141,13 +141,13 @@ module Clusters
       end
 
       def install_knative_metrics
-        return [] unless cluster.application_prometheus_available?
+        return [] unless cluster.application_prometheus&.available?
 
        [Gitlab::Kubernetes::KubectlCmd.apply_file(METRICS_CONFIG)]
       end
 
       def delete_knative_istio_metrics
-        return [] unless cluster.application_prometheus_available?
+        return [] unless cluster.application_prometheus&.available?
 
        [Gitlab::Kubernetes::KubectlCmd.delete("--ignore-not-found", "-f", METRICS_CONFIG)]
       end
@@ -104,8 +104,8 @@ module Clusters
     delegate :available?, to: :application_helm, prefix: true, allow_nil: true
     delegate :available?, to: :application_ingress, prefix: true, allow_nil: true
     delegate :available?, to: :application_knative, prefix: true, allow_nil: true
-    delegate :available?, to: :application_elastic_stack, prefix: true, allow_nil: true
+    delegate :available?, to: :integration_elastic_stack, prefix: true, allow_nil: true
     delegate :available?, to: :integration_prometheus, prefix: true, allow_nil: true
     delegate :external_ip, to: :application_ingress, prefix: true, allow_nil: true
     delegate :external_hostname, to: :application_ingress, prefix: true, allow_nil: true
 
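Note: the `integration_prometheus_available?` and `integration_elastic_stack_available?` predicates used throughout this commit are generated by these `delegate` macros. A minimal standalone sketch of that ActiveSupport behavior, assuming the `activesupport` gem is installed (class names here are hypothetical, not from the commit):

```ruby
require 'active_support/core_ext/module/delegation'

class Integration
  def available?
    true
  end
end

class Cluster
  attr_accessor :integration_prometheus

  # Generates Cluster#integration_prometheus_available?; allow_nil makes it
  # return nil (instead of raising NoMethodError) when the target is absent.
  delegate :available?, to: :integration_prometheus, prefix: true, allow_nil: true
end

cluster = Cluster.new
cluster.integration_prometheus_available? # => nil (no integration set yet)
cluster.integration_prometheus = Integration.new
cluster.integration_prometheus_available? # => true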
@@ -142,7 +142,7 @@ module Clusters
     scope :with_available_elasticstack, -> { joins(:application_elastic_stack).merge(::Clusters::Applications::ElasticStack.available) }
     scope :with_available_cilium, -> { joins(:application_cilium).merge(::Clusters::Applications::Cilium.available) }
     scope :distinct_with_deployed_environments, -> { joins(:environments).merge(::Deployment.success).distinct }
-    scope :preload_elasticstack, -> { preload(:application_elastic_stack) }
+    scope :preload_elasticstack, -> { preload(:integration_elastic_stack) }
     scope :preload_environments, -> { preload(:environments) }
 
     scope :managed, -> { where(managed: true) }
@@ -325,7 +325,7 @@ module Clusters
     end
 
     def elastic_stack_adapter
-      application_elastic_stack || integration_elastic_stack
+      integration_elastic_stack
     end
 
     def elasticsearch_client
@@ -333,11 +333,7 @@ module Clusters
     end
 
     def elastic_stack_available?
-      if application_elastic_stack_available? || integration_elastic_stack_available?
-        true
-      else
-        false
-      end
+      !!integration_elastic_stack_available?
     end
 
     def kubernetes_namespace_for(environment, deployable: environment.last_deployable)
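Note: the `!!` in the new `elastic_stack_available?` works because double negation coerces any truthy value to `true` and `nil`/`false` to `false`, which is exactly what the removed `if`/`else` spelled out. A quick illustration (values hypothetical):

```ruby
# !! collapses Ruby's truthiness into a literal boolean.
!!"prometheus" # => true
!!nil          # => false
!!false        # => false
```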
@@ -391,12 +387,8 @@ module Clusters
       end
     end
 
-    def application_prometheus_available?
-      integration_prometheus&.available? || application_prometheus&.available?
-    end
-
     def prometheus_adapter
-      integration_prometheus || application_prometheus
+      integration_prometheus
     end
 
     private
@@ -335,10 +335,6 @@ class Environment < ApplicationRecord
     prometheus_adapter.query(:environment, self) if has_metrics_and_can_query?
   end
 
-  def prometheus_status
-    deployment_platform&.cluster&.application_prometheus&.status_name
-  end
-
  def additional_metrics(*args)
     return unless has_metrics_and_can_query?
 
@@ -7,6 +7,7 @@ module Integrations
       attr_reader :wiki_page_url
       attr_reader :action
       attr_reader :description
+      attr_reader :diff_url
 
       def initialize(params)
         super

@@ -16,6 +17,7 @@ module Integrations
         @title = obj_attr[:title]
         @wiki_page_url = obj_attr[:url]
         @description = obj_attr[:message]
+        @diff_url = obj_attr[:diff_url]
 
         @action =
           case obj_attr[:action]
@@ -44,19 +46,23 @@ module Integrations
       private
 
       def message
-        "#{user_combined_name} #{action} #{wiki_page_link} in #{project_link}: *#{title}*"
+        "#{user_combined_name} #{action} #{wiki_page_link} (#{diff_link}) in #{project_link}: *#{title}*"
       end
 
       def description_message
         [{ text: format(@description), color: attachment_color }]
       end
 
+      def diff_link
+        link('Compare changes', diff_url)
+      end
+
       def project_link
-        "[#{project_name}](#{project_url})"
+        link(project_name, project_url)
       end
 
       def wiki_page_link
-        "[wiki page](#{wiki_page_url})"
+        link('wiki page', wiki_page_url)
       end
     end
   end
@@ -276,6 +276,10 @@ class Note < ApplicationRecord
     noteable_type == 'AlertManagement::Alert'
   end
 
+  def for_vulnerability?
+    noteable_type == "Vulnerability"
+  end
+
   def for_project_snippet?
     noteable.is_a?(ProjectSnippet)
   end
@@ -411,6 +415,8 @@ class Note < ApplicationRecord
       'snippet'
     elsif for_alert_mangement_alert?
       'alert_management_alert'
+    elsif for_vulnerability?
+      'security_resource'
     else
       noteable_type.demodulize.underscore
     end
@@ -2364,7 +2364,7 @@ class Project < ApplicationRecord
   end
 
   def mark_primary_write_location
-    # Overriden in EE
+    ::Gitlab::Database::LoadBalancing::Sticking.mark_primary_write_location(:project, self.id)
   end
 
   def toggle_ci_cd_settings!(settings_attribute)
@@ -20,14 +20,16 @@ class ProjectFeatureUsage < ApplicationRecord
   end
 
   def log_jira_dvcs_integration_usage(cloud: true)
-    integration_field = self.class.jira_dvcs_integration_field(cloud: cloud)
+    ::Gitlab::Database::LoadBalancing::Session.without_sticky_writes do
+      integration_field = self.class.jira_dvcs_integration_field(cloud: cloud)
 
-    # The feature usage is used only once later to query the feature usage in a
-    # long date range. Therefore, we just need to update the timestamp once per
-    # day
-    return if persisted? && updated_today?(integration_field)
+      # The feature usage is used only once later to query the feature usage in a
+      # long date range. Therefore, we just need to update the timestamp once per
+      # day
+      break if persisted? && updated_today?(integration_field)
 
-    persist_jira_dvcs_usage(integration_field)
+      persist_jira_dvcs_usage(integration_field)
+    end
   end
 
   private
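Note: one plausible reading of the `return` → `break` switch above is that the guard now lives inside the `without_sticky_writes` block, and in Ruby `break` only terminates the block's method call, while `return` would exit the enclosing method entirely. A standalone sketch of that semantic difference (method names hypothetical):

```ruby
def without_sticky_writes
  puts "wrapper: start"
  result = yield
  puts "wrapper: after yield" # skipped when the block breaks out early
  result
end

def log_usage(already_logged)
  without_sticky_writes do
    # `break` ends only the without_sticky_writes call and resumes below;
    # `return` here would exit log_usage itself.
    break if already_logged

    puts "persisting usage"
  end
  puts "log_usage: code after the block still runs"
end

log_usage(true)
# wrapper: start
# log_usage: code after the block still runs
```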
@@ -117,8 +117,8 @@ class PrometheusService < MonitoringService
     return false if template?
     return false unless project
 
-    project.all_clusters.enabled.eager_load(:application_prometheus).any? do |cluster|
-      cluster.application_prometheus&.available?
+    project.all_clusters.enabled.eager_load(:integration_prometheus).any? do |cluster|
+      cluster.integration_prometheus_available?
     end
   end
 
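Note: `eager_load` here pulls the named association in the same query via a LEFT OUTER JOIN, so the per-cluster `any?` check reads preloaded records instead of issuing one query per cluster. This is standard ActiveRecord behavior rather than anything introduced by this commit; a rough sketch (assuming a `Cluster has_one :integration_prometheus` schema):

```ruby
# Without eager loading: 1 query for clusters + N queries for integrations.
Cluster.all.each { |c| c.integration_prometheus }

# With eager_load: a single LEFT OUTER JOIN query; the loop reads from memory.
Cluster.eager_load(:integration_prometheus).each do |c|
  c.integration_prometheus
end
```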
@@ -9,7 +9,12 @@ module AuthorizedProjectUpdate
       # Using this approach (instead of eg. User.each_batch) keeps the arguments
       # the same for AuthorizedProjectUpdate::UserRefreshOverUserRangeWorker
       # even if the user list changes, so we can deduplicate these jobs.
-      (1..User.maximum(:id)).each_slice(BATCH_SIZE).with_index do |batch, index|
+
+      # Since UserRefreshOverUserRangeWorker has set data_consistency to delayed,
+      # a job enqueued without a delay could fail because the replica could not catch up with the primary.
+      # To prevent this, we start the index from `1` instead of `0` so as to ensure that
+      # no UserRefreshOverUserRangeWorker job is enqueued without a delay.
+      (1..User.maximum(:id)).each_slice(BATCH_SIZE).with_index(1) do |batch, index|
        delay = DELAY_INTERVAL * index
        AuthorizedProjectUpdate::UserRefreshOverUserRangeWorker.perform_in(delay, *batch.minmax)
       end
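Note: `with_index(1)` starts the yielded index at 1, so even the first batch is scheduled with `DELAY_INTERVAL * 1` rather than a zero delay. A standalone sketch of the arithmetic (constants hypothetical):

```ruby
BATCH_SIZE = 4
DELAY_INTERVAL = 30 # seconds; hypothetical value for illustration

# with_index(1): the first slice gets index 1, so no job is enqueued
# with a zero delay.
(1..10).each_slice(BATCH_SIZE).with_index(1) do |batch, index|
  delay = DELAY_INTERVAL * index
  puts "enqueue #{batch.minmax.inspect} in #{delay}s"
end
# enqueue [1, 4] in 30s
# enqueue [5, 8] in 60s
# enqueue [9, 10] in 90s
```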
@@ -1,5 +1,6 @@
 # frozen_string_literal: true
 
+# DEPRECATED: To be removed as part of https://gitlab.com/groups/gitlab-org/-/epics/5877
 module Clusters
   module Applications
     class ScheduleUpdateService

@@ -7,14 +8,14 @@ module Clusters
 
       attr_accessor :application, :project
 
-      def initialize(application, project)
-        @application = application
+      def initialize(cluster_prometheus_adapter, project)
+        @application = cluster_prometheus_adapter&.cluster&.application_prometheus
         @project = project
       end
 
       def execute
         return unless application
-        return unless application.managed_prometheus?
+        return if application.externally_installed?
 
         if recently_scheduled?
           worker_class.perform_in(BACKOFF_DELAY, application.name, application.id, project.id, Time.current)
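Note: the new constructor digs through two safe-navigation calls, so a nil adapter or a nil cluster both collapse `@application` to nil, which the `return unless application` guard then catches. A tiny sketch of that chaining (structs hypothetical):

```ruby
Cluster = Struct.new(:application_prometheus)
Adapter = Struct.new(:cluster)

def application_for(adapter)
  # Each &. short-circuits to nil instead of raising NoMethodError.
  adapter&.cluster&.application_prometheus
end

application_for(nil)                             # => nil
application_for(Adapter.new(nil))                # => nil
application_for(Adapter.new(Cluster.new(:prom))) # => :prom
```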
@@ -105,9 +105,9 @@ module Projects
 
       cluster = alert.environment.deployment_platform&.cluster
       return unless cluster&.enabled?
-      return unless cluster.application_prometheus_available?
+      return unless cluster.integration_prometheus_available?
 
-      cluster.application_prometheus || cluster.integration_prometheus
+      cluster.integration_prometheus
     end
 
     def find_alert(project, metric)
@@ -1,5 +1,6 @@
 # frozen_string_literal: true
 
+# DEPRECATED: To be removed as part of https://gitlab.com/groups/gitlab-org/-/epics/5877
 module Prometheus
   class CreateDefaultAlertsService < BaseService
     include Gitlab::Utils::StrongMemoize

@@ -53,12 +54,12 @@ module Prometheus
     end
 
     def schedule_prometheus_update
-      return unless prometheus_application
+      return unless prometheus_adapter
 
-      ::Clusters::Applications::ScheduleUpdateService.new(prometheus_application, project).execute
+      ::Clusters::Applications::ScheduleUpdateService.new(prometheus_adapter, project).execute
     end
 
-    def prometheus_application
+    def prometheus_adapter
       environment.cluster_prometheus_adapter
     end
 
@@ -1,5 +1,5 @@
 %section.settings.no-animate.expanded.cluster-health-graphs.qa-cluster-health-section#cluster-health
-  - if @cluster&.application_prometheus_available?
+  - if @cluster&.integration_prometheus_available?
    #prometheus-graphs{ data: @cluster.health_data(clusterable) }
 
  - else
@@ -1,8 +0,0 @@
----
-name: remove_description_html_in_release_api
-introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/60380
-rollout_issue_url: https://gitlab.com/gitlab-org/gitlab/-/issues/329188
-milestone: '13.12'
-type: development
-group: group::release
-default_enabled: true
@@ -1,8 +0,0 @@
----
-name: remove_description_html_in_release_api_override
-introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/60380
-rollout_issue_url: https://gitlab.com/gitlab-org/gitlab/-/issues/329188
-milestone: '13.12'
-type: development
-group: group::release
-default_enabled: false
@@ -1,8 +0,0 @@
----
-name: usage_data_p_terraform_state_api_unique_users
-introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/50224
-rollout_issue_url:
-milestone: '13.8'
-type: development
-group: group::configure
-default_enabled: true
@@ -207,28 +207,6 @@ Some nameservers (like [Consul](https://www.consul.io/docs/discovery/dns#udp-bas
 queried over UDP. To overcome this issue, you can use TCP for querying by setting
 `use_tcp` to `true`.
 
-### Forking
-
-NOTE:
-Starting with GitLab 13.0, Puma is the default web server used in GitLab
-all-in-one package based installations as well as GitLab Helm chart deployments.
-
-If you use an application server that forks, such as Unicorn, you _have to_
-update your Unicorn configuration to start service discovery _after_ a fork.
-Failure to do so leads to service discovery only running in the parent
-process. If you are using Unicorn, then you can add the following to your
-Unicorn configuration file:
-
-```ruby
-after_fork do |server, worker|
-  defined?(Gitlab::Database::LoadBalancing) &&
-    Gitlab::Database::LoadBalancing.start_service_discovery
-end
-```
-
-This ensures that service discovery is started in both the parent and all
-child processes.
-
 ## Balancing queries
 
 Read-only `SELECT` queries balance among all the secondary hosts.
@@ -43,7 +43,7 @@ The following are GitLab upgrade validation tests we performed.
 - Outcome: Partial success because we observed downtime during the upgrade of the primary and secondary sites.
 - Follow up issues/actions:
   - [Fix zero-downtime upgrade process/instructions for multi-node Geo deployments](https://gitlab.com/gitlab-org/gitlab/-/issues/225684)
-  - [Geo:check Rake task: Exclude AuthorizedKeysCommand check if node not running Puma/Unicorn](https://gitlab.com/gitlab-org/gitlab/-/issues/225454)
+  - [Geo:check Rake task: Exclude AuthorizedKeysCommand check if node not running Puma](https://gitlab.com/gitlab-org/gitlab/-/issues/225454)
   - [Update instructions in the next upgrade issue to include monitoring HAProxy dashboards](https://gitlab.com/gitlab-org/gitlab/-/issues/225359)
 
 [Upgrade Geo multi-node installation](https://gitlab.com/gitlab-org/gitlab/-/issues/208104):

@@ -53,7 +53,7 @@ The following are GitLab upgrade validation tests we performed.
 - Outcome: Partial success because we did not run the looping pipeline during the demo to validate
   zero-downtime.
 - Follow up issues:
-  - [Clarify how Puma/Unicorn should include deploy node](https://gitlab.com/gitlab-org/omnibus-gitlab/-/issues/5460)
+  - [Clarify how Puma should include deploy node](https://gitlab.com/gitlab-org/omnibus-gitlab/-/issues/5460)
   - [Investigate MR creation failure after upgrade to 12.9.10](https://gitlab.com/gitlab-org/gitlab/-/issues/223282) Closed as false positive.
 
 ### February 2020
@@ -222,7 +222,6 @@ the **primary** database. Use the following as a guide.
    sidekiq['enable'] = false
    sidekiq_cluster['enable'] = false
    puma['enable'] = false
-   unicorn['enable'] = false
    ```
 
 After making these changes, [reconfigure GitLab](../../restart_gitlab.md#omnibus-gitlab-reconfigure) so the changes take effect.

@@ -294,7 +293,6 @@ Configure the tracking database.
    sidekiq['enable'] = false
    sidekiq_cluster['enable'] = false
    puma['enable'] = false
-   unicorn['enable'] = false
    ```
 
 After making these changes, [reconfigure GitLab](../../restart_gitlab.md#omnibus-gitlab-reconfigure) so the changes take effect.

@@ -440,7 +438,6 @@ application servers above, with some changes to run only the `sidekiq` service:
    redis_exporter['enable'] = false
    repmgr['enable'] = false
    puma['enable'] = false
-   unicorn['enable'] = false
 
    ##
    ## The unique identifier for the Geo node.
@@ -82,7 +82,7 @@ The following list depicts the network architecture of Gitaly:
 - Gitaly addresses must be specified in such a way that they resolve correctly for **all** Gitaly
   clients.
 - Gitaly clients are:
-  - Puma or Unicorn.
+  - Puma.
   - Sidekiq.
   - GitLab Workhorse.
   - GitLab Shell.
@@ -346,7 +346,6 @@ When GitLab calls a function that has a "Rugged patch", it performs two checks:
   the GitLab use of "Rugged patch" code.
 - If the feature flag is not set, GitLab tries accessing the file system underneath the
   Gitaly server directly. If it can, it uses the "Rugged patch":
-  - If using Unicorn.
   - If using Puma and [thread count](../../install/requirements.md#puma-threads) is set
     to `1`.
 
@@ -739,7 +739,7 @@ When enabled, access logs are generated in
 packages or in `/home/git/gitlab/log/sidekiq_exporter.log` for
 installations from source.
 
-If Prometheus metrics and the Web Exporter are both enabled, Puma/Unicorn
+If Prometheus metrics and the Web Exporter are both enabled, Puma
 starts a Web server and listen to the defined port (default: `8083`), and access logs
 are generated:
 
@@ -227,7 +227,7 @@ To use an external Prometheus server:
    gitlab_rails['monitoring_whitelist'] = ['127.0.0.0/8', '192.168.0.1']
    ```
 
-1. On **all** GitLab Rails(Puma/Unicorn, Sidekiq) servers, set the Prometheus server IP address and listen port. For example:
+1. On **all** GitLab Rails(Puma, Sidekiq) servers, set the Prometheus server IP address and listen port. For example:
 
    ```ruby
    gitlab_rails['prometheus_address'] = '192.168.0.1:9090'
@@ -445,11 +445,11 @@ In case of NFS-related problems, it can be helpful to trace
 the file system requests that are being made by using `perf`:
 
 ```shell
-sudo perf trace -e 'nfs4:*' -p $(pgrep -fd ',' puma && pgrep -fd ',' unicorn)
+sudo perf trace -e 'nfs4:*' -p $(pgrep -fd ',' puma)
 ```
 
 On Ubuntu 16.04, use:
 
 ```shell
-sudo perf trace --no-syscalls --event 'nfs4:*' -p $(pgrep -fd ',' puma && pgrep -fd ',' unicorn)
+sudo perf trace --no-syscalls --event 'nfs4:*' -p $(pgrep -fd ',' puma)
 ```
@@ -226,5 +226,5 @@ the database. The following instructions can be used to build OpenSSH 7.5:
 GitLab supports `authorized_keys` database lookups with [SELinux](https://en.wikipedia.org/wiki/Security-Enhanced_Linux).
 
 Because the SELinux policy is static, GitLab doesn't support the ability to change
-internal Unicorn ports at the moment. Administrators would have to create a special `.te`
+internal webserver ports at the moment. Administrators would have to create a special `.te`
 file for the environment, since it isn't generated dynamically.
@@ -7,8 +7,7 @@ info: To determine the technical writer assigned to the Stage/Group associated w
 # Switching to Puma **(FREE SELF)**
 
 As of GitLab 12.9, [Puma](https://github.com/puma/puma) has replaced [Unicorn](https://yhbt.net/unicorn/)
-as the default web server. From GitLab 13.0, the following run Puma instead of Unicorn unless
-explicitly configured not to:
+as the default web server. From GitLab 14.0, the following run Puma:
 
 - All-in-one package-based installations.
 - Helm chart-based installations.

@@ -25,7 +24,7 @@ Multi-threaded Puma can therefore still serve more requests than a single proces
 
 ## Configuring Puma to replace Unicorn
 
-Beginning with GitLab 13.0, Puma is the default application server. We plan to remove support for
+Beginning with GitLab 13.0, Puma is the default application server. We removed support for
 Unicorn in GitLab 14.0.
 
 When switching to Puma, Unicorn server configuration
@@ -19,10 +19,10 @@ The configuration for doing so depends on your desired outcome.
 The first thing you'll want to accomplish is to ensure that no changes can be
 made to your repositories. There's two ways you can accomplish that:
 
-- Either stop Unicorn/Puma to make the internal API unreachable:
+- Either stop Puma to make the internal API unreachable:
 
   ```shell
-  sudo gitlab-ctl stop puma # or unicorn
+  sudo gitlab-ctl stop puma
   ```
 
 - Or, open up a Rails console:

@@ -46,19 +46,19 @@ made to your repositories. There's two ways you can accomplish that:
 ## Shut down the GitLab UI
 
 If you don't mind shutting down the GitLab UI, then the easiest approach is to
-stop `sidekiq` and `puma`/`unicorn`, and you'll effectively ensure that no
+stop `sidekiq` and `puma`, and you'll effectively ensure that no
 changes can be made to GitLab:
 
 ```shell
 sudo gitlab-ctl stop sidekiq
-sudo gitlab-ctl stop puma # or unicorn
+sudo gitlab-ctl stop puma
 ```
 
 When you're ready to revert this:
 
 ```shell
 sudo gitlab-ctl start sidekiq
-sudo gitlab-ctl start puma # or unicorn
+sudo gitlab-ctl start puma
 ```
 
 ## Make the database read-only
@@ -157,7 +157,7 @@ To set up GitLab and its components to accommodate up to 10,000 users:
    provides access to the Git repositories.
 1. [Configure Sidekiq](#configure-sidekiq).
 1. [Configure the main GitLab Rails application](#configure-gitlab-rails)
-   to run Puma/Unicorn, Workhorse, GitLab Shell, and to serve all frontend
+   to run Puma, Workhorse, GitLab Shell, and to serve all frontend
    requests (which include UI, API, and Git over HTTP/SSH).
 1. [Configure Prometheus](#configure-prometheus) to monitor your GitLab
    environment.

@@ -1538,7 +1538,6 @@ To configure the Praefect nodes, on each one:
    redis['enable'] = false
    nginx['enable'] = false
    puma['enable'] = false
-   unicorn['enable'] = false
    sidekiq['enable'] = false
    gitlab_workhorse['enable'] = false
    grafana['enable'] = false

@@ -1667,7 +1666,6 @@ On each node:
    redis['enable'] = false
    nginx['enable'] = false
    puma['enable'] = false
-   unicorn['enable'] = false
    sidekiq['enable'] = false
    gitlab_workhorse['enable'] = false
    grafana['enable'] = false

@@ -2235,7 +2233,6 @@ To configure the Monitoring node:
    redis_exporter['enable'] = false
    sidekiq['enable'] = false
    puma['enable'] = false
-   unicorn['enable'] = false
    node_exporter['enable'] = false
    gitlab_exporter['enable'] = false
 
@@ -157,7 +157,7 @@ To set up GitLab and its components to accommodate up to 25,000 users:
    provides access to the Git repositories.
 1. [Configure Sidekiq](#configure-sidekiq).
 1. [Configure the main GitLab Rails application](#configure-gitlab-rails)
-   to run Puma/Unicorn, Workhorse, GitLab Shell, and to serve all frontend
+   to run Puma, Workhorse, GitLab Shell, and to serve all frontend
    requests (which include UI, API, and Git over HTTP/SSH).
 1. [Configure Prometheus](#configure-prometheus) to monitor your GitLab
    environment.

@@ -1540,7 +1540,6 @@ To configure the Praefect nodes, on each one:
    redis['enable'] = false
    nginx['enable'] = false
    puma['enable'] = false
-   unicorn['enable'] = false
    sidekiq['enable'] = false
    gitlab_workhorse['enable'] = false
    grafana['enable'] = false

@@ -1669,7 +1668,6 @@ On each node:
    redis['enable'] = false
    nginx['enable'] = false
    puma['enable'] = false
-   unicorn['enable'] = false
    sidekiq['enable'] = false
    gitlab_workhorse['enable'] = false
    grafana['enable'] = false

@@ -2239,7 +2237,6 @@ To configure the Monitoring node:
    redis_exporter['enable'] = false
    sidekiq['enable'] = false
    puma['enable'] = false
-   unicorn['enable'] = false
    node_exporter['enable'] = false
    gitlab_exporter['enable'] = false
 
@@ -84,7 +84,7 @@ To set up GitLab and its components to accommodate up to 2,000 users:
 1. [Configure Gitaly](#configure-gitaly), which provides access to the Git
    repositories.
 1. [Configure the main GitLab Rails application](#configure-gitlab-rails)
-   to run Puma/Unicorn, Workhorse, GitLab Shell, and to serve all frontend
+   to run Puma, Workhorse, GitLab Shell, and to serve all frontend
    requests (which include UI, API, and Git over HTTP/SSH).
 1. [Configure Prometheus](#configure-prometheus) to monitor your GitLab
    environment.

@@ -351,7 +351,6 @@ Omnibus:
    sidekiq['enable'] = false
    gitlab_workhorse['enable'] = false
    puma['enable'] = false
-   unicorn['enable'] = false
    postgresql['enable'] = false
    nginx['enable'] = false
    prometheus['enable'] = false

@@ -457,7 +456,6 @@ To configure the Gitaly server, on the server node you want to use for Gitaly:
    redis['enable'] = false
    nginx['enable'] = false
    puma['enable'] = false
-   unicorn['enable'] = false
    sidekiq['enable'] = false
    gitlab_workhorse['enable'] = false
    grafana['enable'] = false

@@ -791,7 +789,6 @@ running [Prometheus](../monitoring/prometheus/index.md) and
    redis_exporter['enable'] = false
    sidekiq['enable'] = false
    puma['enable'] = false
-   unicorn['enable'] = false
    node_exporter['enable'] = false
    gitlab_exporter['enable'] = false
 
@@ -169,7 +169,7 @@ To set up GitLab and its components to accommodate up to 3,000 users:
    provides access to the Git repositories.
 1. [Configure Sidekiq](#configure-sidekiq).
 1. [Configure the main GitLab Rails application](#configure-gitlab-rails)
-   to run Puma/Unicorn, Workhorse, GitLab Shell, and to serve all frontend
+   to run Puma, Workhorse, GitLab Shell, and to serve all frontend
    requests (which include UI, API, and Git over HTTP/SSH).
 1. [Configure Prometheus](#configure-prometheus) to monitor your GitLab
    environment.

@@ -1238,7 +1238,6 @@ To configure the Praefect nodes, on each one:
    redis['enable'] = false
    nginx['enable'] = false
    puma['enable'] = false
-   unicorn['enable'] = false
    sidekiq['enable'] = false
    gitlab_workhorse['enable'] = false
    grafana['enable'] = false

@@ -1367,7 +1366,6 @@ On each node:
    redis['enable'] = false
    nginx['enable'] = false
    puma['enable'] = false
-   unicorn['enable'] = false
    sidekiq['enable'] = false
    gitlab_workhorse['enable'] = false
    grafana['enable'] = false

@@ -1916,7 +1914,6 @@ running [Prometheus](../monitoring/prometheus/index.md) and
    redis_exporter['enable'] = false
    sidekiq['enable'] = false
    puma['enable'] = false
-   unicorn['enable'] = false
    node_exporter['enable'] = false
    gitlab_exporter['enable'] = false
 
@@ -157,7 +157,7 @@ To set up GitLab and its components to accommodate up to 50,000 users:
    provides access to the Git repositories.
 1. [Configure Sidekiq](#configure-sidekiq).
 1. [Configure the main GitLab Rails application](#configure-gitlab-rails)
-   to run Puma/Unicorn, Workhorse, GitLab Shell, and to serve all frontend
+   to run Puma, Workhorse, GitLab Shell, and to serve all frontend
    requests (which include UI, API, and Git over HTTP/SSH).
 1. [Configure Prometheus](#configure-prometheus) to monitor your GitLab
    environment.

@@ -1547,7 +1547,6 @@ To configure the Praefect nodes, on each one:
    redis['enable'] = false
    nginx['enable'] = false
    puma['enable'] = false
-   unicorn['enable'] = false
    sidekiq['enable'] = false
    gitlab_workhorse['enable'] = false
    grafana['enable'] = false

@@ -1676,7 +1675,6 @@ On each node:
    redis['enable'] = false
    nginx['enable'] = false
    puma['enable'] = false
-   unicorn['enable'] = false
    sidekiq['enable'] = false
    gitlab_workhorse['enable'] = false
    grafana['enable'] = false

@@ -2253,7 +2251,6 @@ To configure the Monitoring node:
    redis_exporter['enable'] = false
    sidekiq['enable'] = false
    puma['enable'] = false
-   unicorn['enable'] = false
    node_exporter['enable'] = false
    gitlab_exporter['enable'] = false
 
@@ -161,7 +161,7 @@ To set up GitLab and its components to accommodate up to 5,000 users:
    provides access to the Git repositories.
 1. [Configure Sidekiq](#configure-sidekiq).
 1. [Configure the main GitLab Rails application](#configure-gitlab-rails)
-   to run Puma/Unicorn, Workhorse, GitLab Shell, and to serve all frontend
+   to run Puma, Workhorse, GitLab Shell, and to serve all frontend
    requests (which include UI, API, and Git over HTTP/SSH).
 1. [Configure Prometheus](#configure-prometheus) to monitor your GitLab
    environment.

@@ -1229,7 +1229,6 @@ To configure the Praefect nodes, on each one:
    redis['enable'] = false
    nginx['enable'] = false
    puma['enable'] = false
-   unicorn['enable'] = false
    sidekiq['enable'] = false
    gitlab_workhorse['enable'] = false
    grafana['enable'] = false

@@ -1358,7 +1357,6 @@ On each node:
    redis['enable'] = false
    nginx['enable'] = false
    puma['enable'] = false
-   unicorn['enable'] = false
    sidekiq['enable'] = false
    gitlab_workhorse['enable'] = false
    grafana['enable'] = false

@@ -1904,7 +1902,6 @@ running [Prometheus](../monitoring/prometheus/index.md) and
    redis_exporter['enable'] = false
    sidekiq['enable'] = false
    puma['enable'] = false
-   unicorn['enable'] = false
    node_exporter['enable'] = false
    gitlab_exporter['enable'] = false
 
@@ -155,7 +155,7 @@ and more. However, this is not enabled by default. To enable it, define the
    gitlab_rails['env'] = {"ENABLE_RBTRACE" => "1"}
    ```
 
-Then reconfigure the system and restart Unicorn and Sidekiq. To run this
+Then reconfigure the system and restart Puma and Sidekiq. To run this
 in Omnibus, run as root:
 
 ```ruby
@@ -177,8 +177,8 @@ strace -tt -T -f -y -yy -s 1024 -p <pid>
 
 # -o output file
 
-# run strace on all unicorn processes
-ps auwx | grep unicorn | awk '{ print " -p " $2}' | xargs strace -tt -T -f -y -yy -s 1024 -o /tmp/unicorn.txt
+# run strace on all puma processes
+ps auwx | grep puma | awk '{ print " -p " $2}' | xargs strace -tt -T -f -y -yy -s 1024 -o /tmp/puma.txt
 ```
 
 Be aware that strace can have major impacts to system performance when it is running.
@@ -10,7 +10,8 @@ info: To determine the technical writer assigned to the Stage/Group associated w
 > - Using this API you can manipulate GitLab [Release](../../user/project/releases/index.md) entries.
 > - For manipulating links as a release asset, see [Release Links API](links.md).
 > - Release Evidences were [introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/26019) in GitLab 12.5.
-> - `description_html` field was [removed](https://gitlab.com/gitlab-org/gitlab/-/issues/299447) in GitLab 13.12.
+> - `description_html` became an opt-in field [with GitLab 13.12 for performance reasons](https://gitlab.com/gitlab-org/gitlab/-/issues/299447).
+>   Please pass the `include_html_description` query string parameter if you need it.
 
 ## List Releases
 

@@ -25,6 +26,7 @@ GET /projects/:id/releases
 | `id` | integer/string | yes | The ID or [URL-encoded path of the project](../README.md#namespaced-path-encoding). |
 | `order_by` | string | no | The field to use as order. Either `released_at` (default) or `created_at`. |
 | `sort` | string | no | The direction of the order. Either `desc` (default) for descending order or `asc` for ascending order. |
+| `include_html_description` | boolean | no | If `true`, a response includes HTML rendered Markdown of the release description. |
 
 Example request:
 

@@ -228,6 +230,7 @@ GET /projects/:id/releases/:tag_name
 | ------------- | -------------- | -------- | ----------------------------------------------------------------------------------- |
 | `id` | integer/string | yes | The ID or [URL-encoded path of the project](../README.md#namespaced-path-encoding). |
 | `tag_name` | string | yes | The Git tag the release is associated with. |
+| `include_html_description` | boolean | no | If `true`, a response includes HTML rendered Markdown of the release description. |
 
 Example request:
 
@@ -50,6 +50,7 @@ The JWT's payload looks like this:
     "user_login": "myuser" # GitLab @username
     "user_email": "myuser@example.com", # Email of the user executing the job
     "pipeline_id": "1212", #
+    "pipeline_source": "web", # Pipeline source, see: https://docs.gitlab.com/ee/ci/yaml/#common-if-clauses-for-rules
     "job_id": "1212", #
     "ref": "auto-deploy-2020-04-01", # Git ref for this job
     "ref_type": "branch", # Git ref type, branch or tag
@@ -695,8 +695,7 @@ Sidekiq is a Ruby background job processor that pulls jobs from the Redis queue
 
 #### Puma
 
-Starting with GitLab 13.0, Puma is the default web server and Unicorn has been
-disabled by default.
+Starting with GitLab 13.0, Puma is the default web server.
 
 - [Project page](https://gitlab.com/gitlab-org/gitlab/blob/master/README.md)
 - Configuration:
@@ -47,7 +47,7 @@ Replace `secret` with your own secret token.
 After you have enabled the chaos endpoints and restarted the application, you can start testing using the endpoints.
 
 By default, when invoking a chaos endpoint, the web worker process which receives the request handles it. This means, for example, that if the Kill
-operation is invoked, the Puma or Unicorn worker process handling the request is killed. To test these operations in Sidekiq, the `async` parameter on
+operation is invoked, the Puma worker process handling the request is killed. To test these operations in Sidekiq, the `async` parameter on
 each endpoint can be set to `true`. This runs the chaos process in a Sidekiq worker.
 
 ## Memory leaks

@@ -80,7 +80,6 @@ curl "http://localhost:3000/-/chaos/leakmem?memory_mb=1024&duration_s=10&token=s
 This endpoint attempts to fully utilise a single core, at 100%, for the given period.
 
 Depending on your rack server setup, your request may timeout after a predetermined period (normally 60 seconds).
-If you're using Unicorn, this is done by killing the worker process.
 
 ```plaintext
 GET /-/chaos/cpu_spin

@@ -105,7 +104,6 @@ This endpoint attempts to fully utilise a single core, and interleave it with DB
 This endpoint can be used to model yielding execution to another threads when running concurrently.
 
 Depending on your rack server setup, your request may timeout after a predetermined period (normally 60 seconds).
-If you're using Unicorn, this is done by killing the worker process.
 
 ```plaintext
 GET /-/chaos/db_spin
@@ -21,7 +21,7 @@ large database imports.
 echo "postgresql['checkpoint_segments'] = 64" | sudo tee -a /etc/gitlab/gitlab.rb
 sudo touch /etc/gitlab/skip-auto-reconfigure
 sudo gitlab-ctl reconfigure
-sudo gitlab-ctl stop unicorn
+sudo gitlab-ctl stop puma
 sudo gitlab-ctl stop sidekiq
 ```
 
@@ -50,7 +50,7 @@ to the relevant internal client.
 
 All calls to the Kubernetes API must be in a background process. Don't
 perform Kubernetes API calls within a web request. This blocks
-Unicorn, and can lead to a denial-of-service (DoS) attack in GitLab as
+webserver, and can lead to a denial-of-service (DoS) attack in GitLab as
 the Kubernetes cluster response times are outside of our control.
 
 The easiest way to ensure your calls happen a background process is to
@@ -292,13 +292,13 @@ in a batch style.
 
 **Summary:** You should set a reasonable timeout when the system invokes HTTP calls
 to external services (such as Kubernetes), and it should be executed in Sidekiq, not
-in Puma/Unicorn threads.
+in Puma threads.
 
 Often, GitLab needs to communicate with an external service such as Kubernetes
 clusters. In this case, it's hard to estimate when the external service finishes
 the requested process, for example, if it's a user-owned cluster that's inactive for some reason,
 GitLab might wait for the response forever ([Example](https://gitlab.com/gitlab-org/gitlab/-/issues/31475)).
-This could result in Puma/Unicorn timeout and should be avoided at all cost.
+This could result in Puma timeout and should be avoided at all cost.
 
 You should set a reasonable timeout, gracefully handle exceptions and surface the
 errors in UI or logging internally.
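Note: to make "set a reasonable timeout" concrete, here is a sketch using Ruby's standard `Net::HTTP`; the endpoint is hypothetical, and in a Rails app this call would live in a Sidekiq worker rather than a Puma thread:

```ruby
require 'net/http'

uri = URI('https://kubernetes.example.com/api/v1/pods') # hypothetical endpoint

http = Net::HTTP.new(uri.host, uri.port)
http.use_ssl = true
http.open_timeout = 5  # seconds to establish the TCP connection
http.read_timeout = 10 # seconds to wait for a response

begin
  response = http.request(Net::HTTP::Get.new(uri))
  puts response.code
rescue Net::OpenTimeout, Net::ReadTimeout => e
  # Surface the error instead of letting the caller hang indefinitely.
  puts "external service timed out: #{e.class}"
end
```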
@@ -598,10 +598,10 @@ Each feature that accepts data uploads or allows to download them needs to use
 saved directly to Object Storage by Workhorse, and all downloads needs to be served
 by Workhorse.
 
-Performing uploads/downloads via Unicorn/Puma is an expensive operation,
-as it blocks the whole processing slot (worker or thread) for the duration of the upload.
+Performing uploads/downloads via Puma is an expensive operation,
+as it blocks the whole processing slot (thread) for the duration of the upload.
 
-Performing uploads/downloads via Unicorn/Puma also has a problem where the operation
+Performing uploads/downloads via Puma also has a problem where the operation
 can time out, which is especially problematic for slow clients. If clients take a long time
 to upload/download the processing slot might be killed due to request processing
 timeout (usually between 30s-60s).
@@ -4,47 +4,120 @@ group: unassigned
 info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://about.gitlab.com/handbook/engineering/ux/technical-writing/#assignments
 ---
 
-# Compatibility with multiple versions of the application running at the same time
+# Backwards compatibility across updates
 
-When adding or changing features, we must be aware that there may be multiple versions of the application running
-at the same time and connected to the same PostgreSQL and Redis databases. This could happen during a rolling deploy
-when the servers are updated one by one.
+GitLab deployments can be broken down into many components. Updating GitLab is not atomic. Therefore, **many components must be backwards-compatible**.
 
-During a rolling deploy, post-deployment DB migrations are run after all the servers have been updated. This means the
-servers could be in these intermediate states:
+## Common gotchas
 
-1. Old application code running with new DB migrations already executed
-1. New application code running with new DB migrations but without new post-deployment DB migrations
+In a sense, these scenarios are all transient states. But they can often persist for several hours in a live, production environment. Therefore we must treat them with the same care as permanent states.
 
-We must make sure that the application works properly in these states.
+### When modifying a Sidekiq worker
 
-For GitLab.com, we also run a set of canary servers which run a more recent version of the application. Users with
-the canary cookie set would be handled by these servers. Some URL patterns may also be forced to the canary servers,
-even without the cookie being set. This also means that some pages may match the pattern and get handled by canary servers,
-but AJAX requests to URLs (like the GraphQL endpoint) fail to match the pattern.
+For example when [changing arguments](sidekiq_style_guide.md#changing-the-arguments-for-a-worker):
 
-With this canary setup, we'd be in this mixed-versions state for an extended period of time until canary is promoted to
-production and post-deployment migrations run.
+- Is it ok if jobs are being enqueued with the old signature but executed by the new monthly release?
+- Is it ok if jobs are being enqueued with the new signature but executed by the previous monthly release?
 
-Also be aware that during a deployment to production, Web, API, and
-Sidekiq nodes are updated in parallel, but they may finish at
-different times. That means there may be a window of time when the
-application code is not in sync across the whole fleet. Changes that
-cut across Sidekiq, Web, and/or the API may [introduce unexpected
-errors until the deployment is complete](#builds-failing-due-to-varying-deployment-times-across-node-types).
+### When adding a new Sidekiq worker
+
+Is it ok if these jobs don't get executed for several hours because [Sidekiq nodes are not yet updated](sidekiq_style_guide.md#adding-new-workers)?
+
+### When modifying JavaScript
+
+Is it ok when a browser has the new JavaScript code, but the Rails code is running the previous monthly release on:
+
+- the REST API?
+- the GraphQL API?
+- internal APIs in controllers?
+
+### When adding a pre-deployment migration
+
+Is it ok if the pre-deployment migration has executed, but the web, Sidekiq, and API nodes are running the previous release?
+
+### When adding a post-deployment migration
+
+Is it ok if all GitLab nodes have been updated, but the post-deployment migrations don't get executed until a couple days later?
+
+### When adding a background migration
+
+Is it ok if all nodes have been updated, and then the post-deployment migrations get executed a couple days later, and then the background migrations take a week to finish?
+
+## A walkthrough of an update
+
+Backwards compatibility problems during updates are often very subtle. This is why it is worth familiarizing yourself with [update instructions](../update/index.md), [reference architectures](../administration/reference_architectures/index.md), and [GitLab.com's architecture](https://about.gitlab.com/handbook/engineering/infrastructure/production/architecture/). But to illustrate how these problems arise, take a look at this example of a simple update.
+
+- 🚢 New version
+- 🙂 Old version
+
+In this example, you can imagine that we are updating by one monthly release. But refer to [How long must code be backwards-compatible?](#how-long-must-code-be-backwards-compatible).
+
+| Update step | Postgres DB | Web nodes | API nodes | Sidekiq nodes | Compatibility concerns |
+| --- | --- | --- | --- | --- | --- |
+| Initial state | 🙂 | 🙂 | 🙂 | 🙂 | |
+| Ran pre-deployment migrations | 🚢 except post-deploy migrations | 🙂 | 🙂 | 🙂 | Rails code in 🙂 is making DB calls to 🚢 |
+| Update web nodes | 🚢 except post-deploy migrations | 🚢 | 🙂 | 🙂 | JavaScript in 🚢 is making API calls to 🙂. Rails code in 🚢 is enqueuing jobs that are getting run by Sidekiq nodes in 🙂 |
+| Update API and Sidekiq nodes | 🚢 except post-deploy migrations | 🚢 | 🚢 | 🚢 | Rails code in 🚢 is making DB calls without post-deployment migrations or background migrations |
+| Run post-deployment migrations | 🚢 | 🚢 | 🚢 | 🚢 | Rails code in 🚢 is making DB calls without background migrations |
+| Background migrations finish | 🚢 | 🚢 | 🚢 | 🚢 | |
+
+This example is not exhaustive. GitLab can be deployed in many different ways. Even each update step is not atomic. For example, with rolling deploys, nodes within a group are temporarily on different versions. You should assume that a lot of time passes between update steps. This is often true on GitLab.com.
+
+## How long must code be backwards-compatible?
+
+For users following [zero-downtime update instructions](../update/index.md#upgrading-without-downtime), the answer is one monthly release. For example:
+
+- 13.11 => 13.12
+- 13.12 => 14.0
+- 14.0 => 14.1
+
+For GitLab.com, there can be multiple tiny version updates per day, so GitLab.com doesn't constrain how far changes must be backwards-compatible.
+
+Many users [skip some monthly releases](../update/index.md#upgrading-to-a-new-major-version), for example:
+
+- 13.0 => 13.12
+
+These users accept some downtime during the update. Unfortunately we can't ignore this case completely. For example, 13.12 may execute Sidekiq jobs from 13.0, which illustrates why [we avoid removing arguments from jobs until a major release](sidekiq_style_guide.md#deprecate-and-remove-an-argument). The main question is: Will the deployment get to a good state after the update is complete?
+
+## What kind of components can GitLab be broken down into?
+
+The [50,000 reference architecture](../administration/reference_architectures/50k_users.md) runs GitLab on 48+ nodes. GitLab.com is [bigger than that](https://about.gitlab.com/handbook/engineering/infrastructure/production/architecture/), plus a portion of the [infrastructure runs on Kubernetes](https://about.gitlab.com/handbook/engineering/infrastructure/production/kubernetes/gitlab-com/), plus there is a ["canary" stage which receives updates first](https://about.gitlab.com/handbook/engineering/#sts=Canary%20Testing).
+
+But the problem isn't just that there are many nodes. The bigger problem is that a deployment can be divided into different contexts. And GitLab.com is not the only one that does this. Some possible divisions:
+
+- "Canary web app nodes": Handle non-API requests from a subset of users
+- "Git app nodes": Handle Git requests
+- "Web app nodes": Handle web requests
+- "API app nodes": Handle API requests
+- "Sidekiq app nodes": Handle Sidekiq jobs
+- "Postgres database": Handle internal Postgres calls
+- "Redis database": Handle internal Redis calls
+- "Gitaly nodes": Handle internal Gitaly calls
+
+During an update, there will be [two different versions of GitLab running in different contexts](#a-walkthrough-of-an-update). For example, [a web node may enqueue jobs which get run on an old Sidekiq node](#when-modifying-a-sidekiq-worker).
+
+## Doesn't the order of update steps matter?
+
+Yes! We have specific instructions for [zero-downtime updates](../update/index.md#upgrading-without-downtime) because it allows us to ignore some permutations of compatibility. This is why we don't worry about Rails code making DB calls to an old Postgres database schema.
+
+## I've identified a potential backwards compatibility problem, what can I do about it?
+
+### Feature flags
 
 One way to handle this is to use a feature flag that is disabled by
 default. The feature flag can be enabled when the deployment is in a
-consistent state. However, this method of synchronization doesn't
-guarantee that customers with on-premise instances can [upgrade with
+consistent state. However, this method of synchronization **does not
+guarantee** that customers with on-premise instances can [update with
 zero downtime](https://docs.gitlab.com/omnibus/update/#zero-downtime-updates)
-because point releases bundle many changes together. Minimizing the time
-between when versions are out of sync across the fleet may help mitigate
-errors caused by upgrades.
+because point releases bundle many changes together.
 
-## Requirements for zero downtime upgrades
+### Graceful degradation
 
-One way to guarantee zero downtime upgrades for on-premise instances is following the
+As an example, when adding a new feature with frontend and API changes, it may be possible to write the frontend such that the new feature degrades gracefully against old API responses. This may help avoid needing to spread a change over 3 releases.
+
+### Expand and contract pattern
+
+One way to guarantee zero downtime updates for on-premise instances is following the
 [expand and contract pattern](https://martinfowler.com/bliki/ParallelChange.html).
 
 This means that every breaking change is broken down in three phases: expand, migrate, and contract.

@@ -53,7 +126,7 @@ This means that every breaking change is broken down in three phases: expand, mi
 1. **migrate**: all consumers are updated to make use of the new implementation.
 1. **contract**: backward compatibility is removed.
 
-Those three phases **must be part of different milestones**, to allow zero downtime upgrades.
+Those three phases **must be part of different milestones**, to allow zero downtime updates.
 
 Depending on the support level for the feature, the contract phase could be delayed until the next major release.
 
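Note: a schematic of the expand/migrate/contract phases for a hypothetical column rename (all table, column, and class names invented for illustration; this is not code from the commit):

```ruby
# Expand (release N): add the new column; write to both old and new.
class AddTitleToEpics < ActiveRecord::Migration[6.0]
  def change
    add_column :epics, :title, :text
  end
end

# Migrate (release N): keep readers working while the backfill runs by
# falling back to the legacy column.
class Epic < ApplicationRecord
  def title
    super || name
  end
end

# Contract (release N+1 or later): drop the legacy column only after all
# consumers read and write `title`.
class RemoveNameFromEpics < ActiveRecord::Migration[6.0]
  def change
    remove_column :epics, :name, :text
  end
end
```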
@@ -283,8 +283,8 @@ Currently supported profiling targets are:
 - Sidekiq
 
 NOTE:
-The Puma master process is not supported. Neither is Unicorn.
-Sending SIGUSR2 to either of those triggers restarts. In the case of Puma,
+The Puma master process is not supported.
+Sending SIGUSR2 to it triggers restarts. In the case of Puma,
 take care to only send the signal to Puma workers.
 
 This can be done via `pkill -USR2 puma:`. The `:` distinguishes between `puma
@@ -129,7 +129,7 @@ way that increases execution time by several orders of magnitude.
 
 ### Impact
 
-The resource, for example Unicorn, Puma, or Sidekiq, can be made to hang as it takes
+The resource, for example Puma, or Sidekiq, can be made to hang as it takes
 a long time to evaluate the bad regex match. The evaluation time may require manual
 termination of the resource.
 
@@ -48,7 +48,7 @@ records should use stubs/doubles as much as possible.
 | `config/` | `spec/config/` | RSpec | |
 | `config/initializers/` | `spec/initializers/` | RSpec | |
 | `config/routes.rb`, `config/routes/` | `spec/routing/` | RSpec | |
-| `config/puma.example.development.rb`, `config/unicorn.rb.example` | `spec/rack_servers/` | RSpec | |
+| `config/puma.example.development.rb` | `spec/rack_servers/` | RSpec | |
 | `db/` | `spec/db/` | RSpec | |
 | `db/{post_,}migrate/` | `spec/migrations/` | RSpec | More details in the [Testing Rails migrations guide](testing_migrations_guide.md). |
 | `Gemfile` | `spec/dependencies/`, `spec/sidekiq/` | RSpec | |
@ -545,7 +545,6 @@ sudo -u git -H editor config/resque.yml
|
|||
```

Make sure to edit both `gitlab.yml` and `puma.rb` to match your setup.
If you want to use the Unicorn web server, see [Using Unicorn](#using-unicorn) for the additional steps.

If you want to use HTTPS, see [Using HTTPS](#using-https) for the additional steps.
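Returning to the `puma.rb` edit above: a minimal sketch of what the edited `config/puma.rb` might contain for an installation from source (the values are illustrative and must match your setup):

```ruby
# Cluster mode: several worker processes, each with a small thread pool.
workers 3
threads 1, 4

# Socket and PID paths used by the init script and by NGINX.
bind 'unix:///home/git/gitlab/tmp/sockets/gitlab.socket'
pidfile '/home/git/gitlab/tmp/pids/puma.pid'
```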
@@ -996,24 +995,6 @@ You also need to change the corresponding options (e.g. `ssh_user`, `ssh_host`,
Apart from the always supported Markdown style, there are other rich text files that GitLab can display. But you might have to install a dependency to do so. See the [`github-markup` gem README](https://github.com/gitlabhq/markup#markups) for more information.

### Using Unicorn

As of GitLab 12.9, [Puma](https://github.com/puma/puma) has replaced Unicorn as the default web server for installations from source.
If you want to switch back to Unicorn, follow these steps:

1. Finish the GitLab setup so you have it up and running.
1. Copy the supplied example Unicorn configuration file into place:

   ```shell
   cd /home/git/gitlab

   # Copy config file for the web server
   sudo -u git -H cp config/unicorn.rb.example config/unicorn.rb
   ```

1. Edit the system `init.d` script and set `USE_WEB_SERVER="unicorn"`. If you have `/etc/default/gitlab`, then you should edit it instead.
1. Restart GitLab.

### Using Sidekiq instead of Sidekiq Cluster

As of GitLab 12.10, source installations are using `bin/sidekiq-cluster` for managing Sidekiq processes.
@@ -29,7 +29,7 @@ relative URL is:
- `/home/git/gitlab/config/initializers/relative_url.rb`
- `/home/git/gitlab/config/gitlab.yml`
- `/home/git/gitlab/config/unicorn.rb`
- `/home/git/gitlab/config/puma.rb`
- `/home/git/gitlab-shell/config.yml`
- `/etc/default/gitlab`
@@ -88,7 +88,7 @@ Make sure to follow all steps below:
   relative_url_root: /gitlab
   ```

1. Edit `/home/git/gitlab/config/unicorn.rb` and uncomment/change the
1. Edit `/home/git/gitlab/config/puma.rb` and uncomment/change the
   following line:

   ```ruby
@@ -201,22 +201,6 @@ of [legacy Rugged code](../administration/gitaly/index.md#direct-access-to-git-i
higher, due to how [Ruby MRI multi-threading](https://en.wikipedia.org/wiki/Global_interpreter_lock)
works.

## Unicorn Workers

For most instances we recommend using: (CPU cores * 1.5) + 1 = Unicorn workers.
For example a node with 4 cores would have 7 Unicorn workers.

For all machines that have 2GB and up we recommend a minimum of three Unicorn workers.
If you have a 1GB machine we recommend to configure only two Unicorn workers to prevent excessive
swapping.

As long as you have enough available CPU and memory capacity, it's okay to increase the number of
Unicorn workers and this usually helps to reduce the response time of the applications and
increase the ability to handle parallel requests.

To change the Unicorn workers when you have the Omnibus package (which defaults to the
recommendation above) please see [the Unicorn settings in the Omnibus GitLab documentation](https://docs.gitlab.com/omnibus/settings/unicorn.html).

## Redis and Sidekiq

Redis stores all user sessions and the background task queue.
@@ -813,13 +813,13 @@ the CSRF check.
To bypass this you can add `skip_before_action :verify_authenticity_token` to the
`omniauth_callbacks_controller.rb` file immediately after the `class` line and
comment out the `protect_from_forgery` line using a `#`. Restart Unicorn for this
comment out the `protect_from_forgery` line using a `#`. Restart Puma for this
change to take effect. This allows the error to hit GitLab, where it can then
be seen in the usual logs, or as a flash message on the login screen.

That file is located in `/opt/gitlab/embedded/service/gitlab-rails/app/controllers`
for Omnibus installations and by default in `/home/git/gitlab/app/controllers` for
installations from source. Restart Unicorn using the `sudo gitlab-ctl restart unicorn`
installations from source. Restart Puma using the `sudo gitlab-ctl restart puma`
command on Omnibus installations and `sudo service gitlab restart` on installations
from source.
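A sketch of the temporary edit described above (the class and callback names follow the description; the exact `protect_from_forgery` arguments in your version may differ):

```ruby
# app/controllers/omniauth_callbacks_controller.rb -- temporary debugging
# change only; revert it once the underlying error has been captured.
class OmniauthCallbacksController < Devise::OmniauthCallbacksController
  skip_before_action :verify_authenticity_token

  # protect_from_forgery with: :exception   # commented out while debugging

  # ... rest of the controller unchanged ...
end
```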
@@ -122,7 +122,7 @@ Similar to the Kubernetes case, if you have scaled out your GitLab cluster to
use multiple application servers, you should pick a designated node (that isn't
auto-scaled away) for running the backup Rake task. Because the backup Rake
task is tightly coupled to the main Rails application, this is typically a node
on which you're also running Unicorn/Puma or Sidekiq.
on which you're also running Puma or Sidekiq.

Example output:
@@ -928,7 +928,6 @@ Stop the processes that are connected to the database. Leave the rest of GitLab
running:

```shell
sudo gitlab-ctl stop unicorn
sudo gitlab-ctl stop puma
sudo gitlab-ctl stop sidekiq
# Verify
@@ -996,7 +995,6 @@ For Docker installations, the restore task can be run from host:
```shell
# Stop the processes that are connected to the database
docker exec -it <name of container> gitlab-ctl stop unicorn
docker exec -it <name of container> gitlab-ctl stop puma
docker exec -it <name of container> gitlab-ctl stop sidekiq
@@ -91,10 +91,10 @@ need to enable the bundled PostgreSQL:
1. [Reconfigure GitLab](../administration/restart_gitlab.md#omnibus-gitlab-reconfigure)
   for the changes to take effect.

1. Start Unicorn and PostgreSQL so that we can prepare the schema:
1. Start Puma and PostgreSQL so that we can prepare the schema:

   ```shell
   sudo gitlab-ctl start unicorn
   sudo gitlab-ctl start puma
   sudo gitlab-ctl start postgresql
   ```
@@ -104,10 +104,10 @@ need to enable the bundled PostgreSQL:
   sudo gitlab-rake db:create db:migrate
   ```

1. Stop Unicorn to prevent other database access from interfering with the loading of data:
1. Stop Puma to prevent other database access from interfering with the loading of data:

   ```shell
   sudo gitlab-ctl stop unicorn
   sudo gitlab-ctl stop puma
   ```

After these steps, you have a fresh PostgreSQL database with up-to-date schema.
@@ -371,7 +371,7 @@ First, let's stop all of GitLab. Omnibus users can do so by running the
following on their GitLab servers:

```shell
sudo gitlab-ctl stop unicorn
sudo gitlab-ctl stop puma
sudo gitlab-ctl stop sidekiq
sudo gitlab-ctl stop mailroom
```
@@ -82,7 +82,7 @@ In GitLab 11.11, **secondary** nodes can use identical external URLs as long as
a unique `name` is set for each Geo node. The `gitlab.rb` setting
`gitlab_rails['geo_node_name']` must:

- Be set for each GitLab instance that runs `unicorn`, `sidekiq`, or `geo_logcursor`.
- Be set for each GitLab instance that runs `puma`, `sidekiq`, or `geo_logcursor`.
- Match a Geo node name.
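For example, a hypothetical `/etc/gitlab/gitlab.rb` entry (the node name is a placeholder) would be:

```ruby
# /etc/gitlab/gitlab.rb -- must match the Geo node name registered on the
# primary, and be set on every instance running puma, sidekiq, or geo_logcursor.
gitlab_rails['geo_node_name'] = 'paris-office'
```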

The load balancer must use sticky sessions in order to avoid authentication
@@ -344,7 +344,7 @@ For multi-node systems we recommend ingesting the logs into services like Elasti
|:------------------------|:---------|
| `application.log` | GitLab user activity |
| `git_json.log` | Failed GitLab interaction with Git repositories |
| `production.log` | Requests received from Unicorn, and the actions taken to serve those requests |
| `production.log` | Requests received from Puma, and the actions taken to serve those requests |
| `sidekiq.log` | Background jobs |
| `repocheck.log` | Repository activity |
| `integrations_json.log` | Activity between GitLab and integrated systems |
@@ -20,9 +20,8 @@ To access Gitaly timeout settings:
The following timeouts can be modified:

- **Default Timeout Period**. This timeout is the default for most Gitaly calls. It should be shorter than the
  worker timeout that can be configured for [Puma](https://docs.gitlab.com/omnibus/settings/puma.html#puma-settings)
  or [Unicorn](https://docs.gitlab.com/omnibus/settings/unicorn.html). Used to make sure that Gitaly
  calls made within a web request cannot exceed the entire request timeout.
  worker timeout that can be configured for [Puma](https://docs.gitlab.com/omnibus/settings/puma.html#puma-settings).
  Used to make sure that Gitaly calls made within a web request cannot exceed the entire request timeout.
  Defaults to 55 seconds.

- **Fast Timeout Period**. This is the timeout for very short Gitaly calls. Defaults to 10 seconds.
@@ -8,7 +8,7 @@ module API
expose :name
expose :tag, as: :tag_name, if: ->(_, _) { can_download_code? }
expose :description
expose :description_html, unless: ->(_, _) { remove_description_html? } do |entity|
expose :description_html, if: -> (_, options) { options[:include_html_description] } do |entity|
  MarkupHelper.markdown_field(entity, :description, current_user: options[:current_user])
end
expose :created_at
@@ -45,11 +45,6 @@ module API
def can_read_milestone?
  Ability.allowed?(options[:current_user], :read_milestone, object.project)
end

def remove_description_html?
  ::Feature.enabled?(:remove_description_html_in_release_api, object.project, default_enabled: :yaml) &&
    ::Feature.disabled?(:remove_description_html_in_release_api_override, object.project)
end
end
end
end
@@ -29,6 +29,8 @@ module API
         desc: 'Return releases ordered by `released_at` or `created_at`.'
optional :sort, type: String, values: %w[asc desc], default: 'desc',
         desc: 'Return releases sorted in `asc` or `desc` order.'
optional :include_html_description, type: Boolean,
         desc: 'If `true`, a response includes HTML rendered markdown of the release description.'
end
get ':id/releases' do
  releases = ::ReleasesFinder.new(user_project, current_user, declared_params.slice(:order_by, :sort)).execute
@@ -43,7 +45,8 @@ module API
# context is unnecessary here.
cache_context: -> (_) { "user:{#{current_user&.id}}" },
expires_in: 5.minutes,
current_user: current_user
current_user: current_user,
include_html_description: params[:include_html_description]
end

desc 'Get a single project release' do
@@ -53,11 +56,13 @@ module API
end
params do
  requires :tag_name, type: String, desc: 'The name of the tag', as: :tag
  optional :include_html_description, type: Boolean,
           desc: 'If `true`, a response includes HTML rendered markdown of the release description.'
end
get ':id/releases/:tag_name', requirements: RELEASE_ENDPOINT_REQUIREMENTS do
  authorize_download_code!

  present release, with: Entities::Release, current_user: current_user
  present release, with: Entities::Release, current_user: current_user, include_html_description: params[:include_html_description]
end

desc 'Create a new release' do
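A sketch of calling the endpoint with the new parameter (the host, project ID, and token are placeholders):

```ruby
require "net/http"
require "json"

# Request HTML-rendered release descriptions alongside the raw Markdown.
uri = URI("https://gitlab.example.com/api/v4/projects/1/releases?include_html_description=true")
request = Net::HTTP::Get.new(uri)
request["PRIVATE-TOKEN"] = "<your_access_token>"

response = Net::HTTP.start(uri.host, uri.port, use_ssl: true) { |http| http.request(request) }
releases = JSON.parse(response.body)

# `description_html` is only exposed when the parameter is set to true.
puts releases.first&.dig("description_html")
```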
@@ -54,6 +54,7 @@ module Gitlab
user_login: user&.username,
user_email: user&.email,
pipeline_id: build.pipeline.id.to_s,
pipeline_source: build.pipeline.source.to_s,
job_id: build.id.to_s,
ref: source_ref,
ref_type: ref_type,
@@ -18,7 +18,8 @@ module Gitlab
wiki: wiki.hook_attrs,
object_attributes: wiki_page.hook_attrs.merge(
  url: Gitlab::UrlBuilder.build(wiki_page),
  action: action
  action: action,
  diff_url: Gitlab::UrlBuilder.build(wiki_page, action: :diff, version_id: wiki_page.version.id)
)
}
end
@@ -19,9 +19,6 @@ module Gitlab
end

def cluster_prometheus_adapter
  application = cluster&.application_prometheus
  return application if application&.available?

  integration = cluster&.integration_prometheus
  integration if integration&.available?
end
@@ -344,7 +344,6 @@
  category: terraform
  redis_slot: terraform
  aggregation: weekly
  feature_flag: usage_data_p_terraform_state_api_unique_users
# Pipeline Authoring
- name: o_pipeline_authoring_unique_users_committing_ciconfigfile
  category: pipeline_authoring
(Several file diffs were suppressed because they are too large, including the translation catalogs locale/bg/gitlab.po, locale/de/gitlab.po, locale/eo/gitlab.po, locale/es/gitlab.po, and locale/fr/gitlab.po. Some files were not shown because too many files changed in this diff.)