Add latest changes from gitlab-org/gitlab@master

This commit is contained in:
GitLab Bot 2023-08-23 09:10:51 +00:00
parent 76a58e19f0
commit 9134f0b500
46 changed files with 1200 additions and 379 deletions

View File

@ -11,6 +11,8 @@ workhorse:verify:
.workhorse:test:
extends: .workhorse:rules:workhorse
image: ${REGISTRY_HOST}/${REGISTRY_GROUP}/gitlab-build-images/debian-${DEBIAN_VERSION}-ruby-${RUBY_VERSION}-golang-${GO_VERSION}-rust-${RUST_VERSION}:rubygems-${RUBYGEMS_VERSION}-git-2.36-exiftool-12.60
services:
- name: redis:${REDIS_VERSION}-alpine
variables:
GITALY_ADDRESS: "tcp://127.0.0.1:8075"
stage: test
@ -22,6 +24,8 @@ workhorse:verify:
- bundle_install_script
- go version
- scripts/gitaly-test-build
- cp workhorse/config.toml.example workhorse/config.toml
- sed -i 's|URL.*$|URL = "redis://redis:6379"|g' workhorse/config.toml
script:
- make -C workhorse test
@ -30,6 +34,7 @@ workhorse:test go:
parallel:
matrix:
- GO_VERSION: ["1.18", "1.19", "1.20"]
REDIS_VERSION: ["7.0", "6.2"]
script:
- make -C workhorse test-coverage
coverage: '/\d+.\d+%/'
@ -43,11 +48,15 @@ workhorse:test fips:
parallel:
matrix:
- GO_VERSION: ["1.18", "1.19", "1.20"]
REDIS_VERSION: ["7.0", "6.2"]
image: ${REGISTRY_HOST}/${REGISTRY_GROUP}/gitlab-build-images/ubi-${UBI_VERSION}-ruby-${RUBY_VERSION}-golang-${GO_VERSION}-rust-${RUST_VERSION}:rubygems-${RUBYGEMS_VERSION}-git-2.36-exiftool-12.60
variables:
FIPS_MODE: 1
workhorse:test race:
extends: .workhorse:test
parallel:
matrix:
- REDIS_VERSION: ["7.0", "6.2"]
script:
- make -C workhorse test-race

View File

@ -19,5 +19,4 @@
- actions.each do |action|
= action
%button.gl-button.gl-banner-close.btn-sm.btn-icon.js-close{ @close_options, class: close_class, type: 'button' }
= sprite_icon('close', size: 16, css_class: 'dismiss-icon')
= render Pajamas::ButtonComponent.new(category: :tertiary, variant: close_button_variant, size: :small, icon: 'close', button_options: @close_options)

View File

@ -27,7 +27,7 @@ module Pajamas
@svg_path = svg_path.to_s
@banner_options = banner_options
@button_options = button_options
@close_options = close_options
@close_options = format_options(options: close_options, css_classes: %w[js-close gl-banner-close])
end
VARIANT_OPTIONS = [:introduction, :promotion].freeze
@ -41,11 +41,11 @@ module Pajamas
classes.join(' ')
end
def close_class
def close_button_variant
if introduction?
'btn-confirm btn-confirm-tertiary'
:confirm
else
'btn-default btn-default-tertiary'
:default
end
end

View File

@ -221,8 +221,7 @@ class Admin::UsersController < Admin::ApplicationController
respond_to do |format|
result = Users::UpdateService.new(current_user, user_params_with_pass.merge(user: user)).execute do |user|
user.skip_reconfirmation!
user.send_only_admin_changed_your_password_notification! if admin_making_changes_for_another_user?
prepare_user_for_update(user)
end
if result[:status] == :success
@ -393,6 +392,12 @@ class Admin::UsersController < Admin::ApplicationController
@can_impersonate = helpers.can_impersonate_user(user, impersonation_in_progress?)
@impersonation_error_text = @can_impersonate ? nil : helpers.impersonation_error_text(user, impersonation_in_progress?)
end
# method overridden in EE
def prepare_user_for_update(user)
user.skip_reconfirmation!
user.send_only_admin_changed_your_password_notification! if admin_making_changes_for_another_user?
end
end
Admin::UsersController.prepend_mod_with('Admin::UsersController')

View File

@ -430,6 +430,7 @@ class NotificationService
def send_service_desk_notification(note)
return unless note.noteable_type == 'Issue'
return if note.confidential
return unless note.project.service_desk_enabled?
issue = note.noteable
recipients = issue.email_participants_emails

View File

@ -1,8 +0,0 @@
---
name: code_suggestions_tokens_from_customers_dot
introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/125405
rollout_issue_url: https://gitlab.com/gitlab-org/gitlab/-/issues/417762
milestone: '16.3'
type: development
group: group::application performance
default_enabled: true

View File

@ -0,0 +1,9 @@
# frozen_string_literal: true

# Drops the unused `dashboard_notification_limit` column from
# `application_settings`.
#
# NOTE(review): uses `change` (not up/down) — `remove_column` with the full
# column definition (:integer, default: 0, null: false) is reversible, so
# Rails can re-add the column with the same definition on rollback.
class RemoveApplicationSettingsDashboardNotificationLimitColumn < Gitlab::Database::Migration[2.1]
  # Retry the DDL under lock contention instead of failing the migration.
  enable_lock_retries!

  def change
    remove_column :application_settings, :dashboard_notification_limit, :integer, default: 0, null: false
  end
end

View File

@ -0,0 +1,24 @@
# frozen_string_literal: true

# Removes the temporary partial index
# `tmp_idx_vuln_reads_where_dismissal_reason_null` from `vulnerability_reads`,
# presumably left over from a finished backfill — TODO confirm against the
# introducing migration.
class DropTemporaryIndexOnVulnerabilityReadsDismissalReason < Gitlab::Database::Migration[2.1]
  # Concurrent index operations cannot run inside a transaction.
  disable_ddl_transaction!

  INDEX_NAME = "tmp_idx_vuln_reads_where_dismissal_reason_null"
  # Numeric value of the `dismissed` state used in the partial-index predicate.
  DISMISSED_STATE = 2

  def up
    remove_concurrent_index_by_name(
      :vulnerability_reads,
      INDEX_NAME
    )
  end

  # Rollback: recreate the same partial index (dismissed rows whose
  # dismissal_reason is still NULL) without blocking writes.
  def down
    add_concurrent_index(
      :vulnerability_reads,
      %i[id],
      where: "state = #{DISMISSED_STATE} AND dismissal_reason IS NULL",
      name: INDEX_NAME
    )
  end
end

View File

@ -0,0 +1 @@
7a773942b3b4364ff5148947795823ab0755339d23762e1002e8ab7887cacfb6

View File

@ -0,0 +1 @@
a80bf5eeb8bbcee7d751eb4cd68c3a9abd2e5e356c0677f1ed5bbf679bd1656c

View File

@ -11849,7 +11849,6 @@ CREATE TABLE application_settings (
encrypted_jitsu_administrator_password_iv bytea,
dashboard_limit_enabled boolean DEFAULT false NOT NULL,
dashboard_limit integer DEFAULT 0 NOT NULL,
dashboard_notification_limit integer DEFAULT 0 NOT NULL,
can_create_group boolean DEFAULT true NOT NULL,
lock_maven_package_requests_forwarding boolean DEFAULT false NOT NULL,
lock_pypi_package_requests_forwarding boolean DEFAULT false NOT NULL,
@ -34196,8 +34195,6 @@ CREATE INDEX tmp_idx_orphaned_approval_project_rules ON approval_project_rules U
CREATE INDEX tmp_idx_packages_on_project_id_when_npm_not_pending_destruction ON packages_packages USING btree (project_id) WHERE ((package_type = 2) AND (status <> 4));
CREATE INDEX tmp_idx_vuln_reads_where_dismissal_reason_null ON vulnerability_reads USING btree (id) WHERE ((state = 2) AND (dismissal_reason IS NULL));
CREATE INDEX tmp_idx_vulns_on_converted_uuid ON vulnerability_occurrences USING btree (id, uuid_convert_string_to_uuid) WHERE (uuid_convert_string_to_uuid = '00000000-0000-0000-0000-000000000000'::uuid);
CREATE INDEX tmp_index_ci_job_artifacts_on_expire_at_where_locked_unknown ON ci_job_artifacts USING btree (expire_at, job_id) WHERE ((locked = 2) AND (expire_at IS NOT NULL));

View File

@ -18,7 +18,7 @@ CI/CD to be disabled by default in new projects by modifying the settings in:
- `gitlab.rb` for Linux package installations.
Existing projects that already had CI/CD enabled are unchanged. Also, this setting only changes
the project default, so project owners [can still enable CI/CD in the project settings](../ci/enable_or_disable_ci.md#enable-cicd-in-a-project).
the project default, so project owners [can still enable CI/CD in the project settings](../ci/enable_or_disable_ci.md).
For self-compiled installations:

View File

@ -22,9 +22,9 @@ Manual and Timed rollouts are included automatically in projects controlled by
[Auto DevOps](../../topics/autodevops/index.md), but they are also configurable through
GitLab CI/CD in the `.gitlab-ci.yml` configuration file.
Manually triggered rollouts can be implemented with your [Continuous Delivery](../introduction/index.md#continuous-delivery)
methodology, while timed rollouts do not require intervention and can be part of your
[Continuous Deployment](../introduction/index.md#continuous-deployment) strategy.
Manually triggered rollouts can be implemented with Continuous Delivery,
while timed rollouts do not require intervention and can be part of your
Continuous Deployment strategy.
You can also combine both of them in a way that the app is deployed automatically
unless you eventually intervene manually if necessary.

View File

@ -6,29 +6,12 @@ description: "Learn how to use GitLab CI/CD, the GitLab built-in Continuous Inte
type: index
---
# GitLab CI/CD **(FREE ALL)**
# Get started with GitLab CI/CD **(FREE ALL)**
GitLab CI/CD is a tool for software development using the continuous methodologies:
Use GitLab CI/CD to automatically build, test, deploy, and monitor your applications.
- [Continuous Integration (CI)](introduction/index.md#continuous-integration)
- [Continuous Delivery (CD)](introduction/index.md#continuous-delivery)
- [Continuous Deployment (CD)](introduction/index.md#continuous-deployment)
NOTE:
Out-of-the-box management systems can decrease hours spent on maintaining toolchains by 10% or more.
Watch our ["Mastering continuous software development"](https://about.gitlab.com/webcast/mastering-ci-cd/)
webcast to learn about continuous methods and how GitLab CI/CD can help you simplify and scale software development.
Use GitLab CI/CD to catch bugs and errors early in
the development cycle. Ensure that all the code deployed to
production complies with the code standards you established for
your app.
GitLab CI/CD can automatically build, test, deploy, and
monitor your applications by using [Auto DevOps](../topics/autodevops/index.md).
For a complete overview of these methodologies and GitLab CI/CD,
read the [Introduction to CI/CD with GitLab](introduction/index.md).
GitLab CI/CD can catch bugs and errors early in the development cycle. It can ensure that
all the code deployed to production complies with your established code standards.
<div class="video-fallback">
Video demonstration of continuous integration with GitLab CI/CD: <a href="https://www.youtube.com/watch?v=ljth1Q5oJoo">Continuous Integration with GitLab (overview demo)</a>.
@ -37,90 +20,33 @@ read the [Introduction to CI/CD with GitLab](introduction/index.md).
<iframe src="https://www.youtube-nocookie.com/embed/ljth1Q5oJoo" frameborder="0" allowfullscreen> </iframe>
</figure>
## Concepts
If you are new to GitLab CI/CD, get started with a tutorial:
GitLab CI/CD uses a number of concepts to describe and run your build and deploy.
- [Create and run your first GitLab CI/CD pipeline](quick_start/index.md)
- [Create a complex pipeline](quick_start/tutorial.md)
| Concept | Description |
|:--------------------------------------------------------|:--------------------------------------------------------------------------------------|
| [Pipelines](pipelines/index.md) | Structure your CI/CD process through pipelines. |
| [CI/CD variables](variables/index.md) | Reuse values based on a variable/value key pair. |
| [Environments](environments/index.md) | Deploy your application to different environments (for example, staging, production). |
| [Job artifacts](jobs/job_artifacts.md) | Output, use, and reuse job artifacts. |
| [Cache dependencies](caching/index.md) | Cache your dependencies for a faster execution. |
| [GitLab Runner](https://docs.gitlab.com/runner/) | Configure your own runners to execute your scripts. |
| [Pipeline efficiency](pipelines/pipeline_efficiency.md) | Configure your pipelines to run quickly and efficiently. |
| [Test cases](test_cases/index.md) | Create testing scenarios. |
## CI/CD methodologies
## Configuration
With the continuous method of software development, you continuously build,
test, and deploy iterative code changes. This iterative process helps reduce
the chance that you develop new code based on buggy or failed previous versions.
With this method, you strive to have less human intervention or even no intervention at all,
from the development of new code until its deployment.
GitLab CI/CD supports numerous configuration options:
The three primary approaches for CI/CD are:
| Configuration | Description |
|:---------------------------------------------------------------------------------------------------|:------------------------------------------------------------------------------------------|
| [Schedule pipelines](pipelines/schedules.md) | Schedule pipelines to run as often as you need. |
| [Custom path for `.gitlab-ci.yml`](pipelines/settings.md#specify-a-custom-cicd-configuration-file) | Define a custom path for the CI/CD configuration file. |
| [Git submodules for CI/CD](git_submodules.md) | Configure jobs for using Git submodules. |
| [SSH keys for CI/CD](ssh_keys/index.md) | Using SSH keys in your CI pipelines. |
| [Pipeline triggers](triggers/index.md) | Trigger pipelines through the API. |
| [Merge request pipelines](pipelines/merge_request_pipelines.md) | Design a pipeline structure for running a pipeline in merge requests. |
| [Integrate with Kubernetes clusters](../user/infrastructure/clusters/index.md) | Connect your project to Google Kubernetes Engine (GKE) or an existing Kubernetes cluster. |
| [Optimize GitLab and GitLab Runner for large repositories](large_repositories/index.md) | Recommended strategies for handling large repositories. |
| [`.gitlab-ci.yml` full reference](yaml/index.md) | All the attributes you can use with GitLab CI/CD. |
- [Continuous Integration (CI)](https://en.wikipedia.org/wiki/Continuous_integration)
- [Continuous Delivery (CD)](https://en.wikipedia.org/wiki/Continuous_delivery)
- [Continuous Deployment (CD)](https://en.wikipedia.org/wiki/Continuous_deployment)
Certain operations can only be performed according to the
[user](../user/permissions.md#gitlab-cicd-permissions) and [job](../user/permissions.md#job-permissions) permissions.
Out-of-the-box management systems can decrease hours spent on maintaining toolchains by 10% or more.
Watch our ["Mastering continuous software development"](https://about.gitlab.com/webcast/mastering-ci-cd/)
webcast to learn about continuous methods and how built-in GitLab CI/CD can help you simplify and scale software development.
## Features
GitLab CI/CD features, grouped by DevOps stage, include:
| Feature | Description |
|:---------------------------------------------------------------------------------------------|:------------|
| **Configure** | |
| [Auto DevOps](../topics/autodevops/index.md) | Set up your app's entire lifecycle. |
| [ChatOps](chatops/index.md) | Trigger CI jobs from chat, with results sent back to the channel. |
| [Connect to cloud services](cloud_services/index.md) | Connect to cloud providers using OpenID Connect (OIDC) to retrieve temporary credentials to access services or secrets. |
| **Verify** | |
| [CI services](services/index.md) | Link Docker containers with your base image. |
| [GitLab CI/CD for external repositories](ci_cd_for_external_repos/index.md) | Get the benefits of GitLab CI/CD combined with repositories in GitHub and Bitbucket Cloud. |
| [Interactive Web Terminals](interactive_web_terminal/index.md) | Open an interactive web terminal to debug the running jobs. |
| [Review Apps](review_apps/index.md) | Configure GitLab CI/CD to preview code changes. |
| [Unit test reports](testing/unit_test_reports.md) | Identify test failures directly on merge requests. |
| [Using Docker images](docker/using_docker_images.md) | Use GitLab and GitLab Runner with Docker to build and test applications. |
| **Release** | |
| [Auto Deploy](../topics/autodevops/stages.md#auto-deploy) | Deploy your application to a production environment in a Kubernetes cluster. |
| [Building Docker images](docker/using_docker_build.md) | Maintain Docker-based projects using GitLab CI/CD. |
| [Canary Deployments](../user/project/canary_deployments.md) | Ship features to only a portion of your pods and let a percentage of your user base visit the temporarily deployed feature. |
| [Deploy boards](../user/project/deploy_boards.md) | Check the current health and status of each CI/CD environment running on Kubernetes. |
| [Feature flags](../operations/feature_flags.md) | Deploy your features behind Feature flags. |
| [GitLab Pages](../user/project/pages/index.md) | Deploy static websites. |
| [GitLab Releases](../user/project/releases/index.md) | Add release notes to Git tags. |
| [Cloud deployment](cloud_deployment/index.md) | Deploy your application to a main cloud provider. |
| **Secure** | |
| [Code Quality](testing/code_quality.md) | Analyze your source code quality. |
| [Container Scanning](../user/application_security/container_scanning/index.md) | Scan your container images for known vulnerabilities. |
| [Coverage-guided fuzz testing](../user/application_security/coverage_fuzzing/index.md) | Test your application's behavior by providing randomized input. |
| [Dynamic Application Security Testing](../user/application_security/dast/index.md) | Test your application's runtime behavior for vulnerabilities. |
| [Dependency Scanning](../user/application_security/dependency_scanning/index.md) | Analyze your dependencies for known vulnerabilities. |
| [Infrastructure as Code scanning](../user/application_security/iac_scanning/index.md) | Scan your IaC configuration files for known vulnerabilities. |
| [License Scanning](../user/compliance/license_scanning_of_cyclonedx_files/index.md) | Search your project dependencies for their licenses. |
| [Secret Detection](../user/application_security/secret_detection/index.md) | Search your application's source code for secrets. |
| [Static Application Security Testing](../user/application_security/sast/index.md) | Test your application's source code for known vulnerabilities. |
| [Web API fuzz testing](../user/application_security/api_fuzzing/index.md) | Test your application's API behavior by providing randomized input. |
| **Govern** | |
| [Compliance frameworks](../user/group/compliance_frameworks.md) | Enforce a GitLab CI/CD configuration on all projects in a group. |
| [Scan execution policies](../user/application_security/policies/scan-execution-policies.md) | Enforce security scans run on a specified schedule or with the project pipeline. |
| [Scan results policies](../user/application_security/policies/scan-result-policies.md) | Enforce action based on results of a pipeline security scan. |
## Examples
See the [CI/CD examples](examples/index.md) page for example project code and tutorials for
using GitLab CI/CD with various:
- App frameworks
- Languages
- Platforms
- <i class="fa fa-youtube-play youtube" aria-hidden="true"></i>Learn how to: [configure CI/CD](https://www.youtube.com/watch?v=opdLqwz6tcE).
- [Make the case for CI/CD in your organization](https://about.gitlab.com/devops-tools/github-vs-gitlab/).
- Learn how [Verizon reduced rebuilds](https://about.gitlab.com/blog/2019/02/14/verizon-customer-story/) from 30 days to under 8 hours with GitLab.
- <i class="fa fa-youtube-play youtube" aria-hidden="true"></i> [Get a deeper look at GitLab CI/CD](https://youtu.be/l5705U8s_nQ?t=369).
## Administration
@ -142,38 +68,3 @@ See also:
[GitLab Workflow VS Code extension](../user/project/repository/vscode.md) helps you
[validate your configuration](https://marketplace.visualstudio.com/items?itemName=GitLab.gitlab-workflow#validate-gitlab-ci-configuration)
and [view your pipeline status](https://marketplace.visualstudio.com/items?itemName=GitLab.gitlab-workflow#information-about-your-branch-pipelines-mr-closing-issue)
See also the [Why CI/CD?](https://docs.google.com/presentation/d/1OGgk2Tcxbpl7DJaIOzCX4Vqg3dlwfELC3u2jEeCBbDk) presentation.
### Major version changes (breaking)
As GitLab CI/CD has evolved, certain breaking changes have
been necessary.
For GitLab 15.0 and later, all breaking changes are documented on the following pages:
- [Deprecations](../update/deprecations.md)
- [Removals](../update/removals.md)
The breaking changes for [GitLab Runner](https://docs.gitlab.com/runner/) in earlier
major version releases are:
- 14.0: No breaking changes.
- 13.0:
- [Remove Backported `os.Expand`](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/4915).
- [Remove Fedora 29 package support](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/16158).
- [Remove macOS 32-bit support](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/25466).
- [Removed `debug/jobs/list?v=1` endpoint](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/6361).
- [Remove support for array of strings when defining services for Docker executor](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/4922).
- [Remove `--docker-services` flag on register command](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/6404).
- [Remove legacy build directory caching](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/4180).
- [Remove `FF_USE_LEGACY_VOLUMES_MOUNTING_ORDER` feature flag](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/6581).
- [Remove support for Windows Server 1803](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/6553).
- 12.0:
- [Use `refspec` to clone/fetch Git repository](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/4069).
- [Old cache configuration](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/4070).
- [Old metrics server configuration](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/4072).
- [Remove `FF_K8S_USE_ENTRYPOINT_OVER_COMMAND`](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/4073).
- [Remove Linux distributions that reach EOL](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/1130).
- [Update command line API for helper images](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/4013).
- [Remove old `git clean` flow](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/4175).

Binary file not shown.

Before

Width:  |  Height:  |  Size: 40 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 77 KiB

View File

@ -1,116 +1,11 @@
---
stage: Verify
group: Pipeline Authoring
info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://about.gitlab.com/handbook/product/ux/technical-writing/#assignments
description: "An overview of Continuous Integration, Continuous Delivery, and Continuous Deployment, as well as an introduction to GitLab CI/CD."
type: concepts
redirect_to: 'index.md'
remove_date: '2023-11-24'
---
# CI/CD concepts **(FREE ALL)**
This document was moved to [another location](../index.md).
With the continuous method of software development, you continuously build,
test, and deploy iterative code changes. This iterative process helps reduce
the chance that you develop new code based on buggy or failed previous versions.
With this method, you strive to have less human intervention or even no intervention at all,
from the development of new code until its deployment.
The three primary approaches for the continuous method are:
- [Continuous Integration](#continuous-integration)
- [Continuous Delivery](#continuous-delivery)
- [Continuous Deployment](#continuous-deployment)
Out-of-the-box management systems can decrease hours spent on maintaining toolchains by 10% or more.
Watch our ["Mastering continuous software development"](https://about.gitlab.com/webcast/mastering-ci-cd/)
webcast to learn about continuous methods and how built-in GitLab CI/CD can help you simplify and scale software development.
- <i class="fa fa-youtube-play youtube" aria-hidden="true"></i>Learn how to: [configure CI/CD](https://www.youtube.com/watch?v=opdLqwz6tcE).
- [Make the case for CI/CD in your organization](https://about.gitlab.com/devops-tools/github-vs-gitlab/).
- Learn how [Verizon reduced rebuilds](https://about.gitlab.com/blog/2019/02/14/verizon-customer-story/) from 30 days to under 8 hours with GitLab.
## Continuous Integration
Consider an application that has its code stored in a Git
repository in GitLab. Developers push code changes every day,
multiple times a day. For every push to the repository, you
can create a set of scripts to build and test your application
automatically. These scripts help decrease the chances that you introduce errors in your application.
This practice is known as [Continuous Integration](https://en.wikipedia.org/wiki/Continuous_integration).
Each change submitted to an application, even to development branches,
is built and tested automatically and continuously. These tests ensure the
changes pass all tests, guidelines, and code compliance
standards you established for your application.
[GitLab itself](https://gitlab.com/gitlab-org/gitlab) is an
example of a project that uses Continuous Integration as a software
development method. For every push to the project, a set
of checks run against the code.
## Continuous Delivery
[Continuous Delivery](https://continuousdelivery.com/) is a step
beyond Continuous Integration. Not only is your application
built and tested each time a code change is pushed to the codebase,
the application is also deployed continuously. However, with continuous
delivery, you trigger the deployments manually.
Continuous Delivery checks the code automatically, but it requires
human intervention to manually and strategically trigger the deployment
of the changes.
## Continuous Deployment
Continuous Deployment is another step beyond Continuous Integration, similar to
Continuous Delivery. The difference is that instead of deploying your
application manually, you set it to be deployed automatically.
Human intervention is not required.
## GitLab CI/CD
[GitLab CI/CD](../quick_start/index.md) is the part of GitLab that you use
for all of the continuous methods (Continuous Integration,
Delivery, and Deployment). With GitLab CI/CD, you can test, build,
and publish your software with no third-party application or integration needed.
<i class="fa fa-youtube-play youtube" aria-hidden="true"></i>
For an overview, see [Introduction to GitLab CI/CD](https://www.youtube.com/watch?v=l5705U8s_nQ&t=397) from an April 2020 GitLab meetup.
### GitLab CI/CD workflow
GitLab CI/CD fits in a common development workflow.
You can start by discussing a code implementation in an issue
and working locally on your proposed changes. Then you can push your
commits to a feature branch in a remote repository that's hosted in GitLab.
The push triggers the CI/CD pipeline for your project. Then, GitLab CI/CD:
- Runs automated scripts (sequentially or in parallel) to:
- Build and test your application.
- Preview the changes in a Review App, the same as you
would see on your `localhost`.
After the implementation works as expected:
- Get your code reviewed and approved.
- Merge the feature branch into the default branch.
- GitLab CI/CD deploys your changes automatically to a production environment.
If something goes wrong, you can roll back your changes.
![GitLab workflow example](img/gitlab_workflow_example_11_9.png)
This workflow shows the major steps in the GitLab process.
You don't need any external tools to deliver your software and
you can visualize all the steps in the GitLab UI.
### A deeper look into the CI/CD workflow
If you look deeper into the workflow, you can see
the features available in GitLab at each stage of the DevOps
lifecycle.
![Deeper look into the basic CI/CD workflow](img/gitlab_workflow_example_extended_v12_3.png)
<i class="fa fa-youtube-play youtube" aria-hidden="true"></i>
[Get a deeper look at GitLab CI/CD](https://youtu.be/l5705U8s_nQ?t=369).
<!-- This redirect file can be deleted after <2023-11-24>. -->
<!-- Redirects that point to other docs in the same project expire in three months. -->
<!-- Redirects that point to docs in a different project or site (for example, link is not relative and starts with `https:`) expire in one year. -->
<!-- Before deletion, see: https://docs.gitlab.com/ee/development/documentation/redirects.html -->

View File

@ -201,9 +201,6 @@ GitLab takes advantage of our connected ecosystem to automatically pull these ki
your merge requests, pipeline details pages, and other locations. You may find that you actually don't
need to configure anything to have these appear.
Our [CI/CD feature index](../index.md#features) has the full list of bundled features and links to the documentation for each.
Refer to this index if these features aren't working as expected, or if you'd like to see what's available.
### Templates
For advanced CI/CD teams, project templates can enable the reuse of pipeline configurations,

View File

@ -17,10 +17,15 @@ Your jobs can run on:
- [Windows runners](saas/windows_saas_runner.md) ([Beta](../../policy/experiment-beta-support.md#beta))
- [macOS runners](saas/macos_saas_runner.md) ([Beta](../../policy/experiment-beta-support.md#beta))
Refer to the compute minutes [cost factor](../../ci/pipelines/cicd_minutes.md#cost-factor) for the cost factor applied to the machine type based on size.
The number of minutes you can use on these runners depends on the [maximum number of compute minutes](../pipelines/cicd_minutes.md)
For more information about the cost factor applied to the machine type based on size, see [cost factor](../../ci/pipelines/cicd_minutes.md#cost-factor).
The number of minutes you can use on these runners depends on the [maximum number of units of compute](../pipelines/cicd_minutes.md)
in your [subscription plan](https://about.gitlab.com/pricing/).
[Untagged](../../ci/runners/configure_runners.md#use-tags-to-control-which-jobs-a-runner-can-run) jobs automatically run in containers
on the `small` Linux runners.
The objective is to make 90% of CI/CD jobs start executing in 120 seconds or less. The error rate should be less than 0.5%.
## How SaaS runners work
When you use SaaS runners:
@ -30,9 +35,6 @@ When you use SaaS runners:
- The virtual machine where your job runs has `sudo` access with no password.
- The storage is shared by the operating system, the image with pre-installed software, and a copy of your cloned repository.
This means that the available free disk space for your jobs to use is reduced.
- [Untagged](../../ci/runners/configure_runners.md#use-tags-to-control-which-jobs-a-runner-can-run) jobs automatically run in containers
on the `small` Linux runners.
- The objective is to make 90% of CI jobs start executing in 120 seconds or less. The error rate target will be less than 0.5%.
NOTE:
Jobs handled by SaaS runners on GitLab.com **time out after 3 hours**, regardless of the timeout configured in a project.
@ -67,3 +69,65 @@ takes over the task of securely deleting the virtual machine and associated data
- Inbound communication from the public internet to the temporary VM is not allowed.
- Firewall rules do not permit communication between VMs.
- The only internal communication allowed to the temporary VMs is from the runner manager.
## Supported image lifecycle
For runners on macOS and Windows, you can only run jobs on supported images. You cannot bring your own image. Supported images have the following lifecycle:
- Beta
- Generally Available
- Deprecated
### Beta
To gather feedback on an image prior to making the image Generally Available (GA) and to address
any issues, new images are released as Beta. Any jobs running on Beta images are not
covered by the service-level agreement. If you use Beta images, you can provide feedback
by creating an issue.
### Generally Available
A Generally Available (GA) image is released after the image completes a Beta phase
and is considered suitable for general use. To become GA, the
image must fulfill the following requirements:
- Successful completion of a Beta phase by resolving all reported significant bugs
- Compatibility of installed software with the underlying OS
Jobs running on GA images are covered by the defined service-level agreement. Over time, these images are deprecated.
### Deprecated
A maximum of two Generally Available (GA) images are supported at a time. After a new GA image is released,
the oldest GA image becomes deprecated. A deprecated image is no longer
updated and is deleted after 3 months in accordance with the [deprecation guidelines](../../development/deprecation_guidelines/index.md).
## Major version changes (breaking)
As GitLab CI/CD and Runner have evolved, certain breaking changes have been necessary.
For GitLab 15.0 and later, all breaking changes are documented on the following page:
- [Deprecations and removals](../../update/deprecations.md)
The breaking changes for GitLab Runner in earlier major version releases are:
- 14.0: No breaking changes.
- 13.0:
- [Remove Backported `os.Expand`](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/4915).
- [Remove Fedora 29 package support](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/16158).
- [Remove macOS 32-bit support](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/25466).
- [Removed `debug/jobs/list?v=1` endpoint](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/6361).
- [Remove support for array of strings when defining services for Docker executor](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/4922).
- [Remove `--docker-services` flag on register command](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/6404).
- [Remove legacy build directory caching](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/4180).
- [Remove `FF_USE_LEGACY_VOLUMES_MOUNTING_ORDER` feature flag](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/6581).
- [Remove support for Windows Server 1803](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/6553).
- 12.0:
- [Use `refspec` to clone/fetch Git repository](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/4069).
- [Old cache configuration](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/4070).
- [Old metrics server configuration](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/4072).
- [Remove `FF_K8S_USE_ENTRYPOINT_OVER_COMMAND`](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/4073).
- [Remove Linux distributions that reach EOL](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/1130).
- [Update command line API for helper images](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/4013).
- [Remove old `git clean` flow](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/4175).

View File

@ -38,28 +38,33 @@ in your `.gitlab-ci.yml` file.
Each image runs a specific version of macOS and Xcode.
| VM image | Status |
|---------------------------|---------------|
| `macos-12-xcode-13` | `maintenance` |
| `macos-12-xcode-14` | `maintenance` |
| (none, awaiting macOS 13) | `beta` |
| VM image | Status |
|----------------------------|--------|
| `macos-12-xcode-13` | `GA` |
| `macos-12-xcode-14` | `GA` |
| `macos-13-xcode-14` | `Beta` |
NOTE:
If your job requires tooling or dependencies not available in our images, they can only be installed at job execution time.
## Image update policy
## Image update policy for macOS
GitLab expects to release new images based on this cadence:
macOS and Xcode follow a yearly release cadence, during which GitLab increments its versions synchronously. GitLab typically supports multiple versions of preinstalled tools. For more information, see
a [full list of preinstalled software](https://gitlab.com/gitlab-org/ci-cd/shared-runners/images/job-images/-/tree/main/toolchain).
macOS updates:
GitLab provides `stable` and `latest` macOS images that follow different update patterns:
- **For new OS versions:** When Apple releases a new macOS version to developers (like macOS `12`), GitLab will plan to release an image based on the OS within the next 30 business days. The image is considered `beta` and the contents of the image (including tool versions) are subject to change until the first patch release (`12.1`). The long-term name will not include `beta` (for example, `macos-12-xcode-13`), so customers are moved automatically out of beta over time. GitLab will try to minimize breaking changes between the first two minor versions but makes no guarantees. Tooling often gets critical bug fixes after the first public release of an OS version.
- **Stable image:** The `stable` images and installed components are updated every release. Images without the `:latest` prefix are considered stable images.
- **Latest image:** The `latest` images are typically updated on a weekly cadence and use a `:latest` prefix in the image name. Using the `latest` image results in more regularly updated components and shorter update times for Homebrew or asdf. The `latest` images are used to test software components before releasing the components to the `stable` images.
By definition, the `latest` images are always Beta.
A `latest` image is not available.
- **After the first patch release (`12.1`):**
- The image moves to `maintenance` mode. The tools GitLab builds into the image with Homebrew and asdf are frozen. GitLab continues making Xcode updates, security updates, and any non-breaking changes deemed necessary.
- The image for the previous OS version (`11`) moves to `frozen` mode. GitLab then does only unavoidable changes: security updates, runner version upgrades, and setting the production password.
### Release process
Both macOS and Xcode follow a yearly release cadence. As time goes on, GitLab increments their versions synchronously (meaning we build macOS 11 with Xcode 12, macOS 12 with Xcode 13, and so on).
When Apple releases a new macOS version, GitLab releases both `stable` and `latest` images based on the OS in the next release. Both images are Beta.
With the release of the first patch to macOS, the `stable` image becomes Generally Available (GA).
As only two GA images are supported at a time, the prior OS version becomes deprecated and is deleted after three months in accordance with the [supported image lifecycle](../index.md#supported-image-lifecycle).
## Example `.gitlab-ci.yml` file

View File

@ -35,7 +35,7 @@ You can execute your job in one of the following Windows versions:
| Version tag | Status |
|----------------|---------------|
| `windows-1809` | `maintenance` |
| `windows-1809` | `Beta` |
You can find a full list of available pre-installed software in
the [pre-installed software documentation](https://gitlab.com/gitlab-org/ci-cd/shared-runners/images/gcp/windows-containers/blob/main/cookbooks/preinstalled-software/README.md).
@ -81,16 +81,15 @@ test:
- echo "running scripts in the test job"
```
## Limitations and known issues
## Known issues
- All the limitations mentioned in our [Beta definition](../../../policy/experiment-beta-support.md#beta).
- The average provisioning time for a new Windows VM is 5 minutes.
This means that you may notice slower build start times
on the Windows runner fleet during the beta. In a future
release we intend to update the autoscaler to enable
the pre-provisioning of virtual machines. This is intended to significantly reduce
the time it takes to provision a VM on the Windows fleet. You can
follow along in the [related issue](https://gitlab.com/gitlab-org/ci-cd/custom-executor-drivers/autoscaler/-/issues/32).
- For more information about support for Beta features, see [Beta](../../../policy/experiment-beta-support.md#beta).
- The average provisioning time for a new Windows virtual machine (VM) is five minutes, so
you might notice slower start times for builds on the Windows runner
fleet during the Beta. Updating the autoscaler to enable the pre-provisioning
of virtual machines is proposed in a future release. This update is intended to
significantly reduce the time it takes to provision a VM on the Windows fleet.
For more information, see [issue 32](https://gitlab.com/gitlab-org/ci-cd/custom-executor-drivers/autoscaler/-/issues/32).
- The Windows runner fleet may be unavailable occasionally
for maintenance or updates.
- The job may stay in a pending state for longer than the

View File

@ -76,7 +76,7 @@ In GitLab 14.10, a [flag](../../../../administration/feature_flags.md) named `ce
Prerequisites:
- For a [GitLab CI/CD workflow](../ci_cd_workflow.md), ensure that
[GitLab CI/CD is enabled](../../../../ci/enable_or_disable_ci.md#enable-cicd-in-a-project).
[GitLab CI/CD is not disabled](../../../../ci/enable_or_disable_ci.md#disable-cicd-in-a-project).
You must register an agent before you can install the agent in your cluster. To register an agent:

View File

@ -79,7 +79,7 @@ merge. This configuration works for both:
- GitLab CI/CD pipelines.
- Pipelines run from an [external CI integration](../integrations/index.md#available-integrations).
As a result, [disabling GitLab CI/CD pipelines](../../../ci/enable_or_disable_ci.md)
As a result, [disabling GitLab CI/CD pipelines](../../../ci/enable_or_disable_ci.md#disable-cicd-in-a-project)
does not disable this feature, but you can use pipelines from external
CI providers with it.

View File

@ -47462,9 +47462,6 @@ msgstr ""
msgid "The parent epic is confidential and can only contain confidential epics and issues"
msgstr ""
msgid "The parsed YAML is too big"
msgstr ""
msgid "The password for the Jenkins server."
msgstr ""
@ -56642,6 +56639,9 @@ msgstr ""
msgid "must be less than the limit of %{tag_limit} tags"
msgstr ""
msgid "must be owned by the user's enterprise group"
msgstr ""
msgid "must be set for a project namespace"
msgstr ""

View File

@ -94,10 +94,10 @@ module QA
--set="auth.signing_key=#{Runtime::Env.workspaces_oauth_signing_key}" \
--set="ingress.host.workspaceDomain=#{Runtime::Env.workspaces_proxy_domain}" \
--set="ingress.host.wildcardDomain=*.#{Runtime::Env.workspaces_proxy_domain}" \
--set="ingress.tls.workspaceDomainCert=#{Runtime::Env.workspaces_domain_cert}" \
--set="ingress.tls.workspaceDomainKey=#{Runtime::Env.workspaces_domain_key}" \
--set="ingress.tls.wildcardDomainCert=#{Runtime::Env.workspaces_wildcard_cert}" \
--set="ingress.tls.wildcardDomainKey=#{Runtime::Env.workspaces_wildcard_key}" \
--set="ingress.tls.workspaceDomainCert=$(cat #{Runtime::Env.workspaces_domain_cert})" \
--set="ingress.tls.workspaceDomainKey=$(cat #{Runtime::Env.workspaces_domain_key})" \
--set="ingress.tls.wildcardDomainCert=$(cat #{Runtime::Env.workspaces_wildcard_cert})" \
--set="ingress.tls.wildcardDomainKey=$(cat #{Runtime::Env.workspaces_wildcard_key})" \
--set="ingress.className=nginx"
CMD
end

View File

@ -32,7 +32,7 @@ printf "${Color_Off}"
printf "${BBlue}Running Remote Development backend specs${Color_Off}\n\n"
bin/rspec -r spec_helper \
$(find . -path '**/remote_development/**/*_spec.rb') \
$(find . -path './**/remote_development/*_spec.rb' | grep -v 'qa/qa') \
ee/spec/graphql/types/query_type_spec.rb \
ee/spec/graphql/types/subscription_type_spec.rb \
ee/spec/requests/api/internal/kubernetes_spec.rb \

View File

@ -27,7 +27,7 @@ RSpec.describe Pajamas::BannerComponent, type: :component do
end
it 'renders a close button' do
expect(page).to have_css "button.gl-banner-close"
expect(page).to have_css "button.gl-button.gl-banner-close"
end
describe 'button_text and button_link' do

View File

@ -57,14 +57,6 @@ RSpec.describe Gitlab::Patch::RedisCacheStore, :use_clean_rails_redis_caching, f
context 'when cache is Rails.cache' do
let(:cache) { Rails.cache }
context 'when reading using secondary store as default' do
before do
stub_feature_flags(use_primary_store_as_default_for_cache: false)
end
it_behaves_like 'reading using cache stores'
end
it_behaves_like 'reading using cache stores'
end

View File

@ -10,6 +10,29 @@ RSpec.describe Admin::UsersController, :enable_admin_mode, feature_category: :us
sign_in(admin)
end
describe 'PATCH #update' do
let(:user) { create(:user) }
context "when admin changes user email" do
let(:new_email) { 'new-email@example.com' }
subject(:request) { patch admin_user_path(user), params: { user: { email: new_email } } }
it 'allows change user email', :aggregate_failures do
expect { request }
.to change { user.reload.email }.from(user.email).to(new_email)
expect(response).to redirect_to(admin_user_path(user))
expect(flash[:notice]).to eq('User was successfully updated.')
end
it 'does not email the user with confirmation_instructions' do
expect { request }
.not_to have_enqueued_mail(DeviseMailer, :confirmation_instructions)
end
end
end
describe 'PUT #block' do
context 'when request format is :json' do
subject(:request) { put block_admin_user_path(user, format: :json) }

View File

@ -463,6 +463,14 @@ RSpec.describe API::Ci::Jobs, feature_category: :continuous_integration do
it { expect(response).to have_gitlab_http_status(:bad_request) }
end
it_behaves_like 'an endpoint with keyset pagination' do
let_it_be(:another_build) { create(:ci_build, :success, :tags, project: project, pipeline: pipeline) }
let(:first_record) { project.builds.last }
let(:second_record) { project.builds.first }
let(:api_call) { api("/projects/#{project.id}/jobs", user) }
end
end
context 'unauthorized user' do

View File

@ -6,7 +6,6 @@ RSpec.describe API::Groups, feature_category: :groups_and_projects do
include GroupAPIHelpers
include UploadHelpers
include WorkhorseHelpers
include KeysetPaginationHelpers
let_it_be(:user1) { create(:user, can_create_group: false) }
let_it_be(:user2) { create(:user) }
@ -196,37 +195,10 @@ RSpec.describe API::Groups, feature_category: :groups_and_projects do
end
end
context 'keyset pagination' do
context 'on making requests with supported ordering structure' do
it 'paginates the records correctly', :aggregate_failures do
# first page
get api('/groups'), params: { pagination: 'keyset', per_page: 1 }
expect(response).to have_gitlab_http_status(:ok)
records = json_response
expect(records.size).to eq(1)
expect(records.first['id']).to eq(group_1.id)
params_for_next_page = pagination_params_from_next_url(response)
expect(params_for_next_page).to include('cursor')
get api('/groups'), params: params_for_next_page
expect(response).to have_gitlab_http_status(:ok)
records = Gitlab::Json.parse(response.body)
expect(records.size).to eq(1)
expect(records.first['id']).to eq(group_2.id)
end
end
context 'on making requests with unsupported ordering structure' do
it 'returns error', :aggregate_failures do
get api('/groups'), params: { pagination: 'keyset', per_page: 1, order_by: 'path', sort: 'desc' }
expect(response).to have_gitlab_http_status(:method_not_allowed)
expect(json_response['error']).to eq('Keyset pagination is not yet available for this type of request')
end
end
it_behaves_like 'an endpoint with keyset pagination', invalid_order: 'path' do
let(:first_record) { group_1 }
let(:second_record) { group_2 }
let(:api_call) { api('/groups') }
end
end
end

View File

@ -553,6 +553,12 @@ RSpec.describe API::ProjectPackages, feature_category: :package_registry do
let(:per_page) { 2 }
it_behaves_like 'an endpoint with keyset pagination' do
let(:first_record) { pipeline3 }
let(:second_record) { pipeline2 }
let(:api_call) { api(package_pipelines_url, user) }
end
context 'with no cursor supplied' do
subject { get api(package_pipelines_url, user), params: { per_page: per_page } }

View File

@ -576,6 +576,14 @@ RSpec.describe NotificationService, :mailer, feature_category: :team_planning do
end
it_behaves_like 'notification with exact metric events', 1
context 'when service desk is disabled' do
before do
project.update!(service_desk_enabled: false)
end
it_behaves_like 'no participants are notified'
end
end
context 'do exist and note is confidential' do

View File

@ -1,3 +1,6 @@
stages:
- dast
dast:
stage: dast
image:
@ -7,4 +10,6 @@ dast:
allow_failure: true
dast_configuration:
site_profile: "site_profile_name_included"
scanner_profile: "scanner_profile_name_included"
scanner_profile: "scanner_profile_name_included"
script:
- echo "Runs DAST!"

View File

@ -11,3 +11,13 @@ RSpec::Matchers.define :include_limited_pagination_headers do |expected|
expect(actual.headers).to include('X-Per-Page', 'X-Page', 'X-Next-Page', 'X-Prev-Page', 'Link')
end
end
# Passes when the response's next-page URL (taken from the keyset `Link`
# header) carries a `cursor` param, i.e. the endpoint emitted keyset
# pagination links. `expected` is unused; the matcher takes no argument.
RSpec::Matchers.define :include_keyset_url_params do |expected|
  include KeysetPaginationHelpers

  match do |actual|
    params_for_next_page = pagination_params_from_next_url(actual)
    expect(params_for_next_page).to include('cursor')
  end
end

View File

@ -0,0 +1,48 @@
# frozen_string_literal: true

# Shared examples for API endpoints that support keyset pagination.
#
# Callers must define:
#   first_record / second_record - the records expected on page 1 and page 2
#                                  under the endpoint's default ordering
#   api_call                     - the path passed to `get`
# Options:
#   invalid_order / invalid_sort - an ordering the endpoint rejects with 405
RSpec.shared_examples 'an endpoint with keyset pagination' do |invalid_order: 'name', invalid_sort: 'asc'|
  include KeysetPaginationHelpers

  let(:keyset_params) { { pagination: 'keyset', per_page: 1 } }
  let(:additional_params) { {} }

  subject do
    get api_call, params: keyset_params.merge(additional_params)
    response
  end

  context 'on making requests with supported ordering structure' do
    it 'includes keyset url params in the url response' do
      is_expected.to have_gitlab_http_status(:ok)
      is_expected.to include_keyset_url_params
    end

    it 'does not include pagination headers' do
      is_expected.to have_gitlab_http_status(:ok)
      is_expected.not_to include_pagination_headers
    end

    it 'paginates the records correctly', :aggregate_failures do
      is_expected.to have_gitlab_http_status(:ok)

      records = json_response
      expect(records.size).to eq(1)
      expect(records.first['id']).to eq(first_record.id)

      # Follow the cursor from the Link header to fetch page 2.
      get api_call, params: pagination_params_from_next_url(response)

      expect(response).to have_gitlab_http_status(:ok)
      records = Gitlab::Json.parse(response.body)
      expect(records.size).to eq(1)
      expect(records.first['id']).to eq(second_record.id)
    end
  end

  context 'on making requests with unsupported ordering structure' do
    let(:additional_params) { { order_by: invalid_order, sort: invalid_sort } }

    it 'returns error', :aggregate_failures do
      is_expected.to have_gitlab_http_status(:method_not_allowed)
      expect(json_response['error']).to eq('Keyset pagination is not yet available for this type of request')
    end
  end
end

View File

@ -8,12 +8,6 @@ RSpec.describe RedisCommands::Recorder, :use_clean_rails_redis_caching do
let(:cache) { Rails.cache }
let(:pattern) { nil }
before do
# do not need to test for positive case since this is testing
# a spec support class
stub_feature_flags(use_primary_and_secondary_stores_for_cache: false)
end
describe '#initialize' do
context 'with a block' do
it 'records Redis commands' do

View File

@ -22,6 +22,7 @@ require (
github.com/mitchellh/copystructure v1.2.0
github.com/prometheus/client_golang v1.16.0
github.com/rafaeljusto/redigomock/v3 v3.1.2
github.com/redis/go-redis/v9 v9.0.5
github.com/sebest/xff v0.0.0-20210106013422-671bd2870b3a
github.com/sirupsen/logrus v1.9.3
github.com/smartystreets/goconvey v1.7.2
@ -65,6 +66,7 @@ require (
github.com/cespare/xxhash/v2 v2.2.0 // indirect
github.com/client9/reopen v1.0.0 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect
github.com/dlclark/regexp2 v1.4.0 // indirect
github.com/go-ole/go-ole v1.2.6 // indirect
github.com/gogo/protobuf v1.3.2 // indirect

View File

@ -869,6 +869,8 @@ github.com/boombuler/barcode v1.0.0/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl
github.com/boombuler/barcode v1.0.1/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8=
github.com/bradfitz/gomemcache v0.0.0-20170208213004-1952afaa557d/go.mod h1:PmM6Mmwb0LSuEubjR8N7PtNe1KxZLtOUHtbeikc5h60=
github.com/bshuster-repo/logrus-logstash-hook v0.4.1/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk=
github.com/bsm/ginkgo/v2 v2.7.0 h1:ItPMPH90RbmZJt5GtkcNvIRuGEdwlBItdNVoyzaNQao=
github.com/bsm/gomega v1.26.0 h1:LhQm+AFcgV2M0WyKroMASzAzCAJVpAxQXv4SaI9a69Y=
github.com/buger/jsonparser v0.0.0-20180808090653-f4dd9f5a6b44/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s=
github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0=
github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8=
@ -1068,6 +1070,8 @@ github.com/denverdino/aliyungo v0.0.0-20190125010748-a747050bb1ba/go.mod h1:dV8l
github.com/devigned/tab v0.1.1/go.mod h1:XG9mPq0dFghrYvoBF3xdRrJzSTX1b7IQrvaL9mzjeJY=
github.com/dgrijalva/jwt-go v0.0.0-20170104182250-a601269ab70c/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
github.com/dgryski/go-sip13 v0.0.0-20200911182023-62edffca9245/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
github.com/digitalocean/godo v1.78.0/go.mod h1:GBmu8MkjZmNARE7IXRPmkbbnocNN8+uBm0xbEVw2LCs=
@ -2047,6 +2051,8 @@ github.com/rafaeljusto/redigomock/v3 v3.1.2 h1:B4Y0XJQiPjpwYmkH55aratKX1VfR+JRqz
github.com/rafaeljusto/redigomock/v3 v3.1.2/go.mod h1:F9zPqz8rMriScZkPtUiLJoLruYcpGo/XXREpeyasREM=
github.com/rakyll/embedmd v0.0.0-20171029212350-c8060a0752a2/go.mod h1:7jOTMgqac46PZcF54q6l2hkLEG8op93fZu61KmxWDV4=
github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
github.com/redis/go-redis/v9 v9.0.5 h1:CuQcn5HIEeK7BgElubPP8CGtE0KakrnbBSTLjathl5o=
github.com/redis/go-redis/v9 v9.0.5/go.mod h1:WqMKv5vnQbRuZstUwxQI195wHy+t4PuXDOjzMvcuQHk=
github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=

View File

@ -0,0 +1,185 @@
package goredis
import (
"context"
"errors"
"fmt"
"net"
"time"
redis "github.com/redis/go-redis/v9"
"gitlab.com/gitlab-org/gitlab/workhorse/internal/config"
_ "gitlab.com/gitlab-org/gitlab/workhorse/internal/helper"
internalredis "gitlab.com/gitlab-org/gitlab/workhorse/internal/redis"
)
// Package-level client state and connection-pool tuning defaults.
var (
	// rdb is the process-wide go-redis client, set by Configure.
	rdb *redis.Client

	// found in https://github.com/redis/go-redis/blob/c7399b6a17d7d3e2a57654528af91349f2468529/sentinel.go#L626
	errSentinelMasterAddr error = errors.New("redis: all sentinels specified in configuration are unreachable")
)

const (
	// Max Idle Connections in the pool.
	defaultMaxIdle = 1
	// Max Active Connections in the pool.
	defaultMaxActive = 1
	// Timeout for Read operations on the pool. 1 second is technically overkill,
	// it's just for sanity.
	defaultReadTimeout = 1 * time.Second
	// Timeout for Write operations on the pool. 1 second is technically overkill,
	// it's just for sanity.
	defaultWriteTimeout = 1 * time.Second
	// Timeout before killing Idle connections in the pool. 3 minutes seemed good.
	// If you _actually_ hit this timeout often, you should consider turning off
	// redis-support since it's not necessary at that point...
	defaultIdleTimeout = 3 * time.Minute
)
// createDialer references https://github.com/redis/go-redis/blob/b1103e3d436b6fe98813ecbbe1f99dc8d59b06c9/options.go#L214
// it intercepts the dial error and tracks it via a Prometheus counter.
// Addresses listed in sentinels get a much shorter dial timeout, per the
// Sentinel client guidelines.
func createDialer(sentinels []string) func(ctx context.Context, network, addr string) (net.Conn, error) {
	return func(ctx context.Context, network, addr string) (net.Conn, error) {
		isSentinel := false
		for _, sentinelAddr := range sentinels {
			if sentinelAddr == addr {
				isSentinel = true
				break
			}
		}

		// Defaults for a plain Redis connection; 5s matches the go-redis default.
		destination := "redis"
		dialTimeout := 5 * time.Second
		if isSentinel {
			// This timeout is recommended for Sentinel-support according to the guidelines.
			// https://redis.io/topics/sentinel-clients#redis-service-discovery-via-sentinel
			// For every address it should try to connect to the Sentinel,
			// using a short timeout (in the order of a few hundreds of milliseconds).
			destination = "sentinel"
			dialTimeout = 500 * time.Millisecond
		}

		dialer := &net.Dialer{
			Timeout:   dialTimeout,
			KeepAlive: 5 * time.Minute,
		}

		conn, err := dialer.DialContext(ctx, network, addr)
		if err != nil {
			internalredis.ErrorCounter.WithLabelValues("dial", destination).Inc()
			return nil, err
		}
		if !isSentinel {
			internalredis.TotalConnections.Inc()
		}
		return conn, nil
	}
}
// sentinelInstrumentationHook implements the redis.Hook interface so
// sentinel-related failures can be instrumented.
type sentinelInstrumentationHook struct{}

// DialHook wraps the client's dialer and bumps the error counter when go-redis
// reports that every configured sentinel was unreachable (matched by message,
// since go-redis does not export a sentinel error value).
func (s sentinelInstrumentationHook) DialHook(next redis.DialHook) redis.DialHook {
	return func(ctx context.Context, network, addr string) (net.Conn, error) {
		conn, err := next(ctx, network, addr)
		if err != nil && err.Error() == errSentinelMasterAddr.Error() {
			// check for non-dialer error
			internalredis.ErrorCounter.WithLabelValues("master", "sentinel").Inc()
		}
		return conn, err
	}
}

// ProcessHook is a pass-through; individual commands are not instrumented here.
func (s sentinelInstrumentationHook) ProcessHook(next redis.ProcessHook) redis.ProcessHook {
	return func(ctx context.Context, cmd redis.Cmder) error {
		return next(ctx, cmd)
	}
}

// ProcessPipelineHook is a pass-through; pipelines are not instrumented here.
func (s sentinelInstrumentationHook) ProcessPipelineHook(next redis.ProcessPipelineHook) redis.ProcessPipelineHook {
	return func(ctx context.Context, cmds []redis.Cmder) error {
		return next(ctx, cmds)
	}
}
// GetRedisClient returns the package-level client set up by Configure.
// It is nil when Configure has not been called or was given a nil config.
func GetRedisClient() *redis.Client {
	return rdb
}
// Configure sets up the package-level redis connection from cfg.
// A nil cfg is a no-op (Redis support disabled). When sentinels are
// configured a failover client is built, otherwise a plain client.
func Configure(cfg *config.RedisConfig) error {
	if cfg == nil {
		return nil
	}

	if len(cfg.Sentinel) > 0 {
		rdb = configureSentinel(cfg)
		return nil
	}

	client, err := configureRedis(cfg)
	rdb = client
	return err
}
// configureRedis builds a plain (non-sentinel) client from cfg, applying the
// pool-size and timeout defaults defined above.
func configureRedis(cfg *config.RedisConfig) (*redis.Client, error) {
	// go-redis only parses redis:// URLs; legacy configs may use tcp://.
	if cfg.URL.Scheme == "tcp" {
		cfg.URL.Scheme = "redis"
	}

	opt, err := redis.ParseURL(cfg.URL.String())
	if err != nil {
		return nil, err
	}

	opt.DB = getOrDefault(cfg.DB, 0)
	opt.Password = cfg.Password
	opt.PoolSize = getOrDefault(cfg.MaxActive, defaultMaxActive)
	opt.MaxIdleConns = getOrDefault(cfg.MaxIdle, defaultMaxIdle)
	opt.ConnMaxIdleTime = defaultIdleTimeout
	opt.ReadTimeout = defaultReadTimeout
	opt.WriteTimeout = defaultWriteTimeout
	// No sentinel addresses: every dial is a regular Redis connection.
	opt.Dialer = createDialer(nil)

	return redis.NewClient(opt), nil
}
// configureSentinel builds a failover client that discovers the master named
// cfg.SentinelMaster through the configured sentinel addresses.
func configureSentinel(cfg *config.RedisConfig) *redis.Client {
	// Flatten the configured sentinel URLs into host:port pairs.
	addrs := make([]string, 0, len(cfg.Sentinel))
	for _, s := range cfg.Sentinel {
		addrs = append(addrs, fmt.Sprintf("%s:%s", s.Hostname(), s.Port()))
	}

	client := redis.NewFailoverClient(&redis.FailoverOptions{
		MasterName:      cfg.SentinelMaster,
		SentinelAddrs:   addrs,
		Password:        cfg.Password,
		DB:              getOrDefault(cfg.DB, 0),
		PoolSize:        getOrDefault(cfg.MaxActive, defaultMaxActive),
		MaxIdleConns:    getOrDefault(cfg.MaxIdle, defaultMaxIdle),
		ConnMaxIdleTime: defaultIdleTimeout,
		ReadTimeout:     defaultReadTimeout,
		WriteTimeout:    defaultWriteTimeout,
		// The dialer needs the sentinel list to pick the shorter dial timeout.
		Dialer: createDialer(addrs),
	})

	// Count "all sentinels unreachable" errors via the instrumentation hook.
	client.AddHook(sentinelInstrumentationHook{})

	return client
}
// getOrDefault dereferences ptr when set, falling back to val otherwise.
// Used for optional integer config fields where nil means "not configured".
func getOrDefault(ptr *int, val int) int {
	if ptr == nil {
		return val
	}
	return *ptr
}

View File

@ -0,0 +1,107 @@
package goredis
import (
"context"
"net"
"testing"
"github.com/stretchr/testify/require"
"gitlab.com/gitlab-org/gitlab/workhorse/internal/config"
"gitlab.com/gitlab-org/gitlab/workhorse/internal/helper"
)
// mockRedisServer starts a one-shot TCP listener that records whether a
// connection was received and replies "OK\n", returning the listener address.
// NOTE(review): the port is hard-coded because go-redis cannot use port 0;
// parallel test runs on one host would collide on 6389 — confirm acceptable.
func mockRedisServer(t *testing.T, connectReceived *bool) string {
	// go-redis does not deal with port 0
	ln, err := net.Listen("tcp", "127.0.0.1:6389")
	require.Nil(t, err)

	go func() {
		defer ln.Close()
		conn, err := ln.Accept()
		require.Nil(t, err)
		*connectReceived = true
		conn.Write([]byte("OK\n"))
	}()

	return ln.Addr().String()
}
// Configure(nil) must be a no-op: no client is created and no error occurs.
func TestConfigureNoConfig(t *testing.T) {
	rdb = nil
	Configure(nil)
	require.Nil(t, rdb, "rdb client should be nil")
}
// TestConfigureValidConfigX verifies that both redis:// and legacy tcp://
// URL schemes produce a working client that dials the configured server.
func TestConfigureValidConfigX(t *testing.T) {
	testCases := []struct {
		scheme string
	}{
		{scheme: "redis"},
		{scheme: "tcp"},
	}

	for _, tc := range testCases {
		t.Run(tc.scheme, func(t *testing.T) {
			connectReceived := false
			a := mockRedisServer(t, &connectReceived)

			parsedURL := helper.URLMustParse(tc.scheme + "://" + a)
			cfg := &config.RedisConfig{URL: config.TomlURL{URL: *parsedURL}}

			// Fail fast on configuration errors instead of panicking later on a
			// nil client (the error was previously ignored).
			require.NoError(t, Configure(cfg))

			require.NotNil(t, GetRedisClient().Conn(), "Pool should not be nil")

			// goredis initialise connections lazily
			rdb.Ping(context.Background())
			require.True(t, connectReceived)

			rdb = nil
		})
	}
}
// TestConnectToSentinel verifies that sentinel URLs in both redis:// and
// legacy tcp:// form are dialled on first use of the failover client.
func TestConnectToSentinel(t *testing.T) {
	testCases := []struct {
		scheme string
	}{
		{scheme: "redis"},
		{scheme: "tcp"},
	}

	for _, tc := range testCases {
		t.Run(tc.scheme, func(t *testing.T) {
			connectReceived := false
			a := mockRedisServer(t, &connectReceived)

			addrs := []string{tc.scheme + "://" + a}
			var sentinelUrls []config.TomlURL
			for _, a := range addrs {
				parsedURL := helper.URLMustParse(a)
				sentinelUrls = append(sentinelUrls, config.TomlURL{URL: *parsedURL})
			}

			cfg := &config.RedisConfig{Sentinel: sentinelUrls}

			// Fail fast on configuration errors instead of panicking later on a
			// nil client (the error was previously ignored).
			require.NoError(t, Configure(cfg))

			require.NotNil(t, GetRedisClient().Conn(), "Pool should not be nil")

			// goredis initialise connections lazily
			rdb.Ping(context.Background())
			require.True(t, connectReceived)

			rdb = nil
		})
	}
}

View File

@ -0,0 +1,236 @@
package goredis
import (
"context"
"errors"
"fmt"
"strings"
"sync"
"time"
"github.com/jpillora/backoff"
"github.com/redis/go-redis/v9"
"gitlab.com/gitlab-org/gitlab/workhorse/internal/log"
internalredis "gitlab.com/gitlab-org/gitlab/workhorse/internal/redis"
)
// KeyWatcher multiplexes many WatchKey callers onto one Redis pubsub
// connection. subscribers maps a watched key to the notification channels of
// its watchers; conn is shared because pubsub subscriptions are tied to the
// connection. All fields are guarded by mu.
type KeyWatcher struct {
	mu               sync.Mutex
	subscribers      map[string][]chan string
	shutdown         chan struct{}
	reconnectBackoff backoff.Backoff
	redisConn        *redis.Client
	conn             *redis.PubSub
}
// NewKeyWatcher returns a KeyWatcher whose Process loop reconnects with a
// jittered exponential backoff (100ms doubling up to 60s) when pubsub drops.
func NewKeyWatcher() *KeyWatcher {
	return &KeyWatcher{
		shutdown: make(chan struct{}),
		reconnectBackoff: backoff.Backoff{
			Min:    100 * time.Millisecond,
			Max:    60 * time.Second,
			Factor: 2,
			Jitter: true,
		},
	}
}
// channelPrefix namespaces all pubsub channels used by Workhorse.
const channelPrefix = "workhorse:notifications:"

// countAction bumps the Prometheus counter for a keywatcher action.
func countAction(action string) { internalredis.TotalActions.WithLabelValues(action).Add(1) }
// receivePubSubStream consumes messages from pubsub until the connection
// fails or an unknown message type arrives, dispatching key-change payloads
// to subscribers. On exit it closes every subscriber channel, because their
// server-side subscriptions died with the connection. Always returns nil;
// failures are logged.
func (kw *KeyWatcher) receivePubSubStream(ctx context.Context, pubsub *redis.PubSub) error {
	kw.mu.Lock()
	// We must share kw.conn with the goroutines that call SUBSCRIBE and
	// UNSUBSCRIBE because Redis pubsub subscriptions are tied to the
	// connection.
	kw.conn = pubsub
	kw.mu.Unlock()

	defer func() {
		kw.mu.Lock()
		defer kw.mu.Unlock()
		kw.conn.Close()
		kw.conn = nil

		// Reset kw.subscribers because it is tied to Redis server side state of
		// kw.conn and we just closed that connection.
		for _, chans := range kw.subscribers {
			for _, ch := range chans {
				// Closing the channel delivers "" to the watcher, which WatchKey
				// treats as a failed watch.
				close(ch)
				internalredis.KeyWatchers.Dec()
			}
		}
		kw.subscribers = nil
	}()

	for {
		msg, err := kw.conn.Receive(ctx)
		if err != nil {
			log.WithError(fmt.Errorf("keywatcher: pubsub receive: %v", err)).Error()
			return nil
		}

		switch msg := msg.(type) {
		case *redis.Subscription:
			internalredis.RedisSubscriptions.Set(float64(msg.Count))
		case *redis.Pong:
			// Ignore.
		case *redis.Message:
			internalredis.TotalMessages.Inc()
			internalredis.ReceivedBytes.Add(float64(len(msg.Payload)))
			// Strip the namespace prefix before notifying watchers of the raw key.
			if strings.HasPrefix(msg.Channel, channelPrefix) {
				kw.notifySubscribers(msg.Channel[len(channelPrefix):], string(msg.Payload))
			}
		default:
			log.WithError(fmt.Errorf("keywatcher: unknown: %T", msg)).Error()
			return nil
		}
	}
}
// Process runs the pubsub receive loop forever, reconnecting with backoff
// after failures. It never returns; run it in its own goroutine.
func (kw *KeyWatcher) Process(client *redis.Client) {
	log.Info("keywatcher: starting process loop")

	ctx := context.Background() // lint:allow context.Background

	kw.mu.Lock()
	kw.redisConn = client
	kw.mu.Unlock()

	for {
		// Subscribe to no channels yet; keys are added later by addSubscription.
		pubsub := client.Subscribe(ctx, []string{}...)
		// Ping verifies the connection before we hand it to the receive loop.
		if err := pubsub.Ping(ctx); err != nil {
			log.WithError(fmt.Errorf("keywatcher: %v", err)).Error()
			time.Sleep(kw.reconnectBackoff.Duration())
			continue
		}

		kw.reconnectBackoff.Reset()

		if err := kw.receivePubSubStream(ctx, pubsub); err != nil {
			log.WithError(fmt.Errorf("keywatcher: receivePubSubStream: %v", err)).Error()
		}
	}
}
// Shutdown signals all in-flight WatchKey calls to return. Safe to call more
// than once: the shutdown channel is only closed the first time.
func (kw *KeyWatcher) Shutdown() {
	log.Info("keywatcher: shutting down")

	kw.mu.Lock()
	defer kw.mu.Unlock()

	select {
	case <-kw.shutdown:
		// already closed
	default:
		close(kw.shutdown)
	}
}
// notifySubscribers delivers value to every channel watching key. Sends are
// non-blocking: a watcher whose (1-slot) buffer is already full is skipped so
// the pubsub receive loop is never stalled.
func (kw *KeyWatcher) notifySubscribers(key, value string) {
	kw.mu.Lock()
	defer kw.mu.Unlock()

	watchers, found := kw.subscribers[key]
	if !found {
		countAction("drop-message")
		return
	}

	countAction("deliver-message")
	for _, watcher := range watchers {
		select {
		case watcher <- value:
		default:
		}
	}
}
// addSubscription registers notify to receive updates for key, issuing a
// Redis SUBSCRIBE for the key's channel the first time the key is watched.
//
// Returns an error when there is no pubsub connection (CI long polling is
// disabled, or the connection is still being established) or when SUBSCRIBE
// itself fails.
func (kw *KeyWatcher) addSubscription(ctx context.Context, key string, notify chan string) error {
	kw.mu.Lock()
	defer kw.mu.Unlock()

	if kw.conn == nil {
		// This can happen because CI long polling is disabled in this Workhorse
		// process. It can also be that we are waiting for the pubsub connection
		// to be established. Either way it is OK to fail fast.
		return errors.New("no redis connection")
	}

	// Initialize the map before any use; receivePubSubStream resets it to nil
	// when the pubsub connection drops.
	if kw.subscribers == nil {
		kw.subscribers = make(map[string][]chan string)
	}

	if len(kw.subscribers[key]) == 0 {
		// First watcher for this key: SUBSCRIBE before recording the metric so
		// a failed SUBSCRIBE is not counted as a created subscription.
		if err := kw.conn.Subscribe(ctx, channelPrefix+key); err != nil {
			return err
		}
		countAction("create-subscription")
	}

	kw.subscribers[key] = append(kw.subscribers[key], notify)
	internalredis.KeyWatchers.Inc()

	return nil
}
// delSubscription removes notify from key's watcher list and UNSUBSCRIBEs
// from the key's channel once the last watcher is gone. A missing entry is
// tolerated because receivePubSubStream clears kw.subscribers when the pubsub
// connection drops.
func (kw *KeyWatcher) delSubscription(ctx context.Context, key string, notify chan string) {
	kw.mu.Lock()
	defer kw.mu.Unlock()

	chans, ok := kw.subscribers[key]
	if !ok {
		// This can happen if the pubsub connection dropped while we were
		// waiting.
		return
	}

	for i, c := range chans {
		if notify == c {
			kw.subscribers[key] = append(chans[:i], chans[i+1:]...)
			internalredis.KeyWatchers.Dec()
			break
		}
	}
	if len(kw.subscribers[key]) == 0 {
		delete(kw.subscribers, key)
		countAction("delete-subscription")
		// conn may already be nil if the receive loop tore the connection down.
		if kw.conn != nil {
			kw.conn.Unsubscribe(ctx, channelPrefix+key)
		}
	}
}
// WatchKey blocks until key's value changes away from value, the timeout
// expires, or the watcher shuts down.
//
// It subscribes first (so no change can slip through), then GETs the current
// value; if it already differs, WatchKeyStatusAlreadyChanged is returned
// immediately. A "" received on notify means the subscriber channel was
// closed because the pubsub connection died, and is reported as an error.
func (kw *KeyWatcher) WatchKey(ctx context.Context, key, value string, timeout time.Duration) (internalredis.WatchKeyStatus, error) {
	// Buffer of 1 so notifySubscribers can deliver without blocking.
	notify := make(chan string, 1)
	if err := kw.addSubscription(ctx, key, notify); err != nil {
		return internalredis.WatchKeyStatusNoChange, err
	}
	defer kw.delSubscription(ctx, key, notify)

	currentValue, err := kw.redisConn.Get(ctx, key).Result()
	if errors.Is(err, redis.Nil) {
		// Missing key is treated as the empty value, not an error.
		currentValue = ""
	} else if err != nil {
		return internalredis.WatchKeyStatusNoChange, fmt.Errorf("keywatcher: redis GET: %v", err)
	}
	if currentValue != value {
		return internalredis.WatchKeyStatusAlreadyChanged, nil
	}

	select {
	case <-kw.shutdown:
		log.WithFields(log.Fields{"key": key}).Info("stopping watch due to shutdown")
		return internalredis.WatchKeyStatusNoChange, nil
	case currentValue := <-notify:
		if currentValue == "" {
			return internalredis.WatchKeyStatusNoChange, fmt.Errorf("keywatcher: redis GET failed")
		}
		if currentValue == value {
			return internalredis.WatchKeyStatusNoChange, nil
		}
		return internalredis.WatchKeyStatusSeenChange, nil
	case <-time.After(timeout):
		return internalredis.WatchKeyStatusTimeout, nil
	}
}

View File

@ -0,0 +1,301 @@
package goredis
import (
"context"
"os"
"sync"
"testing"
"time"
"github.com/stretchr/testify/require"
"gitlab.com/gitlab-org/gitlab/workhorse/internal/config"
"gitlab.com/gitlab-org/gitlab/workhorse/internal/redis"
)
// ctx is the background context shared by every test in this file.
var ctx = context.Background()

const (
	// runnerKey is the fixed Redis key all key-watching tests operate on;
	// shaped like a CI runner build-queue key.
	runnerKey = "runner:build_queue:10"
)
// initRdb configures the package-level Redis client (rdb) from the example
// workhorse config two directories up. The original ignored every error,
// which surfaced later as confusing nil-pointer crashes; panic here instead
// so a broken test environment fails loudly at setup time.
func initRdb() {
	buf, err := os.ReadFile("../../config.toml")
	if err != nil {
		panic("goredis test setup: read config.toml: " + err.Error())
	}
	cfg, err := config.LoadConfig(string(buf))
	if err != nil {
		panic("goredis test setup: load config: " + err.Error())
	}
	if err := Configure(cfg.Redis); err != nil {
		panic("goredis test setup: configure redis: " + err.Error())
	}
}
// countSubscribers reports how many notification channels are currently
// registered for key, taking the watcher mutex for a consistent read.
func (w *KeyWatcher) countSubscribers(key string) int {
	w.mu.Lock()
	n := len(w.subscribers[key])
	w.mu.Unlock()
	return n
}
// processMessages drives one pass of the pubsub receive loop for the tests:
// it wires the package-level client into kw, runs receivePubSubStream in a
// goroutine, waits until numWatchers subscribers are registered for
// runnerKey, publishes value to that key's channel, then waits for the
// watcher goroutines (tracked by wg) before closing the connection.
// Callers' watcher goroutines must block on ready before calling WatchKey.
func (kw *KeyWatcher) processMessages(t *testing.T, numWatchers int, value string, ready chan<- struct{}, wg *sync.WaitGroup) {
	kw.mu.Lock()
	kw.redisConn = rdb
	// Start with a pubsub connection subscribed to no channels; presumably
	// per-key channels are added as watchers register — verify against
	// the goredis addSubscription implementation.
	psc := kw.redisConn.Subscribe(ctx, []string{}...)
	kw.mu.Unlock()
	errC := make(chan error)
	go func() { errC <- kw.receivePubSubStream(ctx, psc) }()
	// Wait for the receive loop to publish its connection before signalling
	// the watchers that they may subscribe.
	require.Eventually(t, func() bool {
		kw.mu.Lock()
		defer kw.mu.Unlock()
		return kw.conn != nil
	}, time.Second, time.Millisecond)
	close(ready)
	require.Eventually(t, func() bool {
		return kw.countSubscribers(runnerKey) == numWatchers
	}, time.Second, time.Millisecond)
	// send message after listeners are ready
	kw.redisConn.Publish(ctx, channelPrefix+runnerKey, value)
	// close subscription after all workers are done
	wg.Wait()
	kw.mu.Lock()
	kw.conn.Close()
	kw.mu.Unlock()
	// receivePubSubStream must exit cleanly once its connection is closed.
	require.NoError(t, <-errC)
}
// keyChangeTestCase describes one WatchKey scenario: the key's initial
// state, the value handed to WatchKey, an optional value published while
// watching, and the expected outcome.
type keyChangeTestCase struct {
	desc           string               // subtest name
	returnValue    string               // initial value stored at the key (unused when isKeyMissing)
	isKeyMissing   bool                 // when true, the key is not pre-populated
	watchValue     string               // value argument passed to WatchKey
	processedValue string               // value published to the key's channel mid-watch
	expectedStatus redis.WatchKeyStatus // status WatchKey is expected to return
	timeout        time.Duration        // WatchKey timeout (instant-return tests only)
}
// TestKeyChangesInstantReturn covers the WatchKey paths that resolve without
// any pubsub message: the stored value already differs (AlreadyChanged) or
// nothing changes before the timeout (Timeout).
func TestKeyChangesInstantReturn(t *testing.T) {
	initRdb()

	cases := []keyChangeTestCase{
		// WatchKeyStatusAlreadyChanged
		{
			desc:           "sees change with key existing and changed",
			returnValue:    "somethingelse",
			watchValue:     "something",
			expectedStatus: redis.WatchKeyStatusAlreadyChanged,
			timeout:        time.Second,
		},
		{
			desc:           "sees change with key non-existing",
			isKeyMissing:   true,
			watchValue:     "something",
			processedValue: "somethingelse",
			expectedStatus: redis.WatchKeyStatusAlreadyChanged,
			timeout:        time.Second,
		},
		// WatchKeyStatusTimeout
		{
			desc:           "sees timeout with key existing and unchanged",
			returnValue:    "something",
			watchValue:     "something",
			expectedStatus: redis.WatchKeyStatusTimeout,
			timeout:        time.Millisecond,
		},
		{
			desc:           "sees timeout with key non-existing and unchanged",
			isKeyMissing:   true,
			watchValue:     "",
			expectedStatus: redis.WatchKeyStatusTimeout,
			timeout:        time.Millisecond,
		},
	}

	for _, tc := range cases {
		t.Run(tc.desc, func(t *testing.T) {
			// Seed the key unless the scenario wants it absent.
			if !tc.isKeyMissing {
				rdb.Set(ctx, runnerKey, tc.returnValue, 0)
			}
			defer rdb.FlushDB(ctx)

			watcher := NewKeyWatcher()
			defer watcher.Shutdown()
			watcher.redisConn = rdb
			watcher.conn = watcher.redisConn.Subscribe(ctx, []string{}...)

			status, err := watcher.WatchKey(ctx, runnerKey, tc.watchValue, tc.timeout)
			require.NoError(t, err, "Expected no error")
			require.Equal(t, tc.expectedStatus, status, "Expected value")
		})
	}
}
// TestKeyChangesWhenWatching covers outcomes where the message is published
// while a single watcher is already blocked in WatchKey.
func TestKeyChangesWhenWatching(t *testing.T) {
	initRdb()

	cases := []keyChangeTestCase{
		// WatchKeyStatusSeenChange
		{
			desc:           "sees change with key existing",
			returnValue:    "something",
			watchValue:     "something",
			processedValue: "somethingelse",
			expectedStatus: redis.WatchKeyStatusSeenChange,
		},
		{
			desc:           "sees change with key non-existing, when watching empty value",
			isKeyMissing:   true,
			watchValue:     "",
			processedValue: "something",
			expectedStatus: redis.WatchKeyStatusSeenChange,
		},
		// WatchKeyStatusNoChange
		{
			desc:           "sees no change with key existing",
			returnValue:    "something",
			watchValue:     "something",
			processedValue: "something",
			expectedStatus: redis.WatchKeyStatusNoChange,
		},
	}

	for _, tc := range cases {
		t.Run(tc.desc, func(t *testing.T) {
			if !tc.isKeyMissing {
				rdb.Set(ctx, runnerKey, tc.returnValue, 0)
			}

			watcher := NewKeyWatcher()
			defer watcher.Shutdown()
			defer rdb.FlushDB(ctx)

			wg := &sync.WaitGroup{}
			wg.Add(1)
			ready := make(chan struct{})
			go func() {
				defer wg.Done()
				// Block until processMessages has the receive loop running.
				<-ready
				status, err := watcher.WatchKey(ctx, runnerKey, tc.watchValue, time.Second)
				require.NoError(t, err, "Expected no error")
				require.Equal(t, tc.expectedStatus, status, "Expected value")
			}()

			watcher.processMessages(t, 1, tc.processedValue, ready, wg)
		})
	}
}
// TestKeyChangesParallel fans out many concurrent watchers on the same key
// and checks that a single published change is delivered to all of them.
func TestKeyChangesParallel(t *testing.T) {
	initRdb()

	cases := []keyChangeTestCase{
		{
			desc:           "massively parallel, sees change with key existing",
			returnValue:    "something",
			watchValue:     "something",
			processedValue: "somethingelse",
			expectedStatus: redis.WatchKeyStatusSeenChange,
		},
		{
			desc:           "massively parallel, sees change with key existing, watching missing keys",
			isKeyMissing:   true,
			watchValue:     "",
			processedValue: "somethingelse",
			expectedStatus: redis.WatchKeyStatusSeenChange,
		},
	}

	for _, tc := range cases {
		t.Run(tc.desc, func(t *testing.T) {
			const watcherCount = 100

			if !tc.isKeyMissing {
				rdb.Set(ctx, runnerKey, tc.returnValue, 0)
			}
			defer rdb.FlushDB(ctx)

			wg := &sync.WaitGroup{}
			wg.Add(watcherCount)
			ready := make(chan struct{})

			watcher := NewKeyWatcher()
			defer watcher.Shutdown()

			for i := 0; i < watcherCount; i++ {
				go func() {
					defer wg.Done()
					// All watchers start together once the receive loop is up.
					<-ready
					status, err := watcher.WatchKey(ctx, runnerKey, tc.watchValue, time.Second)
					require.NoError(t, err, "Expected no error")
					require.Equal(t, tc.expectedStatus, status, "Expected value")
				}()
			}

			watcher.processMessages(t, watcherCount, tc.processedValue, ready, wg)
		})
	}
}
// TestShutdown checks that Shutdown releases a blocked watcher with
// WatchKeyStatusNoChange, clears its subscription, and that any WatchKey
// call made after shutdown returns immediately instead of blocking.
func TestShutdown(t *testing.T) {
	initRdb()

	watcher := NewKeyWatcher()
	watcher.redisConn = rdb
	watcher.conn = watcher.redisConn.Subscribe(ctx, []string{}...)
	defer watcher.Shutdown()

	rdb.Set(ctx, runnerKey, "something", 0)

	wg := &sync.WaitGroup{}
	wg.Add(2)
	go func() {
		defer wg.Done()
		// This watcher should be released by Shutdown, not by a change.
		status, err := watcher.WatchKey(ctx, runnerKey, "something", 10*time.Second)
		require.NoError(t, err, "Expected no error")
		require.Equal(t, redis.WatchKeyStatusNoChange, status, "Expected value not to change")
	}()
	go func() {
		defer wg.Done()
		// Trigger shutdown only after the first watcher is registered.
		require.Eventually(t, func() bool { return watcher.countSubscribers(runnerKey) == 1 }, 10*time.Second, time.Millisecond)
		watcher.Shutdown()
	}()
	wg.Wait()

	require.Eventually(t, func() bool { return watcher.countSubscribers(runnerKey) == 0 }, 10*time.Second, time.Millisecond)

	// Adding a key after the shutdown should result in an immediate response
	var (
		status redis.WatchKeyStatus
		err    error
	)
	done := make(chan struct{})
	go func() {
		status, err = watcher.WatchKey(ctx, runnerKey, "something", 10*time.Second)
		close(done)
	}()

	select {
	case <-done:
		require.NoError(t, err, "Expected no error")
		require.Equal(t, redis.WatchKeyStatusNoChange, status, "Expected value not to change")
	case <-time.After(100 * time.Millisecond):
		t.Fatal("timeout waiting for WatchKey")
	}
}

View File

@ -37,32 +37,32 @@ func NewKeyWatcher() *KeyWatcher {
}
var (
keyWatchers = promauto.NewGauge(
KeyWatchers = promauto.NewGauge(
prometheus.GaugeOpts{
Name: "gitlab_workhorse_keywatcher_keywatchers",
Help: "The number of keys that is being watched by gitlab-workhorse",
},
)
redisSubscriptions = promauto.NewGauge(
RedisSubscriptions = promauto.NewGauge(
prometheus.GaugeOpts{
Name: "gitlab_workhorse_keywatcher_redis_subscriptions",
Help: "Current number of keywatcher Redis pubsub subscriptions",
},
)
totalMessages = promauto.NewCounter(
TotalMessages = promauto.NewCounter(
prometheus.CounterOpts{
Name: "gitlab_workhorse_keywatcher_total_messages",
Help: "How many messages gitlab-workhorse has received in total on pubsub.",
},
)
totalActions = promauto.NewCounterVec(
TotalActions = promauto.NewCounterVec(
prometheus.CounterOpts{
Name: "gitlab_workhorse_keywatcher_actions_total",
Help: "Counts of various keywatcher actions",
},
[]string{"action"},
)
receivedBytes = promauto.NewCounter(
ReceivedBytes = promauto.NewCounter(
prometheus.CounterOpts{
Name: "gitlab_workhorse_keywatcher_received_bytes_total",
Help: "How many bytes of messages gitlab-workhorse has received in total on pubsub.",
@ -72,7 +72,7 @@ var (
const channelPrefix = "workhorse:notifications:"
func countAction(action string) { totalActions.WithLabelValues(action).Add(1) }
func countAction(action string) { TotalActions.WithLabelValues(action).Add(1) }
func (kw *KeyWatcher) receivePubSubStream(conn redis.Conn) error {
kw.mu.Lock()
@ -93,7 +93,7 @@ func (kw *KeyWatcher) receivePubSubStream(conn redis.Conn) error {
for _, chans := range kw.subscribers {
for _, ch := range chans {
close(ch)
keyWatchers.Dec()
KeyWatchers.Dec()
}
}
kw.subscribers = nil
@ -102,13 +102,13 @@ func (kw *KeyWatcher) receivePubSubStream(conn redis.Conn) error {
for {
switch v := kw.conn.Receive().(type) {
case redis.Message:
totalMessages.Inc()
receivedBytes.Add(float64(len(v.Data)))
TotalMessages.Inc()
ReceivedBytes.Add(float64(len(v.Data)))
if strings.HasPrefix(v.Channel, channelPrefix) {
kw.notifySubscribers(v.Channel[len(channelPrefix):], string(v.Data))
}
case redis.Subscription:
redisSubscriptions.Set(float64(v.Count))
RedisSubscriptions.Set(float64(v.Count))
case error:
log.WithError(fmt.Errorf("keywatcher: pubsub receive: %v", v)).Error()
// Intermittent error, return nil so that it doesn't wait before reconnect
@ -205,7 +205,7 @@ func (kw *KeyWatcher) addSubscription(key string, notify chan string) error {
kw.subscribers = make(map[string][]chan string)
}
kw.subscribers[key] = append(kw.subscribers[key], notify)
keyWatchers.Inc()
KeyWatchers.Inc()
return nil
}
@ -224,7 +224,7 @@ func (kw *KeyWatcher) delSubscription(key string, notify chan string) {
for i, c := range chans {
if notify == c {
kw.subscribers[key] = append(chans[:i], chans[i+1:]...)
keyWatchers.Dec()
KeyWatchers.Dec()
break
}
}

View File

@ -45,14 +45,14 @@ const (
)
var (
totalConnections = promauto.NewCounter(
TotalConnections = promauto.NewCounter(
prometheus.CounterOpts{
Name: "gitlab_workhorse_redis_total_connections",
Help: "How many connections gitlab-workhorse has opened in total. Can be used to track Redis connection rate for this process",
},
)
errorCounter = promauto.NewCounterVec(
ErrorCounter = promauto.NewCounterVec(
prometheus.CounterOpts{
Name: "gitlab_workhorse_redis_errors",
Help: "Counts different types of Redis errors encountered by workhorse, by type and destination (redis, sentinel)",
@ -100,7 +100,7 @@ func sentinelConn(master string, urls []config.TomlURL) *sentinel.Sentinel {
}
if err != nil {
errorCounter.WithLabelValues("dial", "sentinel").Inc()
ErrorCounter.WithLabelValues("dial", "sentinel").Inc()
return nil, err
}
return c, nil
@ -159,7 +159,7 @@ func sentinelDialer(dopts []redis.DialOption) redisDialerFunc {
return func() (redis.Conn, error) {
address, err := sntnl.MasterAddr()
if err != nil {
errorCounter.WithLabelValues("master", "sentinel").Inc()
ErrorCounter.WithLabelValues("master", "sentinel").Inc()
return nil, err
}
dopts = append(dopts, redis.DialNetDial(keepAliveDialer))
@ -214,9 +214,9 @@ func countDialer(dialer redisDialerFunc) redisDialerFunc {
return func() (redis.Conn, error) {
c, err := dialer()
if err != nil {
errorCounter.WithLabelValues("dial", "redis").Inc()
ErrorCounter.WithLabelValues("dial", "redis").Inc()
} else {
totalConnections.Inc()
TotalConnections.Inc()
}
return c, err
}

View File

@ -17,8 +17,10 @@ import (
"gitlab.com/gitlab-org/labkit/monitoring"
"gitlab.com/gitlab-org/labkit/tracing"
"gitlab.com/gitlab-org/gitlab/workhorse/internal/builds"
"gitlab.com/gitlab-org/gitlab/workhorse/internal/config"
"gitlab.com/gitlab-org/gitlab/workhorse/internal/gitaly"
"gitlab.com/gitlab-org/gitlab/workhorse/internal/goredis"
"gitlab.com/gitlab-org/gitlab/workhorse/internal/queueing"
"gitlab.com/gitlab-org/gitlab/workhorse/internal/redis"
"gitlab.com/gitlab-org/gitlab/workhorse/internal/secret"
@ -224,9 +226,32 @@ func run(boot bootConfig, cfg config.Config) error {
secret.SetPath(boot.secretPath)
keyWatcher := redis.NewKeyWatcher()
if cfg.Redis != nil {
redis.Configure(cfg.Redis, redis.DefaultDialFunc)
go keyWatcher.Process()
var watchKeyFn builds.WatchKeyHandler
var goredisKeyWatcher *goredis.KeyWatcher
if os.Getenv("GITLAB_WORKHORSE_FF_GO_REDIS_ENABLED") == "true" {
log.Info("Using redis/go-redis")
goredisKeyWatcher = goredis.NewKeyWatcher()
if err := goredis.Configure(cfg.Redis); err != nil {
log.WithError(err).Error("unable to configure redis client")
}
if rdb := goredis.GetRedisClient(); rdb != nil {
go goredisKeyWatcher.Process(rdb)
}
watchKeyFn = goredisKeyWatcher.WatchKey
} else {
log.Info("Using gomodule/redigo")
if cfg.Redis != nil {
redis.Configure(cfg.Redis, redis.DefaultDialFunc)
go keyWatcher.Process()
}
watchKeyFn = keyWatcher.WatchKey
}
if err := cfg.RegisterGoCloudURLOpeners(); err != nil {
@ -241,7 +266,7 @@ func run(boot bootConfig, cfg config.Config) error {
gitaly.InitializeSidechannelRegistry(accessLogger)
up := wrapRaven(upstream.NewUpstream(cfg, accessLogger, keyWatcher.WatchKey))
up := wrapRaven(upstream.NewUpstream(cfg, accessLogger, watchKeyFn))
done := make(chan os.Signal, 1)
signal.Notify(done, syscall.SIGINT, syscall.SIGTERM)
@ -275,6 +300,10 @@ func run(boot bootConfig, cfg config.Config) error {
ctx, cancel := context.WithTimeout(context.Background(), cfg.ShutdownTimeout.Duration) // lint:allow context.Background
defer cancel()
if goredisKeyWatcher != nil {
goredisKeyWatcher.Shutdown()
}
keyWatcher.Shutdown()
return srv.Shutdown(ctx)
}