Add latest changes from gitlab-org/gitlab@master

GitLab Bot 2020-09-24 15:09:51 +00:00
parent 6f15c2c272
commit a17eb314cf
74 changed files with 1159 additions and 60 deletions

View File

@ -1,3 +1,5 @@
import { parseBoolean } from '../../lib/utils/common_utils';
/**
* Adds the line number property
* @param Object line
@ -17,7 +19,7 @@ export const parseLine = (line = {}, lineNumber) => ({
* @param Number lineNumber
*/
export const parseHeaderLine = (line = {}, lineNumber) => ({
isClosed: false,
isClosed: parseBoolean(line.section_options?.collapsed),
isHeader: true,
line: parseLine(line, lineNumber),
lines: [],

View File

@ -1,25 +1,21 @@
<script>
import { mapState, mapActions } from 'vuex';
import {
GlDeprecatedSkeletonLoading as GlSkeletonLoading,
GlEmptyState,
GlLink,
GlButton,
} from '@gitlab/ui';
import { GlEmptyState, GlLink, GlButton } from '@gitlab/ui';
import { getParameterByName } from '~/lib/utils/common_utils';
import { __ } from '~/locale';
import ReleaseBlock from './release_block.vue';
import ReleasesPagination from './releases_pagination.vue';
import ReleaseSkeletonLoader from './release_skeleton_loader.vue';
export default {
name: 'ReleasesApp',
components: {
GlSkeletonLoading,
GlEmptyState,
ReleaseBlock,
ReleasesPagination,
GlLink,
GlButton,
ReleaseBlock,
ReleasesPagination,
ReleaseSkeletonLoader,
},
computed: {
...mapState('list', [
@ -77,7 +73,7 @@ export default {
{{ __('New release') }}
</gl-button>
<gl-skeleton-loading v-if="isLoading" class="js-loading" />
<release-skeleton-loader v-if="isLoading" class="js-loading" />
<gl-empty-state
v-else-if="shouldRenderEmptyState"

View File

@ -1,13 +1,13 @@
<script>
import { mapState, mapActions } from 'vuex';
import { GlDeprecatedSkeletonLoading as GlSkeletonLoading } from '@gitlab/ui';
import ReleaseBlock from './release_block.vue';
import ReleaseSkeletonLoader from './release_skeleton_loader.vue';
export default {
name: 'ReleaseShowApp',
components: {
GlSkeletonLoading,
ReleaseBlock,
ReleaseSkeletonLoader,
},
computed: {
...mapState('detail', ['isFetchingRelease', 'fetchError', 'release']),
@ -22,7 +22,7 @@ export default {
</script>
<template>
<div class="gl-mt-3">
<gl-skeleton-loading v-if="isFetchingRelease" />
<release-skeleton-loader v-if="isFetchingRelease" />
<release-block v-else-if="!fetchError" :release="release" />
</div>

View File

@ -0,0 +1,51 @@
<script>
import { GlSkeletonLoader } from '@gitlab/ui';
export default {
name: 'ReleaseSkeletonLoader',
components: { GlSkeletonLoader },
};
</script>
<template>
<gl-skeleton-loader :width="1248" :height="420">
<!-- Outside border -->
<path
d="M 4.5 0 C 2.0156486 0 0 2.0156486 0 4.5 L 0 415.5 C 0 417.98435 2.0156486 420 4.5 420 L 1243.5 420 C 1245.9844 420 1248 417.98435 1248 415.5 L 1248 4.5 C 1248 2.0156486 1245.9844 0 1243.5 0 L 4.5 0 z M 4.5 1 L 1243.5 1 C 1245.4476 1 1247 2.5523514 1247 4.5 L 1247 415.5 C 1247 417.44765 1245.4476 419 1243.5 419 L 4.5 419 C 2.5523514 419 1 417.44765 1 415.5 L 1 4.5 C 1 2.5523514 2.5523514 1 4.5 1 z "
/>
<!-- Header bottom border -->
<rect x="0" y="63.5" width="1248" height="1" />
<!-- Release title -->
<rect x="16" y="20" width="293" height="24" />
<!-- Edit (pencil) button -->
<rect x="1207" y="16" rx="4" width="32" height="32" />
<!-- Asset link 1 -->
<rect x="40" y="121" rx="4" width="16" height="16" />
<rect x="60" y="125" width="116" height="8" />
<!-- Asset link 2 -->
<rect x="40" y="145" rx="4" width="16" height="16" />
<rect x="60" y="149" width="132" height="8" />
<!-- Asset link 3 -->
<rect x="40" y="169" rx="4" width="16" height="16" />
<rect x="60" y="173" width="140" height="8" />
<!-- Asset link 4 -->
<rect x="40" y="193" rx="4" width="16" height="16" />
<rect x="60" y="197" width="112" height="8" />
<!-- Release notes -->
<rect x="16" y="228" width="480" height="8" />
<rect x="16" y="252" width="560" height="8" />
<rect x="16" y="276" width="480" height="8" />
<rect x="16" y="300" width="560" height="8" />
<rect x="16" y="324" width="320" height="8" />
<!-- Footer top border -->
<rect x="0" y="373" width="1248" height="1" />
</gl-skeleton-loader>
</template>

View File

@ -45,6 +45,9 @@ export const updateReleaseNotes = ({ commit }, notes) => commit(types.UPDATE_REL
export const updateReleaseMilestones = ({ commit }, milestones) =>
commit(types.UPDATE_RELEASE_MILESTONES, milestones);
export const updateReleaseGroupMilestones = ({ commit }, groupMilestones) =>
commit(types.UPDATE_RELEASE_GROUP_MILESTONES, groupMilestones);
export const addEmptyAssetLink = ({ commit }) => {
commit(types.ADD_EMPTY_ASSET_LINK);
};

View File

@ -9,6 +9,7 @@ export const UPDATE_CREATE_FROM = 'UPDATE_CREATE_FROM';
export const UPDATE_RELEASE_TITLE = 'UPDATE_RELEASE_TITLE';
export const UPDATE_RELEASE_NOTES = 'UPDATE_RELEASE_NOTES';
export const UPDATE_RELEASE_MILESTONES = 'UPDATE_RELEASE_MILESTONES';
export const UPDATE_RELEASE_GROUP_MILESTONES = 'UPDATE_RELEASE_GROUP_MILESTONES';
export const REQUEST_SAVE_RELEASE = 'REQUEST_SAVE_RELEASE';
export const RECEIVE_SAVE_RELEASE_SUCCESS = 'RECEIVE_SAVE_RELEASE_SUCCESS';

View File

@ -13,6 +13,7 @@ export default {
name: '',
description: '',
milestones: [],
groupMilestones: [],
assets: {
links: [],
},
@ -51,6 +52,10 @@ export default {
state.release.milestones = milestones;
},
[types.UPDATE_RELEASE_GROUP_MILESTONES](state, groupMilestones) {
state.release.groupMilestones = groupMilestones;
},
[types.REQUEST_SAVE_RELEASE](state) {
state.isUpdatingRelease = true;
},

View File

@ -1,7 +1,7 @@
# frozen_string_literal: true
class Admin::SessionsController < ApplicationController
include Authenticates2FAForAdminMode
include AuthenticatesWithTwoFactorForAdminMode
include InternalRedirect
include RendersLdapServers

View File

@ -1,6 +1,6 @@
# frozen_string_literal: true
module Authenticates2FAForAdminMode
module AuthenticatesWithTwoFactorForAdminMode
extend ActiveSupport::Concern
included do

View File

@ -11,7 +11,7 @@ module EnforcesTwoFactorAuthentication
extend ActiveSupport::Concern
included do
before_action :check_two_factor_requirement
before_action :check_two_factor_requirement, except: [:route_not_found]
# to include this in controllers inheriting from `ActionController::Metal`
# we need to add this block

View File

@ -1,8 +1,7 @@
# frozen_string_literal: true
class OmniauthCallbacksController < Devise::OmniauthCallbacksController
include AuthenticatesWithTwoFactor
include Authenticates2FAForAdminMode
include AuthenticatesWithTwoFactorForAdminMode
include Devise::Controllers::Rememberable
include AuthHelper
include InitializesCurrentUserMode

View File

@ -19,6 +19,7 @@ class UploadsController < ApplicationController
rescue_from UnknownUploadModelError, with: :render_404
skip_before_action :authenticate_user!
skip_before_action :check_two_factor_requirement, only: [:show]
before_action :upload_mount_satisfied?
before_action :authorize_access!, only: [:show]
before_action :authorize_create_access!, only: [:create, :authorize]

View File

@ -14,6 +14,10 @@ module Types
value 'MERGE_REQUESTS', 'Merge request count', value: :merge_requests
value 'GROUPS', 'Group count', value: :groups
value 'PIPELINES', 'Pipeline count', value: :pipelines
value 'PIPELINES_SUCCEEDED', 'Pipeline count with success status', value: :pipelines_succeeded
value 'PIPELINES_FAILED', 'Pipeline count with failed status', value: :pipelines_failed
value 'PIPELINES_CANCELED', 'Pipeline count with canceled status', value: :pipelines_canceled
value 'PIPELINES_SKIPPED', 'Pipeline count with skipped status', value: :pipelines_skipped
end
end
end

View File

@ -3,13 +3,19 @@
module Analytics
module InstanceStatistics
class Measurement < ApplicationRecord
EXPERIMENTAL_IDENTIFIERS = %i[pipelines_succeeded pipelines_failed pipelines_canceled pipelines_skipped].freeze
enum identifier: {
projects: 1,
users: 2,
issues: 3,
merge_requests: 4,
groups: 5,
pipelines: 6
pipelines: 6,
pipelines_succeeded: 7,
pipelines_failed: 8,
pipelines_canceled: 9,
pipelines_skipped: 10
}
IDENTIFIER_QUERY_MAPPING = {
@ -18,7 +24,11 @@ module Analytics
identifiers[:issues] => -> { Issue },
identifiers[:merge_requests] => -> { MergeRequest },
identifiers[:groups] => -> { Group },
identifiers[:pipelines] => -> { Ci::Pipeline }
identifiers[:pipelines] => -> { Ci::Pipeline },
identifiers[:pipelines_succeeded] => -> { Ci::Pipeline.success },
identifiers[:pipelines_failed] => -> { Ci::Pipeline.failed },
identifiers[:pipelines_canceled] => -> { Ci::Pipeline.canceled },
identifiers[:pipelines_skipped] => -> { Ci::Pipeline.skipped }
}.freeze
validates :recorded_at, :identifier, :count, presence: true
@ -26,6 +36,14 @@ module Analytics
scope :order_by_latest, -> { order(recorded_at: :desc) }
scope :with_identifier, -> (identifier) { where(identifier: identifier) }
def self.measurement_identifier_values
if Feature.enabled?(:store_ci_pipeline_counts_by_status)
identifiers.values
else
identifiers.values - EXPERIMENTAL_IDENTIFIERS.map { |identifier| identifiers[identifier] }
end
end
end
end
end
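
As an illustration of the gating, a hypothetical Rails console session; the returned integers follow the enum defined above, and the console calls are for illustration only:

```ruby
# Hypothetical Rails console session illustrating the feature flag gating;
# the returned integers are the enum values defined above.
Feature.disable(:store_ci_pipeline_counts_by_status)
Analytics::InstanceStatistics::Measurement.measurement_identifier_values
# => [1, 2, 3, 4, 5, 6]

Feature.enable(:store_ci_pipeline_counts_by_status)
Analytics::InstanceStatistics::Measurement.measurement_identifier_values
# => [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
```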

View File

@ -22,6 +22,7 @@ class AuditEvent < ApplicationRecord
validates :author_id, presence: true
validates :entity_id, presence: true
validates :entity_type, presence: true
validates :ip_address, ip_address: true
scope :by_entity_type, -> (entity_type) { where(entity_type: entity_type) }
scope :by_entity_id, -> (entity_id) { where(entity_id: entity_id) }

View File

@ -6,6 +6,7 @@ class AuthenticationEvent < ApplicationRecord
belongs_to :user, optional: true
validates :provider, :user_name, :result, presence: true
validates :ip_address, ip_address: true
enum result: {
failed: 0,

View File

@ -108,13 +108,25 @@ class AuditEventService
def log_security_event_to_database
return if Gitlab::Database.read_only?
AuditEvent.create(base_payload.merge(details: @details))
event = AuditEvent.new(base_payload.merge(details: @details))
save_or_track event
event
end
def log_authentication_event_to_database
return unless Gitlab::Database.read_write? && authentication_event?
AuthenticationEvent.create(authentication_event_payload)
event = AuthenticationEvent.new(authentication_event_payload)
save_or_track event
event
end
def save_or_track(event)
event.save!
rescue => e
Gitlab::ErrorTracking.track_exception(e, audit_event_type: event.class.to_s)
end
end

View File

@ -0,0 +1,39 @@
# frozen_string_literal: true
# IpAddressValidator
#
# Validates that an IP address is a valid IPv4 or IPv6 address.
# This should be coupled with a database column of type `inet`
#
# When using column type `inet` Rails will silently return the value
# as `nil` when the value is not valid according to its type cast
# using `IpAddr`. It's not very user friendly to return an error
# "IP Address can't be blank" when a value was clearly given but
# was not the right format. This validator will look at the value
# before Rails type casts it when the value itself is `nil`.
# This enables the validator to return a specific and useful error message.
#
# This validator allows `nil` values by default since the database
# allows null values by default. To disallow `nil` values, use in conjunction
# with `presence: true`.
#
# Do not use this validator with `allow_nil: true` or `allow_blank: true`.
# Because of Rails type casting, when an invalid value is set the attribute
# will return `nil` and Rails won't run this validator.
#
# Example:
#
# class Group < ActiveRecord::Base
# validates :ip_address, presence: true, ip_address: true
# end
#
class IpAddressValidator < ActiveModel::EachValidator
def validate_each(record, attribute, _)
value = record.public_send("#{attribute}_before_type_cast") # rubocop:disable GitlabSecurity/PublicSend
return if value.blank?
IPAddress.parse(value.to_s)
rescue ArgumentError
record.errors.add(attribute, _('must be a valid IPv4 or IPv6 address'))
end
end
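
A brief illustration of the validator's behavior, using the `AuditEvent` model that adopts it elsewhere in this commit (console output shown as comments):

```ruby
# The validator inspects the value before Rails type-casts it, so an
# invalid string yields a specific message instead of "can't be blank".
event = AuditEvent.new(ip_address: 'invalid IP')
event.validate
event.errors[:ip_address] # => ["must be a valid IPv4 or IPv6 address"]

event.ip_address = '2001:0db8:85a3::8a2e:0370:7334'
event.validate
event.errors[:ip_address] # => []
```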

View File

@ -17,10 +17,9 @@ module Analytics
return if Feature.disabled?(:store_instance_statistics_measurements, default_enabled: true)
recorded_at = Time.zone.now
measurement_identifiers = Analytics::InstanceStatistics::Measurement.identifiers
worker_arguments = Gitlab::Analytics::InstanceStatistics::WorkersArgumentBuilder.new(
measurement_identifiers: measurement_identifiers.values,
measurement_identifiers: ::Analytics::InstanceStatistics::Measurement.measurement_identifier_values,
recorded_at: recorded_at
).execute

View File

@ -95,6 +95,9 @@ class GitGarbageCollectWorker # rubocop:disable Scalability/IdempotentWorker
return if Gitlab::Database.read_only? # GitGarbageCollectWorker may be run on a Geo secondary
::Gitlab::Cleanup::OrphanLfsFileReferences.new(project, dry_run: false, logger: logger).run!
rescue => err
Gitlab::GitLogger.warn(message: "Cleaning up orphan LFS objects files failed", error: err.message)
Gitlab::ErrorTracking.track_and_raise_for_dev_exception(err)
end
def flush_ref_caches(project)

View File

@ -0,0 +1,5 @@
---
title: Pre-Collapsed Sections in CI Job Logs
merge_request: 42231
author: Kev @KevSlashNull
type: added

View File

@ -0,0 +1,5 @@
---
title: Optimise cleaning up LFS objects
merge_request: 42830
author:
type: performance

View File

@ -0,0 +1,5 @@
---
title: Reference pages_deployments in pages_metadata
merge_request: 42834
author:
type: added

View File

@ -0,0 +1,5 @@
---
title: Store pipeline counts by status for instance statistics
merge_request: 43027
author:
type: changed

View File

@ -0,0 +1,5 @@
---
title: Exclude 2FA from upload#show routes and 404s
merge_request: 42784
author:
type: fixed

View File

@ -0,0 +1,5 @@
---
title: Add validator for IP address/inet columns
merge_request: 42893
author:
type: added

View File

@ -0,0 +1,5 @@
---
title: Update skeleton loader shape on releases pages
merge_request: 43138
author:
type: added

View File

@ -1,7 +1,7 @@
---
name: prometheus_computed_alerts
introduced_by_url:
rollout_issue_url:
group:
introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/13443
rollout_issue_url: https://gitlab.com/gitlab-org/gitlab/-/issues/255304
group: group::health
type: development
default_enabled: false

View File

@ -0,0 +1,7 @@
---
name: store_ci_pipeline_counts_by_status
introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/43027
rollout_issue_url: https://gitlab.com/gitlab-org/gitlab/-/issues/254721
type: development
group: group::analytics
default_enabled: false

View File

@ -0,0 +1,9 @@
# frozen_string_literal: true
class AddPagesDeploymentIdToPagesMetadata < ActiveRecord::Migration[6.0]
DOWNTIME = false
def change
add_column :project_pages_metadata, :pages_deployment_id, :bigint
end
end

View File

@ -0,0 +1,20 @@
# frozen_string_literal: true
class AddForeignKeyToPagesDeploymentIdInProjectPagesMetadata < ActiveRecord::Migration[6.0]
include Gitlab::Database::MigrationHelpers
DOWNTIME = false
INDEX_NAME = 'index_project_pages_metadata_on_pages_deployment_id'
disable_ddl_transaction!
def up
add_concurrent_index(:project_pages_metadata, :pages_deployment_id, name: INDEX_NAME)
add_concurrent_foreign_key :project_pages_metadata, :pages_deployments, column: :pages_deployment_id, on_delete: :nullify
end
def down
remove_foreign_key_if_exists :project_pages_metadata, column: :pages_deployment_id
remove_concurrent_index_by_name(:project_pages_metadata, INDEX_NAME)
end
end

View File

@ -0,0 +1,21 @@
# frozen_string_literal: true
class ChangeIndexOnPipelineStatus < ActiveRecord::Migration[6.0]
include Gitlab::Database::MigrationHelpers
DOWNTIME = false
OLD_INDEX_NAME = 'index_ci_pipelines_on_status'
NEW_INDEX_NAME = 'index_ci_pipelines_on_status_and_id'
disable_ddl_transaction!
def up
add_concurrent_index :ci_pipelines, [:status, :id], name: NEW_INDEX_NAME
remove_concurrent_index_by_name :ci_pipelines, name: OLD_INDEX_NAME
end
def down
add_concurrent_index :ci_pipelines, :status, name: OLD_INDEX_NAME
remove_concurrent_index_by_name :ci_pipelines, name: NEW_INDEX_NAME
end
end

View File

@ -0,0 +1 @@
60835078e0a0bd191e9b1f0316f894c5223d6849277992b5034ed4ff9a798fe4

View File

@ -0,0 +1 @@
e4f9e918c86705409555cde065f30ba0c0c405dfd1918f47a169a5dc5c244a8d

View File

@ -0,0 +1 @@
ab044b609a29e9a179813de79dab9770665917a8ed78db907755a64f2d4aa47c

View File

@ -14712,7 +14712,8 @@ ALTER SEQUENCE project_mirror_data_id_seq OWNED BY project_mirror_data.id;
CREATE TABLE project_pages_metadata (
project_id bigint NOT NULL,
deployed boolean DEFAULT false NOT NULL,
artifacts_archive_id bigint
artifacts_archive_id bigint,
pages_deployment_id bigint
);
CREATE TABLE project_repositories (
@ -19711,7 +19712,7 @@ CREATE INDEX index_ci_pipelines_on_project_id_and_user_id_and_status_and_ref ON
CREATE INDEX index_ci_pipelines_on_project_idandrefandiddesc ON ci_pipelines USING btree (project_id, ref, id DESC);
CREATE INDEX index_ci_pipelines_on_status ON ci_pipelines USING btree (status);
CREATE INDEX index_ci_pipelines_on_status_and_id ON ci_pipelines USING btree (status, id);
CREATE INDEX index_ci_pipelines_on_user_id_and_created_at_and_config_source ON ci_pipelines USING btree (user_id, created_at, config_source);
@ -20819,6 +20820,8 @@ CREATE INDEX index_project_mirror_data_on_status ON project_mirror_data USING bt
CREATE INDEX index_project_pages_metadata_on_artifacts_archive_id ON project_pages_metadata USING btree (artifacts_archive_id);
CREATE INDEX index_project_pages_metadata_on_pages_deployment_id ON project_pages_metadata USING btree (pages_deployment_id);
CREATE UNIQUE INDEX index_project_pages_metadata_on_project_id ON project_pages_metadata USING btree (project_id);
CREATE INDEX index_project_pages_metadata_on_project_id_and_deployed_is_true ON project_pages_metadata USING btree (project_id) WHERE (deployed = true);
@ -21876,6 +21879,9 @@ ALTER TABLE ONLY notification_settings
ALTER TABLE ONLY lists
ADD CONSTRAINT fk_0d3f677137 FOREIGN KEY (board_id) REFERENCES boards(id) ON DELETE CASCADE;
ALTER TABLE ONLY project_pages_metadata
ADD CONSTRAINT fk_0fd5b22688 FOREIGN KEY (pages_deployment_id) REFERENCES pages_deployments(id) ON DELETE SET NULL;
ALTER TABLE ONLY group_deletion_schedules
ADD CONSTRAINT fk_11e3ebfcdd FOREIGN KEY (user_id) REFERENCES users(id) ON DELETE CASCADE;

View File

@ -9388,6 +9388,26 @@ enum MeasurementIdentifier {
"""
PIPELINES
"""
Pipeline count with canceled status
"""
PIPELINES_CANCELED
"""
Pipeline count with failed status
"""
PIPELINES_FAILED
"""
Pipeline count with skipped status
"""
PIPELINES_SKIPPED
"""
Pipeline count with success status
"""
PIPELINES_SUCCEEDED
"""
Project count
"""

View File

@ -26010,6 +26010,30 @@
"description": "Pipeline count",
"isDeprecated": false,
"deprecationReason": null
},
{
"name": "PIPELINES_SUCCEEDED",
"description": "Pipeline count with success status",
"isDeprecated": false,
"deprecationReason": null
},
{
"name": "PIPELINES_FAILED",
"description": "Pipeline count with failed status",
"isDeprecated": false,
"deprecationReason": null
},
{
"name": "PIPELINES_CANCELED",
"description": "Pipeline count with canceled status",
"isDeprecated": false,
"deprecationReason": null
},
{
"name": "PIPELINES_SKIPPED",
"description": "Pipeline count with skipped status",
"isDeprecated": false,
"deprecationReason": null
}
],
"possibleTypes": null

View File

@ -3224,6 +3224,10 @@ Possible identifier types for a measurement.
| `ISSUES` | Issue count |
| `MERGE_REQUESTS` | Merge request count |
| `PIPELINES` | Pipeline count |
| `PIPELINES_CANCELED` | Pipeline count with canceled status |
| `PIPELINES_FAILED` | Pipeline count with failed status |
| `PIPELINES_SKIPPED` | Pipeline count with skipped status |
| `PIPELINES_SUCCEEDED` | Pipeline count with success status |
| `PROJECTS` | Project count |
| `USERS` | User count |

View File

@ -696,7 +696,7 @@ This is similar to creating a [New group](#new-group). You'll need the `parent_i
```shell
curl --request POST --header "PRIVATE-TOKEN: <your_access_token>" --header "Content-Type: application/json" \
--data '{"path": "<subgroup_path>", "name": "<subgroup_name>", "parent_id": <parent_group_id> } \
--data '{"path": "<subgroup_path>", "name": "<subgroup_name>", "parent_id": <parent_group_id> }' \
"https://gitlab.example.com/api/v4/groups/"
```

View File

@ -0,0 +1,137 @@
---
comments: false
description: 'Next iteration of build logs architecture at GitLab'
---
# Cloud Native Build Logs
Cloud native and the adoption of Kubernetes have been recognised by GitLab as
one of the two biggest tailwinds helping us grow faster as the company behind
the project.
This effort is described in more detail [in the infrastructure team
handbook](https://about.gitlab.com/handbook/engineering/infrastructure/production/kubernetes/gitlab-com/).
## Traditional build logs
Traditional job logs depend heavily on the availability of local shared storage.
Every time a GitLab Runner sends new partial build output, we write this
output to a file on disk. This is simple, but the mechanism depends on
shared local storage: the same file needs to be available on every GitLab web
node, because GitLab Runner might connect to a different one every time
it performs an API request. Sidekiq also needs access to the file because, when
a job completes, the trace file's contents are sent to the object store.
## New architecture
The new architecture writes build log data to Redis instead of writing it to a
file.
To make this performant and resilient enough, we implemented a chunked I/O
mechanism: we store data in Redis in chunks, and migrate them to an object
store once we reach a desired chunk size.
A simplified sequence diagram is available below.
```mermaid
sequenceDiagram
autonumber
participant U as User
participant R as Runner
participant G as GitLab (rails)
participant I as Redis
participant D as Database
participant O as Object store
loop incremental trace update sent by a runner
Note right of R: Runner appends a build trace
R->>+G: PATCH trace [build.id, offset, data]
G->>+D: find or create chunk [chunk.index]
D-->>-G: chunk [id, index]
G->>I: append chunk data [chunk.index, data]
G-->>-R: 200 OK
end
Note right of R: User retrieves a trace
U->>+G: GET build trace
loop every trace chunk
G->>+D: find chunk [index]
D-->>-G: chunk [id]
G->>+I: read chunk data [chunk.index]
I-->>-G: chunk data [data, size]
end
G-->>-U: build trace
Note right of R: Trace chunk is full
R->>+G: PATCH trace [build.id, offset, data]
G->>+D: find or create chunk [chunk.index]
D-->>-G: chunk [id, index]
G->>I: append chunk data [chunk.index, data]
G->>G: chunk full [index]
G-->>-R: 200 OK
G->>+I: read chunk data [chunk.index]
I-->>-G: chunk data [data, size]
G->>O: send chunk data [data, size]
G->>+D: update data store type [chunk.id]
G->>+I: delete chunk data [chunk.index]
```
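The append-then-migrate flow in the diagram can be sketched in a few lines of
Ruby. This is a simplified illustration, not the actual `Ci::BuildTraceChunk`
implementation; the chunk size, the key scheme, and the `object_store` client
are all assumptions:

```ruby
require 'redis'

# A minimal sketch of chunked trace I/O, assuming a hypothetical
# object_store client and an arbitrary fixed chunk size.
class TraceChunks
  CHUNK_SIZE = 128 * 1024 # migrate a chunk once it reaches this size

  def initialize(build_id, object_store)
    @redis = Redis.new
    @build_id = build_id
    @object_store = object_store
  end

  # Append partial output to the chunk at the given index.
  def append(index, data)
    key = chunk_key(index)
    size = @redis.append(key, data) # Redis APPEND returns the new length

    migrate(index) if size >= CHUNK_SIZE
  end

  private

  # Move a full chunk out of Redis into the object store.
  def migrate(index)
    key = chunk_key(index)
    data = @redis.get(key)
    @object_store.put(key, data) # hypothetical object store client
    @redis.del(key)
  end

  def chunk_key(index)
    "trace:#{@build_id}:chunk:#{index}"
  end
end
```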
## NFS coupling
In 2017, we experienced serious problems scaling our NFS infrastructure. We
even tried to replace NFS with
[CephFS](https://docs.ceph.com/docs/master/cephfs/) - unsuccessfully.
Since that time it has become apparent that the cost of operating and
maintaining an NFS cluster is significant and that if we ever decide to
migrate to Kubernetes [we need to decouple GitLab from shared local storage and
NFS](https://gitlab.com/gitlab-org/gitlab-pages/-/issues/426#note_375646396).
1. NFS might be a single point of failure
1. NFS can only be reliably scaled vertically
1. Moving to Kubernetes means increasing the number of mount points by an order
of magnitude
1. NFS depends on an extremely reliable network, which can be difficult to
   provide in a Kubernetes environment
1. Storing customer data on NFS involves additional security risks
Moving GitLab to Kubernetes without NFS decoupling would result in an explosion
of complexity and maintenance cost, and an enormous negative impact on
availability.
## Iterations
1. ✓ Implement the new architecture in a way that does not depend on shared local storage
1. ✓ Evaluate performance and edge cases, iterate to improve the new architecture
1. Design cloud native build logs correctness verification mechanisms
1. Build observability mechanisms around performance and correctness
1. Roll out the feature to the production environment incrementally
The work needed to make the new architecture production ready and enabled on
GitLab.com is being tracked in the [Cloud Native Build Logs on
GitLab.com](https://gitlab.com/groups/gitlab-org/-/epics/4275) epic.
Enabling this feature on GitLab.com is a subtask of [making the new
architecture generally
available](https://gitlab.com/groups/gitlab-org/-/epics/3791) for everyone.
## Who
Proposal:
| Role | Who
|------------------------------|-------------------------|
| Author | Grzegorz Bizon |
| Architecture Evolution Coach | Gerardo Lopez-Fernandez |
| Engineering Leader | Darby Frey |
| Domain Expert | Kamil Trzciński |
| Domain Expert | Sean McGivern |
DRIs:
| Role | Who
|------------------------------|------------------------|
| Product | Jason Yavorska |
| Leadership | Darby Frey |
| Engineering | Grzegorz Bizon |

View File

@ -0,0 +1,131 @@
---
comments: false
description: 'Making GitLab Pages a Cloud Native application - architecture blueprint.'
---
# GitLab Pages New Architecture
GitLab Pages is an important component of the GitLab product. It is mostly
used to serve static content, and has a limited set of well-defined
responsibilities. That said, it has unfortunately become a blocker for the
GitLab.com Kubernetes migration.
Cloud Native and the adoption of Kubernetes have been recognised by GitLab as
one of the two biggest tailwinds helping us grow faster as the company behind
the project.
This effort is described in more detail [in the infrastructure team handbook
page](https://about.gitlab.com/handbook/engineering/infrastructure/production/kubernetes/gitlab-com/).
GitLab Pages is tightly coupled with NFS, and in order to unblock the Kubernetes
migration a significant change to GitLab Pages' architecture is required. This
is ongoing work that we started more than a year ago. This blueprint might be
useful to understand why it is important, and what the roadmap is.
## How GitLab Pages Works
GitLab Pages is a daemon designed to serve static content, written in
[Go](https://golang.org/).
Initially, GitLab Pages was designed to store static content on local
shared block storage (NFS) in a hierarchical group > project directory
structure. Each directory, representing a project, was supposed to contain a
configuration file and the static content that the GitLab Pages daemon would
read and serve.
```mermaid
graph LR
A(GitLab Rails) -- Writes new pages deployment --> B[(NFS)]
C(GitLab Pages) -. Reads static content .-> B
```
This initial design has become outdated for a few reasons - NFS coupling
being one of them - and we decided to replace it with a more "decoupled
service"-like architecture. The new architecture that we are working on is
described in this blueprint.
## NFS coupling
In 2017, we experienced serious problems scaling our NFS infrastructure. We
even tried to replace NFS with
[CephFS](https://docs.ceph.com/docs/master/cephfs/) - unsuccessfully.
Since that time it has become apparent that the cost of operating and
maintaining an NFS cluster is significant and that if we ever decide to
migrate to Kubernetes [we need to decouple GitLab from shared local storage and
NFS](https://gitlab.com/gitlab-org/gitlab-pages/-/issues/426#note_375646396).
1. NFS might be a single point of failure
1. NFS can only be reliably scaled vertically
1. Moving to Kubernetes means increasing the number of mount points by an order
of magnitude
1. NFS depends on an extremely reliable network, which can be difficult to
   provide in a Kubernetes environment
1. Storing customer data on NFS involves additional security risks
Moving GitLab to Kubernetes without NFS decoupling would result in an explosion
of complexity and maintenance cost, and an enormous negative impact on
availability.
## New GitLab Pages Architecture
- GitLab Pages is going to source domains' configuration from GitLab's internal
  API, instead of reading `config.json` files from local shared storage.
- GitLab Pages is going to serve static content from Object Storage.
```mermaid
graph TD
A(User) -- Pushes pages deployment --> B{GitLab}
C((GitLab Pages)) -. Reads configuration from API .-> B
C -. Reads static content .-> D[(Object Storage)]
C -- Serves static content --> E(Visitors)
```
This new architecture is also briefly described in [this blog
post](https://about.gitlab.com/blog/2020/08/03/how-gitlab-pages-uses-the-gitlab-api-to-serve-content/).
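A rough sketch of the configuration lookup in the new model. The real Pages
daemon is written in Go and authenticates its requests, both of which are
glossed over here, and the endpoint shape is an assumption for illustration:

```ruby
require 'net/http'
require 'json'

# Illustrative only: fetch a domain's Pages configuration from the
# internal API instead of reading config.json from NFS.
def pages_config_for(host)
  uri = URI("https://gitlab.example.com/api/v4/internal/pages?host=#{host}")
  response = Net::HTTP.get_response(uri) # the real daemon adds an auth token
  return unless response.is_a?(Net::HTTPSuccess)

  JSON.parse(response.body)
end

pages_config_for('group.example.io')
```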
## Iterations
1. ✓ Redesign GitLab Pages configuration source to use GitLab's API
1. ✓ Evaluate performance and build reliable caching mechanisms
1. ✓ Incrementally roll out the new source on GitLab.com
1. ✓ Make GitLab Pages API domains config source enabled by default
1. Enable experimentation with different serving strategies through feature flags
1. Triangulate object store serving design through meaningful experiments
1. Design pages migration mechanisms that can work incrementally
1. Gradually migrate towards object storage serving on GitLab.com
The [GitLab Pages Architecture](https://gitlab.com/groups/gitlab-org/-/epics/1316)
epic with a detailed roadmap is also available.
## Who
Proposal:
| Role | Who
|------------------------------|-------------------------|
| Author | Grzegorz Bizon |
| Architecture Evolution Coach | Kamil Trzciński |
| Engineering Leader | Daniel Croft |
| Domain Expert | Grzegorz Bizon |
| Domain Expert | Vladimir Shushlin |
| Domain Expert | Jaime Martinez |
DRIs:
| Role | Who
|------------------------------|------------------------|
| Product | Jackie Porter |
| Leadership | Daniel Croft |
| Engineering | TBD |
Domain Experts:
| Role | Who
|------------------------------|------------------------|
| Domain Expert | Kamil Trzciński |
| Domain Expert | Grzegorz Bizon |
| Domain Expert | Vladimir Shushlin |
| Domain Expert | Jaime Martinez |
| Domain Expert | Krasimir Angelov |

View File

@ -0,0 +1,136 @@
---
comments: false
description: 'Internal usage of Feature Flags for GitLab development'
---
# Usage of Feature Flags for GitLab development
Usage of feature flags has become crucial for the development of GitLab.
Feature flags are a convenient way to ship changes early and safely roll them
out to a wide audience, ensuring that a feature is stable and performant.
Since the presence of a feature is controlled with a dedicated condition, a
developer can decide on the best time for testing the feature, ensuring that
the feature is not enabled prematurely.
## Challenges
The extensive usage of feature flags poses a few challenges:
- Each feature flag that we add to the codebase is ~"technical debt" as it adds
  a matrix of configurations.
- Testing each combination of feature flags is close to impossible, so we
  instead try to optimise our testing of feature flags for the most common
  scenarios.
- There's the growing challenge of maintaining an increasing number of feature
  flags. We sometimes forget how our feature flags are configured or why we
  haven't yet removed the feature flag.
- The usage of feature flags can also be confusing to people outside of
  development who might not fully understand the dependence of a ~feature or
  ~bug fix on a feature flag, how this feature flag is configured, or whether
  the feature should be announced as part of a release post.
- Maintaining feature flags poses the additional challenge of having to manage
  different configurations across different environments/targets. We have
  different configurations of feature flags for testing, for development, for
  staging, for production, and for what is shipped to our customers as part of
  the on-premise offering.
## Goals
The biggest challenge today with our usage of feature flags is their implicit
nature. Feature flags are part of the codebase, making them hard to understand
outside of the development function.
We should aim to make our feature-flag-based development accessible to
any interested party.
- developer / engineer
  - can easily add a new feature flag, and configure its state
  - can quickly find who to reach when they touch another feature flag
  - can quickly find stale feature flags
- engineering manager
  - can understand what feature flags their group manages
- engineering manager and director
  - can understand how much ~"technical debt" is inflicted due to the number of feature flags that we have to manage
  - can understand how many feature flags are added and removed in each release
- product manager and documentation writer
  - can understand what features are gated by what feature flags
  - can understand if a feature, and thus its feature flag, is generally available on GitLab.com
  - can understand if a feature, and thus its feature flag, is enabled by default for on-premise installations
- delivery engineer
  - can understand what feature flags are introduced and changed between subsequent deployments
- support and reliability engineer
  - can understand how feature flags changed between releases: which feature flags became enabled and which were removed
  - can quickly find relevant information about a feature flag to identify individuals who might help with an ongoing support request or incident
## Proposal
To help with the above goals we should aim to make our feature flags usage
explicit and understood by all involved parties.
Introduce a YAML-described `feature-flags/<name-of-feature.yml>` that would
allow us to have:
1. A central place where all feature flags are documented,
1. A description of why the given feature flag was introduced,
1. The relevant issue and merge request it was introduced by,
1. Build automated documentation with all feature flags in the codebase,
1. Track how many feature flags there are per given group (see the sketch below),
1. Track how many feature flags are added and removed between releases,
1. Make this information easily accessible for all,
1. Allow our customers to easily discover how to enable features and quickly
   find out what changed between different releases
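As an illustration of the kind of automation this enables, a minimal sketch
that tallies flags per owning group using the `group` field from the YAML
described next; the `config/feature_flags/**/*.yml` glob path is an assumption:

```ruby
require 'yaml'

# A minimal sketch, assuming the YAML definitions live under a
# config/feature_flags/**/*.yml glob (the path is an assumption).
definitions = Dir.glob('config/feature_flags/**/*.yml').map do |path|
  YAML.safe_load(File.read(path))
end

# Tally flags per owning group, e.g. {"group::analytics" => 12, ...}
per_group = definitions.group_by { |flag| flag['group'] }
                       .transform_values(&:count)

per_group.sort_by { |_, count| -count }.each do |group, count|
  puts format('%-40s %d', group, count)
end
```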
### The `YAML`
```yaml
---
name: ci_disallow_to_create_merge_request_pipelines_in_target_project
introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/40724
rollout_issue_url: https://gitlab.com/gitlab-org/gitlab/-/issues/235119
group: group::progressive delivery
type: development
default_enabled: false
```
## Reasons
These are the reasons why these changes are needed:
- we have around 500 different feature flags today
- we have a hard time tracking their usage
- we have ambiguous usage of feature flags with different `default_enabled:` and
  different `actors` used
- we lack a clear indication of who owns which feature flag and where to find
  relevant information
- we do not emphasise the desire to create a feature flag rollout issue to
  indicate that a feature flag is in fact ~"technical debt"
- we don't know exactly what feature flags we have in our codebase
- we don't know exactly how our feature flags are configured for different
  environments: what is used for `test`, what we ship for `on-premise`,
  what our settings are for `staging`, `qa` and `production`
## Iterations
This work is being done as part of a dedicated epic: [Improve internal usage of
Feature Flags](https://gitlab.com/groups/gitlab-org/-/epics/3551). This epic
describes the meta-reasons for making these changes.
## Who
Proposal:
| Role | Who
|------------------------------|-------------------------|
| Author | Kamil Trzciński |
| Architecture Evolution Coach | Gerardo Lopez-Fernandez |
| Engineering Leader | Kamil Trzciński |
| Domain Expert | Shinya Maeda |
DRIs:
| Role | Who
|------------------------------|------------------------|
| Product | ? |
| Leadership | Craig Gomes |
| Engineering | Kamil Trzciński |

View File

@ -0,0 +1,9 @@
---
comments: false
description: 'Architecture Practice at GitLab'
---
# Architecture at GitLab
- [Architecture at GitLab](https://about.gitlab.com/handbook/engineering/architecture/)
- [Architecture Workflow](https://about.gitlab.com/handbook/engineering/architecture/workflow/)

View File

@ -461,6 +461,28 @@ this line should be hidden when collapsed
section_end:1560896353:my_first_section\r\e[0K
```
#### Pre-collapse sections
> [Introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/198413) in GitLab 13.5.
You can make the job log automatically collapse collapsible sections by adding the `collapsed` option to the section start.
Add `[collapsed=true]` after the section name and before the `\r`. The section end marker
remains unchanged:
- Section start marker with `[collapsed=true]`: `section_start:UNIX_TIMESTAMP:SECTION_NAME[collapsed=true]\r\e[0K` + `TEXT_OF_SECTION_HEADER`
- Section end marker: `section_end:UNIX_TIMESTAMP:SECTION_NAME\r\e[0K`
Add the updated section start text to the CI configuration. For example,
using `echo`:
```yaml
job1:
script:
- echo -e "section_start:`date +%s`:my_first_section[collapsed=true]\r\e[0KHeader of the 1st collapsible section"
- echo 'this line should be hidden automatically after loading the job log'
- echo -e "section_end:`date +%s`:my_first_section\r\e[0K"
```
## Visualize pipelines
> [Introduced](https://gitlab.com/gitlab-org/gitlab-foss/-/merge_requests/5742) in GitLab 8.11.

View File

@ -68,6 +68,10 @@ which is enabled for the `cache` and `shared_state`
## Redis in structured logging
For GitLab Team Members: There are [basic](https://www.youtube.com/watch?v=Uhdj19Dc6vU) and
[advanced](https://youtu.be/jw1Wv2IJxzs) videos that show how you can work with the Redis
structured logging fields on GitLab.com.
Our [structured logging](logging.md#use-structured-json-logging) for web
requests and Sidekiq jobs contains fields for the duration, call count,
bytes written, and bytes read per Redis instance, along with a total for

View File

@ -126,10 +126,11 @@ happened over time, such as how many CI pipelines have run. They are monotonic a
Observations are facts collected from one or more GitLab instances and can carry arbitrary data. There are no
general guidelines around how to collect those, due to the individual nature of that data.
There are four types of counters which are all found in `usage_data.rb`:
There are several types of counters which are all found in `usage_data.rb`:
- **Ordinary Batch Counters:** Simple count of a given ActiveRecord_Relation
- **Distinct Batch Counters:** Distinct count of a given ActiveRecord_Relation on given column
- **Sum Batch Counters:** Sum the values of a given ActiveRecord_Relation on given column
- **Alternative Counters:** Used for settings and configurations
- **Redis Counters:** Used for in-memory counts.
@ -200,6 +201,28 @@ distinct_count(::Note.with_suggestions.where(time_period), :author_id, start: ::
distinct_count(::Clusters::Applications::CertManager.where(time_period).available.joins(:cluster), 'clusters.user_id')
```
### Sum Batch Counters
Handles the `ActiveRecord::StatementInvalid` error.
Sums the values of a given ActiveRecord_Relation on a given column and handles errors.
Method: `sum(relation, column, batch_size: nil, start: nil, finish: nil)`
Arguments:
- `relation` the ActiveRecord_Relation to perform the operation
- `column` the column to sum on
- `batch_size`: if not set, the default value of 1000 from `Gitlab::Database::BatchCounter` is used
- `start`: custom start of the batch counting in order to avoid complex min calculations
- `finish`: custom end of the batch counting in order to avoid complex max calculations
Examples:
```ruby
sum(JiraImportState.finished, :imported_issues_count)
```
### Redis Counters
Handles `::Redis::CommandError` and `Gitlab::UsageDataCounters::BaseCounter::UnknownEvent`
@ -372,7 +395,7 @@ w
Example usage for an existing event already defined in [known events](https://gitlab.com/gitlab-org/gitlab/-/blob/master/lib/gitlab/usage_data_counters/known_events.yml):
Note that `usage_data_api` and `usage_data_#{event_name}` should be enabled in order to rack events using API.
Note that `usage_data_api` and `usage_data_#{event_name}` should be enabled in order to be able to track events
```javascript
import api from '~/api';

View File

@ -104,23 +104,24 @@ module Gitlab
action = scanner[1]
timestamp = scanner[2]
section = scanner[3]
options = parse_section_options(scanner[4])
section_name = sanitize_section_name(section)
if action == "start"
handle_section_start(scanner, section_name, timestamp)
elsif action == "end"
if action == 'start'
handle_section_start(scanner, section_name, timestamp, options)
elsif action == 'end'
handle_section_end(scanner, section_name, timestamp)
else
raise 'unsupported action'
end
end
def handle_section_start(scanner, section, timestamp)
def handle_section_start(scanner, section, timestamp, options)
# We make a new line for new section
flush_current_line
@state.open_section(section, timestamp)
@state.open_section(section, timestamp, options)
# we need to consume match after handling
# the open of section, as we want the section
@ -157,6 +158,18 @@ module Gitlab
def sanitize_section_name(section)
section.to_s.downcase.gsub(/[^a-z0-9]/, '-')
end
def parse_section_options(raw_options)
return unless raw_options
# We need to remove the square brackets and split
# by comma to get a list of the options
options = raw_options[1...-1].split ','
# Now split each option by equals to separate
# each in the format [key, value]
options.to_h { |option| option.split '=' }
end
end
end
end
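For reference, the resulting behavior (mirroring the Ansi2json spec further
down); values stay strings, which is why the frontend applies `parseBoolean`
to the `collapsed` option:

```ruby
# Brackets stripped, options split on commas, then on '=':
parse_section_options('[collapsed=true,unused_option=123]')
# => { 'collapsed' => 'true', 'unused_option' => '123' }

parse_section_options(nil)
# => nil (the options capture group did not match)
```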

View File

@ -32,7 +32,7 @@ module Gitlab
end
attr_reader :offset, :sections, :segments, :current_segment,
:section_header, :section_duration
:section_header, :section_duration, :section_options
def initialize(offset:, style:, sections: [])
@offset = offset
@ -68,6 +68,10 @@ module Gitlab
@sections << section
end
def set_section_options(options)
@section_options = options
end
def set_as_section_header
@section_header = true
end
@ -90,6 +94,7 @@ module Gitlab
result[:section] = sections.last if sections.any?
result[:section_header] = true if @section_header
result[:section_duration] = @section_duration if @section_duration
result[:section_options] = @section_options if @section_options
end
end
end

View File

@ -26,10 +26,11 @@ module Gitlab
Base64.urlsafe_encode64(state.to_json)
end
def open_section(section, timestamp)
def open_section(section, timestamp, options)
@open_sections[section] = timestamp
@current_line.add_section(section)
@current_line.set_section_options(options)
@current_line.set_as_section_header
end

View File

@ -19,6 +19,11 @@ module Gitlab
def run!
log_info("Looking for orphan LFS files for project #{project.name_with_namespace}")
if project.lfs_objects.empty?
log_info("Project #{project.name_with_namespace} is linked to 0 LFS objects. Nothing to do")
return
end
remove_orphan_references
end

View File

@ -220,8 +220,27 @@ module Gitlab
"Must start with a letter, and cannot end with '-'"
end
# The section start, e.g. section_start:12345678:NAME
def logs_section_prefix_regex
/section_((?:start)|(?:end)):(\d+):([a-zA-Z0-9_.-]+)/
end
# The optional section options, e.g. [collapsed=true]
def logs_section_options_regex
/(\[(?:\w+=\w+)(?:, ?(?:\w+=\w+))*\])?/
end
# The region end, always: \r\e\[0K
def logs_section_suffix_regex
/\r\033\[0K/
end
def build_trace_section_regex
@build_trace_section_regexp ||= /section_((?:start)|(?:end)):(\d+):([a-zA-Z0-9_.-]+)\r\033\[0K/.freeze
@build_trace_section_regexp ||= %r{
#{logs_section_prefix_regex}
#{logs_section_options_regex}
#{logs_section_suffix_regex}
}x.freeze
end
def markdown_code_or_html_blocks

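Putting the three pieces together, the composed regex captures the action, the
timestamp, the section name, and the optional raw options; a hypothetical
console check:

```ruby
regex = Gitlab::Regex.build_trace_section_regex
m = regex.match("section_start:1600445393032:my_section[collapsed=true]\r\e[0K")
m[1] # => "start" (the action)
m[2] # => "1600445393032" (the UNIX timestamp)
m[3] # => "my_section" (the section name)
m[4] # => "[collapsed=true]" (raw options; nil when absent)
```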
View File

@ -22439,6 +22439,12 @@ msgstr ""
msgid "Seat Link is disabled, and cannot be configured through this form."
msgstr ""
msgid "Seats usage data as of %{last_enqueue_time}"
msgstr ""
msgid "Seats usage data is updated every day at 12:00pm UTC"
msgstr ""
msgid "Secondary"
msgstr ""
@ -30753,6 +30759,9 @@ msgstr ""
msgid "must be a root namespace"
msgstr ""
msgid "must be a valid IPv4 or IPv6 address"
msgstr ""
msgid "must be greater than start date"
msgstr ""

View File

@ -38,8 +38,11 @@ module QA
def remove_key(title)
click_link(title)
accept_alert do
click_element(:delete_key_button)
# Retrying due to https://gitlab.com/gitlab-org/gitlab/-/issues/255287
retry_on_exception do
accept_alert do
click_element(:delete_key_button)
end
end
end

View File

@ -13,5 +13,13 @@ FactoryBot.define do
trait :group_count do
identifier { :groups }
end
trait :pipelines_succeeded_count do
identifier { :pipelines_succeeded }
end
trait :pipelines_skipped_count do
identifier { :pipelines_skipped }
end
end
end

View File

@ -35,6 +35,14 @@ describe('Jobs Store Utils', () => {
lines: [],
});
});
it('pre-closes a section when specified in options', () => {
const headerLine = { content: [{ text: 'foo' }], section_options: { collapsed: 'true' } };
const parsedHeaderLine = parseHeaderLine(headerLine, 2);
expect(parsedHeaderLine.isClosed).toBe(true);
});
});
describe('parseLine', () => {

View File

@ -1,7 +1,7 @@
import Vuex from 'vuex';
import { shallowMount } from '@vue/test-utils';
import { GlDeprecatedSkeletonLoading as GlSkeletonLoading } from '@gitlab/ui';
import ReleaseShowApp from '~/releases/components/app_show.vue';
import ReleaseSkeletonLoader from '~/releases/components/release_skeleton_loader.vue';
import { release as originalRelease } from '../mock_data';
import ReleaseBlock from '~/releases/components/release_block.vue';
import { convertObjectPropsToCamelCase } from '~/lib/utils/common_utils';
@ -33,7 +33,7 @@ describe('Release show component', () => {
wrapper = shallowMount(ReleaseShowApp, { store });
};
const findLoadingSkeleton = () => wrapper.find(GlSkeletonLoading);
const findLoadingSkeleton = () => wrapper.find(ReleaseSkeletonLoader);
const findReleaseBlock = () => wrapper.find(ReleaseBlock);
it('calls fetchRelease when the component is created', () => {

View File

@ -0,0 +1,15 @@
import { mount } from '@vue/test-utils';
import { GlSkeletonLoader } from '@gitlab/ui';
import ReleaseSkeletonLoader from '~/releases/components/release_skeleton_loader.vue';
describe('release_skeleton_loader.vue', () => {
let wrapper;
beforeEach(() => {
wrapper = mount(ReleaseSkeletonLoader);
});
it('renders a GlSkeletonLoader', () => {
expect(wrapper.find(GlSkeletonLoader).exists()).toBe(true);
});
});

View File

@ -207,6 +207,15 @@ describe('Release detail actions', () => {
});
});
describe('updateReleaseGroupMilestones', () => {
it(`commits ${types.UPDATE_RELEASE_GROUP_MILESTONES} with the updated release group milestones`, () => {
const newReleaseGroupMilestones = ['v0.0', 'v0.1'];
return testAction(actions.updateReleaseGroupMilestones, newReleaseGroupMilestones, state, [
{ type: types.UPDATE_RELEASE_GROUP_MILESTONES, payload: newReleaseGroupMilestones },
]);
});
});
describe('addEmptyAssetLink', () => {
it(`commits ${types.ADD_EMPTY_ASSET_LINK}`, () => {
return testAction(actions.addEmptyAssetLink, undefined, state, [

View File

@ -30,6 +30,7 @@ describe('Release detail mutations', () => {
name: '',
description: '',
milestones: [],
groupMilestones: [],
assets: {
links: [],
},
@ -112,6 +113,26 @@ describe('Release detail mutations', () => {
});
});
describe(`${types.UPDATE_RELEASE_MILESTONES}`, () => {
it("updates the release's milestones", () => {
state.release = release;
const newReleaseMilestones = ['v0.0', 'v0.1'];
mutations[types.UPDATE_RELEASE_MILESTONES](state, newReleaseMilestones);
expect(state.release.milestones).toBe(newReleaseMilestones);
});
});
describe(`${types.UPDATE_RELEASE_GROUP_MILESTONES}`, () => {
it("updates the release's group milestones", () => {
state.release = release;
const newReleaseGroupMilestones = ['v0.0', 'v0.1'];
mutations[types.UPDATE_RELEASE_GROUP_MILESTONES](state, newReleaseGroupMilestones);
expect(state.release.groupMilestones).toBe(newReleaseGroupMilestones);
});
});
describe(`${types.REQUEST_SAVE_RELEASE}`, () => {
it('set state.isUpdatingRelease to true', () => {
mutations[types.REQUEST_SAVE_RELEASE](state);

View File

@ -5,9 +5,11 @@ require 'spec_helper'
RSpec.describe Resolvers::Admin::Analytics::InstanceStatistics::MeasurementsResolver do
include GraphqlHelpers
let_it_be(:admin_user) { create(:user, :admin) }
let(:current_user) { admin_user }
describe '#resolve' do
let_it_be(:user) { create(:user) }
let_it_be(:admin_user) { create(:user, :admin) }
let_it_be(:project_measurement_new) { create(:instance_statistics_measurement, :project_count, recorded_at: 2.days.ago) }
let_it_be(:project_measurement_old) { create(:instance_statistics_measurement, :project_count, recorded_at: 10.days.ago) }
@ -39,6 +41,37 @@ RSpec.describe Resolvers::Admin::Analytics::InstanceStatistics::MeasurementsReso
end
end
end
context 'when requesting pipeline counts by pipeline status' do
let_it_be(:pipelines_succeeded_measurement) { create(:instance_statistics_measurement, :pipelines_succeeded_count, recorded_at: 2.days.ago) }
let_it_be(:pipelines_skipped_measurement) { create(:instance_statistics_measurement, :pipelines_skipped_count, recorded_at: 2.days.ago) }
subject { resolve_measurements({ identifier: identifier }, { current_user: current_user }) }
context 'filter for pipelines_succeeded' do
let(:identifier) { 'pipelines_succeeded' }
it { is_expected.to eq([pipelines_succeeded_measurement]) }
end
context 'filter for pipelines_skipped' do
let(:identifier) { 'pipelines_skipped' }
it { is_expected.to eq([pipelines_skipped_measurement]) }
end
context 'filter for pipelines_failed' do
let(:identifier) { 'pipelines_failed' }
it { is_expected.to be_empty }
end
context 'filter for pipelines_canceled' do
let(:identifier) { 'pipelines_canceled' }
it { is_expected.to be_empty }
end
end
end
def resolve_measurements(args = {}, context = {})

View File

@ -58,6 +58,15 @@ RSpec.describe Gitlab::Ci::Ansi2json::Line do
end
end
describe '#set_section_options' do
it 'sets the current section\'s options' do
options = { collapsed: true }
subject.set_section_options(options)
expect(subject.to_h[:section_options]).to eq(options)
end
end
describe '#set_as_section_header' do
it 'changes the section_header to true' do
expect { subject.set_as_section_header }

View File

@ -229,7 +229,7 @@ RSpec.describe Gitlab::Ci::Ansi2json do
expect(convert_json(trace)).to eq([
{
offset: 0,
content: [{ text: "section_end:1:2<div>hello</div>" }],
content: [{ text: 'section_end:1:2<div>hello</div>' }],
section: 'prepare-script',
section_header: true
},
@ -329,6 +329,32 @@ RSpec.describe Gitlab::Ci::Ansi2json do
])
end
end
context 'with section options' do
let(:option_section_start) { "section_start:#{section_start_time.to_i}:#{section_name}[collapsed=true,unused_option=123]\r\033[0K"}
it 'provides section options when set' do
trace = "#{option_section_start}hello#{section_end}"
expect(convert_json(trace)).to eq([
{
offset: 0,
content: [{ text: 'hello' }],
section: 'prepare-script',
section_header: true,
section_options: {
'collapsed' => 'true',
'unused_option' => '123'
}
},
{
offset: 83,
content: [],
section: 'prepare-script',
section_duration: '01:03'
}
])
end
end
end
describe 'incremental updates' do
@ -339,7 +365,7 @@ RSpec.describe Gitlab::Ci::Ansi2json do
context 'with split word' do
let(:pre_text) { "\e[1mHello " }
let(:text) { "World" }
let(:text) { 'World' }
let(:lines) do
[
@ -355,7 +381,7 @@ RSpec.describe Gitlab::Ci::Ansi2json do
context 'with split word on second line' do
let(:pre_text) { "Good\nmorning " }
let(:text) { "World" }
let(:text) { 'World' }
let(:lines) do
[
@ -514,7 +540,7 @@ RSpec.describe Gitlab::Ci::Ansi2json do
end
describe 'truncates' do
let(:text) { "Hello World" }
let(:text) { 'Hello World' }
let(:stream) { StringIO.new(text) }
let(:subject) { described_class.convert(stream) }
@ -522,11 +548,11 @@ RSpec.describe Gitlab::Ci::Ansi2json do
stream.seek(3, IO::SEEK_SET)
end
it "returns truncated output" do
it 'returns truncated output' do
expect(subject.truncated).to be_truthy
end
it "does not append output" do
it 'does not append output' do
expect(subject.append).to be_falsey
end
end

View File

@ -42,12 +42,24 @@ RSpec.describe Gitlab::Cleanup::OrphanLfsFileReferences do
expect(null_logger).to receive(:info).with("Looking for orphan LFS files for project #{project.name_with_namespace}")
expect(null_logger).to receive(:info).with("Removed invalid references: 1")
expect(ProjectCacheWorker).to receive(:perform_async).with(project.id, [], [:lfs_objects_size])
expect(service).to receive(:remove_orphan_references).and_call_original
expect { service.run! }.to change { project.lfs_objects.count }.from(2).to(1)
expect(LfsObjectsProject.exists?(invalid_reference.id)).to be_falsey
end
it 'does nothing if the project has no LFS objects' do
expect(null_logger).to receive(:info).with(/Looking for orphan LFS files/)
expect(null_logger).to receive(:info).with(/Nothing to do/)
project.lfs_objects_projects.delete_all
expect(service).not_to receive(:remove_orphan_references)
service.run!
end
context 'LFS object is in design repository' do
before do
expect(project.design_repository).to receive(:exists?).and_return(true)

View File

@ -10,14 +10,17 @@ RSpec.describe Gitlab::ImportExport::FastHashSerializer do
# all items are properly serialized while traversing the simple hash.
subject { Gitlab::Json.parse(Gitlab::Json.generate(described_class.new(project, tree).execute)) }
let!(:project) { setup_project }
let(:user) { create(:user) }
let_it_be(:user) { create(:user) }
let_it_be(:project) { setup_project }
let(:shared) { project.import_export_shared }
let(:reader) { Gitlab::ImportExport::Reader.new(shared: shared) }
let(:tree) { reader.project_tree }
before do
before_all do
project.add_maintainer(user)
end
before do
allow_any_instance_of(MergeRequest).to receive(:source_branch_sha).and_return('ABCD')
allow_any_instance_of(MergeRequest).to receive(:target_branch_sha).and_return('DCBA')
end
@ -224,7 +227,6 @@ RSpec.describe Gitlab::ImportExport::FastHashSerializer do
group: group,
approvals_before_merge: 1
)
allow(project).to receive(:commit).and_return(Commit.new(RepoHelpers.sample_commit, project))
issue = create(:issue, assignees: [user], project: project)
snippet = create(:project_snippet, project: project)

View File

@ -99,6 +99,36 @@ RSpec.describe Gitlab::Regex do
it { is_expected.not_to match('foo-') }
end
describe '.build_trace_section_regex' do
subject { described_class.build_trace_section_regex }
context 'without options' do
example = "section_start:1600445393032:NAME\r\033\[0K"
it { is_expected.to match(example) }
it { is_expected.to match("section_end:12345678:aBcDeFg1234\r\033\[0K") }
it { is_expected.to match("section_start:0:sect_for_alpha-v1.0\r\033\[0K") }
it { is_expected.not_to match("section_start:section:0\r\033\[0K") }
it { is_expected.not_to match("section_:1600445393032:NAME\r\033\[0K") }
it { is_expected.not_to match(example.upcase) }
end
context 'with options' do
it { is_expected.to match("section_start:1600445393032:NAME[collapsed=true]\r\033\[0K") }
it { is_expected.to match("section_start:1600445393032:NAME[collapsed=true, example_option=false]\r\033\[0K") }
it { is_expected.to match("section_start:1600445393032:NAME[collapsed=true,example_option=false]\r\033\[0K") }
it { is_expected.to match("section_start:1600445393032:NAME[numeric_option=1234567]\r\033\[0K") }
# Without splitting the regex into one for start and one for end,
# this is possible; however, it is ignored for section_end.
it { is_expected.to match("section_end:1600445393032:NAME[collapsed=true]\r\033\[0K") }
it { is_expected.not_to match("section_start:1600445393032:NAME[collapsed=[]]]\r\033\[0K") }
it { is_expected.not_to match("section_start:1600445393032:NAME[collapsed = true]\r\033\[0K") }
it { is_expected.not_to match("section_start:1600445393032:NAME[collapsed = true, example_option=false]\r\033\[0K") }
it { is_expected.not_to match("section_start:1600445393032:NAME[collapsed=true, example_option=false]\r\033\[0K") }
it { is_expected.not_to match("section_start:1600445393032:NAME[]\r\033\[0K") }
end
end
describe '.container_repository_name_regex' do
subject { described_class.container_repository_name_regex }

View File

@ -20,7 +20,11 @@ RSpec.describe Analytics::InstanceStatistics::Measurement, type: :model do
issues: 3,
merge_requests: 4,
groups: 5,
pipelines: 6
pipelines: 6,
pipelines_succeeded: 7,
pipelines_failed: 8,
pipelines_canceled: 9,
pipelines_skipped: 10
}.with_indifferent_access)
end
end
@ -42,4 +46,28 @@ RSpec.describe Analytics::InstanceStatistics::Measurement, type: :model do
it { is_expected.to match_array([measurement_1, measurement_2]) }
end
end
describe '#measurement_identifier_values' do
subject { described_class.measurement_identifier_values.count }
context 'when the `store_ci_pipeline_counts_by_status` feature flag is off' do
let(:expected_count) { Analytics::InstanceStatistics::Measurement.identifiers.size - Analytics::InstanceStatistics::Measurement::EXPERIMENTAL_IDENTIFIERS.size }
before do
stub_feature_flags(store_ci_pipeline_counts_by_status: false)
end
it { is_expected.to eq(expected_count) }
end
context 'when the `store_ci_pipeline_counts_by_status` feature flag is on' do
let(:expected_count) { Analytics::InstanceStatistics::Measurement.identifiers.size }
before do
stub_feature_flags(store_ci_pipeline_counts_by_status: true)
end
it { is_expected.to eq(expected_count) }
end
end
end

View File

@ -6,6 +6,13 @@ RSpec.describe AuditEvent do
let_it_be(:audit_event) { create(:project_audit_event) }
subject { audit_event }
describe 'validations' do
include_examples 'validates IP address' do
let(:attribute) { :ip_address }
let(:object) { create(:audit_event) }
end
end
describe '#as_json' do
context 'ip_address' do
subject { build(:group_audit_event, ip_address: '192.168.1.1').as_json }

View File

@ -11,6 +11,11 @@ RSpec.describe AuthenticationEvent do
it { is_expected.to validate_presence_of(:provider) }
it { is_expected.to validate_presence_of(:user_name) }
it { is_expected.to validate_presence_of(:result) }
include_examples 'validates IP address' do
let(:attribute) { :ip_address }
let(:object) { create(:authentication_event) }
end
end
describe 'scopes' do

View File

@ -57,7 +57,7 @@ RSpec.describe AuditEventService do
let(:audit_service) { described_class.new(user, user, with: 'standard') }
it 'creates an authentication event' do
expect(AuthenticationEvent).to receive(:create).with(
expect(AuthenticationEvent).to receive(:new).with(
user: user,
user_name: user.name,
ip_address: user.current_sign_in_ip,
@ -67,6 +67,17 @@ RSpec.describe AuditEventService do
audit_service.for_authentication.security_event
end
it 'tracks exceptions when the event cannot be created' do
allow(user).to receive_messages(current_sign_in_ip: 'invalid IP')
expect(Gitlab::ErrorTracking).to(
receive(:track_exception)
.with(ActiveRecord::RecordInvalid, audit_event_type: 'AuthenticationEvent').and_call_original
)
audit_service.for_authentication.security_event
end
end
end

View File

@ -0,0 +1,10 @@
# frozen_string_literal: true
RSpec.shared_examples 'validates IP address' do
subject { object }
it { is_expected.to allow_value('192.168.17.43').for(attribute.to_sym) }
it { is_expected.to allow_value('2001:0db8:85a3:0000:0000:8a2e:0370:7334').for(attribute.to_sym) }
it { is_expected.not_to allow_value('invalid IP').for(attribute.to_sym) }
end

View File

@ -0,0 +1,37 @@
# frozen_string_literal: true
require 'spec_helper'
RSpec.describe IpAddressValidator do
let(:model) do
Class.new do
include ActiveModel::Model
include ActiveModel::Validations
attr_accessor :ip_address
alias_method :ip_address_before_type_cast, :ip_address
validates :ip_address, ip_address: true
end.new
end
using RSpec::Parameterized::TableSyntax
where(:ip_address, :validity, :errors) do
'invalid IP' | false | { ip_address: ['must be a valid IPv4 or IPv6 address'] }
'192.168.17.43' | true | {}
'2001:0db8:85a3::8a2e:0370:7334' | true | {}
nil | true | {}
'' | true | {}
end
with_them do
before do
model.ip_address = ip_address
model.validate
end
it { expect(model.valid?).to eq(validity) }
it { expect(model.errors.messages).to eq(errors) }
end
end

View File

@ -18,7 +18,7 @@ RSpec.describe Analytics::InstanceStatistics::CounterJobWorker do
it 'counts a scope and stores the result' do
subject
measurement = Analytics::InstanceStatistics::Measurement.first
measurement = Analytics::InstanceStatistics::Measurement.users.first
expect(measurement.recorded_at).to be_like_time(recorded_at)
expect(measurement.identifier).to eq('users')
expect(measurement.count).to eq(2)
@ -33,7 +33,7 @@ RSpec.describe Analytics::InstanceStatistics::CounterJobWorker do
it 'sets 0 as the count' do
subject
measurement = Analytics::InstanceStatistics::Measurement.first
measurement = Analytics::InstanceStatistics::Measurement.groups.first
expect(measurement.recorded_at).to be_like_time(recorded_at)
expect(measurement.identifier).to eq('groups')
expect(measurement.count).to eq(0)
@ -51,4 +51,20 @@ RSpec.describe Analytics::InstanceStatistics::CounterJobWorker do
expect { subject }.not_to change { Analytics::InstanceStatistics::Measurement.count }
end
context 'when pipelines_succeeded identifier is passed' do
let_it_be(:pipeline) { create(:ci_pipeline, :success) }
let(:successful_pipelines_measurement_identifier) { ::Analytics::InstanceStatistics::Measurement.identifiers.fetch(:pipelines_succeeded) }
let(:job_args) { [successful_pipelines_measurement_identifier, pipeline.id, pipeline.id, recorded_at] }
it 'counts successful pipelines' do
subject
measurement = Analytics::InstanceStatistics::Measurement.pipelines_succeeded.first
expect(measurement.recorded_at).to be_like_time(recorded_at)
expect(measurement.identifier).to eq('pipelines_succeeded')
expect(measurement.count).to eq(1)
end
end
end

View File

@ -155,6 +155,17 @@ RSpec.describe GitGarbageCollectWorker do
expect(project.lfs_objects.reload).to include(lfs_object)
end
it 'catches and logs exceptions' do
expect_any_instance_of(Gitlab::Cleanup::OrphanLfsFileReferences)
.to receive(:run!)
.and_raise('Failed')
expect(Gitlab::GitLogger).to receive(:warn)
expect(Gitlab::ErrorTracking).to receive(:track_and_raise_for_dev_exception)
subject.perform(*params)
end
end
context 'with cleanup_lfs_during_gc feature flag disabled' do