Add latest changes from gitlab-org/gitlab@master
This commit is contained in:
parent 6b1cf9e9ce
commit 97ec5466b8
@@ -4,6 +4,11 @@ import SecureFilesList from './components/secure_files_list.vue';
export const initCiSecureFiles = (selector = '#js-ci-secure-files') => {
  const containerEl = document.querySelector(selector);

  if (!containerEl) {
    return false;
  }

  const { projectId } = containerEl.dataset;
  const { admin } = containerEl.dataset;
  const { fileSizeLimit } = containerEl.dataset;

@@ -209,7 +209,7 @@ export default {
          :id="`asset-type-${index}`"
          ref="typeSelect"
          :value="link.linkType || $options.defaultTypeOptionValue"
          class="form-control pr-4"
          class="pr-4"
          name="asset-type"
          :options="$options.typeOptions"
          @change="updateAssetLinkType({ linkIdToUpdate: link.id, newType: $event })"

@@ -0,0 +1,8 @@
---
name: database_async_index_destruction
introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/92328
rollout_issue_url: https://gitlab.com/gitlab-org/gitlab/-/issues/367964
milestone: '15.3'
type: ops
group: group::database
default_enabled: false

@@ -0,0 +1,21 @@
# frozen_string_literal: true

class RemoveVulnerabilityOccurrencesMigratedToNewStructureColumn < Gitlab::Database::Migration[2.0]
  disable_ddl_transaction!

  INDEX_NAME = 'index_vulnerability_occurrences_on_migrated_to_new_structure'

  def up
    with_lock_retries do
      remove_column :vulnerability_occurrences, :migrated_to_new_structure
    end
  end

  def down
    unless column_exists?(:vulnerability_occurrences, :migrated_to_new_structure)
      add_column :vulnerability_occurrences, :migrated_to_new_structure, :boolean, default: false, null: false
    end

    add_concurrent_index :vulnerability_occurrences, [:migrated_to_new_structure, :id], name: INDEX_NAME
  end
end

@@ -0,0 +1 @@
d0e6dda681ec7980d1aeccd7976425edbe212b0676e6c57c6941f0ba73bbb8b4

@@ -22328,7 +22328,6 @@ CREATE TABLE vulnerability_occurrences (
    cve text,
    location jsonb,
    detection_method smallint DEFAULT 0 NOT NULL,
    migrated_to_new_structure boolean DEFAULT false NOT NULL,
    CONSTRAINT check_4a3a60f2ba CHECK ((char_length(solution) <= 7000)),
    CONSTRAINT check_ade261da6b CHECK ((char_length(description) <= 15000)),
    CONSTRAINT check_df6dd20219 CHECK ((char_length(message) <= 3000)),

@@ -30141,8 +30140,6 @@ CREATE INDEX index_vulnerability_occurrences_on_location_k8s_agent_id ON vulnera

CREATE INDEX index_vulnerability_occurrences_on_location_k8s_cluster_id ON vulnerability_occurrences USING gin ((((location -> 'kubernetes_resource'::text) -> 'cluster_id'::text))) WHERE (report_type = 7);

CREATE INDEX index_vulnerability_occurrences_on_migrated_to_new_structure ON vulnerability_occurrences USING btree (migrated_to_new_structure, id);

CREATE INDEX index_vulnerability_occurrences_on_primary_identifier_id ON vulnerability_occurrences USING btree (primary_identifier_id);

CREATE INDEX index_vulnerability_occurrences_on_project_fingerprint ON vulnerability_occurrences USING btree (project_fingerprint);

@@ -312,3 +312,98 @@ Use the asynchronous index helpers on your local environment to test changes for
1. Run `bundle exec rails db:migrate` so that it creates an entry in the `postgres_async_indexes` table.
1. Run `bundle exec rails gitlab:db:reindex` so that the index is created asynchronously.
1. To verify the index, open the PostgreSQL console using the [GDK](https://gitlab.com/gitlab-org/gitlab-development-kit/-/blob/main/doc/howto/postgresql.md) command `gdk psql` and run the command `\d <index_name>` to check that your newly created index exists.

## Drop indexes asynchronously

For very large tables, index destruction can be a challenge to manage.
While `remove_concurrent_index` removes indexes in a way that does not block
normal traffic, it can still be problematic if index destruction runs for
many hours. Necessary database operations like `autovacuum` cannot run, and
the deployment process on GitLab.com is blocked while waiting for index
destruction to finish.

To limit the impact on GitLab.com, use the following process to remove indexes
asynchronously during weekend hours. Due to generally lower traffic and fewer deployments,
index destruction can proceed at a lower level of risk.

1. [Schedule the index to be removed](#schedule-the-index-to-be-removed).
1. [Verify the MR was deployed and the index no longer exists in production](#verify-the-mr-was-deployed-and-the-index-no-longer-exists-in-production).
1. [Add a migration to destroy the index synchronously](#add-a-migration-to-destroy-the-index-synchronously).

### Schedule the index to be removed

Create an MR with a post-deployment migration which prepares the index
for asynchronous destruction. For example, to destroy an index using
the asynchronous index helpers:

```ruby
# in db/post_migrate/

INDEX_NAME = 'index_ci_builds_on_some_column'

def up
  prepare_async_index_removal :ci_builds, :some_column, name: INDEX_NAME
end

def down
  unprepare_async_index :ci_builds, :some_column, name: INDEX_NAME
end
```

This migration enters the index name and definition into the `postgres_async_indexes`
table. The process that runs on weekends pulls indexes from this table and attempts
to remove them.

You must test the database index changes locally before creating a merge request.

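For orientation, here is a minimal Rails console sketch of what the queued record looks like and how the weekend process consumes it, based on the helpers introduced in this commit. The table and index names are the illustrative ones from the example above, not real objects:

```ruby
# A queued removal is just a row in postgres_async_indexes (illustrative names).
record = Gitlab::Database::AsyncIndexes::PostgresAsyncIndex.find_by(name: 'index_ci_builds_on_some_column')
record.table_name # => "ci_builds"
record.definition # => 'DROP INDEX CONCURRENTLY "index_ci_builds_on_some_column"'

# The weekend process, gated by the `database_async_index_destruction` ops flag,
# drops queued indexes in small batches (2 per invocation by default):
Gitlab::Database::AsyncIndexes.drop_pending_indexes!
```
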
### Verify the MR was deployed and the index no longer exists in production

You can verify if the MR was deployed to GitLab.com with
`/chatops run auto_deploy status <merge_sha>`. To verify that the index
no longer exists, you can:

- Use a meta-command in `#database-lab`, for example: `\d <index_name>`.
- Make sure the index is not [`invalid`](https://www.postgresql.org/docs/12/sql-createindex.html#:~:text=The%20psql%20%5Cd%20command%20will%20report%20such%20an%20index%20as%20INVALID).
- Ask someone in `#database` to check if the index exists.
- If you have access, you can verify directly on production or in a
  production clone.

### Add a migration to destroy the index synchronously

After you verify the index no longer exists in the production database, create a second
merge request that removes the index synchronously. The schema changes must be
updated and committed to `structure.sql` in this second merge request.
The synchronous migration results in a no-op on GitLab.com, but you should still add the
migration as expected for other installations. For example, to
create the second migration for the previous asynchronous example:

**WARNING:**
Verify that the index no longer exists in production before merging a second migration with `remove_concurrent_index_by_name`.
If the second migration is deployed before the index has been destroyed,
the index is destroyed synchronously when the second migration executes.

```ruby
# in db/post_migrate/

INDEX_NAME = 'index_ci_builds_on_some_column'

disable_ddl_transaction!

def up
  remove_concurrent_index_by_name :ci_builds, name: INDEX_NAME
end

def down
  add_concurrent_index :ci_builds, :some_column, name: INDEX_NAME
end
```

### Verify indexes removed asynchronously

To test changes for removing an index, use the asynchronous index helpers on your local environment (a Rails console sketch follows this list):

1. Enable the feature flags by running `Feature.enable(:database_async_index_destruction)` and `Feature.enable(:database_reindexing)` in the Rails console.
1. Run `bundle exec rails db:migrate`, which should create an entry in the `postgres_async_indexes` table.
1. Run `bundle exec rails gitlab:db:reindex` to destroy the index asynchronously.
1. To verify the index, open the PostgreSQL console by using the [GDK](https://gitlab.com/gitlab-org/gitlab-development-kit/-/blob/main/doc/howto/postgresql.md)
   command `gdk psql` and run `\d <index_name>` to check that the destroyed index no longer exists.

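The same flow can also be checked from the Rails console. A minimal sketch, assuming the illustrative index name from the earlier example; the exact queue contents depend on your local migrations:

```ruby
# Hedged sketch of the local verification steps above; the index name is illustrative.
Feature.enable(:database_async_index_destruction)
Feature.enable(:database_reindexing)

# After `bundle exec rails db:migrate`, the removal should be queued:
Gitlab::Database::AsyncIndexes::PostgresAsyncIndex.to_drop.pluck(:name)
# => ["index_ci_builds_on_some_column"]

# After `bundle exec rails gitlab:db:reindex`, the queue entry (and the index itself)
# should be gone:
Gitlab::Database::AsyncIndexes::PostgresAsyncIndex.to_drop.exists?
# => false
```
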
@@ -167,6 +167,10 @@ If you're using Geo:
After updating GitLab, upgrade your runners to match
[your new GitLab version](https://docs.gitlab.com/runner/#gitlab-runner-versions).

#### GitLab agent for Kubernetes

If you have Kubernetes clusters connected with GitLab, [upgrade your GitLab agents for Kubernetes](../user/clusters/agent/install/index.md#update-the-agent-version) to match your new GitLab version.

#### Elasticsearch

After updating GitLab, you may have to upgrade

@@ -118,6 +118,25 @@ create or update pipelines until their email address is confirmed.
You can [change](../../../security/password_length_limits.md#modify-minimum-password-length-using-gitlab-ui)
the minimum number of characters a user must have in their password using the GitLab UI.

### Password complexity requirements **(PREMIUM SELF)**

> [Introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/354965) in GitLab 15.2.

By default, the only requirement for user passwords is [minimum password length](#minimum-password-length-limit).
You can add additional complexity requirements. Changes to password complexity requirements apply to new passwords:

- For new users that sign up.
- For existing users that reset their password.

Existing passwords are unaffected. To change password complexity requirements:

1. On the top bar, select **Menu > Admin**.
1. On the left sidebar, select **Settings > General**.
1. Expand **Sign-up restrictions**.
1. Under **Minimum password length (number of characters)**, select additional password complexity requirements. You can require numbers, uppercase letters, lowercase letters,
   and symbols.
1. Select **Save changes**.

## Allow or deny sign ups using specific email domains

You can specify an inclusive or exclusive list of email domains which can be used for user sign up.

Binary file not shown. (New image added: 70 KiB.)

@@ -330,6 +330,8 @@ False positive detection is available in a subset of the [supported languages](#

- Ruby, in the Brakeman-based analyzer

![](img/sast_vulnerability_page_fp_detection_v15_2.png)

### Advanced vulnerability tracking **(ULTIMATE)**

> [Introduced](https://gitlab.com/groups/gitlab-org/-/epics/5144) in GitLab 14.2.

@@ -531,7 +533,7 @@ Some analyzers make it possible to filter out vulnerabilities under a given thre

| CI/CD variable | Default value | Description |
|------------------------------|--------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| `SAST_EXCLUDED_PATHS` | `spec, test, tests, tmp` | Exclude vulnerabilities from output based on the paths. This is a comma-separated list of patterns. Patterns can be globs, or file or folder paths (for example, `doc,spec`). Parent directories also match patterns. You might need to exclude temporary directories used by your build tool as these can generate false positives. To exclude paths, copy and paste the default excluded paths, then **add** your own paths to be excluded. If you don't specify the default excluded paths, you will override the defaults and _only_ paths you specify will be excluded from the SAST scans. |
| `SAST_EXCLUDED_PATHS` | `spec, test, tests, tmp` | Exclude vulnerabilities from output based on the paths. This is a comma-separated list of patterns. Patterns can be globs (see [`doublestar.Match`](https://pkg.go.dev/github.com/bmatcuk/doublestar/v4@v4.0.2#Match) for supported patterns), or file or folder paths (for example, `doc,spec`). Parent directories also match patterns. You might need to exclude temporary directories used by your build tool as these can generate false positives. To exclude paths, copy and paste the default excluded paths, then **add** your own paths to be excluded. If you don't specify the default excluded paths, you will override the defaults and _only_ paths you specify will be excluded from the SAST scans. |
| `SEARCH_MAX_DEPTH` | 4 | SAST searches the repository to detect the programming languages used, and selects the matching analyzers. Set the value of `SEARCH_MAX_DEPTH` to specify how many directory levels the search phase should span. After the analyzers have been selected, the _entire_ repository is analyzed. |
| `SAST_BANDIT_EXCLUDED_PATHS` | | Comma-separated list of paths to exclude from scan. Uses Python's [`fnmatch` syntax](https://docs.python.org/2/library/fnmatch.html); For example: `'*/tests/*, */venv/*'` |
| `SAST_BRAKEMAN_LEVEL` | 1 | Ignore Brakeman vulnerabilities under given confidence level. Integer, 1=Low 3=High. |

@@ -6,10 +6,16 @@ module Gitlab
      DEFAULT_INDEXES_PER_INVOCATION = 2

      def self.create_pending_indexes!(how_many: DEFAULT_INDEXES_PER_INVOCATION)
        PostgresAsyncIndex.order(:id).limit(how_many).each do |async_index|
        PostgresAsyncIndex.to_create.order(:id).limit(how_many).each do |async_index|
          IndexCreator.new(async_index).perform
        end
      end

      def self.drop_pending_indexes!(how_many: DEFAULT_INDEXES_PER_INVOCATION)
        PostgresAsyncIndex.to_drop.order(:id).limit(how_many).each do |async_index|
          IndexDestructor.new(async_index).perform
        end
      end
    end
  end
end

@@ -0,0 +1,66 @@
# frozen_string_literal: true

module Gitlab
  module Database
    module AsyncIndexes
      class IndexDestructor
        include ExclusiveLeaseGuard

        TIMEOUT_PER_ACTION = 1.day

        def initialize(async_index)
          @async_index = async_index
        end

        def perform
          try_obtain_lease do
            if !index_exists?
              log_index_info('Skipping dropping as the index does not exist')
            else
              log_index_info('Dropping async index')

              retries = Gitlab::Database::WithLockRetriesOutsideTransaction.new(
                connection: connection,
                timing_configuration: Gitlab::Database::Reindexing::REMOVE_INDEX_RETRY_CONFIG,
                klass: self.class,
                logger: Gitlab::AppLogger
              )

              retries.run(raise_on_exhaustion: false) do
                connection.execute(async_index.definition)
              end

              log_index_info('Finished dropping async index')
            end

            async_index.destroy
          end
        end

        private

        attr_reader :async_index

        def index_exists?
          connection.indexes(async_index.table_name).any? { |index| index.name == async_index.name }
        end

        def connection
          @connection ||= async_index.connection
        end

        def lease_timeout
          TIMEOUT_PER_ACTION
        end

        def lease_key
          [super, async_index.connection_db_config.name].join('/')
        end

        def log_index_info(message)
          Gitlab::AppLogger.info(message: message, table_name: async_index.table_name, index_name: async_index.name)
        end
      end
    end
  end
end

@@ -77,6 +77,38 @@ module Gitlab
        async_index
      end

      # Prepares an index for asynchronous destruction.
      #
      # Stores the index information in the postgres_async_indexes table to be removed later. The
      # index will always be removed CONCURRENTLY, so that option does not need to be given.
      #
      # If the requested index has already been removed, it is not stored in the table for
      # asynchronous destruction.
      def prepare_async_index_removal(table_name, column_name, options = {})
        index_name = options.fetch(:name)
        raise 'prepare_async_index_removal must get an index name defined' if index_name.blank?

        unless index_exists?(table_name, column_name, **options)
          Gitlab::AppLogger.warn "Index not removed because it does not exist (this may be due to an aborted migration or similar): table_name: #{table_name}, index_name: #{index_name}"
          return
        end

        definition = "DROP INDEX CONCURRENTLY #{quote_column_name(index_name)}"

        async_index = PostgresAsyncIndex.find_or_create_by!(name: index_name) do |rec|
          rec.table_name = table_name
          rec.definition = definition
        end

        Gitlab::AppLogger.info(
          message: 'Prepared index for async destruction',
          table_name: async_index.table_name,
          index_name: async_index.name
        )

        async_index
      end

      def async_index_creation_available?
        connection.table_exists?(:postgres_async_indexes)
      end

@@ -13,6 +13,9 @@ module Gitlab
      validates :table_name, presence: true, length: { maximum: MAX_IDENTIFIER_LENGTH }
      validates :definition, presence: true, length: { maximum: MAX_DEFINITION_LENGTH }

      scope :to_create, -> { where("definition ILIKE 'CREATE%'") }
      scope :to_drop, -> { where("definition ILIKE 'DROP%'") }

      def to_s
        definition
      end

@@ -27,6 +27,7 @@ module Gitlab

        # Hack: Before we do actual reindexing work, create async indexes
        Gitlab::Database::AsyncIndexes.create_pending_indexes! if Feature.enabled?(:database_async_index_creation, type: :ops)
        Gitlab::Database::AsyncIndexes.drop_pending_indexes! if Feature.enabled?(:database_async_index_destruction, type: :ops)

        automatic_reindexing
      end

@@ -5,5 +5,9 @@ FactoryBot.define do
    sequence(:name) { |n| "users_id_#{n}" }
    definition { "CREATE INDEX #{name} ON #{table_name} (id)" }
    table_name { "users" }

    trait :with_drop do
      definition { "DROP INDEX #{name}" }
    end
  end
end

@@ -0,0 +1,69 @@
# frozen_string_literal: true

require 'spec_helper'

RSpec.describe Gitlab::Database::AsyncIndexes::IndexDestructor do
  include ExclusiveLeaseHelpers

  describe '#perform' do
    subject { described_class.new(async_index) }

    let(:async_index) { create(:postgres_async_index, :with_drop) }

    let(:index_model) { Gitlab::Database::AsyncIndexes::PostgresAsyncIndex }

    let(:model) { Gitlab::Database.database_base_models[Gitlab::Database::PRIMARY_DATABASE_NAME] }
    let(:connection) { model.connection }

    let!(:lease) { stub_exclusive_lease(lease_key, :uuid, timeout: lease_timeout) }
    let(:lease_key) { "gitlab/database/async_indexes/index_destructor/#{Gitlab::Database::PRIMARY_DATABASE_NAME}" }
    let(:lease_timeout) { described_class::TIMEOUT_PER_ACTION }

    before do
      connection.add_index(async_index.table_name, 'id', name: async_index.name)
    end

    around do |example|
      Gitlab::Database::SharedModel.using_connection(connection) do
        example.run
      end
    end

    context 'when the index does not exist' do
      before do
        connection.execute(async_index.definition)
      end

      it 'skips index destruction' do
        expect(connection).not_to receive(:execute).with(/DROP INDEX/)

        subject.perform
      end
    end

    it 'creates the index while controlling lock timeout' do
      allow(connection).to receive(:execute).and_call_original
      expect(connection).to receive(:execute).with("SET lock_timeout TO '60000ms'").and_call_original
      expect(connection).to receive(:execute).with(async_index.definition).and_call_original
      expect(connection).to receive(:execute)
        .with("RESET idle_in_transaction_session_timeout; RESET lock_timeout")
        .and_call_original

      subject.perform
    end

    it 'removes the index preparation record from postgres_async_indexes' do
      expect(async_index).to receive(:destroy).and_call_original

      expect { subject.perform }.to change { index_model.count }.by(-1)
    end

    it 'skips logic if not able to acquire exclusive lease' do
      expect(lease).to receive(:try_obtain).ordered.and_return(false)
      expect(connection).not_to receive(:execute).with(/DROP INDEX/)
      expect(async_index).not_to receive(:destroy)

      expect { subject.perform }.not_to change { index_model.count }
    end
  end
end

@@ -142,4 +142,42 @@ RSpec.describe Gitlab::Database::AsyncIndexes::MigrationHelpers do
      end
    end
  end

  describe '#prepare_async_index_removal' do
    before do
      connection.create_table(table_name)
      connection.add_index(table_name, 'id', name: index_name)
    end

    it 'creates the record for the async index removal' do
      expect do
        migration.prepare_async_index_removal(table_name, 'id', name: index_name)
      end.to change { index_model.where(name: index_name).count }.by(1)

      record = index_model.find_by(name: index_name)

      expect(record.table_name).to eq(table_name)
      expect(record.definition).to match(/DROP INDEX CONCURRENTLY "#{index_name}"/)
    end

    context 'when the index does not exist' do
      it 'does not create the record' do
        connection.remove_index(table_name, 'id', name: index_name)

        expect do
          migration.prepare_async_index_removal(table_name, 'id', name: index_name)
        end.not_to change { index_model.where(name: index_name).count }
      end
    end

    context 'when the record already exists' do
      it 'does attempt to create the record' do
        create(:postgres_async_index, table_name: table_name, name: index_name)

        expect do
          migration.prepare_async_index_removal(table_name, 'id', name: index_name)
        end.not_to change { index_model.where(name: index_name).count }
      end
    end
  end
end

@@ -16,4 +16,21 @@ RSpec.describe Gitlab::Database::AsyncIndexes::PostgresAsyncIndex, type: :model
    it { is_expected.to validate_presence_of(:definition) }
    it { is_expected.to validate_length_of(:definition).is_at_most(definition_limit) }
  end

  describe 'scopes' do
    let!(:async_index_creation) { create(:postgres_async_index) }
    let!(:async_index_destruction) { create(:postgres_async_index, :with_drop) }

    describe '.to_create' do
      subject { described_class.to_create }

      it { is_expected.to contain_exactly(async_index_creation) }
    end

    describe '.to_drop' do
      subject { described_class.to_drop }

      it { is_expected.to contain_exactly(async_index_destruction) }
    end
  end
end

@@ -11,7 +11,7 @@ RSpec.describe Gitlab::Database::AsyncIndexes do
    end

    it 'takes 2 pending indexes and creates those' do
      Gitlab::Database::AsyncIndexes::PostgresAsyncIndex.order(:id).limit(2).each do |index|
      Gitlab::Database::AsyncIndexes::PostgresAsyncIndex.to_create.order(:id).limit(2).each do |index|
        creator = double('index creator')
        expect(Gitlab::Database::AsyncIndexes::IndexCreator).to receive(:new).with(index).and_return(creator)
        expect(creator).to receive(:perform)

@@ -20,4 +20,22 @@ RSpec.describe Gitlab::Database::AsyncIndexes do
      subject
    end
  end

  describe '.drop_pending_indexes!' do
    subject { described_class.drop_pending_indexes! }

    before do
      create_list(:postgres_async_index, 4, :with_drop)
    end

    it 'takes 2 pending indexes and destroys those' do
      Gitlab::Database::AsyncIndexes::PostgresAsyncIndex.to_drop.order(:id).limit(2).each do |index|
        destructor = double('index destructor')
        expect(Gitlab::Database::AsyncIndexes::IndexDestructor).to receive(:new).with(index).and_return(destructor)
        expect(destructor).to receive(:perform)
      end

      subject
    end
  end
end

@@ -46,6 +46,27 @@ RSpec.describe Gitlab::Database::Reindexing do
      end
    end

    context 'when async index destruction is enabled' do
      it 'executes async index destruction prior to any reindexing actions' do
        stub_feature_flags(database_async_index_destruction: true)

        expect(Gitlab::Database::AsyncIndexes).to receive(:drop_pending_indexes!).ordered.exactly(databases_count).times
        expect(described_class).to receive(:automatic_reindexing).ordered.exactly(databases_count).times

        described_class.invoke
      end
    end

    context 'when async index destruction is disabled' do
      it 'does not execute async index destruction' do
        stub_feature_flags(database_async_index_destruction: false)

        expect(Gitlab::Database::AsyncIndexes).not_to receive(:drop_pending_indexes!)

        described_class.invoke
      end
    end

    context 'calls automatic reindexing' do
      it 'uses all candidate indexes' do
        expect(described_class).to receive(:automatic_reindexing).exactly(databases_count).times