Add latest changes from gitlab-org/gitlab@master

GitLab Bot 2024-01-16 06:08:05 +00:00
parent 21de0d5578
commit 2bc11b8442
61 changed files with 1217 additions and 280 deletions

View File

@ -152,12 +152,6 @@ include:
- <<: *if-schedule-pipeline
when: never
.rules:test:gdk-load-balancer-changes:
rules:
- changes:
- ".gitlab/ci/test-on-gdk/**"
- "lib/gitlab/database/load_balancing/**/*"
.rules:test:qa-default-branch:
rules:
- *qa-run-all-e2e-label

View File

@ -21,11 +21,11 @@ include:
.rules:gdk:qa-parallel:
rules:
# To account for cases where a group label is set which may trigger selective execution
# But we want to execute full reliable suite on gdk in case of code-pattern-changes
- <<: *code-pattern-changes
variables:
QA_TESTS: ""
# To account for cases where a group label is set which may trigger selective execution
# But we want to execute full reliable suite on gdk in case of code-pattern-changes
- !reference [.rules:test:qa-parallel, rules]
- if: $QA_SUITES =~ /Test::Instance::Blocking/
- !reference [.rules:test:manual, rules]
@ -41,14 +41,32 @@ include:
- if: $QA_SUITES =~ /Test::Instance::Smoke/
- !reference [.rules:test:manual, rules]
.rules:test:gdk-load-balancer-changes:
rules:
- when: manual
allow_failure: true
changes:
- ".gitlab/ci/test-on-gdk/**"
- "lib/gitlab/database/load_balancing/**/*"
.with-gdk-log:
after_script:
- mv $CI_BUILDS_DIR/*.log $CI_PROJECT_DIR/
.gdk-qa-base:
image: ${REGISTRY_HOST}/${REGISTRY_GROUP}/gitlab-build-images/debian-${DEBIAN_VERSION}-ruby-${RUBY_VERSION}:bundler-${BUNDLER_VERSION}-git-2.36-lfs-2.9-chrome-${CHROME_VERSION}-docker-${DOCKER_VERSION}-gcloud-383-kubectl-1.23
extends:
- .qa-cache
- .default-retry
- .gitlab-qa-report
stage: test
services:
- docker:${DOCKER_VERSION}-dind
- name: docker:${DOCKER_VERSION}-dind
- name: ${GDK_IMAGE}
alias: gdk.test
# CI setup will tail each component into a separate file
# Override default command so we don't tail all of the logs to stdout unnecessarily
command: [gdk, tail, rails-web]
tags:
- e2e
variables:
@ -56,51 +74,29 @@ include:
QA_GENERATE_ALLURE_REPORT: "true"
QA_CAN_TEST_PRAEFECT: "false"
QA_INTERCEPT_REQUESTS: "false"
QA_SUITE_STATUS_ENV_FILE: "$CI_PROJECT_DIR/suite_status.env"
QA_DOCKER_NETWORK: host
GITLAB_QA_ADMIN_ACCESS_TOKEN: $QA_ADMIN_ACCESS_TOKEN
RSPEC_REPORT_OPTS: "--order random --force-color --format documentation --format RspecJunitFormatter --out tmp/rspec-${CI_JOB_ID}.xml --format QA::Support::JsonFormatter --out tmp/rspec-${CI_JOB_ID}.json --format html --out tmp/rspec-${CI_JOB_ID}.htm"
FF_NETWORK_PER_BUILD: 1
GDK_URL: http://gdk.test:3000
KNAPSACK_TEST_FILE_PATTERN: "qa/specs/features/**/*_spec.rb"
QA_SUITE_STATUS_ENV_FILE: "$CI_PROJECT_DIR/suite_status.env"
FF_NETWORK_PER_BUILD: "true"
before_script:
- echo "SUITE_RAN=true" > "$QA_SUITE_STATUS_ENV_FILE"
- echo -e "\e[0Ksection_start:`date +%s`:pull_image[collapsed=true]\r\e[0KPull GDK QA image"
- docker pull ${GDK_IMAGE}
- echo -e "\e[0Ksection_end:`date +%s`:pull_image\r\e[0K"
# Despite `incremental: false` and `static: true`, GDK sometimes fails to start without increasing max user watches
# This is why we're not running the GDK container as a service
- sysctl -n -w fs.inotify.max_user_watches=524288
- echo -e "\e[0Ksection_start:`date +%s`:launch_gdk[collapsed=true]\r\e[0KLaunch GDK"
- mkdir -p $CI_PROJECT_DIR/log/gdk $CI_PROJECT_DIR/log/gitlab
# This command matches the permissions of the user that runs GDK inside the container.
- chown -R 1000:1000 $CI_PROJECT_DIR/log
- |
docker run -d --rm --name gdk --network host \
--volume $CI_PROJECT_DIR/log/gdk:/home/gdk/gitlab-development-kit/log \
--volume $CI_PROJECT_DIR/log/gitlab:/home/gdk/gitlab-development-kit/gitlab/log \
${GDK_IMAGE}
# With `FF_NETWORK_PER_BUILD=1` and `--network host` the IP of the gdk container should be 172.18.0.2, but we get it
# dynamically just in case
- echo "$(docker exec gdk bash -c "getent hosts \$HOSTNAME" | awk '{print $1}') gdk.test" >> /etc/hosts
- echo -e "\e[0Ksection_end:`date +%s`:launch_gdk\r\e[0K"
- echo -e "\e[0Ksection_start:`date +%s`:install_gems[collapsed=true]\r\e[0KInstall gems"
- source scripts/utils.sh
- cd qa && bundle config set --local without 'development' && bundle install
- echo -e "\e[0Ksection_end:`date +%s`:install_gems\r\e[0K"
script:
- echo -e "\e[0Ksection_start:`date +%s`:healthcheck[collapsed=true]\r\e[0KWait for gdk to start"
- retry_times_sleep 100 3 test_url $GDK_URL/users/sign_in
- echo -e "\e[0Ksection_end:`date +%s`:healthcheck\r\e[0K"
- echo -e "\e[0Ksection_start:`date +%s`:run_tests\r\e[0KRun E2E tests"
- export QA_COMMAND="bundle exec bin/qa ${QA_SCENARIO:=Test::Instance::All} $GDK_URL $GITLAB_QA_OPTS -- $QA_TESTS $QA_RSPEC_TAGS $RSPEC_REPORT_OPTS"
- echo "Running - '$QA_COMMAND'"
- eval "$QA_COMMAND"
- echo -e "\e[0Ksection_end:`date +%s`:run_tests\r\e[0K"
after_script:
- !reference [.with-gdk-log, after_script]
- !reference [.gitlab-qa-report, after_script]
artifacts:
paths:
- qa/tmp
- log/gitlab
- log/gdk/*/current
- ${CI_PROJECT_DIR}/*.log
reports:
junit: qa/tmp/rspec-*.xml
dotenv: "$QA_SUITE_STATUS_ENV_FILE"
@ -111,15 +107,8 @@ include:
# balancing. Adding 5s lag to 1 of the replicas to validate robustness of
# the load balancer.
.gdk-with-load-balancer-setup:
before_script:
- !reference [".gdk-qa-base", "before_script"]
- |
docker exec gdk bash -c "
gdk config set postgresql.replica.enabled true &&\
gdk config set postgresql.replica_2.enabled true &&\
gdk config set load_balancing.enabled true &&\
gdk reconfigure &&\
gdk restart"
variables:
WITH_LOAD_BALANCER: "true"
# ==========================================
# Pre stage
@ -141,37 +130,21 @@ download-knapsack-report:
# ==========================================
# Test stage
# ==========================================
# ------------------------------------------
# Blocking tests
# ------------------------------------------
gdk-qa-smoke:
extends:
- .gdk-qa-base
- .gitlab-qa-report
- .rules:gdk:qa-smoke
variables:
QA_SCENARIO: Test::Instance::Smoke
QA_RUN_TYPE: gdk-qa-smoke
gdk-qa-smoke-with-load-balancer:
extends:
- .gdk-qa-base
- .gdk-with-load-balancer-setup
variables:
QA_SCENARIO: Test::Instance::Smoke
QA_RUN_TYPE: gdk-qa-smoke
artifacts:
paths:
- log
reports:
dotenv: ""
rules:
- !reference [".rules:test:never-schedule-pipeline", rules]
- !reference [".rules:test:gdk-load-balancer-changes", rules]
allow_failure: true
gdk-qa-reliable:
extends:
- .gdk-qa-base
- .gitlab-qa-report
- .parallel
- .rules:gdk:qa-parallel
variables:
QA_SCENARIO: Test::Instance::Blocking
@ -181,20 +154,40 @@ gdk-qa-reliable:
gdk-qa-reliable-selective:
extends:
- .gdk-qa-base
- .gitlab-qa-report
- .rules:gdk:qa-selective
variables:
QA_SCENARIO: Test::Instance::Blocking
QA_RUN_TYPE: gdk-qa-blocking
# ------------------------------------------
# Non Blocking tests
# ------------------------------------------
gdk-qa-smoke-with-load-balancer:
extends:
- .gdk-qa-base
- .gdk-with-load-balancer-setup
- .with-gdk-log
variables:
QA_SCENARIO: Test::Instance::Smoke
QA_RUN_TYPE: gdk-qa-smoke
artifacts:
paths:
- gdk.log
reports:
dotenv: ""
rules:
- !reference [".rules:test:never-schedule-pipeline", rules]
- !reference [".rules:test:gdk-load-balancer-changes", rules]
gdk-qa-reliable-with-load-balancer:
extends:
- .gdk-qa-base
- .gdk-with-load-balancer-setup
- .parallel
- .with-gdk-log
variables:
QA_SCENARIO: Test::Instance::Blocking
QA_RUN_TYPE: gdk-qa-blocking
parallel: 5
artifacts:
paths:
- log
@ -203,15 +196,15 @@ gdk-qa-reliable-with-load-balancer:
rules:
- !reference [".rules:test:never-schedule-pipeline", rules]
- !reference [".rules:test:gdk-load-balancer-changes", rules]
allow_failure: true
gdk-qa-non-blocking:
extends:
- .gdk-qa-base
- .parallel
- .with-gdk-log
variables:
QA_SCENARIO: Test::Instance::NonBlocking
QA_RUN_TYPE: gdk-qa-non-blocking
parallel: 5
rules:
- when: manual
allow_failure: true

View File

@ -679,7 +679,6 @@ RSpec/BeforeAllRoleAssignment:
- 'ee/spec/services/llm/generate_commit_message_service_spec.rb'
- 'ee/spec/services/llm/generate_description_service_spec.rb'
- 'ee/spec/services/llm/generate_summary_service_spec.rb'
- 'ee/spec/services/llm/generate_test_file_service_spec.rb'
- 'ee/spec/services/llm/git_command_service_spec.rb'
- 'ee/spec/services/llm/merge_requests/summarize_diff_service_spec.rb'
- 'ee/spec/services/llm/merge_requests/summarize_review_service_spec.rb'

View File

@ -476,14 +476,12 @@ RSpec/NamedSubject:
- 'ee/spec/lib/gitlab/llm/templates/explain_vulnerability_spec.rb'
- 'ee/spec/lib/gitlab/llm/templates/fill_in_merge_request_template_spec.rb'
- 'ee/spec/lib/gitlab/llm/templates/generate_commit_message_spec.rb'
- 'ee/spec/lib/gitlab/llm/templates/generate_test_file_spec.rb'
- 'ee/spec/lib/gitlab/llm/templates/summarize_merge_request_spec.rb'
- 'ee/spec/lib/gitlab/llm/templates/summarize_review_spec.rb'
- 'ee/spec/lib/gitlab/llm/templates/summarize_submitted_review_spec.rb'
- 'ee/spec/lib/gitlab/llm/vertex_ai/completions/analyze_ci_job_failure_spec.rb'
- 'ee/spec/lib/gitlab/llm/vertex_ai/completions/fill_in_merge_request_template_spec.rb'
- 'ee/spec/lib/gitlab/llm/vertex_ai/completions/generate_commit_message_spec.rb'
- 'ee/spec/lib/gitlab/llm/vertex_ai/completions/generate_test_file_spec.rb'
- 'ee/spec/lib/gitlab/llm/vertex_ai/completions/summarize_merge_request_spec.rb'
- 'ee/spec/lib/gitlab/llm/vertex_ai/completions/summarize_review_spec.rb'
- 'ee/spec/lib/gitlab/llm/vertex_ai/completions/summarize_submitted_review_spec.rb'
@ -1080,7 +1078,6 @@ RSpec/NamedSubject:
- 'ee/spec/services/llm/explain_code_service_spec.rb'
- 'ee/spec/services/llm/explain_vulnerability_service_spec.rb'
- 'ee/spec/services/llm/generate_commit_message_service_spec.rb'
- 'ee/spec/services/llm/generate_test_file_service_spec.rb'
- 'ee/spec/services/llm/git_command_service_spec.rb'
- 'ee/spec/services/llm/resolve_vulnerability_service_spec.rb'
- 'ee/spec/services/merge_request_approval_settings/update_service_spec.rb'

View File

@ -9,4 +9,4 @@ if (process.env.NODE_ENV !== 'production') {
Vue.use(GlFeatureFlagsPlugin);
Vue.use(Translate);
Vue.config.ignoredElements = ['gl-emoji', 'copy-code'];
Vue.config.ignoredElements = ['gl-emoji'];

View File

@ -85,8 +85,6 @@ export default {
GlPagination,
GlSprintf,
GlAlert,
GenerateTestFileDrawer: () =>
import('ee_component/ai/components/generate_test_file_drawer.vue'),
},
mixins: [glFeatureFlagsMixin()],
alerts: {
@ -240,7 +238,6 @@ export default {
'showWhitespace',
'targetBranchName',
'branchName',
'generateTestFilePath',
]),
...mapGetters('diffs', [
'whichCollapsedTypes',
@ -443,7 +440,6 @@ export default {
'navigateToDiffFileIndex',
'setFileByFile',
'disableVirtualScroller',
'setGenerateTestFilePath',
'fetchPinnedFile',
]),
...mapActions('findingsDrawer', ['setDrawer']),
@ -818,11 +814,5 @@ export default {
</div>
</div>
</div>
<generate-test-file-drawer
v-if="getNoteableData.id"
:resource-id="resourceId"
:file-path="generateTestFilePath"
@close="() => setGenerateTestFilePath('')"
/>
</div>
</template>

View File

@ -54,11 +54,6 @@ export default {
compareButtonLabel: __('Compare submodule commit revisions'),
fileModeTooltip: __('File permissions'),
},
inject: {
showGenerateTestFileButton: {
default: false,
},
},
props: {
discussionPath: {
type: String,
@ -225,7 +220,6 @@ export default {
'reviewFile',
'setFileCollapsedByUser',
'setFileForcedOpen',
'setGenerateTestFilePath',
'toggleFileCommentForm',
'unpinFile',
]),
@ -469,15 +463,6 @@ export default {
>
{{ __('Open in Web IDE') }}
</gl-dropdown-item>
<gl-dropdown-item
v-if="showGenerateTestFileButton"
@click="setGenerateTestFilePath(diffFile.new_path)"
>
<span class="gl-display-flex gl-justify-content-space-between gl-align-items-center">
{{ __('Suggest test cases') }}
<gl-icon name="tanuki-ai" class="gl-text-purple-600 gl-mr-n3" />
</span>
</gl-dropdown-item>
<gl-dropdown-item
v-if="diffFile.replaced_view_path"
ref="replacedFileButton"

View File

@ -28,7 +28,6 @@ export default function initDiffsApp(store = notesStore) {
apolloProvider,
provide: {
newCommentTemplatePath: dataset.newCommentTemplatePath,
showGenerateTestFileButton: parseBoolean(dataset.showGenerateTestFileButton),
},
data() {
return {

View File

@ -19,14 +19,14 @@ export const BULK_IMPORT_STATIC_ITEMS = {
const STATISTIC_ITEMS = {
diff_note: __('Diff notes'),
issue: __('Issues'),
issue_attachment: s__('GithubImporter|Issue links'),
issue_attachment: s__('GithubImporter|Issue attachments'),
issue_event: __('Issue events'),
label: __('Labels'),
lfs_object: __('LFS objects'),
merge_request_attachment: s__('GithubImporter|PR attachments'),
milestone: __('Milestones'),
note: __('Notes'),
note_attachment: s__('GithubImporter|Note links'),
note_attachment: s__('GithubImporter|Note attachments'),
protected_branch: __('Protected branches'),
collaborator: s__('GithubImporter|Collaborators'),
pull_request: s__('GithubImporter|Pull requests'),
@ -34,7 +34,7 @@ const STATISTIC_ITEMS = {
pull_request_review: s__('GithubImporter|PR reviews'),
pull_request_review_request: s__('GithubImporter|PR reviewers'),
release: __('Releases'),
release_attachment: s__('GithubImporter|Release links'),
release_attachment: s__('GithubImporter|Release attachments'),
};
// support both camel case and snake case versions

View File

@ -363,6 +363,15 @@
:weight: 1
:idempotent: true
:tags: []
- :name: cronjob:concurrency_limit_resume
:worker_name: ConcurrencyLimit::ResumeWorker
:feature_category: :global_search
:has_external_dependencies: false
:urgency: :low
:resource_boundary: :unknown
:weight: 1
:idempotent: true
:tags: []
- :name: cronjob:container_expiration_policy
:worker_name: ContainerExpirationPolicyWorker
:feature_category: :container_registry

View File

@ -159,6 +159,13 @@ module WorkerAttributes
::Gitlab::SidekiqMiddleware::PauseControl::WorkersMap.strategy_for(worker: self)
end
def concurrency_limit(max_jobs)
::Gitlab::SidekiqMiddleware::ConcurrencyLimit::WorkersMap.set_limit_for(
worker: self,
max_jobs: max_jobs
)
end
def get_weight
get_class_attribute(:weight) ||
NAMESPACE_WEIGHTS[queue_namespace] ||

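Declaring `concurrency_limit` on a worker registers the `Proc` in `Gitlab::SidekiqMiddleware::ConcurrencyLimit::WorkersMap`, so the middleware can later look the limit up per worker class. A minimal sketch of that lookup, with a placeholder worker class:
```ruby
# Hypothetical lookup; SomeWorker stands in for a worker class that declared a limit.
limit_proc = Gitlab::SidekiqMiddleware::ConcurrencyLimit::WorkersMap.limit_for(worker: SomeWorker)
limit_proc&.call # => maximum number of concurrent jobs, or nil when no limit applies
```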
View File

@ -0,0 +1,64 @@
# frozen_string_literal: true
module ConcurrencyLimit
class ResumeWorker
include ApplicationWorker
include CronjobQueue # rubocop:disable Scalability/CronWorkerContext -- There is no onward scheduling and this cron handles work from across the
# application, so there's no useful context to add.
DEFAULT_LIMIT = 1_000
RESCHEDULE_DELAY = 1.second
feature_category :global_search
data_consistency :sticky
idempotent!
urgency :low
def perform
reschedule_job = false
workers.each do |worker|
next unless jobs_in_the_queue?(worker)
reschedule_job = true
limit = ::Gitlab::SidekiqMiddleware::ConcurrencyLimit::WorkersMap.limit_for(worker: worker)&.call
processing_limit = if limit
current = current_concurrency(worker: worker)
limit - current
else
DEFAULT_LIMIT
end
next unless processing_limit > 0
resume_processing!(worker, limit: processing_limit)
end
self.class.perform_in(RESCHEDULE_DELAY) if reschedule_job
end
private
def current_concurrency(worker:)
@current_concurrency ||= ::Gitlab::SidekiqMiddleware::ConcurrencyLimit::WorkersConcurrency.workers(
skip_cache: true
)
@current_concurrency[worker.name].to_i
end
def jobs_in_the_queue?(worker)
Gitlab::SidekiqMiddleware::ConcurrencyLimit::ConcurrencyLimitService.has_jobs_in_queue?(worker.name)
end
def resume_processing!(worker, limit:)
Gitlab::SidekiqMiddleware::ConcurrencyLimit::ConcurrencyLimitService.resume_processing!(worker.name, limit: limit)
end
def workers
Gitlab::SidekiqMiddleware::ConcurrencyLimit::WorkersMap.workers
end
end
end

View File

@ -13,7 +13,6 @@ identifiers:
product_section: ops
product_stage: monitor
product_group: respond
value_type: number
milestone: "15.7"
introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/105223
distributions:

View File

@ -1,8 +1,18 @@
{
"type": "object",
"required": [
"description",
"category",
"action",
"product_section",
"product_stage",
"product_group",
"introduced_by_url",
"milestone",
"tiers",
"distributions"
],
"additionalProperties": false,
"properties": {
"description": {
"type": "string"

View File

@ -19,6 +19,3 @@ tiers:
- free
- premium
- ultimate
events:
- name: unique_users_visiting_ci_catalog
unique: user.id

View File

@ -0,0 +1,8 @@
---
name: sidekiq_concurrency_limit_middleware
introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/139851
rollout_issue_url: https://gitlab.com/gitlab-org/gitlab/-/issues/435391
milestone: '16.9'
type: ops
group: group::global search
default_enabled: false

View File

@ -801,6 +801,9 @@ Gitlab.ee do
Settings.cron_jobs['pause_control_resume_worker'] ||= {}
Settings.cron_jobs['pause_control_resume_worker']['cron'] ||= '*/5 * * * *'
Settings.cron_jobs['pause_control_resume_worker']['job_class'] ||= 'PauseControl::ResumeWorker'
Settings.cron_jobs['concurrency_limit_resume_worker'] ||= {}
Settings.cron_jobs['concurrency_limit_resume_worker']['cron'] ||= '*/1 * * * *'
Settings.cron_jobs['concurrency_limit_resume_worker']['job_class'] ||= 'ConcurrencyLimit::ResumeWorker'
Settings.cron_jobs['sync_seat_link_worker'] ||= {}
Settings.cron_jobs['sync_seat_link_worker']['cron'] ||= "#{rand(60)} #{rand(3..4)} * * * UTC"
Settings.cron_jobs['sync_seat_link_worker']['job_class'] = 'SyncSeatLinkWorker'

View File

@ -0,0 +1,18 @@
# frozen_string_literal: true
class AddUniqueIndexToSystemNoteMetadataOnIdConvertToBigint < Gitlab::Database::Migration[2.2]
disable_ddl_transaction!
milestone '16.9'
TABLE_NAME = :system_note_metadata
INDEX_NAME = 'index_system_note_metadata_pkey_on_id_convert_to_bigint'
def up
add_concurrent_index TABLE_NAME, :id_convert_to_bigint, unique: true, name: INDEX_NAME
end
def down
remove_concurrent_index TABLE_NAME, :id_convert_to_bigint, unique: true, name: INDEX_NAME
end
end

View File

@ -0,0 +1 @@
eaa86ad90ae101a641f30090941d0a993fa0bd92709ed429d8f23ea5b91c04c7

View File

@ -35471,6 +35471,8 @@ CREATE UNIQUE INDEX index_system_note_metadata_on_description_version_id ON syst
CREATE UNIQUE INDEX index_system_note_metadata_on_note_id ON system_note_metadata USING btree (note_id);
CREATE UNIQUE INDEX index_system_note_metadata_pkey_on_id_convert_to_bigint ON system_note_metadata USING btree (id_convert_to_bigint);
CREATE INDEX index_taggings_on_tag_id ON taggings USING btree (tag_id);
CREATE INDEX index_taggings_on_taggable_id_and_taggable_type_and_context ON taggings USING btree (taggable_id, taggable_type, context);

View File

@ -376,7 +376,7 @@ You should use PgBouncer with `session` pool mode. You can use the
The following example uses the bundled PgBouncer and sets up two separate connection pools on PostgreSQL host,
one in `session` pool mode and the other in `transaction` pool mode. For this example to work,
you need to prepare PostgreSQL server as documented in [in the setup instructions](#manual-database-setup):
you need to prepare PostgreSQL server as documented in [the setup instructions](#manual-database-setup):
```ruby
pgbouncer['databases'] = {

View File

@ -428,8 +428,8 @@ authentication mode (`patroni['tls_client_mode']`), must each have the same valu
# START user configuration
# Set the real values as explained in Required Information section
# Replace CONSUL_PASSWORD_HASH with with a generated md5 value
# Replace PGBOUNCER_PASSWORD_HASH with with a generated md5 value
# Replace CONSUL_PASSWORD_HASH with a generated md5 value
# Replace PGBOUNCER_PASSWORD_HASH with a generated md5 value
pgbouncer['users'] = {
'gitlab-consul': {
password: 'CONSUL_PASSWORD_HASH'

View File

@ -1384,7 +1384,6 @@ Input type: `AiActionInput`
| <a id="mutationaiactionfillinmergerequesttemplate"></a>`fillInMergeRequestTemplate` | [`AiFillInMergeRequestTemplateInput`](#aifillinmergerequesttemplateinput) | Input for fill_in_merge_request_template AI action. |
| <a id="mutationaiactiongeneratecommitmessage"></a>`generateCommitMessage` | [`AiGenerateCommitMessageInput`](#aigeneratecommitmessageinput) | Input for generate_commit_message AI action. |
| <a id="mutationaiactiongeneratedescription"></a>`generateDescription` | [`AiGenerateDescriptionInput`](#aigeneratedescriptioninput) | Input for generate_description AI action. |
| <a id="mutationaiactiongeneratetestfile"></a>`generateTestFile` | [`GenerateTestFileInput`](#generatetestfileinput) | Input for generate_test_file AI action. |
| <a id="mutationaiactionresolvevulnerability"></a>`resolveVulnerability` | [`AiResolveVulnerabilityInput`](#airesolvevulnerabilityinput) | Input for resolve_vulnerability AI action. |
| <a id="mutationaiactionsummarizecomments"></a>`summarizeComments` | [`AiSummarizeCommentsInput`](#aisummarizecommentsinput) | Input for summarize_comments AI action. |
| <a id="mutationaiactionsummarizereview"></a>`summarizeReview` | [`AiSummarizeReviewInput`](#aisummarizereviewinput) | Input for summarize_review AI action. |
@ -34334,15 +34333,6 @@ Represents an escalation rule.
| <a id="escalationruleinputstatus"></a>`status` | [`EscalationRuleStatus!`](#escalationrulestatus) | Status required to prevent the rule from activating. |
| <a id="escalationruleinputusername"></a>`username` | [`String`](#string) | Username of the user to notify. |
### `GenerateTestFileInput`
#### Arguments
| Name | Type | Description |
| ---- | ---- | ----------- |
| <a id="generatetestfileinputfilepath"></a>`filePath` | [`String!`](#string) | File path to generate test files for. |
| <a id="generatetestfileinputresourceid"></a>`resourceId` | [`AiModelID!`](#aimodelid) | Global ID of the resource to mutate. |
### `JiraUsersMappingInputType`
#### Arguments

View File

@ -32,7 +32,7 @@ GET /projects/:id/packages
| `package_name` | string | no | Filter the project packages with a fuzzy search by name. |
| `package_version` | string | no | Filter the project packages by version. If used in combination with `include_versionless`, then no versionless packages are returned. [Introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/349065) in GitLab 16.6. |
| `include_versionless` | boolean | no | When set to true, versionless packages are included in the response. |
| `status` | string | no | Filter the returned packages by status. One of `default` (default), `hidden`, `processing`, `error`, or `pending_destruction`. |
| `status` | string | no | Filter the returned packages by status. One of `default`, `hidden`, `processing`, `error`, or `pending_destruction`. |
```shell
curl --header "PRIVATE-TOKEN: <your_access_token>" "https://gitlab.example.com/api/v4/projects/:id/packages"
@ -98,7 +98,7 @@ GET /groups/:id/packages
| `package_name` | string | no | Filter the project packages with a fuzzy search by name. |
| `package_version` | string | no | Filter the returned packages by version. If used in combination with `include_versionless`, then no versionless packages are returned. [Introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/349065) in GitLab 16.6. |
| `include_versionless` | boolean | no | When set to true, versionless packages are included in the response. |
| `status` | string | no | Filter the returned packages by status. One of `default` (default), `hidden`, `processing`, `error`, or `pending_destruction`. |
| `status` | string | no | Filter the returned packages by status. One of `default`, `hidden`, `processing`, `error`, or `pending_destruction`. |
```shell
curl --header "PRIVATE-TOKEN: <your_access_token>" "https://gitlab.example.com/api/v4/groups/:id/packages?exclude_subgroups=false"

View File

@ -641,7 +641,7 @@ To determine the IP address of a shared runner:
### Determine the IP address of a project runner
To can find the IP address of a runner for a project project,
To find the IP address of a runner for a project,
you must have the Owner role for the
project.

View File

@ -33,7 +33,7 @@ The [Authenticating and Reading Secrets With HashiCorp Vault](../examples/authen
tutorial has more details about authenticating with ID tokens.
You must [configure your Vault server](#configure-your-vault-server) before you
can use [use Vault secrets in a CI job](#use-vault-secrets-in-a-ci-job).
can [use Vault secrets in a CI job](#use-vault-secrets-in-a-ci-job).
The flow for using GitLab with HashiCorp Vault
is summarized by this diagram:

View File

@ -94,3 +94,8 @@ name as label:
- `limited_capacity_worker_running_jobs`
- `limited_capacity_worker_max_running_jobs`
- `limited_capacity_worker_remaining_work_count`
## Alternatives
If the limited capacity worker doesn't fit your architecture, there's also a [concurrency limit](worker_attributes.md#concurrency-limit)
attribute that can be used to restrict the concurrency of a Sidekiq worker, as sketched below.
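A minimal sketch of that attribute, assuming a hypothetical worker class and an arbitrary limit of 25:
```ruby
class ThrottledWorker # hypothetical example, not an existing worker
  include ApplicationWorker

  # At most 25 jobs of this class run concurrently; jobs over the limit are deferred and resumed later.
  concurrency_limit -> { 25 }

  def perform(*args); end
end
```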

View File

@ -365,3 +365,44 @@ class PausedWorker
# ...
end
```
## Concurrency limit
With the `concurrency_limit` property, you can limit the worker's concurrency. Jobs that exceed this limit are put in
a separate `LIST` and re-enqueued once the concurrency falls back under the limit. `ConcurrencyLimit::ResumeWorker` is a cron
worker that checks whether any throttled jobs should be re-enqueued.
The first job that crosses the defined concurrency limit initiates the throttling process for all other jobs of this class.
Until this happens, jobs are scheduled and executed as usual.
When the throttling starts, newly scheduled and executed jobs are added to the end of the `LIST` to ensure that
the execution order is preserved. As soon as the `LIST` is empty again, the throttling process ends.
WARNING:
If there is a sustained workload over the limit, the `LIST` is going to grow until the limit is disabled or
the workload drops under the limit.
FLAG:
The feature is currently behind a default-disabled feature flag `sidekiq_concurrency_limit_middleware`.
You should use a lambda to define the limit. If it returns `nil`, `0`, or a negative value, the limit won't be applied.
```ruby
class LimitedWorker
include ApplicationWorker
concurrency_limit -> { 60 }
# ...
end
```
```ruby
class LimitedWorker
include ApplicationWorker
concurrency_limit -> { ApplicationSetting.current.elasticsearch_concurrent_sidekiq_jobs }
# ...
end
```
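To check whether a worker currently has deferred jobs, the throttled queue can be inspected through the service class added in this change, for example from a Rails console. A rough sketch, with a placeholder worker name:
```ruby
# Hypothetical inspection; 'SomeThrottledWorker' is a placeholder class name.
service = Gitlab::SidekiqMiddleware::ConcurrencyLimit::ConcurrencyLimitService
service.queue_size('SomeThrottledWorker')         # number of deferred jobs in the LIST
service.has_jobs_in_queue?('SomeThrottledWorker') # true while anything is still deferred
```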

View File

@ -18,7 +18,7 @@ GitLab is creating AI-assisted features across our DevSecOps platform. These fea
| Assists with quickly getting everyone up to speed on lengthy conversations to help ensure you are all on the same page. | [Discussion summary](#summarize-issue-discussions-with-discussion-summary) | **(ULTIMATE SAAS EXPERIMENT)** |
| Generates issue descriptions. | [Issue description generation](#summarize-an-issue-with-issue-description-generation) | **(ULTIMATE SAAS EXPERIMENT)** |
| Helps you write code more efficiently by viewing code suggestions as you type. <br><br><i class="fa fa-youtube-play youtube" aria-hidden="true"></i> [Watch overview](https://www.youtube.com/watch?v=hCAyCTacdAQ) | [Code Suggestions](project/repository/code_suggestions/index.md) | For SaaS: **(FREE)**<br><br> For self-managed: **(PREMIUM)** |
| Automates repetitive tasks and helps catch bugs early. | [Test generation](project/merge_requests/ai_in_merge_requests.md#generate-suggested-tests-in-merge-requests) | **(ULTIMATE SAAS EXPERIMENT)** |
| Automates repetitive tasks and helps catch bugs early. | [Test generation](gitlab_duo_chat.md#write-tests-in-the-ide) | **(ULTIMATE SAAS EXPERIMENT)** |
| Generates a description for the merge request based on the contents of the template. | [Merge request template population](project/merge_requests/ai_in_merge_requests.md#fill-in-merge-request-templates) | **(ULTIMATE SAAS EXPERIMENT)** |
| Assists in creating faster and higher-quality reviews by automatically suggesting reviewers for your merge request. <br><br><i class="fa fa-youtube-play youtube" aria-hidden="true"></i> [Watch overview](https://www.youtube.com/watch?v=ivwZQgh4Rxw) | [Suggested Reviewers](project/merge_requests/reviews/index.md#gitlab-duo-suggested-reviewers) | **(ULTIMATE SAAS)** |
| Efficiently communicates the impact of your merge request changes. | [Merge request summary](project/merge_requests/ai_in_merge_requests.md#summarize-merge-request-changes) | **(ULTIMATE SAAS EXPERIMENT)** |
@ -194,7 +194,7 @@ For details about this Beta feature, see [GitLab Duo Chat](gitlab_duo_chat.md).
| [Discussion summary](#summarize-issue-discussions-with-discussion-summary) | Vertex AI Codey [`text-bison`](https://cloud.google.com/vertex-ai/docs/generative-ai/model-reference/text) |
| [Issue description generation](#summarize-an-issue-with-issue-description-generation) | Anthropic [`Claude-2`](https://docs.anthropic.com/claude/reference/selecting-a-model) |
| [Code Suggestions](project/repository/code_suggestions/index.md) | For Code Completion: Vertex AI Codey [`code-gecko`](https://cloud.google.com/vertex-ai/docs/generative-ai/model-reference/code-completion) For Code Generation: Anthropic [`Claude-2`](https://docs.anthropic.com/claude/reference/selecting-a-model) |
| [Test generation](project/merge_requests/ai_in_merge_requests.md#generate-suggested-tests-in-merge-requests) | Vertex AI Codey [`text-bison`](https://cloud.google.com/vertex-ai/docs/generative-ai/model-reference/text) |
| [Test generation](gitlab_duo_chat.md#write-tests-in-the-ide) | Vertex AI Codey [`text-bison`](https://cloud.google.com/vertex-ai/docs/generative-ai/model-reference/text) |
| [Merge request template population](project/merge_requests/ai_in_merge_requests.md#fill-in-merge-request-templates) | Vertex AI Codey [`text-bison`](https://cloud.google.com/vertex-ai/docs/generative-ai/model-reference/text) |
| [Suggested Reviewers](project/merge_requests/reviews/index.md#gitlab-duo-suggested-reviewers) | GitLab creates a machine learning model for each project, which is used to generate reviewers [View the issue](https://gitlab.com/gitlab-org/modelops/applied-ml/applied-ml-updates/-/issues/10) |
| [Merge request summary](project/merge_requests/ai_in_merge_requests.md#summarize-merge-request-changes) | Vertex AI Codey [`text-bison`](https://cloud.google.com/vertex-ai/docs/generative-ai/model-reference/text) |

View File

@ -82,7 +82,7 @@ or API. However, administrators can use a workaround:
# Set the GitLab administration user to use. If user ID 1 is not available or is not an administrator, use 'admin = User.admins.first' instead to select an administrator.
admin = User.find(1)
# Set the group group you want to create a token for. For example, group with ID 109.
# Set the group you want to create a token for. For example, group with ID 109.
group = Group.find(109)
# Create the group bot user. For further group access tokens, the username should be `group_{group_id}_bot_{random_string}` and email address `group_{group_id}_bot_{random_string}@noreply.{Gitlab.config.gitlab.host}`.

View File

@ -50,7 +50,7 @@ PUT /projects/:id/packages/generic/:package_name/:package_version/:file_name?sta
| `package_name` | string | yes | The package name. It can contain only lowercase letters (`a-z`), uppercase letter (`A-Z`), numbers (`0-9`), dots (`.`), hyphens (`-`), or underscores (`_`).
| `package_version` | string | yes | The package version. The following regex validates this: `\A(\.?[\w\+-]+\.?)+\z`. You can test your version strings on [Rubular](https://rubular.com/r/aNCV0wG5K14uq8).
| `file_name` | string | yes | The filename. It can contain only lowercase letters (`a-z`), uppercase letter (`A-Z`), numbers (`0-9`), dots (`.`), hyphens (`-`), or underscores (`_`).
| `status` | string | no | The package status. It can be `default` (default) or `hidden`. Hidden packages do not appear in the UI or [package API list endpoints](../../../api/packages.md).
| `status` | string | no | The package status. It can be `default` or `hidden`. Hidden packages do not appear in the UI or [package API list endpoints](../../../api/packages.md).
| `select` | string | no | The response payload. By default, the response is empty. Valid values are: `package_file`. `package_file` returns details of the package file record created by this request.
Provide the file contents in the request body.

View File

@ -94,27 +94,15 @@ Provide feedback on this experimental feature in [issue 408994](https://gitlab.c
- Contents of the file
- The file name
<!--- start_remove The following content will be removed on remove_date: '2024-04-12' -->
## Generate suggested tests in merge requests
> [Introduced](https://gitlab.com/groups/gitlab-org/-/epics/10366) in GitLab 16.0 as an [Experiment](../../../policy/experiment-beta-support.md#experiment).
> - [Introduced](https://gitlab.com/groups/gitlab-org/-/epics/10366) in GitLab 16.0 as an [Experiment](../../../policy/experiment-beta-support.md#experiment).
> - [Moved](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/141086) to GitLab Duo Chat in GitLab 16.8.
This feature is an [Experiment](../../../policy/experiment-beta-support.md) on GitLab.com.
This feature was [moved](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/141086)
into GitLab Duo Chat in GitLab 16.8. Find more information in
[Write tests in the IDE](../../gitlab_duo_chat.md#write-tests-in-the-ide).
Use GitLab Duo Test generation in a merge request to see a list of suggested tests for the file you are reviewing. This functionality can help determine if appropriate test coverage has been provided, or if you need more coverage for your project.
View a [click-through demo](https://go.gitlab.com/Xfp0l4).
To generate a test suggestion:
1. In a merge request, select the **Changes** tab.
1. On the header for the file, in the upper-right corner, select **Options** (**{ellipsis_v}**).
1. Select **Suggest test cases**.
The test suggestion is generated in a sidebar. You can copy the suggestion to your editor and use it as the start of your tests.
Feedback on this experimental feature can be provided in [issue 408995](https://gitlab.com/gitlab-org/gitlab/-/issues/408995).
**Data usage**: When you use this feature, the following data is sent to the large language model referenced above:
- Contents of the file
- The file name
<!--- end_remove -->

View File

@ -99,6 +99,7 @@ module Gitlab
hset
incr
incrby
ltrim
mapped_hmset
pfadd
pfmerge

View File

@ -0,0 +1,30 @@
# frozen_string_literal: true
module Gitlab
module SidekiqLogging
class ConcurrencyLimitLogger
include Singleton
include LogsJobs
def deferred_log(job)
payload = parse_job(job)
payload['job_status'] = 'concurrency_limit'
payload['message'] = "#{base_message(payload)}: concurrency_limit: paused"
Sidekiq.logger.info payload
end
def resumed_log(worker_name, args)
job = {
'class' => worker_name,
'args' => args
}
payload = parse_job(job)
payload['job_status'] = 'resumed'
payload['message'] = "#{base_message(payload)}: concurrency_limit: resumed"
Sidekiq.logger.info payload
end
end
end
end

View File

@ -38,6 +38,7 @@ module Gitlab
chain.add ::Gitlab::SidekiqMiddleware::WorkerContext::Server
chain.add ::Gitlab::SidekiqMiddleware::PauseControl::Server
chain.add ::ClickHouse::MigrationSupport::SidekiqMiddleware
chain.add ::Gitlab::SidekiqMiddleware::ConcurrencyLimit::Server
# DuplicateJobs::Server should be placed at the bottom, but before the SidekiqServerMiddleware,
# so we can compare the latest WAL location against replica
chain.add ::Gitlab::SidekiqMiddleware::DuplicateJobs::Server
@ -57,6 +58,7 @@ module Gitlab
# so we can store WAL location before we deduplicate the job.
chain.add ::Gitlab::Database::LoadBalancing::SidekiqClientMiddleware
chain.add ::Gitlab::SidekiqMiddleware::PauseControl::Client
chain.add ::Gitlab::SidekiqMiddleware::ConcurrencyLimit::Client
chain.add ::Gitlab::SidekiqMiddleware::DuplicateJobs::Client
chain.add ::Gitlab::SidekiqStatus::ClientMiddleware
chain.add ::Gitlab::SidekiqMiddleware::AdminMode::Client

View File

@ -0,0 +1,13 @@
# frozen_string_literal: true
module Gitlab
module SidekiqMiddleware
module ConcurrencyLimit
class Client
def call(worker_class, job, _queue, _redis_pool, &block)
::Gitlab::SidekiqMiddleware::ConcurrencyLimit::Middleware.new(worker_class, job).schedule(&block)
end
end
end
end
end

View File

@ -0,0 +1,102 @@
# frozen_string_literal: true
module Gitlab
module SidekiqMiddleware
module ConcurrencyLimit
class ConcurrencyLimitService
# Class for managing queues for deferred workers
def initialize(worker_name)
@worker_name = worker_name
@redis_key = "sidekiq:concurrency_limit:throttled_jobs:{#{worker_name.underscore}}"
end
class << self
def add_to_queue!(worker_name, args, context)
new(worker_name).add_to_queue!(args, context)
end
def has_jobs_in_queue?(worker_name)
new(worker_name).has_jobs_in_queue?
end
def resume_processing!(worker_name, limit:)
new(worker_name).resume_processing!(limit: limit)
end
def queue_size(worker_name)
new(worker_name).queue_size
end
end
def add_to_queue!(args, context)
with_redis do |redis|
redis.rpush(redis_key, serialize(args, context))
end
end
def queue_size
with_redis { |redis| redis.llen(redis_key) }
end
def has_jobs_in_queue?
queue_size != 0
end
def resume_processing!(limit:)
with_redis do |redis|
jobs = next_batch_from_queue(redis, limit: limit)
break if jobs.empty?
jobs.each { |j| send_to_processing_queue(deserialize(j)) }
remove_processed_jobs(redis, limit: jobs.length)
jobs.length
end
end
private
attr_reader :worker_name, :redis_key
def with_redis(&blk)
Gitlab::Redis::SharedState.with(&blk) # rubocop:disable CodeReuse/ActiveRecord -- Not active record
end
def serialize(args, context)
{
args: args,
context: context
}.to_json
end
def deserialize(json)
Gitlab::Json.parse(json)
end
def send_to_processing_queue(job)
context = (job['context'] || {}).merge(related_class: self.class.name)
Gitlab::ApplicationContext.with_raw_context(context) do
args = job['args']
Gitlab::SidekiqLogging::ConcurrencyLimitLogger.instance.resumed_log(worker_name, args)
worker_name.safe_constantize&.perform_async(*args)
end
end
def next_batch_from_queue(redis, limit:)
return [] unless limit > 0
redis.lrange(redis_key, 0, limit - 1)
end
def remove_processed_jobs(redis, limit:)
redis.ltrim(redis_key, limit, -1)
end
end
end
end
end

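A rough round-trip sketch of how the middleware and the cron worker use this service; the worker name and job arguments are placeholders:
```ruby
# Hypothetical round trip; 'SomeThrottledWorker' and the arguments are placeholders.
service = Gitlab::SidekiqMiddleware::ConcurrencyLimit::ConcurrencyLimitService

# The client/server middleware defers a job that is over the limit:
service.add_to_queue!('SomeThrottledWorker', [42], Gitlab::ApplicationContext.current)

# ConcurrencyLimit::ResumeWorker later re-enqueues up to `limit` jobs in their original order:
service.resume_processing!('SomeThrottledWorker', limit: 10)
```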
View File

@ -0,0 +1,85 @@
# frozen_string_literal: true
module Gitlab
module SidekiqMiddleware
module ConcurrencyLimit
class Middleware
def initialize(worker, job)
@worker = worker
@job = job
end
# This will continue the middleware chain if the job should be scheduled
# It will return false if the job needs to be cancelled
def schedule
if should_defer_schedule?
defer_job!
return
end
yield
end
# This will continue the server middleware chain if the job should be
# executed.
# It will return false if the job should not be executed.
def perform
if should_defer_perform?
defer_job!
return
end
yield
end
private
attr_reader :job, :worker
def should_defer_schedule?
return false if Feature.disabled?(:sidekiq_concurrency_limit_middleware, Feature.current_request, type: :ops)
return false if resumed?
return false unless ::Gitlab::SidekiqMiddleware::ConcurrencyLimit::WorkersMap.limit_for(worker: worker)
has_jobs_in_queue?
end
def should_defer_perform?
return false if Feature.disabled?(:sidekiq_concurrency_limit_middleware, Feature.current_request, type: :ops)
return false if resumed?
return true if has_jobs_in_queue?
::Gitlab::SidekiqMiddleware::ConcurrencyLimit::WorkersMap.over_the_limit?(worker: worker)
end
def concurrency_service
::Gitlab::SidekiqMiddleware::ConcurrencyLimit::ConcurrencyLimitService
end
def resumed?
current_context['meta.related_class'] == concurrency_service.name
end
def has_jobs_in_queue?
worker_class = worker.is_a?(Class) ? worker : worker.class
concurrency_service.has_jobs_in_queue?(worker_class.name)
end
def defer_job!
::Gitlab::SidekiqLogging::ConcurrencyLimitLogger.instance.deferred_log(job)
concurrency_service.add_to_queue!(
job['class'],
job['args'],
current_context
)
end
def current_context
::Gitlab::ApplicationContext.current
end
end
end
end
end

View File

@ -0,0 +1,13 @@
# frozen_string_literal: true
module Gitlab
module SidekiqMiddleware
module ConcurrencyLimit
class Server
def call(worker, job, _queue, &block)
::Gitlab::SidekiqMiddleware::ConcurrencyLimit::Middleware.new(worker, job).perform(&block)
end
end
end
end
end

View File

@ -0,0 +1,38 @@
# frozen_string_literal: true
module Gitlab
module SidekiqMiddleware
module ConcurrencyLimit
class WorkersConcurrency
class << self
CACHE_EXPIRES_IN = 5.seconds
def current_for(worker:, skip_cache: false)
worker_class = worker.is_a?(Class) ? worker : worker.class
worker_name = worker_class.name
workers(skip_cache: skip_cache)[worker_name].to_i
end
def workers(skip_cache: false)
return workers_uncached if skip_cache
Rails.cache.fetch(self.class.name, expires_in: CACHE_EXPIRES_IN) do
workers_uncached
end
end
private
def workers_uncached
sidekiq_workers.map { |_process_id, _thread_id, work| ::Gitlab::Json.parse(work['payload'])['class'] }.tally
end
def sidekiq_workers
Sidekiq::Workers.new.each
end
end
end
end
end
end

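A small sketch of reading the live concurrency for a worker class while bypassing the short-lived cache; the worker class is a placeholder:
```ruby
# Hypothetical check of how many jobs of a given class are executing right now.
Gitlab::SidekiqMiddleware::ConcurrencyLimit::WorkersConcurrency.current_for(
  worker: SomeWorker, # placeholder worker class
  skip_cache: true    # tally Sidekiq::Workers directly instead of using the cached value
)
```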
View File

@ -0,0 +1,47 @@
# frozen_string_literal: true
module Gitlab
module SidekiqMiddleware
module ConcurrencyLimit
class WorkersMap
class << self
def set_limit_for(worker:, max_jobs:)
raise ArgumentError, 'max_jobs must be a Proc instance' if max_jobs && !max_jobs.is_a?(Proc)
@data ||= {}
@data[worker] = max_jobs
end
def limit_for(worker:)
return unless data
return if Feature.disabled?(:sidekiq_concurrency_limit_middleware, Feature.current_request, type: :ops)
worker_class = worker.is_a?(Class) ? worker : worker.class
data[worker_class]
end
def over_the_limit?(worker:)
limit_proc = limit_for(worker: worker)
limit = limit_proc&.call
return false if limit.to_i <= 0
current = ::Gitlab::SidekiqMiddleware::ConcurrencyLimit::WorkersConcurrency.current_for(worker: worker)
current >= limit
end
def workers
return [] unless data
data.keys
end
private
attr_reader :data
end
end
end
end
end

View File

@ -22538,7 +22538,7 @@ msgstr ""
msgid "GithubImporter|Issue %{issue_iid} attachment"
msgstr ""
msgid "GithubImporter|Issue links"
msgid "GithubImporter|Issue attachments"
msgstr ""
msgid "GithubImporter|Merge request %{merge_request_iid} attachment"
@ -22547,7 +22547,7 @@ msgstr ""
msgid "GithubImporter|Note attachment"
msgstr ""
msgid "GithubImporter|Note links"
msgid "GithubImporter|Note attachments"
msgstr ""
msgid "GithubImporter|PR attachments"
@ -22586,7 +22586,7 @@ msgstr ""
msgid "GithubImporter|Release %{tag} attachment"
msgstr ""
msgid "GithubImporter|Release links"
msgid "GithubImporter|Release attachments"
msgstr ""
msgid "GithubImporter|Your import of GitHub gists into GitLab snippets is complete."
@ -41491,9 +41491,6 @@ msgstr ""
msgid "Response didn't include `service_desk_address`"
msgstr ""
msgid "Response generated by AI"
msgstr ""
msgid "Response initiated"
msgstr ""
@ -47889,9 +47886,6 @@ msgstr ""
msgid "Suggest code changes which can be immediately applied in one click. Try it out!"
msgstr ""
msgid "Suggest test cases"
msgstr ""
msgid "Suggested change"
msgstr ""
@ -48828,9 +48822,6 @@ msgid_plural "Test coverage: %d hits"
msgstr[0] ""
msgstr[1] ""
msgid "Test generated by AI"
msgstr ""
msgid "Test settings"
msgstr ""
@ -52167,9 +52158,6 @@ msgstr ""
msgid "Unable to generate new instance ID"
msgstr ""
msgid "Unable to generate tests for specified file."
msgstr ""
msgid "Unable to load commits. Try again later."
msgstr ""

View File

@ -2,12 +2,25 @@
set -e
# host file can't be modified during docker build because it is created on container start
# this makes for a more portable solution, as the other alternative is to use the `--add-host` option for the 'docker run' command
# We want a non-loopback ip otherwise some services (maybe just workhorse?) bind to localhost and can't be accessed
# from outside the container
echo "$(hostname -i) gdk.test" | sudo tee -a /etc/hosts > /dev/null
unset BUNDLE_PATH
if [[ "${WITH_LOAD_BALANCER}" == "true" ]]; then
gdk config set postgresql.replica.enabled true
gdk config set postgresql.replica_2.enabled true
gdk config set load_balancing.enabled true
gdk reconfigure
fi
gdk start
exec "$@" | tee -a ${HOME}/gitlab-development-kit/gitlab/log/gdk.log
# Split logs into multiple files when running in CI
if [ -z "$CI" ]; then
exec "$@" | tee -a ${HOME}/gitlab-development-kit/gitlab/log/gdk-container.log
else
logs=("gitaly" "gitlab-workhorse" "postgresql" "rails-background-jobs" "redis" "sshd" "vite" "rails-web" "webpack")
for log in "${logs[@]}"; do
gdk tail $log &>${CI_BUILDS_DIR}/gdk.${log}.log &
done
exec "$@"
fi

View File

@ -3,10 +3,10 @@ hostname: gdk.test
sshd:
additional_config: 'AcceptEnv GIT_PROTOCOL'
webpack:
live_reload: false
sourcemaps: false
incremental: false
static: true
enabled: false
vite:
enabled: true
hot_module_reloading: false
gdk:
ask_to_restart_after_update: false
auto_reconfigure: false

View File

@ -61,6 +61,7 @@ module QA
end
def remove_saml_idp_service(saml_idp_service)
saml_idp_service.logs # print log output before removal
saml_idp_service.remove!
end

View File

@ -10,7 +10,7 @@ module QA
def expand_content(element_name)
within_element(element_name) do
# Because it is possible to click the button before the JS toggle code is bound
wait_until(reload: false) do
wait_until(reload: false, message: "Waiting until content is expanded") do
click_button 'Expand' unless has_css?('button', text: 'Collapse', wait: 1)
has_content?('Collapse')

View File

@ -692,8 +692,11 @@ module QA
ENV['QA_1P_GITHUB_UUID']
end
def gdk_url
ENV['GDK_URL']
# Docker network to use when starting sidecar containers
#
# @return [String]
def docker_network
ENV["QA_DOCKER_NETWORK"]
end
private

View File

@ -5,6 +5,9 @@ require 'socket'
module QA
module Service
module DockerRun
# TODO: There are a lot of methods that reference @name, yet it is not part of the initializer
# Refactor all child implementations to remove the assumption that @name will exist
#
class Base
include Service::Shellout
@ -13,7 +16,7 @@ module QA
end
def initialize
@network = gdk_network || Runtime::Scenario.attributes[:network] || 'test'
@network = Runtime::Scenario.attributes[:network] || Runtime::Env.docker_network || 'test'
end
# Authenticate against a container registry
@ -40,7 +43,9 @@ module QA
end
def network
network_exists?(@network) ? @network : 'bridge'
return @network_cache if @network_cache
@network_cache = network_exists?(@network) ? @network : 'bridge'
end
def inspect_network(name)
@ -58,8 +63,17 @@ module QA
end
end
# Host name of the container
#
# If the host or default bridge network is used, the container can only be reached using its IP address
#
# @return [String]
def host_name
"#{@name}.#{network}"
@host_name ||= if network == "host" || network == "bridge"
host_ip
else
"#{@name}.#{network}"
end
end
def register!
@ -88,24 +102,6 @@ module QA
shell("docker inspect --format='{{json .State.Health.Status}}' #{@name}").delete('"')
end
# The network to use when testing against GDK in docker
#
# @return [String]
def gdk_network
return unless Runtime::Env.gdk_url
'host'
end
# The IP address of the docker host when testing against GDK in docker
#
# @return [String]
def gdk_host_ip
return unless Runtime::Env.gdk_url
Addrinfo.tcp(URI(Runtime::Env.gdk_url).host, nil).ip_address
end
# Returns the IP address of the docker host
#
# @return [String]

View File

@ -41,7 +41,6 @@ module QA
docker run -d --rm --network #{network} --name #{@name} #{'--user=root' if Runtime::Env.fips?}
#{'-v /var/run/docker.sock:/var/run/docker.sock' if @executor == :docker}
--privileged
#{"--add-host gdk.test:#{gdk_host_ip}" if gdk_network}
#{@image} #{add_gitlab_tls_cert if @address.include? 'https'}
&& docker exec --detach #{@name} sh -c "#{register_command}"
CMD
@ -91,7 +90,6 @@ module QA
args << '--docker-privileged=true'
args << "--docker-network-mode=#{network}"
args << "--docker-volumes=/certs/client"
args << "--docker-extra-hosts=gdk.test:#{gdk_host_ip}" if gdk_network
end
<<~CMD.strip

View File

@ -4,6 +4,8 @@ module QA
module Service
module DockerRun
class SamlIdp < Base
include Support::API
def initialize(gitlab_host, group)
@image = 'jamedjo/test-saml-idp'
@name = 'saml-idp-server'
@ -12,6 +14,8 @@ module QA
super()
end
delegate :logger, to: Runtime::Logger
def idp_base_url
"https://#{host_name}:8443/simplesaml"
end
@ -40,17 +44,10 @@ module QA
QA::Runtime::Env.simple_saml_fingerprint || '119b9e027959cdb7c662cfd075d9e2ef384e445f'
end
def host_name
return 'localhost' unless QA::Runtime::Env.running_in_ci?
super
end
def register!
command = <<~CMD.tr("\n", ' ')
docker run -d --rm
--network #{network}
--hostname #{host_name}
--name #{@name}
--env SIMPLESAMLPHP_SP_ENTITY_ID=#{@gitlab_host}/groups/#{@group}
--env SIMPLESAMLPHP_SP_ASSERTION_CONSUMER_SERVICE=#{@gitlab_host}/groups/#{@group}/-/saml/callback
@ -59,9 +56,23 @@ module QA
#{@image}
CMD
command.gsub!("--network #{network} ", '') unless QA::Runtime::Env.running_in_ci?
shell command
logger.debug("Waiting for SAML IDP to start...")
Support::Retrier.retry_until(
max_attempts: 3,
sleep_interval: 1,
retry_on_exception: true,
message: "Waiting for SAML IDP to start"
) do
logger.debug("Pinging SAML IDP service")
# Endpoint will return 403 for unauthenticated request once it is up
get("http://#{host_name}:8080").code == 403
end
rescue StandardError => e
# Remove container on failure because it isn't using a unique name
remove!
raise e
end
end
end

View File

@ -5,12 +5,12 @@ module QA
module DockerRun
class Smocker < Base
def initialize(name: 'smocker-server')
@image = 'thiht/smocker:0.17.1'
@image = 'thiht/smocker:0.18.5'
@name = name
@public_port = 8080
@admin_port = 8081
super()
@network_cache = network
end
# @param wait [Integer] seconds to wait for server
@ -40,14 +40,6 @@ module QA
attr_reader :public_port, :admin_port
def host_name
@host_name ||= if qa_environment? && !gdk_network && @network_cache != 'bridge'
"#{@name}.#{@network_cache}"
else
host_ip
end
end
def wait_for_running
Support::Waiter.wait_until(raise_on_failure: false, reload_page: false) do
running?
@ -57,24 +49,19 @@ module QA
def register!
command = <<~CMD.tr("\n", ' ')
docker run -d --rm
--network #{@network_cache}
--hostname #{host_name}
--name #{@name}
--publish #{@public_port}:8080
--publish #{@admin_port}:8081
#{@image}
--network #{network}
--name #{name}
--publish #{public_port}:8080
--publish #{admin_port}:8081
#{image}
CMD
command.gsub!("--network #{@network_cache} ", '') unless qa_environment?
shell command
end
private
def qa_environment?
QA::Runtime::Env.running_in_ci? || QA::Runtime::Env.qa_hostname
end
attr_reader :name, :image
end
end
end

View File

@ -8,6 +8,8 @@ module QA
PROJECT = "gitlab-qa-resources"
BUCKET = "knapsack-reports"
FALLBACK_REPORT = "knapsack/master_report.json"
PATTERN_VAR_NAME = "KNAPSACK_TEST_FILE_PATTERN"
DEFAULT_TEST_PATTERN = "qa/specs/features/**/*_spec.rb"
class << self
delegate :configure!, :move_regenerated_report, :download_report, :upload_report, to: :new
@ -117,9 +119,9 @@ module QA
#
# @return [void]
def setup_environment!
ENV["KNAPSACK_TEST_FILE_PATTERN"] ||= "qa/specs/features/**/*_spec.rb"
ENV["KNAPSACK_TEST_DIR"] = "qa/specs"
ENV["KNAPSACK_REPORT_PATH"] = report_path
ENV[PATTERN_VAR_NAME] = ENV[PATTERN_VAR_NAME].presence || DEFAULT_TEST_PATTERN
end
# Logger instance

View File

@ -29,38 +29,20 @@ module QA
end
end
context 'when running in CI' do
before do
allow(Runtime::Env).to receive(:running_in_ci?).and_return(true)
end
context 'when network is not bridge' do
it_behaves_like 'returns name.network'
end
context 'when network is bridge' do
let(:network) { 'bridge' }
it_behaves_like 'returns host ip'
end
context 'when network is not bridge or host' do
it_behaves_like 'returns name.network'
end
context 'when running not in CI' do
before do
allow(Runtime::Env).to receive(:running_in_ci?).and_return(false)
end
context 'when network is bridge' do
let(:network) { 'bridge' }
context 'when QA hostname is not set' do
it_behaves_like 'returns host ip'
end
it_behaves_like 'returns host ip'
end
context 'when QA hostname is set' do
before do
allow(Runtime::Env).to receive(:qa_hostname).and_return('qa-hostname')
end
context 'when network is host' do
let(:network) { 'host' }
it_behaves_like 'returns name.network'
end
it_behaves_like 'returns host ip'
end
end
end

View File

@ -47,8 +47,6 @@ const ENDPOINT_METADATA_URL = `${TEST_HOST}/diff/endpointMetadata`;
Vue.use(Vuex);
Vue.use(VueApollo);
Vue.config.ignoredElements = ['copy-code'];
function getCollapsedFilesWarning(wrapper) {
return wrapper.findComponent(CollapsedFilesWarning);
}

View File

@ -0,0 +1,58 @@
# frozen_string_literal: true
require 'spec_helper'
RSpec.describe Gitlab::SidekiqMiddleware::ConcurrencyLimit::Client, :clean_gitlab_redis_queues, feature_category: :global_search do
let(:worker_class) do
Class.new do
def self.name
'TestConcurrencyLimitWorker'
end
include ApplicationWorker
concurrency_limit -> { 5 }
def perform(*)
self.class.work
end
def self.work; end
end
end
before do
stub_const('TestConcurrencyLimitWorker', worker_class)
end
describe '#call' do
context 'when feature flag is disabled' do
before do
stub_feature_flags(sidekiq_concurrency_limit_middleware: false)
end
it 'schedules the job' do
expect(Gitlab::SidekiqMiddleware::ConcurrencyLimit::ConcurrencyLimitService).not_to receive(:add_to_queue!)
TestConcurrencyLimitWorker.perform_async('foo')
expect(TestConcurrencyLimitWorker.jobs.size).to eq(1)
end
end
context 'when there are jobs in the queue' do
before do
allow(::Gitlab::SidekiqMiddleware::ConcurrencyLimit::ConcurrencyLimitService).to receive(:has_jobs_in_queue?)
.and_return(true)
end
it 'defers the job' do
expect(Gitlab::SidekiqMiddleware::ConcurrencyLimit::ConcurrencyLimitService).to receive(:add_to_queue!).once
TestConcurrencyLimitWorker.perform_async('foo')
expect(TestConcurrencyLimitWorker.jobs.size).to eq(0)
end
end
end
end

View File

@ -0,0 +1,190 @@
# frozen_string_literal: true
require 'spec_helper'
RSpec.describe Gitlab::SidekiqMiddleware::ConcurrencyLimit::ConcurrencyLimitService, :clean_gitlab_redis_shared_state, feature_category: :global_search do
let(:worker_class) do
Class.new do
def self.name
'DummyWorker'
end
include ApplicationWorker
end
end
let(:worker_class_name) { worker_class.name }
let(:worker_context) do
{ 'correlation_id' => 'context_correlation_id',
'meta.project' => 'gitlab-org/gitlab' }
end
let(:stored_context) do
{
"#{Gitlab::ApplicationContext::LOG_KEY}.project" => 'gitlab-org/gitlab',
"correlation_id" => 'context_correlation_id'
}
end
let(:worker_args) { [1, 2] }
subject(:service) { described_class.new(worker_class_name) }
before do
stub_const(worker_class_name, worker_class)
end
describe '.add_to_queue!' do
subject(:add_to_queue!) { described_class.add_to_queue!(worker_class_name, worker_args, worker_context) }
it 'calls an instance method' do
expect_next_instance_of(described_class) do |instance|
expect(instance).to receive(:add_to_queue!).with(worker_args, worker_context)
end
add_to_queue!
end
end
describe '.has_jobs_in_queue?' do
it 'calls an instance method' do
expect_next_instance_of(described_class) do |instance|
expect(instance).to receive(:has_jobs_in_queue?)
end
described_class.has_jobs_in_queue?(worker_class_name)
end
end
describe '.resume_processing!' do
subject(:resume_processing!) { described_class.resume_processing!(worker_class_name, limit: 10) }
it 'calls an instance method' do
expect_next_instance_of(described_class) do |instance|
expect(instance).to receive(:resume_processing!)
end
resume_processing!
end
end
describe '.queue_size' do
it 'reports the queue size' do
expect(described_class.queue_size(worker_class_name)).to eq(0)
service.add_to_queue!(worker_args, worker_context)
expect(described_class.queue_size(worker_class_name)).to eq(1)
expect { service.resume_processing!(limit: 1) }.to change { described_class.queue_size(worker_class_name) }.by(-1)
end
end
describe '#add_to_queue!' do
subject(:add_to_queue!) { service.add_to_queue!(worker_args, worker_context) }
it 'adds a job to the set' do
expect { add_to_queue! }
.to change { service.queue_size }
.from(0).to(1)
end
it 'adds only one unique job to the set' do
expect do
2.times { add_to_queue! }
end.to change { service.queue_size }.from(0).to(1)
end
it 'stores context information' do
add_to_queue!
service.send(:with_redis) do |r|
set_key = service.send(:redis_key)
stored_job = service.send(:deserialize, r.lrange(set_key, 0, -1).first)
expect(stored_job['context']).to eq(stored_context)
end
end
end
describe '#has_jobs_in_queue?' do
it 'uses queue_size' do
expect { service.add_to_queue!(worker_args, worker_context) }
.to change { service.has_jobs_in_queue? }
.from(false).to(true)
end
end
describe '#resume_processing!' do
let(:jobs) { [[1], [2], [3]] }
let(:expected_context) { stored_context.merge(related_class: described_class.name) }
it 'puts jobs back into the queue and respects order' do
jobs.each do |j|
service.add_to_queue!(j, worker_context)
end
expect(worker_class).to receive(:perform_async).with(1).ordered
expect(worker_class).to receive(:perform_async).with(2).ordered
expect(worker_class).not_to receive(:perform_async).with(3).ordered
expect(Gitlab::SidekiqLogging::ConcurrencyLimitLogger.instance)
.to receive(:resumed_log)
.with(worker_class_name, [1])
expect(Gitlab::SidekiqLogging::ConcurrencyLimitLogger.instance)
.to receive(:resumed_log)
.with(worker_class_name, [2])
service.resume_processing!(limit: 2)
end
it 'drops a set after execution' do
jobs.each do |j|
service.add_to_queue!(j, worker_context)
end
expect(Gitlab::ApplicationContext).to receive(:with_raw_context)
.with(expected_context)
.exactly(jobs.count).times.and_call_original
expect(worker_class).to receive(:perform_async).exactly(jobs.count).times
expect { service.resume_processing!(limit: jobs.count) }
.to change { service.has_jobs_in_queue? }.from(true).to(false)
end
end
context 'with concurrent changes to different queues' do
let(:second_worker_class) do
Class.new do
def self.name
'SecondDummyIndexingWorker'
end
include ApplicationWorker
end
end
let(:other_subject) { described_class.new(second_worker_class.name) }
before do
stub_const(second_worker_class.name, second_worker_class)
end
it 'allows queues to be used independently of each other' do
expect { service.add_to_queue!(worker_args, worker_context) }
.to change { service.queue_size }
.from(0).to(1)
expect { other_subject.add_to_queue!(worker_args, worker_context) }
.to change { other_subject.queue_size }
.from(0).to(1)
expect { service.resume_processing!(limit: 1) }.to change { service.has_jobs_in_queue? }
.from(true).to(false)
expect { other_subject.resume_processing!(limit: 1) }.to change { other_subject.has_jobs_in_queue? }
.from(true).to(false)
end
end
end
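
The spec above treats the service as a per-worker FIFO backed by a Redis list: each deferred job is stored with its arguments and a trimmed application context, duplicates collapse into a single entry, and resume_processing! re-enqueues up to `limit` jobs in insertion order. A rough standalone sketch of that shape, assuming the `redis` gem and a local Redis server; the class and key names are illustrative, and deduplication and logging are omitted:

require 'json'
require 'redis'

# Hypothetical stand-in for a per-worker deferred-job queue.
class DeferredJobQueue
  def initialize(worker_name, redis: Redis.new)
    @worker_name = worker_name
    @redis = redis
  end

  def add_to_queue!(args, context)
    @redis.rpush(redis_key, JSON.dump('args' => args, 'context' => context))
  end

  def queue_size
    @redis.llen(redis_key)
  end

  def has_jobs_in_queue?
    queue_size > 0
  end

  # Pops up to `limit` jobs, oldest first, and hands them back to the caller.
  # The real middleware would re-dispatch each one with Worker.perform_async(*args).
  def resume_processing!(limit:)
    Array.new(limit) { @redis.lpop(redis_key) }.compact.map { |raw| JSON.parse(raw) }
  end

  private

  def redis_key
    "deferred_jobs:#{@worker_name}"
  end
end

queue = DeferredJobQueue.new('DummyWorker')
queue.add_to_queue!([1, 2], 'correlation_id' => 'abc')
queue.resume_processing!(limit: 1) # => [{"args"=>[1, 2], "context"=>{"correlation_id"=>"abc"}}]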


@ -0,0 +1,107 @@
# frozen_string_literal: true
require 'spec_helper'
RSpec.describe Gitlab::SidekiqMiddleware::ConcurrencyLimit::Server, feature_category: :global_search do
let(:worker_class) do
Class.new do
def self.name
'TestConcurrencyLimitWorker'
end
include ApplicationWorker
concurrency_limit -> { 5 }
def perform(*)
self.class.work
end
def self.work; end
end
end
before do
stub_const('TestConcurrencyLimitWorker', worker_class)
end
around do |example|
with_sidekiq_server_middleware do |chain|
chain.add described_class
Sidekiq::Testing.inline! { example.run }
end
end
describe '#call' do
context 'when feature flag is disabled' do
before do
stub_feature_flags(sidekiq_concurrency_limit_middleware: false)
end
it 'executes the job' do
expect(TestConcurrencyLimitWorker).to receive(:work)
expect(Gitlab::SidekiqLogging::ConcurrencyLimitLogger.instance).not_to receive(:deferred_log)
expect(Gitlab::SidekiqMiddleware::ConcurrencyLimit::ConcurrencyLimitService).not_to receive(:add_to_queue!)
TestConcurrencyLimitWorker.perform_async('foo')
end
end
context 'when there are jobs in the queue' do
before do
allow(::Gitlab::SidekiqMiddleware::ConcurrencyLimit::ConcurrencyLimitService).to receive(:has_jobs_in_queue?)
.and_return(true)
end
it 'defers the job' do
expect(TestConcurrencyLimitWorker).not_to receive(:work)
expect(Gitlab::SidekiqLogging::ConcurrencyLimitLogger.instance).to receive(:deferred_log).and_call_original
expect(Gitlab::SidekiqMiddleware::ConcurrencyLimit::ConcurrencyLimitService).to receive(:add_to_queue!)
TestConcurrencyLimitWorker.perform_async('foo')
end
it 'executes the job if resumed' do
expect(TestConcurrencyLimitWorker).to receive(:work)
expect(Gitlab::SidekiqLogging::ConcurrencyLimitLogger.instance).not_to receive(:deferred_log)
expect(Gitlab::SidekiqMiddleware::ConcurrencyLimit::ConcurrencyLimitService).not_to receive(:add_to_queue!)
related_class = 'Gitlab::SidekiqMiddleware::ConcurrencyLimit::ConcurrencyLimitService'
Gitlab::ApplicationContext.with_raw_context(related_class: related_class) do
TestConcurrencyLimitWorker.perform_async('foo')
end
end
end
context 'when sidekiq_workers are stubbed' do
before do
allow(::Gitlab::SidekiqMiddleware::ConcurrencyLimit::WorkersMap).to receive(:over_the_limit?)
.and_return(over_the_limit)
end
context 'when under the limit' do
let(:over_the_limit) { false }
it 'executes the job' do
expect(TestConcurrencyLimitWorker).to receive(:work)
expect(Gitlab::SidekiqLogging::ConcurrencyLimitLogger.instance).not_to receive(:deferred_log)
expect(Gitlab::SidekiqMiddleware::ConcurrencyLimit::ConcurrencyLimitService).not_to receive(:add_to_queue!)
TestConcurrencyLimitWorker.perform_async('foo')
end
end
context 'when over the limit' do
let(:over_the_limit) { true }
it 'defers the job' do
expect(TestConcurrencyLimitWorker).not_to receive(:work)
expect(Gitlab::SidekiqLogging::ConcurrencyLimitLogger.instance).to receive(:deferred_log).and_call_original
expect(Gitlab::SidekiqMiddleware::ConcurrencyLimit::ConcurrencyLimitService).to receive(:add_to_queue!)
TestConcurrencyLimitWorker.perform_async('foo')
end
end
end
end
end
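
Taken together, these scenarios describe a server middleware that yields when the worker has no limit or is under it, and otherwise logs a deferral and hands the job to the concurrency-limit service instead of running it (jobs resumed by the service itself are exempt so they do not bounce straight back into the queue). A simplified in-memory sketch of that control flow, not the real middleware, with the feature-flag and resumed-job checks left out:

# Hypothetical Sidekiq server middleware; collaborators are plain in-memory stand-ins.
class ConcurrencyLimitMiddleware
  # limit_resolver   -> allowed concurrency for a worker class, or nil for "no limit"
  # current_resolver -> how many jobs of that class are running right now
  # deferral_queue   -> receives jobs that must wait
  def initialize(limit_resolver:, current_resolver:, deferral_queue:)
    @limit_resolver = limit_resolver
    @current_resolver = current_resolver
    @deferral_queue = deferral_queue
  end

  # Sidekiq server middleware contract: call(worker_instance, job_hash, queue_name) { run }
  def call(worker, job, _queue)
    limit = @limit_resolver.call(worker.class)
    return yield if limit.nil?

    if !@deferral_queue.empty? || @current_resolver.call(worker.class) >= limit
      # Over the limit, or older deferred jobs are still waiting their turn:
      # record the job instead of running it.
      @deferral_queue << { 'class' => job['class'], 'args' => job['args'] }
      return
    end

    yield
  end
end

deferred = []
middleware = ConcurrencyLimitMiddleware.new(
  limit_resolver: ->(klass) { klass.name == 'HeavyWorker' ? 2 : nil },
  current_resolver: ->(_klass) { 5 },
  deferral_queue: deferred
)

HeavyWorker = Class.new
middleware.call(HeavyWorker.new, { 'class' => 'HeavyWorker', 'args' => [42] }, 'default') { puts 'ran' }
deferred # => [{"class"=>"HeavyWorker", "args"=>[42]}]

In a real deployment such a class would be added to the server middleware chain inside Sidekiq.configure_server, which is what the spec's with_sidekiq_server_middleware helper simulates.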


@ -0,0 +1,93 @@
# frozen_string_literal: true
require 'spec_helper'
RSpec.describe Gitlab::SidekiqMiddleware::ConcurrencyLimit::WorkersConcurrency, feature_category: :global_search do
let(:worker_class) do
Class.new do
def self.name
'TestConcurrencyLimitWorker'
end
include ApplicationWorker
concurrency_limit -> { 60 }
def perform(*); end
end
end
let(:current_concurrency) { 10 }
let(:sidekiq_worker) do
[
'process_id',
'thread_id',
{
'queue' => 'default',
'payload' => {
'class' => 'TestConcurrencyLimitWorker'
}.to_json
}
]
end
before do
stub_const('TestConcurrencyLimitWorker', worker_class)
allow(described_class).to receive(:sidekiq_workers).and_return([sidekiq_worker] * current_concurrency)
end
describe '.current_for' do
subject(:current_for) { described_class.current_for(worker: TestConcurrencyLimitWorker, skip_cache: skip_cache) }
context 'without cache' do
let(:skip_cache) { true }
it 'returns the current concurrency' do
expect(described_class).to receive(:workers_uncached).and_call_original
expect(current_for).to eq(current_concurrency)
end
end
context 'with cache' do
let(:skip_cache) { false }
let(:cached_value) { { "TestConcurrencyLimitWorker" => 20 } }
before do
allow(Rails.cache).to receive(:fetch).and_return(cached_value)
end
it 'returns cached current_for' do
expect(described_class).not_to receive(:workers_uncached)
expect(current_for).to eq(20)
end
end
end
describe '.workers' do
subject(:workers) { described_class.workers(skip_cache: skip_cache) }
context 'without cache' do
let(:skip_cache) { true }
it 'returns current_workers' do
expect(workers).to eq('TestConcurrencyLimitWorker' => 10)
end
end
context 'with cache' do
let(:skip_cache) { false }
let(:cached_value) { { "TestConcurrencyLimitWorker" => 20 } }
before do
allow(Rails.cache).to receive(:fetch).and_return(cached_value)
end
it 'returns cached workers' do
expect(described_class).not_to receive(:workers_uncached)
expect(workers).to eq(cached_value)
end
end
end
end
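
The tallying this spec stubs out walks the tuples Sidekiq exposes for in-flight jobs (process id, thread id, work hash with a JSON payload) and counts how many belong to each worker class, with a cached and an uncached path. A small sketch of that idea; the tuple shape mirrors the spec's stub, and the caching here is a plain memo rather than Rails.cache:

require 'json'

# Hypothetical tally of currently running jobs per worker class.
class RunningJobTally
  def initialize(worker_tuples_source)
    @worker_tuples_source = worker_tuples_source # e.g. -> { Sidekiq::Workers.new.to_a }
  end

  # Returns { "WorkerClassName" => count }.
  def workers(skip_cache: false)
    return compute if skip_cache

    @cached ||= compute
  end

  def current_for(worker_name, skip_cache: false)
    workers(skip_cache: skip_cache).fetch(worker_name, 0)
  end

  private

  def compute
    @worker_tuples_source.call.each_with_object(Hash.new(0)) do |(_pid, _tid, work), tally|
      job_class = JSON.parse(work['payload'])['class']
      tally[job_class] += 1
    end
  end
end

tuples = [['pid', 'tid', { 'queue' => 'default', 'payload' => { 'class' => 'TestConcurrencyLimitWorker' }.to_json }]] * 3
RunningJobTally.new(-> { tuples }).current_for('TestConcurrencyLimitWorker') # => 3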


@ -0,0 +1,75 @@
# frozen_string_literal: true
require 'spec_helper'
RSpec.describe Gitlab::SidekiqMiddleware::ConcurrencyLimit::WorkersMap, feature_category: :global_search do
let(:worker_class) do
Class.new do
def self.name
'TestConcurrencyLimitWorker'
end
include ApplicationWorker
concurrency_limit -> { 60 }
def perform(*); end
end
end
before do
stub_const('TestConcurrencyLimitWorker', worker_class)
end
describe '.limit_for' do
let(:expected_limit) { 60 }
it 'accepts worker instance' do
expect(described_class.limit_for(worker: worker_class.new).call).to eq(expected_limit)
end
it 'accepts worker class' do
expect(described_class.limit_for(worker: worker_class).call).to eq(expected_limit)
end
it 'returns nil for unknown worker' do
expect(described_class.limit_for(worker: described_class)).to be_nil
end
it 'returns nil if the feature flag is disabled' do
stub_feature_flags(sidekiq_concurrency_limit_middleware: false)
expect(described_class.limit_for(worker: worker_class)).to be_nil
end
end
describe '.over_the_limit?' do
subject(:over_the_limit?) { described_class.over_the_limit?(worker: worker_class) }
it 'returns false if no limit is set' do
expect(described_class).to receive(:limit_for).and_return(nil)
expect(over_the_limit?).to be_falsey
end
it 'returns false if under the limit' do
allow(::Gitlab::SidekiqMiddleware::ConcurrencyLimit::WorkersConcurrency).to receive(:current_for).and_return(50)
expect(over_the_limit?).to be_falsey
end
it 'returns true if over the limit' do
allow(::Gitlab::SidekiqMiddleware::ConcurrencyLimit::WorkersConcurrency).to receive(:current_for).and_return(100)
expect(over_the_limit?).to be_truthy
end
end
describe '.workers' do
subject(:workers) { described_class.workers }
it 'includes the worker' do
expect(workers).to include(worker_class)
end
end
end
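
The decision this spec checks is: with no registered limit a worker is never considered over the limit; otherwise the evaluated limit proc is compared against the current running count. A hedged sketch of that comparison, with plain hashes standing in for the workers map and the concurrency tally; the real limit_for also returns nil when the sidekiq_concurrency_limit_middleware feature flag is disabled, which this sketch omits:

# Hypothetical limit lookup and over-the-limit check.
class LimitCheck
  def initialize(limits:, current_counts:)
    @limits = limits                 # { "WorkerName" => -> { 60 } }
    @current_counts = current_counts # { "WorkerName" => 75 }
  end

  def limit_for(worker_name)
    @limits[worker_name]
  end

  def over_the_limit?(worker_name)
    limit = limit_for(worker_name)&.call.to_i
    return false if limit <= 0

    @current_counts.fetch(worker_name, 0) >= limit
  end
end

check = LimitCheck.new(limits: { 'TestWorker' => -> { 60 } }, current_counts: { 'TestWorker' => 75 })
check.over_the_limit?('TestWorker')    # => true
check.over_the_limit?('UnknownWorker') # => false (no limit registered)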


@ -2,7 +2,7 @@
require 'spec_helper'
RSpec.describe Gitlab::Tracking::EventDefinition do
RSpec.describe Gitlab::Tracking::EventDefinition, feature_category: :service_ping do
let(:attributes) do
{
description: 'Created issues',
@ -15,8 +15,10 @@ RSpec.describe Gitlab::Tracking::EventDefinition do
product_stage: 'growth',
product_section: 'dev',
product_group: 'group::product analytics',
distribution: %w[ee ce],
tier: %w[free premium ultimate]
distributions: %w[ee ce],
tiers: %w[free premium ultimate],
introduced_by_url: "https://gitlab.com/example/-/merge_requests/123",
milestone: '1.6'
}
end


@ -143,21 +143,24 @@ export default defineConfig({
}
: viteGDKConfig.hmr,
https: false,
    watch: {
      ignored: [
        '**/*.stories.js',
        function ignoreRootFolder(x) {
          /*
           `vite` watches the root folder of gitlab and all of its sub folders
           This is not what we want, because we have temp files, and all kind
           of other stuff. As vite starts its watchers recursively, we just
           ignore if the path matches exactly the root folder
           Additional folders like `ee/app/assets` are defined in
           */
          return x === __dirname;
        },
      ],
    },
    watch:
      viteGDKConfig.hmr === null
        ? null
        : {
            ignored: [
              '**/*.stories.js',
              function ignoreRootFolder(x) {
                /*
                 `vite` watches the root folder of gitlab and all of its sub folders
                 This is not what we want, because we have temp files, and all kind
                 of other stuff. As vite starts its watchers recursively, we just
                 ignore if the path matches exactly the root folder
                 Additional folders like `ee/app/assets` are defined in
                 */
                return x === __dirname;
              },
            ],
          },
},
});