Add latest changes from gitlab-org/gitlab@master

This commit is contained in:
GitLab Bot 2022-06-29 18:08:50 +00:00
parent 4821e72a01
commit 860efb35e2
115 changed files with 421 additions and 2696 deletions

View File

@ -1495,12 +1495,6 @@
changes: ["vendor/gems/mail-smtp_pool/**/*"]
- <<: *if-merge-request-labels-run-all-rspec
.vendor:rules:ipynbdiff:
rules:
- <<: *if-merge-request
changes: ["vendor/gems/ipynbdiff/**/*"]
- <<: *if-merge-request-labels-run-all-rspec
##################
# Releases rules #
##################

View File

@ -5,10 +5,3 @@ vendor mail-smtp_pool:
trigger:
include: vendor/gems/mail-smtp_pool/.gitlab-ci.yml
strategy: depend
vendor ipynbdiff:
extends:
- .vendor:rules:ipynbdiff
needs: []
trigger:
include: vendor/gems/ipynbdiff/.gitlab-ci.yml
strategy: depend

View File

@ -1 +1 @@
3.0.0
3.0.1

View File

@ -103,7 +103,7 @@ gem 'rack-cors', '~> 1.1.0', require: 'rack/cors'
gem 'graphql', '~> 1.13.12'
gem 'graphiql-rails', '~> 1.8'
gem 'apollo_upload_server', '~> 2.1.0'
gem 'graphql-docs', '~> 1.6.0', group: [:development, :test]
gem 'graphql-docs', '~> 2.1.0', group: [:development, :test]
gem 'graphlient', '~> 0.5.0' # Used by BulkImport feature (group::import)
gem 'hashie'
@ -546,6 +546,6 @@ gem 'ipaddress', '~> 0.8.3'
gem 'parslet', '~> 1.8'
gem 'ipynbdiff', path: 'vendor/gems/ipynbdiff'
gem 'ipynbdiff', '0.4.7'
gem 'ed25519', '~> 1.3.0'

View File

@ -1,10 +1,3 @@
PATH
remote: vendor/gems/ipynbdiff
specs:
ipynbdiff (0.4.7)
diffy (~> 3.3)
json (~> 2.5, >= 2.5.1)
PATH
remote: vendor/gems/mail-smtp_pool
specs:
@ -594,13 +587,13 @@ GEM
graphql-client (0.17.0)
activesupport (>= 3.0)
graphql (~> 1.10)
graphql-docs (1.6.0)
graphql-docs (2.1.0)
commonmarker (~> 0.16)
escape_utils (~> 1.2)
extended-markdown-filter (~> 0.4)
gemoji (~> 3.0)
graphql (~> 1.6)
html-pipeline (~> 2.8)
graphql (~> 1.12)
html-pipeline (~> 2.9)
sass (~> 3.4)
grpc (1.42.0)
google-protobuf (~> 3.18)
@ -674,6 +667,9 @@ GEM
invisible_captcha (1.1.0)
rails (>= 4.2)
ipaddress (0.8.3)
ipynbdiff (0.4.7)
diffy (~> 3.3)
json (~> 2.5, >= 2.5.1)
jaeger-client (1.1.0)
opentracing (~> 0.3)
thrift
@ -1563,7 +1559,7 @@ DEPENDENCIES
graphiql-rails (~> 1.8)
graphlient (~> 0.5.0)
graphql (~> 1.13.12)
graphql-docs (~> 1.6.0)
graphql-docs (~> 2.1.0)
grpc (~> 1.42.0)
gssapi
guard-rspec
@ -1579,7 +1575,7 @@ DEPENDENCIES
icalendar
invisible_captcha (~> 1.1.0)
ipaddress (~> 0.8.3)
ipynbdiff!
ipynbdiff (= 0.4.7)
jira-ruby (~> 2.1.4)
js_regex (~> 3.7)
json (~> 2.5.1)

View File

@ -131,6 +131,7 @@
"VulnerabilityLocationSecretDetection"
],
"WorkItemWidget": [
"WorkItemWidgetAssignees",
"WorkItemWidgetDescription",
"WorkItemWidgetHierarchy"
]

View File

@ -3,6 +3,7 @@ import { GlButton, GlSkeletonLoader } from '@gitlab/ui';
import createFlash from '~/flash';
import { __ } from '~/locale';
import glFeatureFlagMixin from '~/vue_shared/mixins/gl_feature_flags_mixin';
import toast from '~/vue_shared/plugins/global_toast';
import simplePoll from '~/lib/utils/simple_poll';
import eventHub from '../../event_hub';
import mergeRequestQueryVariablesMixin from '../../mixins/merge_request_query_variables';
@ -120,13 +121,15 @@ export default {
.poll()
.then((res) => res.data)
.then((res) => {
if (res.rebase_in_progress) {
if (res.rebase_in_progress || res.should_be_rebased) {
continuePolling();
} else {
this.isMakingRequest = false;
if (res.merge_error && res.merge_error.length) {
this.rebasingError = res.merge_error;
} else {
toast(__('Rebase completed'));
}
eventHub.$emit('MRWidgetRebaseSuccess');

View File

@ -16,13 +16,16 @@ module Types
::Types::WorkItems::Widgets::DescriptionType
when ::WorkItems::Widgets::Hierarchy
::Types::WorkItems::Widgets::HierarchyType
when ::WorkItems::Widgets::Assignees
::Types::WorkItems::Widgets::AssigneesType
else
raise "Unknown GraphQL type for widget #{object}"
end
end
orphan_types ::Types::WorkItems::Widgets::DescriptionType,
::Types::WorkItems::Widgets::HierarchyType
::Types::WorkItems::Widgets::HierarchyType,
::Types::WorkItems::Widgets::AssigneesType
end
end
end

View File

@ -0,0 +1,24 @@
# frozen_string_literal: true

module Types
  module WorkItems
    module Widgets
      # GraphQL type for the assignees widget of a work item.
      #
      # Widget-level authorization is intentionally disabled: it would be too
      # granular, and the parent work item is already authorized.
      # rubocop:disable Graphql/AuthorizeTypes
      class AssigneesType < BaseObject
        graphql_name 'WorkItemWidgetAssignees'
        description 'Represents an assignees widget'

        implements Types::WorkItems::WidgetInterface

        field :assignees, Types::UserType.connection_type,
              null: true,
              description: 'Assignees of the work item.'

        field :allows_multiple_assignees, GraphQL::Types::Boolean,
              null: true,
              method: :allows_multiple_assignees?,
              description: 'Indicates whether multiple assignees are allowed.'
      end
      # rubocop:enable Graphql/AuthorizeTypes
    end
  end
end

View File

@ -200,14 +200,21 @@ class Integration < ApplicationRecord
# Provide convenient boolean accessor methods for each serialized property.
# Also keep track of updated properties in a similar way as ActiveModel::Dirty
def self.boolean_accessor(*args)
prop_accessor(*args)
args.each do |arg|
class_eval <<~RUBY, __FILE__, __LINE__ + 1
def #{arg}
return if properties.blank?
# TODO: Allow legacy usage of `.boolean_accessor`, once all integrations
# are converted to the field DSL we can remove this and only call
# `.boolean_accessor` through `.field`.
#
# See https://gitlab.com/groups/gitlab-org/-/epics/7652
prop_accessor(arg) unless method_defined?(arg)
Gitlab::Utils.to_boolean(properties['#{arg}'])
class_eval <<~RUBY, __FILE__, __LINE__ + 1
# Make the original getter available as a private method.
alias_method :#{arg}_before_type_cast, :#{arg}
private(:#{arg}_before_type_cast)
def #{arg}
Gitlab::Utils.to_boolean(#{arg}_before_type_cast)
end
def #{arg}?

View File

@ -21,11 +21,11 @@ module WorkItems
}.freeze
WIDGETS_FOR_TYPE = {
issue: [Widgets::Description, Widgets::Hierarchy],
issue: [Widgets::Description, Widgets::Hierarchy, Widgets::Assignees],
incident: [Widgets::Description],
test_case: [Widgets::Description],
requirement: [Widgets::Description],
task: [Widgets::Description, Widgets::Hierarchy]
task: [Widgets::Description, Widgets::Hierarchy, Widgets::Assignees]
}.freeze
cache_markdown_field :description, pipeline: :single_line

View File

@ -0,0 +1,10 @@
# frozen_string_literal: true

module WorkItems
  module Widgets
    # Widget exposing assignee information by delegating to the underlying
    # work item (provided by Base as +work_item+).
    class Assignees < Base
      delegate :assignees, :allows_multiple_assignees?, to: :work_item
    end
  end
end

View File

@ -7,6 +7,7 @@ class MergeRequestBasicEntity < Grape::Entity
expose :state
expose :source_branch_exists?, as: :source_branch_exists
expose :rebase_in_progress?, as: :rebase_in_progress
expose :should_be_rebased?, as: :should_be_rebased
expose :milestone, using: API::Entities::Milestone
expose :labels, using: LabelEntity
expose :assignees, using: API::Entities::UserBasic

View File

@ -14,7 +14,7 @@ module WaitableWorker
# are not likely to finish within the timeout. This assumes we can process
# 10 jobs per second:
# https://gitlab.com/gitlab-com/gl-infra/scalability/-/issues/205
return bulk_perform_async(args_list) if args_list.length >= 10 * timeout
return bulk_perform_async(args_list) if (args_list.length >= 10 * timeout) || async_only_refresh?
waiter = Gitlab::JobWaiter.new(args_list.size, worker_label: self.to_s)
@ -41,6 +41,10 @@ module WaitableWorker
bulk_perform_async(failed) if failed.present?
end
def async_only_refresh?
Feature.enabled?(:async_only_project_authorizations_refresh)
end
end
def perform(*args)

View File

@ -0,0 +1,8 @@
---
name: async_only_project_authorizations_refresh
introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/90495
rollout_issue_url: https://gitlab.com/gitlab-org/gitlab/-/issues/365585
milestone: '15.2'
type: development
group: group::workspace
default_enabled: false

View File

@ -18415,6 +18415,18 @@ Check permissions for the current user on a work item.
| <a id="workitemtypeid"></a>`id` | [`WorkItemsTypeID!`](#workitemstypeid) | Global ID of the work item type. |
| <a id="workitemtypename"></a>`name` | [`String!`](#string) | Name of the work item type. |
### `WorkItemWidgetAssignees`
Represents an assignees widget.
#### Fields
| Name | Type | Description |
| ---- | ---- | ----------- |
| <a id="workitemwidgetassigneesallowsmultipleassignees"></a>`allowsMultipleAssignees` | [`Boolean`](#boolean) | Indicates whether multiple assignees are allowed. |
| <a id="workitemwidgetassigneesassignees"></a>`assignees` | [`UserCoreConnection`](#usercoreconnection) | Assignees of the work item. (see [Connections](#connections)) |
| <a id="workitemwidgetassigneestype"></a>`type` | [`WorkItemWidgetType`](#workitemwidgettype) | Widget type. |
### `WorkItemWidgetDescription`
Represents a description widget.
@ -20290,6 +20302,7 @@ Type of a work item widget.
| Value | Description |
| ----- | ----------- |
| <a id="workitemwidgettypeassignees"></a>`ASSIGNEES` | Assignees widget. |
| <a id="workitemwidgettypedescription"></a>`DESCRIPTION` | Description widget. |
| <a id="workitemwidgettypehierarchy"></a>`HIERARCHY` | Hierarchy widget. |
@ -21510,6 +21523,7 @@ four standard [pagination arguments](#connection-pagination-arguments):
Implementations:
- [`WorkItemWidgetAssignees`](#workitemwidgetassignees)
- [`WorkItemWidgetDescription`](#workitemwidgetdescription)
- [`WorkItemWidgetHierarchy`](#workitemwidgethierarchy)

View File

@ -369,6 +369,155 @@ harder to 'manage' component and class separately.
Please refer to the Vue section of our [style guide](style/vue.md)
for best practices while writing and testing your Vue components and templates.
## Composition API
With Vue 2.7 it is possible to use [Composition API](https://vuejs.org/guide/introduction.html#api-styles) in Vue components and as standalone composables.
### Prefer `<script>` over `<script setup>`
Composition API allows you to place the logic in the `<script>` section of the component or to have a dedicated `<script setup>` section. We should use `<script>` and add Composition API to components using `setup()` property:
```html
<script>
import { computed } from 'vue';
export default {
name: 'MyComponent',
setup(props) {
const doubleCount = computed(() => props.count*2)
}
}
</script>
```
### Aim to have one API style per component
When adding `setup()` property to Vue component, consider refactoring it to Composition API entirely. It's not always feasible, especially for large components, but we should aim to have one API style per component for readability and maintainability.
### Composables
With Composition API, we have a new way of abstracting logic, including reactive state, into _composables_. A composable is a function that can accept parameters and returns reactive properties and methods to be used in a Vue component.
```javascript
// useCount.js
import { ref } from 'vue';
export function useCount(initialValue) {
const count = ref(initialValue)
function incrementCount() {
ref.value += 1
}
function decrementCount() {
ref.value -= 1
}
return { count, incrementCount, decrementCount }
}
```
```javascript
// MyComponent.vue
import { useCount } from 'useCount'
export default {
name: 'MyComponent',
setup() {
const { count, incrementCount, decrementCount } = useCount(5)
return { count, incrementCount, decrementCount }
}
}
```
#### Prefix function and file names with `use`
Common naming convention in Vue for composables is to prefix them with `use` and then refer to composable functionality briefly (`useBreakpoints`, `useGeolocation` etc). The same rule applies to the `.js` files containing composables - they should start with `use_` even if the file contains more than one composable.
#### Avoid lifecycle pitfalls
When building a composable, we should aim to keep it as simple as possible. Lifecycle hooks add complexity to composables and might lead to unexpected side effects. In order to avoid that we should follow these principles:
- minimize lifecycle hooks usage whenever possible, prefer accepting/returning callbacks instead;
- if you need to have lifecycle hooks in the composable, make sure this composable also performs a cleanup: if we are adding a listener on `onMounted`, we should remove it on `onUnmounted` within the same composable;
- always set up lifecycle hooks immediately:
```javascript
// bad
const useAsyncLogic = () => {
const action = async () => {
await doSomething();
onMounted(doSomethingElse);
};
return { action };
};
// OK
const useAsyncLogic = () => {
const done = ref(false);
onMounted(() => {
watch(
done,
() => done.value && doSomethingElse(),
{ immediate: true },
);
});
const action = async () => {
await doSomething();
done.value = true;
};
return { action };
};
```
#### Avoid escape hatches
It might be tempting to write a composable that does everything as a black box with the help of some of the escape hatches that Vue provides. But in most cases this makes them too complex and hard to maintain. One of these escape hatches is the `getCurrentInstance` method, which returns an instance of the current rendering component. Instead of using that method you should prefer passing down the data or methods to a composable via arguments.
```javascript
const useSomeLogic = () => {
doSomeLogic();
getCurrentInstance().emit('done'); // bad
};
```
```javascript
const done = () => emit('done');
const useSomeLogic = (done) => {
doSomeLogic();
done(); // good, composable doesn't try to be too smart
}
```
#### Composables and Vuex
We should always prefer to avoid using Vuex state in composables. In case it's not possible we should use props to receive that state and emit events from the `setup` to update the Vuex state. A parent component should be responsible to get that state from Vuex and mutate it on events emitted from a child. You should **never mutate a state that's coming down from a prop**. If a composable needs to mutate a Vuex state it should use a callback to emit an event.
```javascript
const useAsyncComposable = ({ state, update }) => {
const start = async () => {
const newState = await doSomething(state);
update(newState);
};
return { start };
};
const ComponentWithComposable = {
setup(props, { emit }) {
const update = (data) => emit('update', data);
const state = computed(() => props.state); // state from Vuex
const { start } = useAsyncComposable({ state, update });
start();
},
};
```
#### Testing composables
<!-- TBD -->
## Testing Vue Components
Please refer to the [Vue testing style guide](style/vue.md#vue-testing)

View File

@ -31482,6 +31482,9 @@ msgstr ""
msgid "Rebase"
msgstr ""
msgid "Rebase completed"
msgstr ""
msgid "Rebase in progress"
msgstr ""

View File

@ -7,6 +7,7 @@
"source_branch_exists": { "type": "boolean" },
"merge_error": { "type": ["string", "null"] },
"rebase_in_progress": { "type": "boolean" },
"should_be_rebased": { "type": "boolean" },
"allow_collaboration": { "type": "boolean"},
"allow_maintainer_to_push": { "type": "boolean"},
"assignees": {

View File

@ -2,6 +2,9 @@ import { shallowMount } from '@vue/test-utils';
import { nextTick } from 'vue';
import WidgetRebase from '~/vue_merge_request_widget/components/states/mr_widget_rebase.vue';
import eventHub from '~/vue_merge_request_widget/event_hub';
import toast from '~/vue_shared/plugins/global_toast';
jest.mock('~/vue_shared/plugins/global_toast');
let wrapper;
@ -261,6 +264,7 @@ describe('Merge request widget rebase component', () => {
return Promise.resolve({
data: {
rebase_in_progress: false,
should_be_rebased: false,
merge_error: null,
},
});
@ -280,6 +284,7 @@ describe('Merge request widget rebase component', () => {
await nextTick();
expect(eventHub.$emit).toHaveBeenCalledWith('MRWidgetRebaseSuccess');
expect(toast).toHaveBeenCalledWith('Rebase completed');
});
});
});

View File

@ -17,6 +17,7 @@ RSpec.describe Types::WorkItems::WidgetInterface do
where(:widget_class, :widget_type_name) do
WorkItems::Widgets::Description | Types::WorkItems::Widgets::DescriptionType
WorkItems::Widgets::Hierarchy | Types::WorkItems::Widgets::HierarchyType
WorkItems::Widgets::Assignees | Types::WorkItems::Widgets::AssigneesType
end
with_them do

View File

@ -0,0 +1,11 @@
# frozen_string_literal: true

require 'spec_helper'

RSpec.describe Types::WorkItems::Widgets::AssigneesType do
  it 'exposes the expected fields' do
    # The :type field comes from the WidgetInterface implementation.
    expect(described_class).to have_graphql_fields(:assignees, :allows_multiple_assignees, :type)
  end
end

View File

@ -1083,11 +1083,8 @@ RSpec.describe Integration do
end
it 'provides boolean accessors for checkbox fields' do
integration.boolean = 'yes'
expect(integration.boolean?).to be(true)
integration.boolean = nil
expect(integration.boolean?).to be(false)
expect(integration).to respond_to(:boolean)
expect(integration).to respond_to(:boolean?)
expect(integration).not_to respond_to(:foo?)
expect(integration).not_to respond_to(:bar?)
@ -1129,11 +1126,12 @@ RSpec.describe Integration do
describe 'boolean_accessor' do
let(:klass) do
Class.new(Integration) do
prop_accessor :test_value
boolean_accessor :test_value
end
end
let(:integration) { klass.new(properties: { test_value: input }) }
let(:integration) { klass.new(test_value: input) }
where(:input, :method_result, :predicate_method_result) do
true | true | true
@ -1163,6 +1161,35 @@ RSpec.describe Integration do
test_value: be(method_result),
test_value?: be(predicate_method_result)
)
# Make sure the original value is stored correctly
expect(integration.send(:test_value_before_type_cast)).to eq(input)
expect(integration.properties).to include('test_value' => input)
end
context 'when using data fields' do
let(:klass) do
Class.new(Integration) do
field :project_url, storage: :data_fields, type: 'checkbox'
def data_fields
issue_tracker_data || self.build_issue_tracker_data
end
end
end
let(:integration) { klass.new(project_url: input) }
it 'has the correct value' do
expect(integration).to have_attributes(
project_url: be(method_result),
project_url?: be(predicate_method_result)
)
# Make sure the original value is stored correctly
expect(integration.send(:project_url_before_type_cast)).to eq(input == false ? 'false' : input)
expect(integration.properties).not_to include('project_url')
end
end
end
@ -1174,6 +1201,24 @@ RSpec.describe Integration do
test_value?: be(false)
)
end
context 'when getter is not defined' do
let(:input) { true }
let(:klass) do
Class.new(Integration) do
boolean_accessor :test_value
end
end
it 'defines a prop_accessor' do
expect(integration).to have_attributes(
test_value: true,
test_value?: true
)
expect(integration.properties['test_value']).to be(true)
end
end
end
describe '#attributes' do

View File

@ -38,7 +38,8 @@ RSpec.describe WorkItem do
it 'returns instances of supported widgets' do
is_expected.to match_array([instance_of(WorkItems::Widgets::Description),
instance_of(WorkItems::Widgets::Hierarchy)])
instance_of(WorkItems::Widgets::Hierarchy),
instance_of(WorkItems::Widgets::Assignees)])
end
end

View File

@ -65,7 +65,8 @@ RSpec.describe WorkItems::Type do
it 'returns list of all possible widgets' do
is_expected.to match_array([::WorkItems::Widgets::Description,
::WorkItems::Widgets::Hierarchy])
::WorkItems::Widgets::Hierarchy,
::WorkItems::Widgets::Assignees])
end
end

View File

@ -0,0 +1,31 @@
# frozen_string_literal: true

require 'spec_helper'

RSpec.describe WorkItems::Widgets::Assignees do
  let_it_be(:work_item) { create(:work_item, assignees: [create(:user)]) }

  subject(:widget) { described_class.new(work_item) }

  describe '.type' do
    it 'is :assignees' do
      expect(described_class.type).to eq(:assignees)
    end
  end

  describe '#type' do
    it 'is :assignees' do
      expect(widget.type).to eq(:assignees)
    end
  end

  describe '#assignees' do
    it 'delegates to the work item' do
      expect(widget.assignees).to eq(work_item.assignees)
    end
  end

  describe '#allows_multiple_assignees?' do
    it 'delegates to the work item' do
      expect(widget.allows_multiple_assignees?).to eq(work_item.allows_multiple_assignees?)
    end
  end
end

View File

@ -64,16 +64,13 @@ RSpec.describe 'Query.work_item(id)' do
it 'returns widget information' do
expect(work_item_data).to include(
'id' => work_item.to_gid.to_s,
'widgets' => match_array([
'widgets' => include(
hash_including(
'type' => 'DESCRIPTION',
'description' => work_item.description,
'descriptionHtml' => ::MarkupHelper.markdown_field(work_item, :description, {})
),
hash_including(
'type' => 'HIERARCHY'
)
])
)
)
end
end
@ -101,10 +98,7 @@ RSpec.describe 'Query.work_item(id)' do
it 'returns widget information' do
expect(work_item_data).to include(
'id' => work_item.to_gid.to_s,
'widgets' => match_array([
hash_including(
'type' => 'DESCRIPTION'
),
'widgets' => include(
hash_including(
'type' => 'HIERARCHY',
'parent' => nil,
@ -113,7 +107,7 @@ RSpec.describe 'Query.work_item(id)' do
hash_including('id' => child_link2.work_item.to_gid.to_s)
]) }
)
])
)
)
end
@ -137,10 +131,7 @@ RSpec.describe 'Query.work_item(id)' do
it 'filters out not accessible children or parent' do
expect(work_item_data).to include(
'id' => work_item.to_gid.to_s,
'widgets' => match_array([
hash_including(
'type' => 'DESCRIPTION'
),
'widgets' => include(
hash_including(
'type' => 'HIERARCHY',
'parent' => nil,
@ -148,7 +139,7 @@ RSpec.describe 'Query.work_item(id)' do
hash_including('id' => child_link1.work_item.to_gid.to_s)
]) }
)
])
)
)
end
end
@ -160,20 +151,57 @@ RSpec.describe 'Query.work_item(id)' do
it 'returns parent information' do
expect(work_item_data).to include(
'id' => work_item.to_gid.to_s,
'widgets' => match_array([
hash_including(
'type' => 'DESCRIPTION'
),
'widgets' => include(
hash_including(
'type' => 'HIERARCHY',
'parent' => hash_including('id' => parent_link.work_item_parent.to_gid.to_s),
'children' => { 'nodes' => match_array([]) }
)
])
)
)
end
end
end
describe 'assignees widget' do
let(:assignees) { create_list(:user, 2) }
let(:work_item) { create(:work_item, project: project, assignees: assignees) }
let(:work_item_fields) do
<<~GRAPHQL
id
widgets {
type
... on WorkItemWidgetAssignees {
allowsMultipleAssignees
assignees {
nodes {
id
username
}
}
}
}
GRAPHQL
end
it 'returns widget information' do
expect(work_item_data).to include(
'id' => work_item.to_gid.to_s,
'widgets' => include(
hash_including(
'type' => 'ASSIGNEES',
'allowsMultipleAssignees' => boolean,
'assignees' => {
'nodes' => match_array(
assignees.map { |a| { 'id' => a.to_gid.to_s, 'username' => a.username } }
)
}
)
)
)
end
end
end
context 'when an Issue Global ID is provided' do

View File

@ -659,7 +659,7 @@ RSpec.describe Groups::TransferService, :sidekiq_inline do
it 'schedules authorizations job' do
expect(AuthorizedProjectUpdate::ProjectRecalculateWorker).to receive(:bulk_perform_async)
.with(array_including(group.all_projects.ids.map { |id| [id, anything] }))
.with(array_including(group.all_projects.ids.map { |id| [id] }))
transfer_service.execute(new_parent_group)
end

View File

@ -50,11 +50,25 @@ RSpec.describe WaitableWorker do
expect(worker.counter).to eq(6)
end
it 'runs > 3 jobs using sidekiq and a waiter key' do
expect(worker).to receive(:bulk_perform_async)
.with([[1, anything], [2, anything], [3, anything], [4, anything]])
context 'when the feature flag `async_only_project_authorizations_refresh` is turned off' do
before do
stub_feature_flags(async_only_project_authorizations_refresh: false)
end
worker.bulk_perform_and_wait([[1], [2], [3], [4]])
it 'runs > 3 jobs using sidekiq and a waiter key' do
expect(worker).to receive(:bulk_perform_async)
.with([[1, anything], [2, anything], [3, anything], [4, anything]])
worker.bulk_perform_and_wait([[1], [2], [3], [4]])
end
end
it 'runs > 3 jobs using sidekiq and no waiter key' do
arguments = 1.upto(5).map { |i| [i] }
expect(worker).to receive(:bulk_perform_async).with(arguments)
worker.bulk_perform_and_wait(arguments, timeout: 2)
end
it 'runs > 10 * timeout jobs using sidekiq and no waiter key' do

View File

@ -22,7 +22,7 @@ module Tooling
@output_dir = output_dir
@template = template
@layout = Haml::Engine.new(File.read(template))
@parsed_schema = GraphQLDocs::Parser.new(schema.graphql_definition, {}).parse
@parsed_schema = GraphQLDocs::Parser.new(schema, {}).parse
@schema = schema
@seen = Set.new
end

View File

@ -1,2 +0,0 @@
*.gem
.bundle

View File

@ -1,32 +0,0 @@
# You can override the included template(s) by including variable overrides
# SAST customization: https://docs.gitlab.com/ee/user/application_security/sast/#customizing-the-sast-settings
# Secret Detection customization: https://docs.gitlab.com/ee/user/application_security/secret_detection/#customizing-settings
# Dependency Scanning customization: https://docs.gitlab.com/ee/user/application_security/dependency_scanning/#customizing-the-dependency-scanning-settings
# Note that environment variables can be set in several places
# See https://docs.gitlab.com/ee/ci/variables/#cicd-variable-precedence
workflow:
rules:
- if: $CI_MERGE_REQUEST_ID
.rspec:
cache:
key: ipynbdiff
paths:
- vendor/gems/ipynbdiff/vendor/ruby
before_script:
- cd vendor/gems/ipynbdiff
- ruby -v # Print out ruby version for debugging
- gem install bundler --no-document # Bundler is not installed with the image
- bundle config set --local path 'vendor' # Install dependencies into ./vendor/ruby
- bundle config set with 'development'
- bundle install -j $(nproc)
script:
- bundle exec rspec
rspec-2.7:
image: "ruby:2.7"
extends: .rspec
rspec-3.0:
image: "ruby:3.0"
extends: .rspec

View File

@ -1,5 +0,0 @@
# frozen_string_literal: true
source 'https://rubygems.org'
gemspec

View File

@ -1,64 +0,0 @@
PATH
remote: .
specs:
ipynbdiff (0.4.7)
diffy (~> 3.3)
json (~> 2.5, >= 2.5.1)
GEM
remote: https://rubygems.org/
specs:
ast (2.4.2)
binding_ninja (0.2.3)
coderay (1.1.3)
diff-lcs (1.5.0)
diffy (3.4.2)
json (2.6.2)
method_source (1.0.0)
parser (3.1.2.0)
ast (~> 2.4.1)
proc_to_ast (0.1.0)
coderay
parser
unparser
pry (0.14.1)
coderay (~> 1.1)
method_source (~> 1.0)
rake (13.0.6)
rspec (3.11.0)
rspec-core (~> 3.11.0)
rspec-expectations (~> 3.11.0)
rspec-mocks (~> 3.11.0)
rspec-core (3.11.0)
rspec-support (~> 3.11.0)
rspec-expectations (3.11.0)
diff-lcs (>= 1.2.0, < 2.0)
rspec-support (~> 3.11.0)
rspec-mocks (3.11.1)
diff-lcs (>= 1.2.0, < 2.0)
rspec-support (~> 3.11.0)
rspec-parameterized (0.5.1)
binding_ninja (>= 0.2.3)
parser
proc_to_ast
rspec (>= 2.13, < 4)
unparser
rspec-support (3.11.0)
unparser (0.6.5)
diff-lcs (~> 1.3)
parser (>= 3.1.0)
PLATFORMS
x86_64-darwin-20
x86_64-linux
DEPENDENCIES
bundler (~> 2.2)
ipynbdiff!
pry (~> 0.14)
rake (~> 13.0)
rspec (~> 3.10)
rspec-parameterized (~> 0.5.1)
BUNDLED WITH
2.3.16

View File

@ -1,21 +0,0 @@
The MIT License (MIT)
Copyright (c) 2016-2021 GitLab B.V.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

View File

@ -1,56 +0,0 @@
# IpynbDiff: Better diff for Jupyter Notebooks
This is a simple diff tool that cleans up Jupyter notebooks, transforming each [notebook](example/1/from.ipynb)
into a [readable markdown file](example/1/from_html.md), keeping the output of cells, and running the
diff after. Markdowns are generated using an opinionated Jupyter to Markdown conversion. This means
that the entire file is readable on the diff.
The results are diffs that are much easier to read:
| Diff | IpynbDiff |
| ----------------------------------- | ----------------------------------------------------- |
| [Diff text](example/diff.txt) | [IpynbDiff text](example/ipynbdiff_percent.txt) |
| ![Diff image](example/img/diff.png) | ![IpynbDiff image](example/img/ipynbdiff_percent.png) |
This started as a port of [ipynbdiff](https://gitlab.com/gitlab-org/incubation-engineering/mlops/poc/ipynbdiff),
but now has extended functionality, although it does not work as a git driver.
## Usage
### Generating diffs
```ruby
IpynbDiff.diff(from_path, to_path, options)
```
Options:
```ruby
@default_transform_options = {
preprocess_input: true, # Whether the input should be transformed
write_output_to: nil, # Pass a path to save the output to a file
format: :text, # These are the formats Diffy accepts https://github.com/samg/diffy
sources_are_files: false, # Whether to use the from/to as string or path to a file
raise_if_invalid_notebook: false, # Raises an error if the notebooks are invalid, otherwise returns nil
transform_options: @default_transform_options, # See below for transform options
diff_opts: {
include_diff_info: false # These are passed to Diffy https://github.com/samg/diffy
}
}
```
### Transforming the notebooks
It might be necessary to have the transformed files in addition to the diff.
```ruby
IpynbDiff.transform(notebook, options)
```
Options:
```ruby
@default_transform_options = {
include_frontmatter: false, # Whether to include or not the notebook metadata (kernel, language, etc)
}
```

View File

@ -1,34 +0,0 @@
# frozen_string_literal: true
lib = File.expand_path('lib/..', __dir__)
$LOAD_PATH.unshift lib unless $LOAD_PATH.include?(lib)
require 'lib/version'
Gem::Specification.new do |s|
s.name = 'ipynbdiff'
s.version = IpynbDiff::VERSION
s.summary = 'Human Readable diffs for Jupyter Notebooks'
s.description = 'Better diff for Jupyter Notebooks by first preprocessing them and removing clutter'
s.authors = ['Eduardo Bonet']
s.email = 'ebonet@gitlab.com'
# Specify which files should be added to the gem when it is released.
# The `git ls-files -z` loads the files in the RubyGem that have been added into git.
s.files = Dir.chdir(File.expand_path(__dir__)) do
`git ls-files -z`.split("\x0").reject { |f| f.match(%r{^(spec|example)/}) }
end
s.homepage =
'https://gitlab.com/gitlab-org/incubation-engineering/mlops/rb-ipynbdiff'
s.license = 'MIT'
s.require_paths = ['lib']
s.add_runtime_dependency 'diffy', '~> 3.3'
s.add_runtime_dependency 'json', '~> 2.5', '>= 2.5.1'
s.add_development_dependency 'bundler', '~> 2.2'
s.add_development_dependency 'pry', '~> 0.14'
s.add_development_dependency 'rake', '~> 13.0'
s.add_development_dependency 'rspec', '~> 3.10'
s.add_development_dependency 'rspec-parameterized', '~> 0.5.1'
end

View File

@ -1,20 +0,0 @@
# frozen_string_literal: true

# Custom differ for Jupyter Notebooks
module IpynbDiff
  require 'delegate'

  # The result of a diff operation.
  #
  # Delegates to a Diffy::Diff built from the text form of the two
  # transformed notebooks, while keeping the original +from+ and +to+
  # objects accessible to callers.
  class Diff < SimpleDelegator
    require 'diffy'

    attr_reader :from, :to

    # from/to must respond to #as_text; diffy_opts is forwarded to Diffy::Diff.
    def initialize(from, to, diffy_opts)
      super(Diffy::Diff.new(from.as_text, to.as_text, **diffy_opts))
      @from = from
      @to = to
    end
  end
end

View File

@ -1,218 +0,0 @@
# frozen_string_literal: true

module IpynbDiff
  # Raised when the scanned text does not match the expected JSON grammar
  # (unexpected character or premature end of input).
  class InvalidTokenError < StandardError
  end

  # Creates a symbol map for a ipynb file (JSON format): a hash mapping a
  # dot-separated JSON path (e.g. ".cells.0.source") to the 0-based line
  # number where that path's value starts in the raw notebook text.
  class IpynbSymbolMap
    class << self
      # Parses +notebook+ (raw JSON string) and returns the path -> line map.
      # Property names listed in +objects_to_ignore+ are skipped entirely.
      def parse(notebook, objects_to_ignore = [])
        IpynbSymbolMap.new(notebook, objects_to_ignore).parse('')
      end
    end

    attr_reader :current_line, :char_idx, :results

    WHITESPACE_CHARS = ["\t", "\r", ' ', "\n"].freeze

    # Characters that terminate a bare (unquoted) JSON value such as a
    # number, true/false or null.
    VALUE_STOPPERS = [',', '[', ']', '{', '}', *WHITESPACE_CHARS].freeze

    def initialize(notebook, objects_to_ignore = [])
      @chars = notebook.chars
      @current_line = 0
      @char_idx = 0
      @results = {}
      @objects_to_ignore = objects_to_ignore
    end

    # Parses the JSON value at the current position, recording the line
    # number of every nested path under +prefix+. Returns the accumulated
    # results hash.
    def parse(prefix = '.')
      raise_if_file_ended
      skip_whitespaces
      if (c = current_char) == '"'
        parse_string
      elsif c == '['
        parse_array(prefix)
      elsif c == '{'
        parse_object(prefix)
      else
        parse_value
      end
      results
    end

    # Parses a JSON array, mapping each element to "#{prefix}.<index>".
    def parse_array(prefix)
      # [1, 2, {"some": "object"}, [1]]
      i = 0
      current_should_be '['
      loop do
        raise_if_file_ended
        break if skip_beginning(']')
        new_prefix = "#{prefix}.#{i}"
        add_result(new_prefix, current_line)
        parse(new_prefix)
        i += 1
      end
    end

    # Parses a JSON object, mapping each property to "#{prefix}.<name>".
    # Properties named in @objects_to_ignore are consumed without recording.
    def parse_object(prefix)
      # {"name":"value", "another_name": [1, 2, 3]}
      current_should_be '{'
      loop do
        raise_if_file_ended
        break if skip_beginning('}')
        prop_name = parse_string(return_value: true)
        next_and_skip_whitespaces
        current_should_be ':'
        next_and_skip_whitespaces
        if @objects_to_ignore.include? prop_name
          skip
        else
          new_prefix = "#{prefix}.#{prop_name}"
          add_result(new_prefix, current_line)
          parse(new_prefix)
        end
      end
    end

    # Scans a double-quoted string, honouring backslash escapes. Returns the
    # unquoted content when +return_value+ is true, otherwise nil.
    def parse_string(return_value: false)
      current_should_be '"'
      init_idx = @char_idx
      loop do
        increment_char_index
        raise_if_file_ended
        if current_char == '"' && !prev_backslash?
          init_idx += 1
          break
        end
      end
      @chars[init_idx...@char_idx].join if return_value
    end

    def add_result(key, line_number)
      @results[key] = line_number
    end

    # Consumes a bare value (number, boolean, null) without recording it.
    # raise_if_file_ended returns false unless it raises, so it doubles as
    # the loop guard here.
    def parse_value
      increment_char_index until raise_if_file_ended || VALUE_STOPPERS.include?(current_char)
    end

    def skip_whitespaces
      while WHITESPACE_CHARS.include?(current_char)
        raise_if_file_ended
        check_for_new_line
        increment_char_index
      end
    end

    def increment_char_index
      @char_idx += 1
    end

    def next_and_skip_whitespaces
      increment_char_index
      skip_whitespaces
    end

    def current_char
      raise_if_file_ended
      @chars[@char_idx]
    end

    # True when the previous character is an escaping backslash. A literal
    # backslash that is itself escaped ("\\\\") does not count.
    def prev_backslash?
      @chars[@char_idx - 1] == '\\' && @chars[@char_idx - 2] != '\\'
    end

    def current_should_be(another_char)
      raise InvalidTokenError unless current_char == another_char
    end

    def check_for_new_line
      @current_line += 1 if current_char == "\n"
    end

    # Raises InvalidTokenError when the cursor is past the end of input;
    # otherwise evaluates to false (see #parse_value).
    def raise_if_file_ended
      @char_idx >= @chars.size && raise(InvalidTokenError)
    end

    # Consumes the value at the current position without recording any paths.
    def skip
      raise_if_file_ended
      skip_whitespaces
      if (c = current_char) == '"'
        parse_string
      elsif c == '['
        skip_array
      elsif c == '{'
        skip_object
      else
        parse_value
      end
    end

    def skip_array
      loop do
        raise_if_file_ended
        break if skip_beginning(']')
        skip
      end
    end

    def skip_object
      loop do
        raise_if_file_ended
        break if skip_beginning('}')
        parse_string
        next_and_skip_whitespaces
        current_should_be ':'
        next_and_skip_whitespaces
        skip
      end
    end

    # Advances past whitespace and element separators; returns true when
    # +closing_char+ is reached, signalling the caller to stop iterating the
    # current array/object.
    def skip_beginning(closing_char)
      check_for_new_line
      next_and_skip_whitespaces
      return true if current_char == closing_char
      next_and_skip_whitespaces if current_char == ','
    end
  end
end

View File

@ -1,23 +0,0 @@
# frozen_string_literal: true

# Human Readable Jupyter Diffs
module IpynbDiff
  require 'transformer'
  require 'diff'

  # Diffs two raw notebooks after rendering each one to markdown.
  # Returns a Diff, or nil when either notebook is invalid and
  # +raise_if_invalid_nb+ is false.
  def self.diff(from, to, raise_if_invalid_nb: false, include_frontmatter: false, hide_images: false, diffy_opts: {})
    converter = Transformer.new(include_frontmatter: include_frontmatter, hide_images: hide_images)
    left = converter.transform(from)
    right = converter.transform(to)
    Diff.new(left, right, diffy_opts)
  rescue InvalidNotebookError
    raise if raise_if_invalid_nb
  end

  # Renders a single raw notebook to its markdown text form.
  # Returns nil when +notebook+ is nil, or when it is invalid and
  # +raise_errors+ is false.
  def self.transform(notebook, raise_errors: false, include_frontmatter: true, hide_images: false)
    return unless notebook

    converter = Transformer.new(include_frontmatter: include_frontmatter, hide_images: hide_images)
    converter.transform(notebook).as_text
  rescue InvalidNotebookError
    raise if raise_errors
  end
end

View File

@ -1,83 +0,0 @@
# frozen_string_literal: true

module IpynbDiff
  # Transforms Jupyter output data into markdown
  class OutputTransformer
    require 'symbolized_markdown_helper'
    include SymbolizedMarkdownHelper

    # Placeholder emitted instead of image data when hide_images is enabled.
    HIDDEN_IMAGE_OUTPUT = ' [Hidden Image Output]'

    # Mime types rendered for each output type, in order of preference.
    ORDERED_KEYS = {
      'execute_result' => %w[image/png image/svg+xml image/jpeg text/markdown text/latex text/plain],
      'display_data' => %w[image/png image/svg+xml image/jpeg text/markdown text/latex],
      'stream' => %w[text]
    }.freeze

    def initialize(hide_images: false)
      @hide_images = hide_images
    end

    # Converts one cell output into an array of symbolized markdown rows
    # ({ symbol:, content: } hashes). Unrecognised output types yield [].
    def transform(output, symbol)
      transformed = case (output_type = output['output_type'])
                    when 'error'
                      transform_error(output['traceback'], symbol / 'traceback')
                    when 'execute_result', 'display_data'
                      transform_non_error(ORDERED_KEYS[output_type], output['data'], symbol / 'data')
                    when 'stream'
                      transform_element('text', output['text'], symbol)
                    end
      transformed ? decorate_output(transformed, output, symbol) : []
    end

    # Prepends the "%%%% Output: <type>" header (with spacer rows) to the
    # transformed rows.
    def decorate_output(output_rows, output, symbol)
      [
        _,
        _(symbol, %(%%%% Output: #{output['output_type']})),
        _,
        *output_rows
      ]
    end

    # Renders an error traceback, stripping ANSI colour escape sequences so
    # the diff shows plain text.
    def transform_error(traceback, symbol)
      traceback.map.with_index do |t, idx|
        t.split("\n").map do |l|
          _(symbol / idx, l.gsub(/\[[0-9][0-9;]*m/, '').sub("\u001B", ' ').gsub(/\u001B/, '').rstrip)
        end
      end
    end

    # Renders every mime type present in +elements+, keeping the preference
    # order given by +accepted_keys+.
    def transform_non_error(accepted_keys, elements, symbol)
      accepted_keys.filter { |key| elements.key?(key) }.map do |key|
        transform_element(key, elements[key], symbol)
      end
    end

    # Dispatches one mime-typed payload to the image or text renderer.
    # Returns nil for unsupported mime types.
    def transform_element(output_type, output_element, symbol_prefix)
      new_symbol = symbol_prefix / output_type
      case output_type
      when 'image/png', 'image/jpeg'
        transform_image(output_type + ';base64', output_element, new_symbol)
      when 'image/svg+xml'
        transform_image(output_type + ';utf8', output_element, new_symbol)
      when 'text/markdown', 'text/latex', 'text/plain', 'text'
        transform_text(output_element, new_symbol)
      end
    end

    # Inlines the image as a single-line markdown data URI, or emits the
    # hidden-image placeholder when images are suppressed.
    def transform_image(image_type, image_content, symbol)
      return _(nil, HIDDEN_IMAGE_OUTPUT) if @hide_images
      lines = image_content.is_a?(Array) ? image_content : [image_content]
      single_line = lines.map(&:strip).join.gsub(/\s+/, ' ')
      _(symbol, " ![](data:#{image_type},#{single_line})")
    end

    # Indents each text line by one space, one row per source line.
    def transform_text(text_content, symbol)
      symbolize_array(symbol, text_content) { |l| " #{l.rstrip}" }
    end
  end
end

View File

@ -1,26 +0,0 @@
# frozen_string_literal: true

module IpynbDiff
  # Helper functions for building symbolized markdown rows.
  module SymbolizedMarkdownHelper
    # Builds a single output row: a hash pairing a JSON path symbol with a
    # line of markdown content. Both arguments are optional so that `_` on
    # its own produces an empty spacer row.
    def _(symbol = nil, content = '')
      { symbol: symbol, content: content }
    end

    # Maps an array of lines to rows indexed under +symbol+ ("sym.0",
    # "sym.1", ...), transforming each line with the given block. A scalar
    # becomes one row keyed directly by +symbol+, untouched by the block.
    def symbolize_array(symbol, content, &block)
      return _(symbol, content) unless content.is_a?(Array)

      content.each_with_index.map { |line, index| _(symbol / index, block.call(line)) }
    end
  end

  # Simple wrapper for a string holding a dot-separated JSON path.
  class JsonSymbol < String
    # Appends one path segment — or each element of an array of segments —
    # returning a new JsonSymbol.
    def /(other)
      segments = other.is_a?(Array) ? [self, *other] : [self, other]
      JsonSymbol.new(segments.join('.'))
    end
  end
end

View File

@ -1,20 +0,0 @@
# frozen_string_literal: true

module IpynbDiff
  # Notebook that was transformed into md, including location of source cells
  class TransformedNotebook
    attr_reader :blocks

    # Joins the markdown content of every block with newlines.
    def as_text
      @blocks.map { |block| block[:content] }.join("\n")
    end

    private

    # Builds one block per generated line, resolving each line's symbol to
    # its original line number via +symbol_map+ (nil when no symbol).
    def initialize(lines = [], symbol_map = {})
      @blocks = lines.map do |line|
        symbol = line[:symbol]
        {
          content: line[:content],
          source_symbol: symbol,
          source_line: symbol && symbol_map[symbol]
        }
      end
    end
  end
end

View File

@ -1,101 +0,0 @@
# frozen_string_literal: true

module IpynbDiff
  # Raised when input is not parseable JSON or lacks the "cells" key.
  class InvalidNotebookError < StandardError
  end

  # Returns a markdown version of the Jupyter Notebook
  class Transformer
    require 'json'
    require 'yaml'
    require 'output_transformer'
    require 'symbolized_markdown_helper'
    require 'ipynb_symbol_map'
    require 'transformed_notebook'
    include SymbolizedMarkdownHelper

    # NOTE(review): these are class-level instance variables, distinct from
    # the per-instance @include_frontmatter/@objects_to_ignore set below —
    # they do not appear to be read anywhere in this file; candidates for
    # removal (verify no external reader first).
    @include_frontmatter = true
    @objects_to_ignore = ['application/javascript', 'application/vnd.holoviews_load.v0+json']

    def initialize(include_frontmatter: true, hide_images: false)
      @include_frontmatter = include_frontmatter
      @hide_images = hide_images
      @output_transformer = OutputTransformer.new(hide_images: hide_images)
    end

    # Parses +notebook+ (raw JSON) and returns the hash; raises
    # InvalidNotebookError on malformed JSON or a missing "cells" key.
    def validate_notebook(notebook)
      notebook_json = JSON.parse(notebook)
      return notebook_json if notebook_json.key?('cells')
      raise InvalidNotebookError
    rescue JSON::ParserError
      raise InvalidNotebookError
    end

    # Converts the raw notebook into a TransformedNotebook, pairing every
    # generated markdown line with its location in the original JSON text.
    # A nil notebook yields an empty TransformedNotebook.
    def transform(notebook)
      return TransformedNotebook.new unless notebook
      notebook_json = validate_notebook(notebook)
      transformed = transform_document(notebook_json)
      symbol_map = IpynbSymbolMap.parse(notebook)
      TransformedNotebook.new(transformed, symbol_map)
    end

    # Renders every cell (plus optional YAML frontmatter) into a flat array
    # of symbolized markdown rows.
    def transform_document(notebook)
      symbol = JsonSymbol.new('.cells')
      transformed_blocks = notebook['cells'].map.with_index do |cell, idx|
        decorate_cell(transform_cell(cell, notebook, symbol / idx), cell, symbol / idx)
      end
      transformed_blocks.prepend(transform_metadata(notebook)) if @include_frontmatter
      transformed_blocks.flatten
    end

    # Wraps a cell's rows with the "%% Cell type:... id:... tags:..." header.
    def decorate_cell(rows, cell, symbol)
      tags = cell['metadata']&.fetch('tags', [])
      type = cell['cell_type'] || 'raw'
      [
        _(symbol, %(%% Cell type:#{type} id:#{cell['id']} tags:#{tags&.join(',')})),
        _,
        rows,
        _
      ]
    end

    def transform_cell(cell, notebook, symbol)
      cell['cell_type'] == 'code' ? transform_code_cell(cell, notebook, symbol) : transform_text_cell(cell, symbol)
    end

    # Code cells become a fenced block (tagged with the kernel language)
    # followed by their transformed outputs.
    def transform_code_cell(cell, notebook, symbol)
      [
        _(symbol / 'source', %(``` #{notebook.dig('metadata', 'kernelspec', 'language') || ''})),
        symbolize_array(symbol / 'source', cell['source'], &:rstrip),
        _(nil, '```'),
        cell['outputs'].map.with_index do |output, idx|
          @output_transformer.transform(output, symbol / ['outputs', idx])
        end
      ]
    end

    # Markdown/raw cells: one row per source line, right-stripped.
    def transform_text_cell(cell, symbol)
      symbolize_array(symbol / 'source', cell['source'], &:rstrip)
    end

    # Renders selected notebook metadata as a YAML frontmatter block
    # terminated by "---".
    def transform_metadata(notebook_json)
      as_yaml = {
        'jupyter' => {
          'kernelspec' => notebook_json['metadata']['kernelspec'],
          'language_info' => notebook_json['metadata']['language_info'],
          'nbformat' => notebook_json['nbformat'],
          'nbformat_minor' => notebook_json['nbformat_minor']
        }
      }.to_yaml
      as_yaml.split("\n").map { |l| _(nil, l) }.append(_(nil, '---'), _)
    end
  end
end

View File

@ -1,5 +0,0 @@
# frozen_string_literal: true

module IpynbDiff
  # Gem version.
  VERSION = '0.4.7'
end

View File

@ -1,165 +0,0 @@
# frozen_string_literal: true

require 'rspec'
require 'json'
require 'rspec-parameterized'
require 'ipynb_symbol_map'

describe IpynbDiff::IpynbSymbolMap do
  # Builds the expected results hash from [path, line] pairs.
  # NOTE(review): `cases` is never nil (a splat always yields an array), so
  # the safe navigation and the `|| []` fallback are dead code.
  def res(*cases)
    cases&.to_h || []
  end

  describe '#parse_string' do
    using RSpec::Parameterized::TableSyntax

    let(:mapper) { IpynbDiff::IpynbSymbolMap.new(input) }

    where(:input, :result) do
      # Empty string
      '""' | ''
      # Some string with quotes
      '"he\nll\"o"' | 'he\nll\"o'
    end

    with_them do
      it { expect(mapper.parse_string(return_value: true)).to eq(result) }
      # Without return_value the string is consumed but not returned,
      # and nothing is recorded in the results map.
      it { expect(mapper.parse_string).to be_nil }
      it { expect(mapper.results).to be_empty }
    end

    it 'raises if invalid string' do
      mapper = IpynbDiff::IpynbSymbolMap.new('"')
      expect { mapper.parse_string }.to raise_error(IpynbDiff::InvalidTokenError)
    end
  end

  describe '#parse_object' do
    using RSpec::Parameterized::TableSyntax

    let(:mapper) { IpynbDiff::IpynbSymbolMap.new(notebook, objects_to_ignore) }

    before do
      mapper.parse_object('')
    end

    where(:notebook, :objects_to_ignore, :result) do
      # Empty object
      '{ }' | [] | res
      # Object with string
      '{ "hello" : "world" }' | [] | res(['.hello', 0])
      # Object with boolean
      '{ "hello" : true }' | [] | res(['.hello', 0])
      # Object with integer
      '{ "hello" : 1 }' | [] | res(['.hello', 0])
      # Object with 2 properties in the same line
      '{ "hello" : "world" , "my" : "bad" }' | [] | res(['.hello', 0], ['.my', 0])
      # Object with 2 properties in the different lines line
      "{ \"hello\" : \"world\" , \n \n \"my\" : \"bad\" }" | [] | res(['.hello', 0], ['.my', 2])
      # Object with 2 properties, but one is ignored
      "{ \"hello\" : \"world\" , \n \n \"my\" : \"bad\" }" | ['hello'] | res(['.my', 2])
    end

    with_them do
      it { expect(mapper.results).to include(result) }
    end
  end

  describe '#parse_array' do
    using RSpec::Parameterized::TableSyntax

    where(:notebook, :result) do
      # Empty Array
      '[]' | res
      # Array with string value
      '["a"]' | res(['.0', 0])
      # Array with boolean
      '[ true ]' | res(['.0', 0])
      # Array with integer
      '[ 1 ]' | res(['.0', 0])
      # Two values on the same line
      '["a", "b"]' | res(['.0', 0], ['.1', 0])
      # With line breaks'
      "[\n \"a\" \n , \n \"b\" ]" | res(['.0', 1], ['.1', 3])
    end

    let(:mapper) { IpynbDiff::IpynbSymbolMap.new(notebook) }

    before do
      mapper.parse_array('')
    end

    with_them do
      it { expect(mapper.results).to match_array(result) }
    end
  end

  # NOTE(review): this describe block has no examples — either add specs for
  # #skip_object or remove it.
  describe '#skip_object' do
    subject { IpynbDiff::IpynbSymbolMap.parse(JSON.pretty_generate(source)) }
  end

  describe '#parse' do
    let(:objects_to_ignore) { [] }

    subject { IpynbDiff::IpynbSymbolMap.parse(JSON.pretty_generate(source), objects_to_ignore) }

    context 'Empty object' do
      let(:source) { {} }

      it { is_expected.to be_empty }
    end

    context 'Object with inner object and number' do
      let(:source) { { obj1: { obj2: 1 } } }

      it { is_expected.to match_array(res(['.obj1', 1], ['.obj1.obj2', 2])) }
    end

    context 'Object with inner object and number, string and array with object' do
      let(:source) { { obj1: { obj2: [123, 2, true], obj3: "hel\nlo", obj4: true, obj5: 123, obj6: 'a' } } }

      it do
        is_expected.to match_array(
          res(['.obj1', 1],
              ['.obj1.obj2', 2],
              ['.obj1.obj2.0', 3],
              ['.obj1.obj2.1', 4],
              ['.obj1.obj2.2', 5],
              ['.obj1.obj3', 7],
              ['.obj1.obj4', 8],
              ['.obj1.obj5', 9],
              ['.obj1.obj6', 10])
        )
      end
    end

    context 'When index is exceeded because of failure' do
      it 'raises an exception' do
        source = '{"\\a": "a\""}'
        mapper = IpynbDiff::IpynbSymbolMap.new(source)
        # Force the escape detection to fail so the scanner runs off the end.
        expect(mapper).to receive(:prev_backslash?).at_least(1).time.and_return(false)
        expect { mapper.parse('') }.to raise_error(IpynbDiff::InvalidTokenError)
      end
    end

    context 'Object with inner object and number, string and array with object' do
      let(:source) { { obj1: { obj2: [123, 2, true], obj3: "hel\nlo", obj4: true, obj5: 123, obj6: { obj7: 'a' } } } }
      let(:objects_to_ignore) { %w(obj2 obj6) }

      it do
        is_expected.to match_array(
          res(['.obj1', 1],
              ['.obj1.obj3', 7],
              ['.obj1.obj4', 8],
              ['.obj1.obj5', 9],
          )
        )
      end
    end
  end
end

View File

@ -1,126 +0,0 @@
# frozen_string_literal: true

require 'ipynbdiff'
require 'rspec'
require 'rspec-parameterized'

BASE_PATH = File.join(File.expand_path(File.dirname(__FILE__)), 'testdata')

describe IpynbDiff do
  # Returns the leading sign (+/-/space) of every line in the text diff.
  def diff_signs(diff)
    diff.to_s(:text).scan(/.*\n/).map { |l| l[0] }.join('')
  end

  describe 'diff' do
    let(:from_path) { File.join(BASE_PATH, 'from.ipynb') }
    let(:to_path) { File.join(BASE_PATH, 'to.ipynb') }
    let(:from) { File.read(from_path) }
    let(:to) { File.read(to_path) }
    let(:include_frontmatter) { false }
    let(:hide_images) { false }

    subject { IpynbDiff.diff(from, to, include_frontmatter: include_frontmatter, hide_images: hide_images) }

    context 'if preprocessing is active' do
      it 'html tables are stripped' do
        is_expected.to_not include('<td>')
      end
    end

    context 'when to is nil' do
      let(:to) { nil }
      let(:from_path) { File.join(BASE_PATH, 'only_md', 'input.ipynb') }

      it 'all lines are removals' do
        expect(diff_signs(subject)).to eq('-----')
      end
    end

    # Fixed copy-pasted description: this case nils out `from`, not `to`.
    context 'when from is nil' do
      let(:from) { nil }
      let(:to_path) { File.join(BASE_PATH, 'only_md', 'input.ipynb') }

      it 'all lines are additions' do
        expect(diff_signs(subject)).to eq('+++++')
      end
    end

    context 'When include_frontmatter is true' do
      let(:include_frontmatter) { true }

      it 'should show changes metadata in the metadata' do
        expect(subject.to_s(:text)).to include('+ display_name: New Python 3 (ipykernel)')
      end
    end

    context 'When hide_images is true' do
      let(:hide_images) { true }

      it 'hides images' do
        expect(subject.to_s(:text)).to include(' [Hidden Image Output]')
      end
    end

    context 'When include_frontmatter is false' do
      it 'should drop metadata from the diff' do
        expect(subject.to_s(:text)).to_not include('+ display_name: New Python 3 (ipykernel)')
      end
    end

    context 'when either notebook can not be processed' do
      using RSpec::Parameterized::TableSyntax

      where(:ctx, :from, :to) do
        'because from is invalid' | 'a' | nil
        'because from does not have the cell tag' | '{"metadata":[]}' | nil
        'because to is invalid' | nil | 'a'
        'because to does not have the cell tag' | nil | '{"metadata":[]}'
      end

      with_them do
        it { is_expected.to be_nil }
      end
    end
  end

  describe 'transform' do
    [nil, 'a', '{"metadata":[]}'].each do |invalid_nb|
      context "when json is invalid (#{invalid_nb || 'nil'})" do
        it 'is nil' do
          expect(IpynbDiff.transform(invalid_nb)).to be_nil
        end
      end
    end

    context 'options' do
      let(:include_frontmatter) { false }
      let(:hide_images) { false }

      subject do
        IpynbDiff.transform(File.read(File.join(BASE_PATH, 'from.ipynb')),
                            include_frontmatter: include_frontmatter,
                            hide_images: hide_images)
      end

      context 'include_frontmatter is false' do
        it { is_expected.to_not include('display_name: Python 3 (ipykernel)') }
      end

      context 'include_frontmatter is true' do
        let(:include_frontmatter) { true }

        it { is_expected.to include('display_name: Python 3 (ipykernel)') }
      end

      context 'hide_images is false' do
        it { is_expected.not_to include('[Hidden Image Output]') }
      end

      context 'hide_images is true' do
        let(:hide_images) { true }

        it { is_expected.to include(' [Hidden Image Output]') }
      end
    end
  end
end

View File

@ -1,7 +0,0 @@
%% Cell type:markdown id: tags:
\
%% Cell type:markdown id: tags:
a

View File

@ -1,7 +0,0 @@
.cells.0
.cells.0.source.0
.cells.1
.cells.1.source.0

View File

@ -1,16 +0,0 @@
{
"cells": [
{
"cell_type": "markdown",
"source": [
"\\"
]
},
{
"cell_type": "markdown",
"source": [
"a"
]
}
]
}

View File

@ -1,16 +0,0 @@
%% Cell type:code id:5 tags:
``` python
# A cell that has an error
y = sin(x)
```
%%%% Output: error
---------------------------------------------------------------------------
NameError Traceback (most recent call last)
/var/folders/cq/l637k4x13gx6y9p_gfs4c_gc0000gn/T/ipykernel_72857/3962062127.py in <module>
1 # A cell that has an error
----> 2 y = sin(x)
NameError: name 'sin' is not defined

View File

@ -1,16 +0,0 @@
.cells.0
.cells.0.source
.cells.0.source.0
.cells.0.source.1
.cells.0.outputs.0
.cells.0.outputs.0.traceback.0
.cells.0.outputs.0.traceback.1
.cells.0.outputs.0.traceback.2
.cells.0.outputs.0.traceback.2
.cells.0.outputs.0.traceback.2
.cells.0.outputs.0.traceback.2
.cells.0.outputs.0.traceback.3

View File

@ -1,32 +0,0 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": 7,
"id": "5",
"metadata": {},
"outputs": [
{
"ename": "NameError",
"evalue": "name 'sin' is not defined",
"output_type": "error",
"traceback": [
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[0;31mNameError\u001b[0m Traceback (most recent call last)",
"\u001b[0;32m/var/folders/cq/l637k4x13gx6y9p_gfs4c_gc0000gn/T/ipykernel_72857/3962062127.py\u001b[0m in \u001b[0;36m<module>\u001b[0;34m\u001b[0m\n\u001b[1;32m 1\u001b[0m \u001b[0;31m# A cell that has an error\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 2\u001b[0;31m \u001b[0my\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0msin\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mx\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m",
"\u001b[0;31mNameError\u001b[0m: name 'sin' is not defined"
]
}
],
"source": [
"# A cell that has an error\n",
"y = sin(x)"
]
}
],
"metadata": {
"kernelspec": {
"language": "python"
}
}
}

View File

@ -1,198 +0,0 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "0aac5da7-745c-4eda-847a-3d0d07a1bb9b",
"metadata": {
"tags": []
},
"source": [
"# This is a markdown cell\n",
"\n",
"This paragraph has\n",
"With\n",
"Many\n",
"Lines. How we will he handle MR notes?\n",
"\n",
"But I can add another paragraph"
]
},
{
"cell_type": "raw",
"id": "faecea5b-de0a-49fa-9a3a-61c2add652da",
"metadata": {},
"source": [
"This is a raw cell\n",
"With\n",
"Multiple lines"
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "893ca2c0-ab75-4276-9dad-be1c40e16e8a",
"metadata": {},
"outputs": [],
"source": [
"import pandas as pd\n",
"import numpy as np\n",
"import matplotlib.pyplot as plt"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "0d707fb5-226f-46d6-80bd-489ebfb8905c",
"metadata": {},
"outputs": [],
"source": [
"np.random.seed(42)"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "35467fcf-28b1-4c7b-bb09-4cb192c35293",
"metadata": {
"tags": [
"senoid"
]
},
"outputs": [
{
"data": {
"text/plain": [
"[<matplotlib.lines.Line2D at 0x123e39370>]"
]
},
"execution_count": 3,
"metadata": {},
"output_type": "execute_result"
},
{
"data": {
"image/png": "some_invalid_base64_image_here\n",
"text/plain": [
"<Figure size 432x288 with 1 Axes>"
]
},
"metadata": {
"needs_background": "light"
},
"output_type": "display_data"
}
],
"source": [
"x = np.linspace(0, 4*np.pi,50)\n",
"y = np.sin(x)\n",
"\n",
"plt.plot(x, y)"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "dc1178cd-c46d-4da3-9ab5-08f000699884",
"metadata": {},
"outputs": [],
"source": [
"df = pd.DataFrame({\"x\": x, \"y\": y})"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "6e749b4f-b409-4700-870f-f68c39462490",
"metadata": {
"tags": [
"some-table"
]
},
"outputs": [
{
"data": {
"text/html": [
"<div>\n",
"<style scoped>\n",
" .dataframe tbody tr th:only-of-type {\n",
" vertical-align: middle;\n",
" }\n",
"\n",
" .dataframe tbody tr th {\n",
" vertical-align: top;\n",
" }\n",
"\n",
" .dataframe thead th {\n",
" text-align: right;\n",
" }\n",
"</style>\n",
"<table border=\"1\" class=\"dataframe\">\n",
" <thead>\n",
" <tr style=\"text-align: right;\">\n",
" <th></th>\n",
" <th>x</th>\n",
" <th>y</th>\n",
" </tr>\n",
" </thead>\n",
" <tbody>\n",
" <tr>\n",
" <th>0</th>\n",
" <td>0.000000</td>\n",
" <td>0.000000</td>\n",
" </tr>\n",
" <tr>\n",
" <th>1</th>\n",
" <td>0.256457</td>\n",
" <td>0.253655</td>\n",
" </tr>\n",
" </tbody>\n",
"</table>\n",
"</div>"
],
"text/plain": [
" x y\n",
"0 0.000000 0.000000\n",
"1 0.256457 0.253655"
]
},
"execution_count": 5,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"df[:2]"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "0ddef5ef-94a3-4afd-9c70-ddee9694f512",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.7"
},
"toc-showtags": true
},
"nbformat": 4,
"nbformat_minor": 5
}

View File

@ -1,12 +0,0 @@
%% Cell type:code id:5 tags:senoid
``` python
```
%%%% Output: display_data
[Hidden Image Output]
%%%% Output: display_data
[Hidden Image Output]

View File

@ -1,12 +0,0 @@
.cells.0
.cells.0.source
.cells.0.outputs.0
.cells.0.outputs.1

View File

@ -1,45 +0,0 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": 3,
"id": "5",
"metadata": {
"tags": [
"senoid"
]
},
"outputs": [
{
"data": {
"image/png": "this_is_an_invalid_hash_for_testing_purposes\n",
"text/plain": [
"<Figure size 432x288 with 1 Axes>"
]
},
"metadata": {
"needs_background": "light"
},
"output_type": "display_data"
},
{
"data": {
"image/svg+xml": "<svg xmlns=\"http://www.w3.org/2000/svg\" viewBox=\"0 0 100 100\"><circle cx=\"50\" cy=\"50\" r=\"50\"/></svg>",
"text/plain": [
"<IPython.core.display.SVG object>"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
]
}
],
"metadata": {
"kernelspec": {
"language": "python"
}
}
}

View File

@ -1,11 +0,0 @@
%% Cell type:code id:5 tags:some-table
``` python
df[:2]
```
%%%% Output: execute_result
x y
0 0.000000 0.000000
1 0.256457 0.507309

View File

@ -1,11 +0,0 @@
.cells.0
.cells.0.source
.cells.0.source.0
.cells.0.outputs.0
.cells.0.outputs.0.data.text/plain.0
.cells.0.outputs.0.data.text/plain.1
.cells.0.outputs.0.data.text/plain.2

View File

@ -1,74 +0,0 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": 5,
"id": "5",
"metadata": {
"tags": [
"some-table"
]
},
"outputs": [
{
"data": {
"text/html": [
"<div>\n",
"<style scoped>\n",
" .dataframe tbody tr th:only-of-type {\n",
" vertical-align: middle;\n",
" }\n",
"\n",
" .dataframe tbody tr th {\n",
" vertical-align: top;\n",
" }\n",
"\n",
" .dataframe thead th {\n",
" text-align: right;\n",
" }\n",
"</style>\n",
"<table border=\"1\" class=\"dataframe\">\n",
" <thead>\n",
" <tr style=\"text-align: right;\">\n",
" <th></th>\n",
" <th>x</th>\n",
" <th>y</th>\n",
" </tr>\n",
" </thead>\n",
" <tbody>\n",
" <tr>\n",
" <th>0</th>\n",
" <td>0.000000</td>\n",
" <td>0.000000</td>\n",
" </tr>\n",
" <tr>\n",
" <th>1</th>\n",
" <td>0.256457</td>\n",
" <td>0.507309</td>\n",
" </tr>\n",
" </tbody>\n",
"</table>\n",
"</div>"
],
"text/plain": [
" x y\n",
"0 0.000000 0.000000\n",
"1 0.256457 0.507309"
]
},
"execution_count": 5,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"df[:2]"
]
}
],
"metadata": {
"kernelspec": {
"language": "python"
}
}
}

View File

@ -1,10 +0,0 @@
%% Cell type:code id:5 tags:
``` python
from IPython.display import display, Math
display(Math(r'Dims: {}x{}m \\ Area: {}m^2 \\ Volume: {}m^3'.format(1, round(2,2), 3, 4)))
```
%%%% Output: display_data
$\displaystyle Dims: 1x2m \\ Area: 3m^2 \\ Volume: 4m^3$

View File

@ -1,10 +0,0 @@
.cells.0
.cells.0.source
.cells.0.source.0
.cells.0.source.1
.cells.0.outputs.0
.cells.0.outputs.0.data.text/latex.0

View File

@ -1,34 +0,0 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": 4,
"id": "5",
"outputs": [
{
"data": {
"text/latex": [
"$\\displaystyle Dims: 1x2m \\\\ Area: 3m^2 \\\\ Volume: 4m^3$"
],
"text/plain": [
"<IPython.core.display.Math object>"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"from IPython.display import display, Math\n",
"display(Math(r'Dims: {}x{}m \\\\ Area: {}m^2 \\\\ Volume: {}m^3'.format(1, round(2,2), 3, 4)))"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
}
}
}

View File

@ -1,9 +0,0 @@
%% Cell type:code id:5 tags:
```
Some Image
```
%%%% Output: display_data
![](data:image/png;base64,this_is_an_invalid_hash_for_testing_purposes)

View File

@ -1,9 +0,0 @@
.cells.0
.cells.0.source
.cells.0.source.0
.cells.0.outputs.0
.cells.0.outputs.0.data.image/png

View File

@ -1,25 +0,0 @@
{
"cells": [
{
"cell_type": "code",
"id": "5",
"metadata": {
},
"outputs": [
{
"data": {
"image/png": [
"this_is_an_invalid_hash_for_testing_purposes"
]
},
"output_type": "display_data"
}
],
"source": [
"Some Image"
]
}
],
"metadata": {
}
}

View File

@ -1,19 +0,0 @@
---
jupyter:
kernelspec:
display_name: Python 3 (ipykernel)
language: python
name: python3
language_info:
codemirror_mode:
name: ipython
version: 3
file_extension: ".py"
mimetype: text/x-python
name: python
nbconvert_exporter: python
pygments_lexer: ipython3
version: 3.9.7
nbformat: 4
nbformat_minor: 5
---

View File

@ -1,25 +0,0 @@
{
"cells": [],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.7"
},
"toc-showtags": true
},
"nbformat": 4,
"nbformat_minor": 5
}

View File

@ -1,25 +0,0 @@
{
"cells": [],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.7"
},
"toc-showtags": true
},
"nbformat": 4,
"nbformat_minor": 5
}

View File

@ -1,13 +0,0 @@
%% Cell type:markdown id:1 tags:
# A
B
%% Cell type:code id:3 tags:
``` python
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
```

View File

@ -1,13 +0,0 @@
.cells.0
.cells.0.source.0
.cells.0.source.1
.cells.0.source.2
.cells.1
.cells.1.source
.cells.1.source.0
.cells.1.source.1
.cells.1.source.2

View File

@ -1,29 +0,0 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "1",
"source": [
"# A\n",
"\n",
"B"
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "3",
"outputs": [],
"source": [
"import pandas as pd\n",
"import numpy as np\n",
"import matplotlib.pyplot as plt"
]
}
],
"metadata": {
"kernelspec": {
"language": "python"
}
}
}

View File

@ -1,7 +0,0 @@
%% Cell type:code id:3 tags:
``` python
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
```

View File

@ -1,7 +0,0 @@
.cells.0
.cells.0.source
.cells.0.source.0
.cells.0.source.1
.cells.0.source.2

View File

@ -1,21 +0,0 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"id": "3",
"metadata": {},
"outputs": [],
"source": [
"import pandas as pd\n",
"import numpy as np\n",
"import matplotlib.pyplot as plt"
]
}
],
"metadata": {
"kernelspec": {
"language": "python"
}
}
}

View File

@ -1,5 +0,0 @@
%% Cell type:code id:3 tags:
```
```

View File

@ -1,5 +0,0 @@
.cells.0
.cells.0.source
.cells.0.source

View File

@ -1,12 +0,0 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"id": "3",
"source": "",
"outputs": []
}
],
"metadata": {}
}

View File

@ -1,5 +0,0 @@
%% Cell type:code id:3 tags:
```
```

View File

@ -1,5 +0,0 @@
.cells.0
.cells.0.source
.cells.0.source

View File

@ -1,14 +0,0 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"id": "3",
"source": "",
"outputs": []
}
],
"metadata": {
"kernelspec": {}
}
}

View File

@ -1,5 +0,0 @@
%% Cell type:code id:3 tags:
```
```

View File

@ -1,5 +0,0 @@
.cells.0
.cells.0.source
.cells.0.source

View File

@ -1,11 +0,0 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"id": "3",
"source": "",
"outputs": []
}
]
}

View File

@ -1,5 +0,0 @@
%% Cell type:markdown id:1 tags:hello,world
# A
B

View File

@ -1,5 +0,0 @@
.cells.0
.cells.0.source.0
.cells.0.source.1
.cells.0.source.2

View File

@ -1,21 +0,0 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "1",
"metadata": {
"tags": [
"hello",
"world"
]
},
"source": [
"# A\n",
"\n",
"B"
]
}
],
"metadata": {
}
}

View File

@ -1,4 +0,0 @@
%% Cell type:raw id:2 tags:
A
B

View File

@ -1,4 +0,0 @@
.cells.0
.cells.0.source.0
.cells.0.source.1

View File

@ -1,15 +0,0 @@
{
"cells": [
{
"cell_type": "raw",
"id": "2",
"metadata": {},
"source": [
"A\n",
"B"
]
}
],
"metadata": {
}
}

View File

@ -1,70 +0,0 @@
%% Cell type:markdown id:0aac5da7-745c-4eda-847a-3d0d07a1bb9b tags:
# This is a markdown cell
This paragraph has
With
Many
Lines. How we will he handle MR notes?
But I can add another paragraph
%% Cell type:raw id:faecea5b-de0a-49fa-9a3a-61c2add652da tags:
This is a raw cell
With
Multiple lines
%% Cell type:code id:893ca2c0-ab75-4276-9dad-be1c40e16e8a tags:
``` python
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
```
%% Cell type:code id:0d707fb5-226f-46d6-80bd-489ebfb8905c tags:
``` python
np.random.seed(42)
```
%% Cell type:code id:35467fcf-28b1-4c7b-bb09-4cb192c35293 tags:senoid
``` python
x = np.linspace(0, 4*np.pi,50)
y = np.sin(x)
plt.plot(x, y)
```
%%%% Output: execute_result
[<matplotlib.lines.Line2D at 0x123e39370>]
%%%% Output: display_data
![](data:image/png;base64,some_invalid_base64_image_here)
%% Cell type:code id:dc1178cd-c46d-4da3-9ab5-08f000699884 tags:
``` python
df = pd.DataFrame({"x": x, "y": y})
```
%% Cell type:code id:6e749b4f-b409-4700-870f-f68c39462490 tags:some-table
``` python
df[:2]
```
%%%% Output: execute_result
x y
0 0.000000 0.000000
1 0.256457 0.253655
%% Cell type:code id:0ddef5ef-94a3-4afd-9c70-ddee9694f512 tags:
``` python
```

View File

@ -1,70 +0,0 @@
.cells.0
.cells.0.source.0
.cells.0.source.1
.cells.0.source.2
.cells.0.source.3
.cells.0.source.4
.cells.0.source.5
.cells.0.source.6
.cells.0.source.7
.cells.1
.cells.1.source.0
.cells.1.source.1
.cells.1.source.2
.cells.2
.cells.2.source
.cells.2.source.0
.cells.2.source.1
.cells.2.source.2
.cells.3
.cells.3.source
.cells.3.source.0
.cells.4
.cells.4.source
.cells.4.source.0
.cells.4.source.1
.cells.4.source.2
.cells.4.source.3
.cells.4.outputs.0
.cells.4.outputs.0.data.text/plain.0
.cells.4.outputs.1
.cells.4.outputs.1.data.image/png
.cells.5
.cells.5.source
.cells.5.source.0
.cells.6
.cells.6.source
.cells.6.source.0
.cells.6.outputs.0
.cells.6.outputs.0.data.text/plain.0
.cells.6.outputs.0.data.text/plain.1
.cells.6.outputs.0.data.text/plain.2
.cells.7
.cells.7.source

View File

@ -1,3 +0,0 @@
%% Cell type:markdown id:1 tags:hello,world
A

View File

@ -1,3 +0,0 @@
.cells.0
.cells.0.source

View File

@ -1,17 +0,0 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "1",
"metadata": {
"tags": [
"hello",
"world"
]
},
"source": "A"
}
],
"metadata": {
}
}

View File

@ -1,9 +0,0 @@
%% Cell type:code id:123 tags:
``` python
print("G'bye")
```
%%%% Output: stream
G'bye

View File

@ -1,9 +0,0 @@
.cells.0
.cells.0.source
.cells.0.source.0
.cells.0.outputs.0
.cells.0.outputs.0.text.0

Some files were not shown because too many files have changed in this diff Show More