# frozen_string_literal: true
module Zip
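
# Models the central directory of a zip archive: the individual entry
# records, the End of Central Directory (EOCD) record and, for archives
# that need them, the Zip64 EOCD record and locator.
#
# A minimal usage sketch (hypothetical file name; assumes a seekable IO
# opened in binary mode):
#
#   ::File.open('example.zip', 'rb') do |io|
#     cdir = ::Zip::CentralDirectory.read_from_stream(io)
#     cdir.each { |entry| puts entry.name } if cdir
#   end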
class CentralDirectory
include Enumerable
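
# Signatures and fixed sizes of the End of Central Directory (EOCD)
# record and its Zip64 counterparts, as defined by the zip file format.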
END_OF_CDS = 0x06054b50
ZIP64_END_OF_CDS = 0x06064b50
ZIP64_EOCD_LOCATOR = 0x07064b50
MAX_END_OF_CDS_SIZE = 65_536 + 18
STATIC_EOCD_SIZE = 22
ZIP64_STATIC_EOCD_SIZE = 56
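
# The archive comment, as read from or written to the EOCD record.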
attr_reader :comment
# Returns an Enumerable containing the entries.
def entries
@entry_set.entries
end
def initialize(entries = EntrySet.new, comment = '') #:nodoc:
super()
@entry_set = entries.kind_of?(EntrySet) ? entries : EntrySet.new(entries)
@comment = comment
end
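
# Writes the central directory to +io+: one entry record per member,
# then (when zip64 support is enabled and needed) the Zip64 EOCD record
# and locator, and finally the standard EOCD record.
#
# A rough sketch of how it might be driven (hypothetical names; normally
# this is called by the higher-level output code rather than directly):
#
#   cdir = ::Zip::CentralDirectory.new(entries, 'archive comment')
#   cdir.write_to_stream(out_io)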
def write_to_stream(io) #:nodoc:
cdir_offset = io.tell
@entry_set.each { |entry| entry.write_c_dir_entry(io) }
eocd_offset = io.tell
cdir_size = eocd_offset - cdir_offset
if ::Zip.write_zip64_support
need_zip64_eocd = cdir_offset > 0xFFFFFFFF || cdir_size > 0xFFFFFFFF || @entry_set.size > 0xFFFF
need_zip64_eocd ||= @entry_set.any? { |entry| entry.extra['Zip64'] }
if need_zip64_eocd
write_64_e_o_c_d(io, cdir_offset, cdir_size)
write_64_eocd_locator(io, eocd_offset)
end
end
write_e_o_c_d(io, cdir_offset, cdir_size)
end
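
# Writes the standard EOCD record. Entry counts, the directory size and
# its offset are capped at 0xFFFF/0xFFFFFFFF here; the real values live
# in the Zip64 EOCD record when those caps are exceeded.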
def write_e_o_c_d(io, offset, cdir_size) #:nodoc:
tmp = [
END_OF_CDS,
0, # @numberOfThisDisk
0, # @numberOfDiskWithStartOfCDir
@entry_set ? [@entry_set.size, 0xFFFF].min : 0,
@entry_set ? [@entry_set.size, 0xFFFF].min : 0,
[cdir_size, 0xFFFFFFFF].min,
[offset, 0xFFFFFFFF].min,
@comment ? @comment.bytesize : 0
]
io << tmp.pack('VvvvvVVv')
io << @comment
end
private :write_e_o_c_d
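
# Writes the Zip64 EOCD record, which carries the full 64-bit entry
# counts, central directory size and offset.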
def write_64_e_o_c_d(io, offset, cdir_size) #:nodoc:
tmp = [
ZIP64_END_OF_CDS,
44, # size of zip64 end of central directory record (excludes signature and field itself)
VERSION_MADE_BY,
VERSION_NEEDED_TO_EXTRACT_ZIP64,
0, # @numberOfThisDisk
0, # @numberOfDiskWithStartOfCDir
@entry_set ? @entry_set.size : 0, # number of entries on this disk
@entry_set ? @entry_set.size : 0, # number of entries total
cdir_size, # size of central directory
offset # offset of start of central directory in its disk
]
io << tmp.pack('VQ<vvVVQ<Q<Q<Q<')
end
private :write_64_e_o_c_d
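
# Writes the Zip64 EOCD locator, which records the disk holding the
# Zip64 EOCD record and the offset at which that record starts.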
def write_64_eocd_locator(io, zip64_eocd_offset)
tmp = [
ZIP64_EOCD_LOCATOR,
0, # number of disk containing the start of zip64 eocd record
zip64_eocd_offset, # offset of the start of zip64 eocd record in its disk
1 # total number of disks
]
io << tmp.pack('VVQ<V')
end
private :write_64_eocd_locator
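
# Parses the Zip64 EOCD record out of +buffer+ (the tail of the archive)
# and stores its fields in instance variables.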
def unpack_64_e_o_c_d(buffer) #:nodoc:
index = buffer.rindex([ZIP64_END_OF_CDS].pack('V'))
raise Error, 'Zip64 end of central directory signature not found' unless index
l_index = buffer.rindex([ZIP64_EOCD_LOCATOR].pack('V'))
raise Error, 'Zip64 end of central directory signature locator not found' unless l_index
buf = buffer.slice(index..l_index)
_, # ZIP64_END_OF_CDS signature. We know we have this at this point.
@size_of_zip64_e_o_c_d,
@version_made_by,
@version_needed_for_extract,
@number_of_this_disk,
@number_of_disk_with_start_of_cdir,
@total_number_of_entries_in_cdir_on_this_disk,
@size,
@size_in_bytes,
@cdir_offset = buf.unpack('VQ<vvVVQ<Q<Q<Q<')
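
# The recorded size excludes the 12 bytes of signature and size field,
# so the fixed part it covers is ZIP64_STATIC_EOCD_SIZE - 12 bytes;
# anything beyond that is zip64 extensible data.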
zip64_extensible_data_size =
@size_of_zip64_e_o_c_d - ZIP64_STATIC_EOCD_SIZE + 12
@zip64_extensible_data = if zip64_extensible_data_size.zero?
''
else
buffer.slice(
ZIP64_STATIC_EOCD_SIZE,
zip64_extensible_data_size
)
end
end
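
# Parses the standard EOCD record out of +buffer+ and stores its fields,
# including the archive comment, in instance variables.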
def unpack_e_o_c_d(buffer) #:nodoc:
index = buffer.rindex([END_OF_CDS].pack('V'))
raise Error, 'Zip end of central directory signature not found' unless index
buf = buffer.slice(index, buffer.size)
_, # END_OF_CDS signature. We know we have this at this point.
@number_of_this_disk,
@number_of_disk_with_start_of_cdir,
@total_number_of_entries_in_cdir_on_this_disk,
@size,
@size_in_bytes,
@cdir_offset,
comment_length = buf.unpack('VvvvvVVv')
@comment = if comment_length.positive?
buf.slice(STATIC_EOCD_SIZE, comment_length)
else
''
end
end
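
# Seeks to the central directory and reads one entry record per member,
# re-reading each entry's extra fields from its local header where a
# header offset is available.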
def read_central_directory_entries(io) #:nodoc:
begin
io.seek(@cdir_offset, IO::SEEK_SET)
rescue Errno::EINVAL
raise Error, 'Zip consistency problem while reading central directory entry'
end
@entry_set = EntrySet.new
@size.times do
entry = Entry.read_c_dir_entry(io)
next unless entry
offset = if entry.extra['Zip64']
entry.extra['Zip64'].relative_header_offset
else
entry.local_header_offset
end
unless offset.nil?
io_save = io.tell
io.seek(offset, IO::SEEK_SET)
entry.read_extra_field(read_local_extra_field(io))
io.seek(io_save, IO::SEEK_SET)
end
@entry_set << entry
end
end
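
# Reads the extra field data that follows an entry's local header,
# returning an empty string if the header is truncated or its signature
# does not match.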
def read_local_extra_field(io)
buf = io.read(::Zip::LOCAL_ENTRY_STATIC_HEADER_LENGTH) || ''
return '' unless buf.bytesize == ::Zip::LOCAL_ENTRY_STATIC_HEADER_LENGTH
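
# The 30-byte fixed local header ends with the name length (n_len) and
# the extra field length (e_len); only those two values are needed here.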
head, _, _, _, _, _, _, _, _, _, n_len, e_len = buf.unpack('VCCvvvvVVVvv')
return '' unless head == ::Zip::LOCAL_ENTRY_SIGNATURE
io.seek(n_len, IO::SEEK_CUR) # Skip over the entry name.
io.read(e_len)
end
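
# Reads the complete central directory from +io+: locates the (Zip64)
# EOCD record in the tail of the stream, then reads every entry record.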
def read_from_stream(io) #:nodoc:
buf = start_buf(io)
if zip64_file?(buf)
unpack_64_e_o_c_d(buf)
else
unpack_e_o_c_d(buf)
end
read_central_directory_entries(io)
end
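
# Does the buffer contain both a Zip64 EOCD record and its locator?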
def zip64_file?(buf)
buf.rindex([ZIP64_END_OF_CDS].pack('V')) && buf.rindex([ZIP64_EOCD_LOCATOR].pack('V'))
end
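
# Reads the tail of +io+ (at most MAX_END_OF_CDS_SIZE bytes), which is
# where the EOCD record must be; shorter streams are read in full.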
def start_buf(io)
begin
io.seek(-MAX_END_OF_CDS_SIZE, IO::SEEK_END)
rescue Errno::EINVAL
io.seek(0, IO::SEEK_SET)
end
io.read
end
# For iterating over the entries.
def each(&a_proc)
@entry_set.each(&a_proc)
end
# Returns the number of entries in the central directory (and
# consequently in the zip archive).
def size
@entry_set.size
end
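
# Builds a CentralDirectory by reading +io+, returning nil if the stream
# does not contain a readable central directory.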
def self.read_from_stream(io) #:nodoc:
cdir = new
cdir.read_from_stream(io)
cdir
rescue Error
nil
end
def ==(other) #:nodoc:
return false unless other.kind_of?(CentralDirectory)
@entry_set.entries.sort == other.entries.sort && comment == other.comment
end
end
end
# Copyright (C) 2002, 2003 Thomas Sondergaard
# rubyzip is free software; you can redistribute it and/or
# modify it under the terms of the ruby license.