apkzlib 7.1.0-alpha10 (#19)
This commit is contained in:
parent
b24c05c820
commit
898bf7d536
|
|
@ -0,0 +1 @@
|
||||||
|
/build
|
||||||
|
|
@ -0,0 +1,18 @@
|
||||||
|
// Gradle build script for the apkzlib library module.
plugins {
    id 'java-library'
}

java {
    // Library targets Java 11 bytecode and language level.
    sourceCompatibility = JavaVersion.VERSION_11
    targetCompatibility = JavaVersion.VERSION_11
}

dependencies {
    // Nullability annotations (javax.annotation.Nullable etc.).
    implementation 'com.google.code.findbugs:jsr305:3.0.2'
    // NOTE(review): BouncyCastle 1.69 is old and has published CVEs in later advisories;
    // consider upgrading bcpkix/bcprov together — verify API compatibility first.
    implementation 'org.bouncycastle:bcpkix-jdk15on:1.69'
    implementation 'org.bouncycastle:bcprov-jdk15on:1.69'
    // 'api' scope: presumably Guava and apksig types appear on this library's public
    // API surface — verify before narrowing to 'implementation'.
    api 'com.google.guava:guava:30.1.1-jre'
    api 'com.android.tools.build:apksig:7.0.1'
    // AutoValue: annotations at compile time only, processor generates the value classes.
    compileOnlyApi 'com.google.auto.value:auto-value-annotations:1.8.2'
    annotationProcessor 'com.google.auto.value:auto-value:1.8.2'
}
|
||||||
|
|
@ -0,0 +1,86 @@
|
||||||
|
package com.android.tools.build.apkzlib.bytestorage;
|
||||||
|
|
||||||
|
import com.android.tools.build.apkzlib.zip.utils.CloseableByteSource;
|
||||||
|
import com.google.common.base.Preconditions;
|
||||||
|
import java.io.IOException;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Abstract implementation of a {@link CloseableByteSourceFromOutputStreamBuilder} that simplifies
|
||||||
|
* the implementation of concrete instances. It implements the state machine implied by the
|
||||||
|
* interface contract and requires subclasses to implement two methods:
|
||||||
|
* {@link #doWrite(byte[], int, int)} -- that actually does writing and {@link #doBuild()} that
|
||||||
|
* builds the {@link CloseableByteSource].
|
||||||
|
*/
|
||||||
|
abstract class AbstractCloseableByteSourceFromOutputStreamBuilder
|
||||||
|
extends CloseableByteSourceFromOutputStreamBuilder {
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Array that allows {@link #write(int)} to delegate to {@link #write(byte[], int, int)} without
|
||||||
|
* having to create an array for each invocation.
|
||||||
|
*/
|
||||||
|
private final byte[] tempByte;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Has the builder been closed? If it has, then {@link #build()} may be called, but none of the
|
||||||
|
* writing methods can.
|
||||||
|
*/
|
||||||
|
private boolean closed;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Has the builder been built? If this is {@code true} then {@link #closed} is also {@code true}.
|
||||||
|
*/
|
||||||
|
private boolean built;
|
||||||
|
|
||||||
|
/** Creates a new builder. */
|
||||||
|
AbstractCloseableByteSourceFromOutputStreamBuilder() {
|
||||||
|
tempByte = new byte[1];
|
||||||
|
closed = false;
|
||||||
|
built = false;
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public void write(byte[] b, int off, int len) throws IOException {
|
||||||
|
Preconditions.checkState(!closed);
|
||||||
|
doWrite(b, off, len);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public void write(int b) throws IOException {
|
||||||
|
tempByte[0] = (byte) b;
|
||||||
|
write(tempByte, 0, 1);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public void close() throws IOException {
|
||||||
|
closed = true;
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public CloseableByteSource build() throws IOException {
|
||||||
|
Preconditions.checkState(!built);
|
||||||
|
closed = true;
|
||||||
|
built = true;
|
||||||
|
|
||||||
|
return doBuild();
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Same as {@link #write(byte[], int, int)}, but with the guarantee that the source has not been
|
||||||
|
* built and the builder is still open.
|
||||||
|
*
|
||||||
|
* @param b see {@link #write(byte[], int, int)}
|
||||||
|
* @param off see {@link #write(byte[], int, int)}
|
||||||
|
* @param len see {@link #write(byte[], int, int)}
|
||||||
|
* @throws IOException see {@link #write(byte[], int, int)}
|
||||||
|
*/
|
||||||
|
protected abstract void doWrite(byte[] b, int off, int len) throws IOException;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Builds the {@link CloseableByteSource} from the written data. This method is at most invoked
|
||||||
|
* once.
|
||||||
|
*
|
||||||
|
* @return the new source that will contain all data written to the builder so far
|
||||||
|
* @throws IOException failed to create the byte source
|
||||||
|
*/
|
||||||
|
protected abstract CloseableByteSource doBuild() throws IOException;
|
||||||
|
}
|
||||||
|
|
@ -0,0 +1,72 @@
|
||||||
|
/*
|
||||||
|
* Copyright (C) 2018 The Android Open Source Project
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package com.android.tools.build.apkzlib.bytestorage;
|
||||||
|
|
||||||
|
import com.android.tools.build.apkzlib.zip.utils.CloseableByteSource;
|
||||||
|
import com.google.common.io.ByteSource;
|
||||||
|
import java.io.Closeable;
|
||||||
|
import java.io.IOException;
|
||||||
|
import java.io.InputStream;
|
||||||
|
|
||||||
|
/**
 * Interface for a storage that will temporarily save bytes. There are several factory methods to
 * create byte sources from several inputs, all of which may be discarded after the byte source has
 * been created. The data is saved in the storage and will be kept until the byte source is closed.
 *
 * <p>Closing the storage itself releases whatever resources the implementation holds; the exact
 * effect on still-open sources is implementation-defined.
 */
public interface ByteStorage extends Closeable {
  /**
   * Creates a new byte source by fully reading an input stream. The stream is read to EOF but not
   * closed by this method.
   *
   * @param stream the input stream
   * @return a byte source containing the cached data from the given stream
   * @throws IOException failed to read the stream
   */
  CloseableByteSource fromStream(InputStream stream) throws IOException;

  /**
   * Creates a builder that is an output stream and can create a byte source.
   *
   * @return a builder where data can be written to and a {@link CloseableByteSource} can eventually
   *     be obtained from
   * @throws IOException failed to create the builder; this may happen if the builder require some
   *     preparation such as temporary storage allocation that may fail
   */
  CloseableByteSourceFromOutputStreamBuilder makeBuilder() throws IOException;

  /**
   * Creates a new byte source from another byte source, copying its data into this storage.
   *
   * @param source the byte source to copy data from
   * @return the tracked byte source
   * @throws IOException failed to read data from the byte source
   */
  CloseableByteSource fromSource(ByteSource source) throws IOException;

  /**
   * Obtains the number of bytes currently used.
   *
   * @return the number of bytes
   */
  long getBytesUsed();

  /**
   * Obtains the maximum number of bytes ever used by this tracker.
   *
   * @return the number of bytes
   */
  long getMaxBytesUsed();
}
|
||||||
|
|
@ -0,0 +1,14 @@
|
||||||
|
package com.android.tools.build.apkzlib.bytestorage;
|
||||||
|
|
||||||
|
import java.io.IOException;
|
||||||
|
|
||||||
|
/** Factory that creates {@link ByteStorage}. */
public interface ByteStorageFactory {

  /**
   * Creates a new storage.
   *
   * @return a storage that should be closed when no longer used.
   * @throws IOException failed to create the storage, e.g. if the implementation needs to allocate
   *     backing resources
   */
  ByteStorage create() throws IOException;
}
|
||||||
|
|
@ -0,0 +1,143 @@
|
||||||
|
package com.android.tools.build.apkzlib.bytestorage;
|
||||||
|
|
||||||
|
import com.android.tools.build.apkzlib.zip.utils.CloseableByteSource;
|
||||||
|
import com.google.common.annotations.VisibleForTesting;
|
||||||
|
import com.google.common.io.ByteSource;
|
||||||
|
import java.io.IOException;
|
||||||
|
import java.io.InputStream;
|
||||||
|
import java.util.ArrayList;
|
||||||
|
import java.util.List;
|
||||||
|
import javax.annotation.Nullable;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Byte storage that breaks byte sources into smaller byte sources. This storage uses another
|
||||||
|
* storage as a delegate and, when a source is requested, it will allocate one or more sources from
|
||||||
|
* the delegate to build the requested source.
|
||||||
|
*/
|
||||||
|
public class ChunkBasedByteStorage implements ByteStorage {
|
||||||
|
|
||||||
|
/** Size of the default chunk size. */
|
||||||
|
private static final long DEFAULT_CHUNK_SIZE_BYTES = 10 * 1024 * 1024;
|
||||||
|
|
||||||
|
/** Maximum size of each chunk. */
|
||||||
|
private final long maxChunkSize;
|
||||||
|
|
||||||
|
/** Byte storage where the data is actually stored. */
|
||||||
|
private final ByteStorage delegate;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Creates a new storage breaking sources in chunks with the default maximum size and allocating
|
||||||
|
* each chunk from {@code delegate}.
|
||||||
|
*/
|
||||||
|
ChunkBasedByteStorage(ByteStorage delegate) {
|
||||||
|
this(DEFAULT_CHUNK_SIZE_BYTES, delegate);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Creates a new storage breaking sources in chunks with the maximum of {@code maxChunkSize} and
|
||||||
|
* allocating each chunk from {@code delegate}.
|
||||||
|
*/
|
||||||
|
ChunkBasedByteStorage(long maxChunkSize, ByteStorage delegate) {
|
||||||
|
this.maxChunkSize = maxChunkSize;
|
||||||
|
this.delegate = delegate;
|
||||||
|
}
|
||||||
|
|
||||||
|
/** Obtains the byte storage chunks are allocated from. */
|
||||||
|
@VisibleForTesting // private otherwise.
|
||||||
|
public ByteStorage getDelegate() {
|
||||||
|
return delegate;
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public CloseableByteSource fromStream(InputStream stream) throws IOException {
|
||||||
|
List<CloseableByteSource> sources = new ArrayList<>();
|
||||||
|
while (true) {
|
||||||
|
LimitedInputStream limitedInput = new LimitedInputStream(stream, maxChunkSize);
|
||||||
|
sources.add(delegate.fromStream(limitedInput));
|
||||||
|
if (limitedInput.isInputFinished()) {
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return new ChunkBasedCloseableByteSource(sources);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public CloseableByteSourceFromOutputStreamBuilder makeBuilder() throws IOException {
|
||||||
|
return new AbstractCloseableByteSourceFromOutputStreamBuilder() {
|
||||||
|
private final List<CloseableByteSource> sources = new ArrayList<>();
|
||||||
|
@Nullable private CloseableByteSourceFromOutputStreamBuilder currentBuilder = null;
|
||||||
|
private long written = 0;
|
||||||
|
|
||||||
|
@Override
|
||||||
|
protected void doWrite(byte[] b, int off, int len) throws IOException {
|
||||||
|
int actualOffset = off;
|
||||||
|
int remaining = len;
|
||||||
|
|
||||||
|
while (remaining > 0) {
|
||||||
|
// Since we're writing data, make sure we have a builder to create the new source.
|
||||||
|
if (currentBuilder == null) {
|
||||||
|
currentBuilder = delegate.makeBuilder();
|
||||||
|
written = 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
// See how much we can write without exceeding maxChunkSize in the current builder.
|
||||||
|
int maxWrite = (int) Math.min(maxChunkSize - written, remaining);
|
||||||
|
currentBuilder.write(b, actualOffset, maxWrite);
|
||||||
|
written += maxWrite;
|
||||||
|
|
||||||
|
remaining -= maxWrite;
|
||||||
|
actualOffset += maxWrite;
|
||||||
|
|
||||||
|
// If we've reached the end of the chunk, create the source for the part we have and reset
|
||||||
|
// to builder so we start a new one if there is more data.
|
||||||
|
if (written == maxChunkSize) {
|
||||||
|
sources.add(currentBuilder.build());
|
||||||
|
currentBuilder = null;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
protected CloseableByteSource doBuild() throws IOException {
|
||||||
|
// If we were writing a chunk, close it.
|
||||||
|
if (currentBuilder != null) {
|
||||||
|
sources.add(currentBuilder.build());
|
||||||
|
currentBuilder = null;
|
||||||
|
}
|
||||||
|
|
||||||
|
return new ChunkBasedCloseableByteSource(sources);
|
||||||
|
}
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public CloseableByteSource fromSource(ByteSource source) throws IOException {
|
||||||
|
List<CloseableByteSource> sources = new ArrayList<>();
|
||||||
|
|
||||||
|
long end = source.size();
|
||||||
|
long start = 0;
|
||||||
|
while (start < end) {
|
||||||
|
long chunkSize = Math.min(end - start, maxChunkSize);
|
||||||
|
sources.add(delegate.fromSource(source.slice(start, chunkSize)));
|
||||||
|
start += chunkSize;
|
||||||
|
}
|
||||||
|
|
||||||
|
return new ChunkBasedCloseableByteSource(sources);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public long getBytesUsed() {
|
||||||
|
return delegate.getBytesUsed();
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public long getMaxBytesUsed() {
|
||||||
|
return delegate.getMaxBytesUsed();
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public void close() throws IOException {
|
||||||
|
delegate.close();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
@ -0,0 +1,40 @@
|
||||||
|
package com.android.tools.build.apkzlib.bytestorage;
|
||||||
|
|
||||||
|
import java.io.IOException;
|
||||||
|
import javax.annotation.Nullable;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* {@link ByteStorageFactory} that creates {@link ByteStorage} instances that keep all data in
|
||||||
|
* memory.
|
||||||
|
*/
|
||||||
|
public class ChunkBasedByteStorageFactory implements ByteStorageFactory {
|
||||||
|
|
||||||
|
/** Factory to create the delegate storages. */
|
||||||
|
private final ByteStorageFactory delegate;
|
||||||
|
|
||||||
|
/** Maximum size for chunks, if any. */
|
||||||
|
@Nullable private final Long maxChunkSize;
|
||||||
|
|
||||||
|
/** Creates a new factory whose storages are created using delegates from the given factory. */
|
||||||
|
public ChunkBasedByteStorageFactory(ByteStorageFactory delegate) {
|
||||||
|
this(delegate, /*maxChunkSize=*/ null);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Creates a new factory whose storages use the given maximum chunk size and are created using
|
||||||
|
* delegates from the given factory.
|
||||||
|
*/
|
||||||
|
public ChunkBasedByteStorageFactory(ByteStorageFactory delegate, @Nullable Long maxChunkSize) {
|
||||||
|
this.delegate = delegate;
|
||||||
|
this.maxChunkSize = maxChunkSize;
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public ByteStorage create() throws IOException {
|
||||||
|
if (maxChunkSize == null) {
|
||||||
|
return new ChunkBasedByteStorage(delegate.create());
|
||||||
|
} else {
|
||||||
|
return new ChunkBasedByteStorage(maxChunkSize, delegate.create());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
@ -0,0 +1,44 @@
|
||||||
|
package com.android.tools.build.apkzlib.bytestorage;
|
||||||
|
|
||||||
|
import com.android.tools.build.apkzlib.zip.utils.CloseableByteSource;
|
||||||
|
import com.android.tools.build.apkzlib.zip.utils.CloseableDelegateByteSource;
|
||||||
|
import com.google.common.collect.ImmutableList;
|
||||||
|
import com.google.common.io.ByteSource;
|
||||||
|
import com.google.common.io.Closer;
|
||||||
|
import java.io.IOException;
|
||||||
|
import java.util.List;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Byte source that has its data spread over several chunks, each with its own {@link
|
||||||
|
* CloseableByteSource}.
|
||||||
|
*/
|
||||||
|
class ChunkBasedCloseableByteSource extends CloseableDelegateByteSource {
|
||||||
|
|
||||||
|
/** The sources for data of all the chunks, in order. */
|
||||||
|
private final ImmutableList<CloseableByteSource> sources;
|
||||||
|
|
||||||
|
/** Creates a new source from the given sources. */
|
||||||
|
ChunkBasedCloseableByteSource(List<CloseableByteSource> sources) throws IOException {
|
||||||
|
super(ByteSource.concat(sources), sumSizes(sources));
|
||||||
|
this.sources = ImmutableList.copyOf(sources);
|
||||||
|
}
|
||||||
|
|
||||||
|
/** Computes the size of this source by summing the sizes of all sources. */
|
||||||
|
private static long sumSizes(List<CloseableByteSource> sources) throws IOException {
|
||||||
|
long sum = 0;
|
||||||
|
for (CloseableByteSource source : sources) {
|
||||||
|
sum += source.size();
|
||||||
|
}
|
||||||
|
|
||||||
|
return sum;
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
protected synchronized void innerClose() throws IOException {
|
||||||
|
try (Closer closer = Closer.create()) {
|
||||||
|
for (CloseableByteSource source : sources) {
|
||||||
|
closer.register(source);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
@ -0,0 +1,22 @@
|
||||||
|
package com.android.tools.build.apkzlib.bytestorage;
|
||||||
|
|
||||||
|
import com.android.tools.build.apkzlib.zip.utils.CloseableByteSource;
|
||||||
|
import java.io.IOException;
|
||||||
|
import java.io.OutputStream;
|
||||||
|
|
||||||
|
/**
 * Output stream that creates a {@link CloseableByteSource} from the data that was written to it.
 * Calling {@link #close} is optional as {@link #build()} will also close the output stream.
 */
public abstract class CloseableByteSourceFromOutputStreamBuilder extends OutputStream {

  /**
   * Creates the source from the data that has been written to the stream. No more data can be
   * written to the output stream after this method has been called.
   *
   * @return a source that will provide the data that was written to the stream before this method
   *     is invoked; where this data is stored is not specified by this interface
   * @throws IOException failed to build the byte source
   */
  public abstract CloseableByteSource build() throws IOException;
}
|
||||||
|
|
@ -0,0 +1,104 @@
|
||||||
|
/*
|
||||||
|
* Copyright (C) 2018 The Android Open Source Project
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package com.android.tools.build.apkzlib.bytestorage;
|
||||||
|
|
||||||
|
import com.android.tools.build.apkzlib.zip.utils.CloseableByteSource;
|
||||||
|
import com.android.tools.build.apkzlib.zip.utils.CloseableDelegateByteSource;
|
||||||
|
import com.google.common.io.ByteSource;
|
||||||
|
import com.google.common.io.ByteStreams;
|
||||||
|
import java.io.ByteArrayOutputStream;
|
||||||
|
import java.io.IOException;
|
||||||
|
import java.io.InputStream;
|
||||||
|
|
||||||
|
/** Keeps track of used bytes allowing gauging memory usage. */
|
||||||
|
public class InMemoryByteStorage implements ByteStorage {
|
||||||
|
|
||||||
|
/** Number of bytes currently in use. */
|
||||||
|
private long bytesUsed;
|
||||||
|
|
||||||
|
/** Maximum number of bytes used. */
|
||||||
|
private long maxBytesUsed;
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public CloseableByteSource fromStream(InputStream stream) throws IOException {
|
||||||
|
byte[] data = ByteStreams.toByteArray(stream);
|
||||||
|
updateUsage(data.length);
|
||||||
|
return new CloseableDelegateByteSource(ByteSource.wrap(data), data.length) {
|
||||||
|
@Override
|
||||||
|
public synchronized void innerClose() throws IOException {
|
||||||
|
super.innerClose();
|
||||||
|
updateUsage(-sizeNoException());
|
||||||
|
}
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public CloseableByteSourceFromOutputStreamBuilder makeBuilder() throws IOException {
|
||||||
|
ByteArrayOutputStream output = new ByteArrayOutputStream();
|
||||||
|
return new AbstractCloseableByteSourceFromOutputStreamBuilder() {
|
||||||
|
@Override
|
||||||
|
protected void doWrite(byte[] b, int off, int len) throws IOException {
|
||||||
|
output.write(b, off, len);
|
||||||
|
updateUsage(len);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
protected CloseableByteSource doBuild() throws IOException {
|
||||||
|
byte[] data = output.toByteArray();
|
||||||
|
return new CloseableDelegateByteSource(ByteSource.wrap(data), data.length) {
|
||||||
|
@Override
|
||||||
|
protected synchronized void innerClose() throws IOException {
|
||||||
|
super.innerClose();
|
||||||
|
updateUsage(-data.length);
|
||||||
|
}
|
||||||
|
};
|
||||||
|
}
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public CloseableByteSource fromSource(ByteSource source) throws IOException {
|
||||||
|
return fromStream(source.openStream());
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Updates the memory used by this tracker.
|
||||||
|
*
|
||||||
|
* @param delta the number of bytes to add or remove, if negative
|
||||||
|
*/
|
||||||
|
private synchronized void updateUsage(long delta) {
|
||||||
|
bytesUsed += delta;
|
||||||
|
if (maxBytesUsed < bytesUsed) {
|
||||||
|
maxBytesUsed = bytesUsed;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public synchronized long getBytesUsed() {
|
||||||
|
return bytesUsed;
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public synchronized long getMaxBytesUsed() {
|
||||||
|
return maxBytesUsed;
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public void close() throws IOException {
|
||||||
|
// Nothing to do on close.
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
@ -0,0 +1,15 @@
|
||||||
|
package com.android.tools.build.apkzlib.bytestorage;
|
||||||
|
|
||||||
|
import java.io.IOException;
|
||||||
|
|
||||||
|
/**
 * {@link ByteStorageFactory} that creates {@link ByteStorage} instances that keep all data in
 * memory.
 */
public class InMemoryByteStorageFactory implements ByteStorageFactory {

  @Override
  public ByteStorage create() throws IOException {
    // Stateless factory: every call returns a fresh, independent in-memory storage.
    return new InMemoryByteStorage();
  }
}
|
||||||
|
|
@ -0,0 +1,88 @@
|
||||||
|
/*
|
||||||
|
* Copyright (C) 2018 The Android Open Source Project
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package com.android.tools.build.apkzlib.bytestorage;
|
||||||
|
|
||||||
|
import java.io.IOException;
|
||||||
|
import java.io.InputStream;
|
||||||
|
|
||||||
|
/**
 * Input stream that reads only a limited number of bytes from another input stream before reporting
 * EOF. When closed, this stream will not close the underlying stream.
 *
 * <p>If the underlying stream does not have enough data, this stream will read all available data
 * from the underlying stream.
 */
class LimitedInputStream extends InputStream {

  /** Where the data comes from. */
  private final InputStream input;

  /** How many bytes may still be read from this stream. */
  private long remaining;

  /** Has EOF been detected in {@link #input}? */
  private boolean eofDetected;

  /**
   * Creates a new input stream.
   *
   * @param input where to read data from
   * @param maximum the maximum number of bytes to read from {@code input}
   */
  LimitedInputStream(InputStream input, long maximum) {
    this.input = input;
    this.remaining = maximum;
    this.eofDetected = false;
  }

  @Override
  public int read() throws IOException {
    if (remaining == 0) {
      return -1;
    }

    int r = input.read();
    if (r >= 0) {
      remaining--;
    } else {
      eofDetected = true;
    }

    return r;
  }

  @Override
  public int read(byte[] whereTo, int offset, int length) throws IOException {
    // Validate arguments first, per the InputStream contract.
    if (offset < 0 || length < 0 || length > whereTo.length - offset) {
      throw new IndexOutOfBoundsException(
          "offset=" + offset + ", length=" + length + ", buffer=" + whereTo.length);
    }

    // Per the InputStream contract a zero-length read returns 0, never -1 --
    // even when the limit has been exhausted.
    if (length == 0) {
      return 0;
    }

    if (remaining == 0) {
      return -1;
    }

    int toRead = (int) Math.min(remaining, length);
    int r = input.read(whereTo, offset, toRead);
    if (r >= 0) {
      remaining -= r;
    } else {
      eofDetected = true;
    }

    return r;
  }

  @Override
  public int available() throws IOException {
    // Never report more than this stream is still willing to hand out.
    return (int) Math.min(remaining, input.available());
  }

  /** Returns {@code true} if EOF has been detected in the {@code input} stream. */
  boolean isInputFinished() {
    return eofDetected;
  }
}
|
||||||
|
|
@ -0,0 +1,80 @@
|
||||||
|
package com.android.tools.build.apkzlib.bytestorage;
|
||||||
|
|
||||||
|
import com.android.tools.build.apkzlib.zip.utils.CloseableByteSource;
|
||||||
|
import com.google.common.base.Preconditions;
|
||||||
|
import java.io.IOException;
|
||||||
|
import java.io.InputStream;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Byte source that, until switched, will keep itself in the LRU queue. The byte source will
|
||||||
|
* automatically remove itself from the queue once closed or moved to disk (see {@link
|
||||||
|
* #moveToDisk(ByteStorage)}. This source should not be switched explicitly or tracking will not
|
||||||
|
* work.
|
||||||
|
*
|
||||||
|
* <p>The source will consider an access to be opening a stream. Every time a stream is open the
|
||||||
|
* source will move itself to the top of the LRU list.
|
||||||
|
*/
|
||||||
|
class LruTrackedCloseableByteSource extends SwitchableDelegateCloseableByteSource {
|
||||||
|
/** The tracker being used. */
|
||||||
|
private final LruTracker<LruTrackedCloseableByteSource> tracker;
|
||||||
|
|
||||||
|
/** Are we still tracking usage? */
|
||||||
|
private boolean tracking;
|
||||||
|
|
||||||
|
/** Has the byte source been closed? */
|
||||||
|
private boolean closed;
|
||||||
|
|
||||||
|
/** Creates a new byte source based on the given source and using the provided tracker. */
|
||||||
|
LruTrackedCloseableByteSource(
|
||||||
|
CloseableByteSource delegate, LruTracker<LruTrackedCloseableByteSource> tracker)
|
||||||
|
throws IOException {
|
||||||
|
super(delegate);
|
||||||
|
this.tracker = tracker;
|
||||||
|
tracker.track(this);
|
||||||
|
tracking = true;
|
||||||
|
closed = false;
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public synchronized InputStream openStream() throws IOException {
|
||||||
|
Preconditions.checkState(!closed);
|
||||||
|
if (tracking) {
|
||||||
|
tracker.access(this);
|
||||||
|
}
|
||||||
|
|
||||||
|
return super.openStream();
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
protected synchronized void innerClose() throws IOException {
|
||||||
|
closed = true;
|
||||||
|
|
||||||
|
untrack();
|
||||||
|
super.innerClose();
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Marks this source as not being tracked any more. May be called multiple times (only the first
|
||||||
|
* one will do anything).
|
||||||
|
*/
|
||||||
|
private synchronized void untrack() {
|
||||||
|
if (tracking) {
|
||||||
|
tracking = false;
|
||||||
|
tracker.untrack(this);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Moves the contents of this source to a storage. This will untrack the source and switch its
|
||||||
|
* contents to a new delegate provided by {@code diskStorage}.
|
||||||
|
*/
|
||||||
|
synchronized void move(ByteStorage diskStorage) throws IOException {
|
||||||
|
if (closed) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
CloseableByteSource diskSource = diskStorage.fromSource(this);
|
||||||
|
untrack();
|
||||||
|
switchSource(diskSource);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
@ -0,0 +1,85 @@
|
||||||
|
package com.android.tools.build.apkzlib.bytestorage;
|
||||||
|
|
||||||
|
import com.google.common.base.Preconditions;
|
||||||
|
import com.google.common.collect.BiMap;
|
||||||
|
import com.google.common.collect.HashBiMap;
|
||||||
|
import java.util.TreeSet;
|
||||||
|
import javax.annotation.Nullable;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* A tracker that keeps a list of the last-recently-used objects of type {@code T}. The tracker
|
||||||
|
* doesn't define what LRU means, it has a method, {@link #access(Object)} that marks the object as
|
||||||
|
* being accessed and moves it to the top of the queue.
|
||||||
|
*
|
||||||
|
* <p>This implementation is O(log(N)) on all operations.
|
||||||
|
*
|
||||||
|
* <p>Implementation note: we don't keep track of time. Instead we use a counter that is incremented
|
||||||
|
* every time a new access is done or a new object is tracked. Because of this, each access time is
|
||||||
|
* unique for each object (although it will change after each access).
|
||||||
|
*/
|
||||||
|
class LruTracker<T> {
|
||||||
|
|
||||||
|
/** Maps each object to its unique access time and vice-versa. */
|
||||||
|
private final BiMap<T, Integer> objectToAccessTime;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Ordered set of all object's access times. This set has the same contents as {@code
|
||||||
|
* objectToAccessTime.value()}. It is sorted from the highest access time (newest) to the lowest
|
||||||
|
* access time (oldest).
|
||||||
|
*/
|
||||||
|
private final TreeSet<Integer> accessTimes;
|
||||||
|
|
||||||
|
/** Next access time to use for tracking or accessing. */
|
||||||
|
private int currentTime;
|
||||||
|
|
||||||
|
/** Creates a new tracker without any objects. */
|
||||||
|
LruTracker() {
|
||||||
|
currentTime = 1;
|
||||||
|
objectToAccessTime = HashBiMap.create();
|
||||||
|
accessTimes = new TreeSet<>((i0, i1) -> i1 - i0);
|
||||||
|
}
|
||||||
|
|
||||||
|
/** Starts tracking an object. This object's will be the most recently used. */
|
||||||
|
synchronized void track(T object) {
|
||||||
|
Preconditions.checkState(!objectToAccessTime.containsKey(object));
|
||||||
|
objectToAccessTime.put(object, currentTime);
|
||||||
|
accessTimes.add(currentTime);
|
||||||
|
currentTime++;
|
||||||
|
}
|
||||||
|
|
||||||
|
/** Stops tracking an object. */
|
||||||
|
synchronized void untrack(T object) {
|
||||||
|
Preconditions.checkState(objectToAccessTime.containsKey(object));
|
||||||
|
accessTimes.remove(objectToAccessTime.get(object));
|
||||||
|
objectToAccessTime.remove(object);
|
||||||
|
}
|
||||||
|
|
||||||
|
/** Marks the given object as having been accessed promoting it as the most recently used. */
|
||||||
|
synchronized void access(T object) {
|
||||||
|
untrack(object);
|
||||||
|
track(object);
|
||||||
|
}
|
||||||
|
|
||||||
|
  /**
   * Obtains the position of an object in the queue. It will be {@code 0} for the most recently used
   * object.
   */
  synchronized int positionOf(T object) {
    Preconditions.checkState(objectToAccessTime.containsKey(object));
    int lastAccess = objectToAccessTime.get(object);
    // accessTimes is sorted newest-first, so the head set strictly before this object's access
    // time contains exactly the access times of objects used more recently; its size is therefore
    // this object's position.
    return accessTimes.headSet(lastAccess).size();
  }
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Obtains the last element, the one last accessed earliest. Will return empty if there are no
|
||||||
|
* objects being tracked.
|
||||||
|
*/
|
||||||
|
@Nullable
|
||||||
|
synchronized T last() {
|
||||||
|
if (accessTimes.isEmpty()) {
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
|
||||||
|
return objectToAccessTime.inverse().get(accessTimes.last());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
@ -0,0 +1,167 @@
|
||||||
|
package com.android.tools.build.apkzlib.bytestorage;
|
||||||
|
|
||||||
|
import com.android.tools.build.apkzlib.zip.utils.CloseableByteSource;
|
||||||
|
import com.google.common.annotations.VisibleForTesting;
|
||||||
|
import com.google.common.io.ByteSource;
|
||||||
|
import java.io.IOException;
|
||||||
|
import java.io.InputStream;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Byte storage that keeps data in memory up to a certain size. After that, older sources are moved
|
||||||
|
* to disk and the newer ones served from memory.
|
||||||
|
*
|
||||||
|
* <p>Once unloaded to disk, sources are not reloaded into memory as that would be in direct
|
||||||
|
* conflict with the filesystem's caching and the costs would probably outweight the benefits.
|
||||||
|
*
|
||||||
|
* <p>The maximum memory used by storage is actually larger than the maximum provided. It may exceed
|
||||||
|
* the limit by the size of one source. That is because sources are always loaded into memory before
|
||||||
|
* the storage decides to flush them to disk.
|
||||||
|
*/
|
||||||
|
public class OverflowToDiskByteStorage implements ByteStorage {
|
||||||
|
|
||||||
|
/** Size of the default memory cache. */
|
||||||
|
private static final long DEFAULT_MEMORY_CACHE_BYTES = 50 * 1024 * 1024;
|
||||||
|
|
||||||
|
/** In-memory storage. */
|
||||||
|
private final InMemoryByteStorage memoryStorage;
|
||||||
|
|
||||||
|
/** Disk-based storage. */
|
||||||
|
@VisibleForTesting // private otherwise.
|
||||||
|
final TemporaryDirectoryStorage diskStorage;
|
||||||
|
|
||||||
|
/** Tracker that keeps all memory sources. */
|
||||||
|
private final LruTracker<LruTrackedCloseableByteSource> memorySourcesTracker;
|
||||||
|
|
||||||
|
/** Maximum amount of data to keep in memory. */
|
||||||
|
private final long memoryCacheSize;
|
||||||
|
|
||||||
|
/** Maximum amount of data used. */
|
||||||
|
private long maxBytesUsed;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Creates a new byte storage with the default memory cache using the provided temporary directory
|
||||||
|
* to write data that overflows the memory size.
|
||||||
|
*
|
||||||
|
* @param temporaryDirectoryFactory the factory used to create a temporary directory where to
|
||||||
|
* overflow to; the created directory will be closed when the {@link
|
||||||
|
* OverflowToDiskByteStorage} object is closed
|
||||||
|
* @throws IOException failed to create the temporary directory
|
||||||
|
*/
|
||||||
|
public OverflowToDiskByteStorage(TemporaryDirectoryFactory temporaryDirectoryFactory)
|
||||||
|
throws IOException {
|
||||||
|
this(DEFAULT_MEMORY_CACHE_BYTES, temporaryDirectoryFactory);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Creates a new byte storage with the given memory cache size using the provided temporary
|
||||||
|
* directory to write data that overflows the memory size.
|
||||||
|
*
|
||||||
|
* @param memoryCacheSize the in-memory cache; a value of {@link 0} will effectively disable
|
||||||
|
* in-memory caching
|
||||||
|
* @param temporaryDirectoryFactory the factory used to create a temporary directory where to
|
||||||
|
* overflow to; the created directory will be closed when the {@link
|
||||||
|
* OverflowToDiskByteStorage} object is closed
|
||||||
|
* @throws IOException failed to create the temporary directory
|
||||||
|
*/
|
||||||
|
public OverflowToDiskByteStorage(
|
||||||
|
long memoryCacheSize, TemporaryDirectoryFactory temporaryDirectoryFactory)
|
||||||
|
throws IOException {
|
||||||
|
memoryStorage = new InMemoryByteStorage();
|
||||||
|
diskStorage = new TemporaryDirectoryStorage(temporaryDirectoryFactory);
|
||||||
|
this.memoryCacheSize = memoryCacheSize;
|
||||||
|
this.memorySourcesTracker = new LruTracker<>();
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public CloseableByteSource fromStream(InputStream stream) throws IOException {
|
||||||
|
CloseableByteSource memSource =
|
||||||
|
new LruTrackedCloseableByteSource(memoryStorage.fromStream(stream), memorySourcesTracker);
|
||||||
|
checkMaxUsage();
|
||||||
|
reviewSources();
|
||||||
|
return memSource;
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public CloseableByteSourceFromOutputStreamBuilder makeBuilder() throws IOException {
|
||||||
|
CloseableByteSourceFromOutputStreamBuilder memBuilder = memoryStorage.makeBuilder();
|
||||||
|
return new AbstractCloseableByteSourceFromOutputStreamBuilder() {
|
||||||
|
@Override
|
||||||
|
protected void doWrite(byte[] b, int off, int len) throws IOException {
|
||||||
|
memBuilder.write(b, off, len);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
protected CloseableByteSource doBuild() throws IOException {
|
||||||
|
CloseableByteSource memSource =
|
||||||
|
new LruTrackedCloseableByteSource(memBuilder.build(), memorySourcesTracker);
|
||||||
|
checkMaxUsage();
|
||||||
|
reviewSources();
|
||||||
|
return memSource;
|
||||||
|
}
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public CloseableByteSource fromSource(ByteSource source) throws IOException {
|
||||||
|
CloseableByteSource memSource =
|
||||||
|
new LruTrackedCloseableByteSource(memoryStorage.fromSource(source), memorySourcesTracker);
|
||||||
|
checkMaxUsage();
|
||||||
|
reviewSources();
|
||||||
|
return memSource;
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public synchronized long getBytesUsed() {
|
||||||
|
return memoryStorage.getBytesUsed() + diskStorage.getBytesUsed();
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public synchronized long getMaxBytesUsed() {
|
||||||
|
return maxBytesUsed;
|
||||||
|
}
|
||||||
|
|
||||||
|
/** Checks if we have reached a new high of data usage and set it. */
|
||||||
|
private synchronized void checkMaxUsage() {
|
||||||
|
if (getBytesUsed() > maxBytesUsed) {
|
||||||
|
maxBytesUsed = getBytesUsed();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/** Checks if any of the sources needs to be written to disk or loaded into memory. */
|
||||||
|
private synchronized void reviewSources() throws IOException {
|
||||||
|
// Move data from memory to disk until we have at most memoryCacheSize bytes in memory.
|
||||||
|
while (memoryStorage.getBytesUsed() > memoryCacheSize) {
|
||||||
|
LruTrackedCloseableByteSource last = memorySourcesTracker.last();
|
||||||
|
if (last != null) {
|
||||||
|
LruTrackedCloseableByteSource lastSource = last;
|
||||||
|
lastSource.move(diskStorage);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/** Obtains the number of bytes stored in memory. */
|
||||||
|
public long getMemoryBytesUsed() {
|
||||||
|
return memoryStorage.getBytesUsed();
|
||||||
|
}
|
||||||
|
|
||||||
|
/** Obtains the maximum number of bytes ever stored in memory. */
|
||||||
|
public long getMaxMemoryBytesUsed() {
|
||||||
|
return memoryStorage.getMaxBytesUsed();
|
||||||
|
}
|
||||||
|
|
||||||
|
/** Obtains the number of bytes stored in disk. */
|
||||||
|
public long getDiskBytesUsed() {
|
||||||
|
return diskStorage.getBytesUsed();
|
||||||
|
}
|
||||||
|
|
||||||
|
/** Obtains the maximum number of bytes ever stored in disk. */
|
||||||
|
public long getMaxDiskBytesUsed() {
|
||||||
|
return diskStorage.getMaxBytesUsed();
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public void close() throws IOException {
|
||||||
|
memoryStorage.close();
|
||||||
|
diskStorage.close();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
@ -0,0 +1,50 @@
|
||||||
|
package com.android.tools.build.apkzlib.bytestorage;
|
||||||
|
|
||||||
|
import java.io.IOException;
|
||||||
|
import javax.annotation.Nullable;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* {@link ByteStorageFactory} that creates instances of {@link ByteStorage} that will keep some data
|
||||||
|
* in memory and will overflow to disk when necessary.
|
||||||
|
*/
|
||||||
|
public class OverflowToDiskByteStorageFactory implements ByteStorageFactory {
|
||||||
|
|
||||||
|
/** How much data we want to keep in cache? If {@code null} then we want the default value. */
|
||||||
|
@Nullable private final Long memoryCacheSizeInBytes;
|
||||||
|
|
||||||
|
/** Factory that creates temporary directories. */
|
||||||
|
private final TemporaryDirectoryFactory temporaryDirectoryFactory;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Creates a new factory with an optional in-memory size and a temporary directory for overflow.
|
||||||
|
*
|
||||||
|
* @param temporaryDirectoryFactory a factory that creates temporary directories that will be used
|
||||||
|
* for overflow of the {@link ByteStorage} instances created by this factory
|
||||||
|
*/
|
||||||
|
public OverflowToDiskByteStorageFactory(TemporaryDirectoryFactory temporaryDirectoryFactory) {
|
||||||
|
this(null, temporaryDirectoryFactory);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Creates a new factory with an optional in-memory size and a temporary directory for overflow.
|
||||||
|
*
|
||||||
|
* @param memoryCacheSizeInBytes how many bytes to keep in memory? If {@code null} then a default
|
||||||
|
* value will be used
|
||||||
|
* @param temporaryDirectoryFactory a factory that creates temporary directories that will be used
|
||||||
|
* for overflow of the {@link ByteStorage} instances created by this factory
|
||||||
|
*/
|
||||||
|
public OverflowToDiskByteStorageFactory(
|
||||||
|
Long memoryCacheSizeInBytes, TemporaryDirectoryFactory temporaryDirectoryFactory) {
|
||||||
|
this.memoryCacheSizeInBytes = memoryCacheSizeInBytes;
|
||||||
|
this.temporaryDirectoryFactory = temporaryDirectoryFactory;
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public ByteStorage create() throws IOException {
|
||||||
|
if (memoryCacheSizeInBytes == null) {
|
||||||
|
return new OverflowToDiskByteStorage(temporaryDirectoryFactory);
|
||||||
|
} else {
|
||||||
|
return new OverflowToDiskByteStorage(memoryCacheSizeInBytes, temporaryDirectoryFactory);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
@ -0,0 +1,122 @@
|
||||||
|
package com.android.tools.build.apkzlib.bytestorage;
|
||||||
|
|
||||||
|
import com.android.tools.build.apkzlib.zip.utils.CloseableByteSource;
|
||||||
|
import com.google.common.io.Closer;
|
||||||
|
import java.io.IOException;
|
||||||
|
import java.io.InputStream;
|
||||||
|
import java.util.ArrayList;
|
||||||
|
import java.util.List;
|
||||||
|
|
||||||
|
/**
 * Byte source that delegates to another byte source that can be switched dynamically.
 *
 * <p>This byte source encloses another byte source (the delegate) and allows switching the
 * delegate. Switching is done transparently for the user (as long as the new byte source represents
 * the same data) maintaining all open streams working, but now streaming from the new source.
 */
class SwitchableDelegateCloseableByteSource extends CloseableByteSource {

  /** The current delegate; replaced by {@link #switchSource(CloseableByteSource)}. */
  private CloseableByteSource delegate;

  /** Has the byte source been closed? */
  private boolean closed;

  /**
   * Streams that have been opened, but not yet closed. These are all the streams that have to be
   * switched when we switch delegates.
   */
  private final List<SwitchableDelegateInputStream> nonClosedStreams;

  /** Creates a new source using {@code source} as delegate. */
  SwitchableDelegateCloseableByteSource(CloseableByteSource source) {
    this.delegate = source;
    nonClosedStreams = new ArrayList<>();
  }

  @Override
  protected synchronized void innerClose() throws IOException {
    closed = true;

    // Closer collects exceptions from every stream close so that one failing stream does not
    // prevent the remaining ones from being closed.
    try (Closer closer = Closer.create()) {
      for (SwitchableDelegateInputStream stream : nonClosedStreams) {
        closer.register(stream);
      }

      nonClosedStreams.clear();
    }

    delegate.close();
  }

  @Override
  public synchronized InputStream openStream() throws IOException {
    SwitchableDelegateInputStream stream =
        new SwitchableDelegateInputStream(delegate.openStream()) {
          // Can't have a lock on the stream while we synchronize the removal of nonClosedStreams
          // because it can deadlock when called in parallel with switchSource as the lock order is
          // reversed. The lack of synchronization is OK because we don't access any data on the
          // stream anyway until super.close() is called.
          @SuppressWarnings("UnsynchronizedOverridesSynchronized")
          @Override
          public void close() throws IOException {
            // Remove the stream on close so switchSource no longer tries to switch it.
            synchronized (SwitchableDelegateCloseableByteSource.this) {
              nonClosedStreams.remove(this);
            }

            super.close();
          }
        };

    nonClosedStreams.add(stream);
    return stream;
  }

  /**
   * Switches the current source for {@code source}. All streams are kept valid. The current source
   * is closed.
   *
   * <p>If the current source has already been closed, {@code source} will also be closed and
   * nothing else is done.
   *
   * <p>Otherwise, as long as it is possible to open enough input streams from {@code source} to
   * replace all current input streams, the source is changed. Any errors while closing input
   * streams (which happens during switching -- see {@link
   * SwitchableDelegateInputStream#switchStream(InputStream)}) or closing the old source are
   * reported as thrown {@code IOException}
   */
  synchronized void switchSource(CloseableByteSource source) throws IOException {
    if (source == delegate) {
      return;
    }

    if (closed) {
      source.close();
      return;
    }

    // Open all replacement streams up front: if any open fails, we abort before having touched
    // any of the existing streams.
    List<InputStream> switchStreams = new ArrayList<>();
    for (int i = 0; i < nonClosedStreams.size(); i++) {
      switchStreams.add(source.openStream());
    }

    CloseableByteSource oldDelegate = delegate;
    delegate = source;

    // A bit of trickery. We want to call switchStream for all streams. switchStream will
    // successfully switch the stream even if it throws an exception (if it does, it means it
    // failed to close the old stream). So we want to continue switching and recording all
    // exceptions. Closer() has that logic already so we register each stream switch as a close
    // operation.
    try (Closer closer = Closer.create()) {
      for (int i = 0; i < nonClosedStreams.size(); i++) {
        SwitchableDelegateInputStream nonClosedStream = nonClosedStreams.get(i);
        InputStream switchStream = switchStreams.get(i);
        closer.register(() -> nonClosedStream.switchStream(switchStream));
      }

      closer.register(oldDelegate);
    }
  }
}
|
||||||
|
|
@ -0,0 +1,181 @@
|
||||||
|
package com.android.tools.build.apkzlib.bytestorage;
|
||||||
|
|
||||||
|
import com.google.common.annotations.VisibleForTesting;
|
||||||
|
import java.io.IOException;
|
||||||
|
import java.io.InputStream;
|
||||||
|
|
||||||
|
/**
 * Input stream that delegates to another input stream, but can switch transparently the source
 * input stream.
 *
 * <p>Given a set of input streams that return the same data, this input stream will read from one
 * and allow switching to read from other streams continuing from the offset that was initially
 * read. The result is only meaningful if all streams read the same data.
 *
 * <p>This class allows transparently to switch between different implementations of the underlying
 * streams (memory, disk, etc.) while transparently providing data to users. It does not support
 * marking and it is multi-thread safe.
 */
class SwitchableDelegateInputStream extends InputStream {

  /** The input stream that is currently providing data. */
  private InputStream delegate;

  /**
   * Current offset in the input stream. We keep track of this to allow skipping data when switching
   * input streams.
   */
  private long currentOffset;

  /** Have we reached the end of stream? Once set, all reads short-circuit to end-of-stream. */
  @VisibleForTesting // private otherwise.
  boolean endOfStreamReached;

  /**
   * If a switch has occurred, how many bytes still need to be skipped in the input stream to
   * continue reading from the same position?
   */
  private long needsSkipping;

  /** Creates a new stream reading from {@code delegate}, positioned at offset zero. */
  SwitchableDelegateInputStream(InputStream delegate) {
    this.delegate = delegate;
    currentOffset = 0;
    endOfStreamReached = false;
    needsSkipping = 0;
  }

  /**
   * Skips data in the input stream if it has been switched and there is data to skip. Will fail if
   * we can't skip all the data.
   */
  private void skipDataIfNeeded() throws IOException {
    while (needsSkipping > 0) {
      long skipped = delegate.skip(needsSkipping);
      // skip() returning 0 repeatedly would loop forever; treat it as a hard failure instead.
      if (skipped == 0) {
        throw new IOException("Skipping InputStream after switching failed");
      }

      needsSkipping -= skipped;
    }
  }

  /** Same as {@link #increaseOffset(long)}. */
  private int increaseOffset(int amount) {
    return (int) increaseOffset((long) amount);
  }

  /**
   * Increases the current offset after reading. {@code amount} will indicate how many bytes we have
   * read. If {@code -1} then we know we've reached the end of the stream and {@link
   * #endOfStreamReached} is set to {@code true}. Returns {@code amount} unchanged so callers can
   * pass a read result straight through.
   */
  private long increaseOffset(long amount) {
    if (amount > 0) {
      currentOffset += amount;
    }

    if (amount == -1) {
      endOfStreamReached = true;
    }

    return amount;
  }

  @Override
  public synchronized int read(byte[] b) throws IOException {
    if (endOfStreamReached) {
      return -1;
    }

    skipDataIfNeeded();
    return increaseOffset(delegate.read(b));
  }

  @Override
  public synchronized int read(byte[] b, int off, int len) throws IOException {
    if (endOfStreamReached) {
      return -1;
    }

    skipDataIfNeeded();
    return increaseOffset(delegate.read(b, off, len));
  }

  @Override
  public synchronized int read() throws IOException {
    if (endOfStreamReached) {
      return -1;
    }

    skipDataIfNeeded();
    // Single-byte read returns the byte value, not a count, so the offset is adjusted by hand.
    int r = delegate.read();
    if (r == -1) {
      endOfStreamReached = true;
    } else {
      increaseOffset(1);
    }

    return r;
  }

  @Override
  public synchronized long skip(long n) throws IOException {
    if (endOfStreamReached) {
      return 0;
    }

    skipDataIfNeeded();
    return increaseOffset(delegate.skip(n));
  }

  @Override
  public synchronized int available() throws IOException {
    if (endOfStreamReached) {
      return 0;
    }

    skipDataIfNeeded();
    return delegate.available();
  }

  @Override
  public synchronized void close() throws IOException {
    // Mark end-of-stream so later reads return -1 instead of touching the closed delegate.
    endOfStreamReached = true;
    delegate.close();
  }

  @Override
  public void mark(int readlimit) {
    // We don't support marking.
  }

  @Override
  public void reset() throws IOException {
    throw new IOException("Mark not supported");
  }

  @Override
  public boolean markSupported() {
    return false;
  }

  /**
   * Switches the stream used.
   *
   * <p>The stream that is currently in use is closed and the new stream will be used in further
   * operations. If this stream has already reached the end, {@code newStream} will be closed
   * immediately and no other action is taken. If the stream has not reached the end, any exception
   * reported is due to closing the stream currently in use, the new stream is not affected and this
   * stream can still be used to read from {@code newStream}.
   */
  synchronized void switchStream(InputStream newStream) throws IOException {
    if (newStream == delegate) {
      return;
    }

    // try-with-resources closes the old delegate even though the swap already happened; if the
    // close throws, the switch is still complete and needsSkipping is set.
    try (InputStream oldDelegate = delegate) {
      delegate = newStream;
      needsSkipping = currentOffset;
    }
  }
}
|
||||||
|
|
@ -0,0 +1,77 @@
|
||||||
|
package com.android.tools.build.apkzlib.bytestorage;
|
||||||
|
|
||||||
|
import com.google.common.annotations.VisibleForTesting;
|
||||||
|
import java.io.Closeable;
|
||||||
|
import java.io.File;
|
||||||
|
import java.io.IOException;
|
||||||
|
import java.nio.file.Files;
|
||||||
|
import java.nio.file.Path;
|
||||||
|
|
||||||
|
/**
 * A temporary directory is a directory that creates temporary files. Upon close, all temporary
 * files are removed. Whether the directory itself is removed is dependent on the actual
 * implementation.
 */
public interface TemporaryDirectory extends Closeable {

  /**
   * Creates a new file in the directory. This method returns a new file that can be deleted,
   * recreated, read and written freely by the caller. No assumptions are made on the contents of
   * this file except that it will be deleted if it still exists when the temporary directory is
   * closed.
   */
  File newFile() throws IOException;

  /** Obtains the directory, only useful for tests. */
  @VisibleForTesting // private otherwise.
  File getDirectory();

  /**
   * Creates a new temporary directory in the system's temporary directory. All files created will
   * be created in this directory. The directory, along with all the files in it, will be deleted
   * when closed.
   */
  static TemporaryDirectory newSystemTemporaryDirectory() throws IOException {
    Path tempDir = Files.createTempDirectory("tempdir_");
    // TemporaryFile deletes the wrapped directory recursively when closed.
    TemporaryFile tempDirFile = new TemporaryFile(tempDir.toFile());
    return new TemporaryDirectory() {
      @Override
      public File newFile() throws IOException {
        return Files.createTempFile(tempDir, "temp_", ".data").toFile();
      }

      @Override
      public File getDirectory() {
        return tempDir.toFile();
      }

      @Override
      public void close() throws IOException {
        tempDirFile.close();
      }
    };
  }

  /**
   * Creates a new temporary directory that uses a fixed directory.
   *
   * @param directory the directory that will be returned; this directory won't be deleted when the
   *     {@link TemporaryDirectory} objects are closed
   * @return a {@link TemporaryDirectory} that will create files in {@code directory}
   */
  static TemporaryDirectory fixed(File directory) {
    return new TemporaryDirectory() {
      @Override
      public File newFile() throws IOException {
        return Files.createTempFile(directory.toPath(), "temp_", ".data").toFile();
      }

      @Override
      public File getDirectory() {
        return directory;
      }

      @Override
      public void close() throws IOException {}
    };
  }
}
|
||||||
|
|
@ -0,0 +1,31 @@
|
||||||
|
package com.android.tools.build.apkzlib.bytestorage;
|
||||||
|
|
||||||
|
import java.io.File;
|
||||||
|
import java.io.IOException;
|
||||||
|
|
||||||
|
/**
 * Factory that creates temporary directories. {@link
 * TemporaryDirectory#newSystemTemporaryDirectory()} conforms to this interface.
 */
public interface TemporaryDirectoryFactory {

  /**
   * Creates a new temporary directory.
   *
   * @return the new temporary directory that should be closed when finished
   * @throws IOException failed to create the temporary directory
   */
  TemporaryDirectory make() throws IOException;

  /**
   * Obtains a factory that creates temporary directories using {@link
   * TemporaryDirectory#fixed(File)}.
   *
   * @param directory the directory where all temporary files will be created
   * @return a factory that creates instances of {@link TemporaryDirectory} that create all files
   *     inside {@code directory}
   */
  static TemporaryDirectoryFactory fixed(File directory) {
    return () -> TemporaryDirectory.fixed(directory);
  }
}
|
||||||
|
|
@ -0,0 +1,102 @@
|
||||||
|
package com.android.tools.build.apkzlib.bytestorage;
|
||||||
|
|
||||||
|
import com.android.tools.build.apkzlib.zip.utils.CloseableByteSource;
|
||||||
|
import com.google.common.annotations.VisibleForTesting;
|
||||||
|
import com.google.common.io.ByteSource;
|
||||||
|
import com.google.common.io.ByteStreams;
|
||||||
|
import java.io.File;
|
||||||
|
import java.io.FileOutputStream;
|
||||||
|
import java.io.IOException;
|
||||||
|
import java.io.InputStream;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Byte storage that keeps all byte sources as files in a temporary directory. Each data stored is
|
||||||
|
* stored as a new file. The file is deleted as soon as the byte source is closed.
|
||||||
|
*/
|
||||||
|
public class TemporaryDirectoryStorage implements ByteStorage {
|
||||||
|
|
||||||
|
/** Temporary directory to use. */
|
||||||
|
@VisibleForTesting // private otherwise.
|
||||||
|
final TemporaryDirectory temporaryDirectory;
|
||||||
|
|
||||||
|
/** Number of bytes currently used. */
|
||||||
|
private long bytesUsed;
|
||||||
|
|
||||||
|
/** Maximum number of bytes used. */
|
||||||
|
private long maxBytesUsed;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Creates a new storage using the provided temporary directory.
|
||||||
|
*
|
||||||
|
* @param temporaryDirectoryFactory a factory used to create the directory to use for temporary
|
||||||
|
* files; this directory will be closed when the {@link TemporaryDirectoryStorage} is closed.
|
||||||
|
* @throws IOException failed to create the temporary directory
|
||||||
|
*/
|
||||||
|
public TemporaryDirectoryStorage(TemporaryDirectoryFactory temporaryDirectoryFactory)
|
||||||
|
throws IOException {
|
||||||
|
this.temporaryDirectory = temporaryDirectoryFactory.make();
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public CloseableByteSource fromStream(InputStream stream) throws IOException {
|
||||||
|
File temporaryFile = temporaryDirectory.newFile();
|
||||||
|
try (FileOutputStream output = new FileOutputStream(temporaryFile)) {
|
||||||
|
ByteStreams.copy(stream, output);
|
||||||
|
}
|
||||||
|
|
||||||
|
long size = temporaryFile.length();
|
||||||
|
incrementBytesUsed(size);
|
||||||
|
return new TemporaryFileCloseableByteSource(temporaryFile, () -> incrementBytesUsed(-size));
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public CloseableByteSourceFromOutputStreamBuilder makeBuilder() throws IOException {
|
||||||
|
File temporaryFile = temporaryDirectory.newFile();
|
||||||
|
return new AbstractCloseableByteSourceFromOutputStreamBuilder() {
|
||||||
|
private final FileOutputStream output = new FileOutputStream(temporaryFile);
|
||||||
|
|
||||||
|
@Override
|
||||||
|
protected void doWrite(byte[] b, int off, int len) throws IOException {
|
||||||
|
output.write(b, off, len);
|
||||||
|
incrementBytesUsed(len);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
protected CloseableByteSource doBuild() throws IOException {
|
||||||
|
output.close();
|
||||||
|
long size = temporaryFile.length();
|
||||||
|
return new TemporaryFileCloseableByteSource(temporaryFile, () -> incrementBytesUsed(-size));
|
||||||
|
}
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public CloseableByteSource fromSource(ByteSource source) throws IOException {
|
||||||
|
try (InputStream stream = source.openStream()) {
|
||||||
|
return fromStream(stream);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public synchronized long getBytesUsed() {
|
||||||
|
return bytesUsed;
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public synchronized long getMaxBytesUsed() {
|
||||||
|
return maxBytesUsed;
|
||||||
|
}
|
||||||
|
|
||||||
|
/** Increments the byte counter by the given amount (decrements if {@code amount} is negative). */
|
||||||
|
private synchronized void incrementBytesUsed(long amount) {
|
||||||
|
bytesUsed += amount;
|
||||||
|
if (bytesUsed > maxBytesUsed) {
|
||||||
|
maxBytesUsed = bytesUsed;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public void close() throws IOException {
|
||||||
|
temporaryDirectory.close();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
@ -0,0 +1,64 @@
|
||||||
|
package com.android.tools.build.apkzlib.bytestorage;
|
||||||
|
|
||||||
|
import com.google.common.base.Preconditions;
|
||||||
|
import java.io.Closeable;
|
||||||
|
import java.io.File;
|
||||||
|
import java.io.IOException;
|
||||||
|
|
||||||
|
/**
 * A temporary file or directory. Wraps a file or directory and deletes it (recursively, if it is a
 * directory) when closed. Closing is idempotent.
 */
public class TemporaryFile implements Closeable {

  /**
   * The file or directory to delete on close. May no longer exist once {@link #deleted} has been
   * set.
   */
  private final File file;

  /** Set once the wrapped file or directory has been removed. */
  private boolean deleted;

  /**
   * Wraps {@code file}. The file or directory is deleted (recursively, if it is a directory) when
   * this object is closed.
   */
  public TemporaryFile(File file) {
    this.file = file;
    this.deleted = false;
  }

  /** Obtains the file or directory this temporary file refers to; fails if already deleted. */
  public File getFile() {
    if (deleted) {
      throw new IllegalStateException("File already deleted");
    }

    return file;
  }

  @Override
  public void close() throws IOException {
    if (!deleted) {
      deleted = true;
      deleteFile(file);
    }
  }

  /** Deletes {@code target} if it exists, recursing into it first when it is a directory. */
  private void deleteFile(File target) throws IOException {
    if (target.isDirectory()) {
      File[] children = target.listFiles();
      if (children != null) {
        for (File child : children) {
          deleteFile(child);
        }
      }
    }

    if (target.exists() && !target.delete()) {
      throw new IOException("Failed to delete '" + target.getAbsolutePath() + "'");
    }
  }
}
|
||||||
|
|
@ -0,0 +1,37 @@
|
||||||
|
package com.android.tools.build.apkzlib.bytestorage;
|
||||||
|
|
||||||
|
import com.android.tools.build.apkzlib.zip.utils.CloseableDelegateByteSource;
|
||||||
|
import com.google.common.io.Files;
|
||||||
|
import java.io.File;
|
||||||
|
import java.io.IOException;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Closeable byte source that uses a temporary file to store its contents. The file is deleted when
|
||||||
|
* the byte source is closed.
|
||||||
|
*/
|
||||||
|
class TemporaryFileCloseableByteSource extends CloseableDelegateByteSource {
|
||||||
|
|
||||||
|
/** Temporary file backing the byte source. */
|
||||||
|
private final TemporaryFile temporaryFile;
|
||||||
|
|
||||||
|
/** Callback to notify when the byte source is closed. */
|
||||||
|
private final Runnable closeCallback;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Creates a new byte source based on the given file. The provided callback is executed when the
|
||||||
|
* source is deleted. There is no guarantee about which thread invokes the callback (it is the
|
||||||
|
* thread that closes the source).
|
||||||
|
*/
|
||||||
|
TemporaryFileCloseableByteSource(File file, Runnable closeCallback) {
|
||||||
|
super(Files.asByteSource(file), file.length());
|
||||||
|
temporaryFile = new TemporaryFile(file);
|
||||||
|
this.closeCallback = closeCallback;
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
protected synchronized void innerClose() throws IOException {
|
||||||
|
super.innerClose();
|
||||||
|
temporaryFile.close();
|
||||||
|
closeCallback.run();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
@ -0,0 +1,69 @@
|
||||||
|
/*
|
||||||
|
* Copyright (C) 2016 The Android Open Source Project
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package com.android.tools.build.apkzlib.sign;
|
||||||
|
|
||||||
|
|
||||||
|
/** Message digest algorithms. */
public enum DigestAlgorithm {
  /**
   * SHA-1 digest.
   *
   * <p>Android 2.3 (API Level 9) through 4.2 (API Level 17), inclusive, cannot verify SHA-2 JAR
   * signatures.
   *
   * <p>Additionally, platforms before API Level 18 only understand the SHA or SHA1 algorithm names
   * in .SF and MANIFEST.MF attributes unless the extra Digest-Algorithms attribute is present.
   */
  SHA1("SHA1", "SHA-1"),

  /** SHA-256 digest. */
  SHA256("SHA-256", "SHA-256");

  /**
   * First API level where {@link #SHA256} works with {@link SignatureAlgorithm#RSA} and {@link
   * SignatureAlgorithm#ECDSA}.
   */
  public static final int API_SHA_256_RSA_AND_ECDSA = 18;

  /**
   * First API level where {@link #SHA256} works with every {@link SignatureAlgorithm}.
   *
   * <p>Before that, SHA256 can only be used with RSA and ECDSA.
   */
  public static final int API_SHA_256_ALL_ALGORITHMS = 21;

  /** Name of the message digest algorithm (e.g. {@code "SHA-256"}). */
  public final String messageDigestName;

  /** Name of the signature-file attribute carrying the whole-manifest digest. */
  public final String manifestAttributeName;

  /** Name of the per-entry attribute (manifest and signature file) carrying the entry's digest. */
  public final String entryAttributeName;

  /**
   * Creates a digest algorithm.
   *
   * @param attributeName base attribute name used in the signature file
   * @param messageDigestName name of the message digest algorithm
   */
  DigestAlgorithm(String attributeName, String messageDigestName) {
    this.entryAttributeName = attributeName + "-Digest";
    this.manifestAttributeName = attributeName + "-Digest-Manifest";
    this.messageDigestName = messageDigestName;
  }
}
|
||||||
|
|
@ -0,0 +1,222 @@
|
||||||
|
/*
|
||||||
|
* Copyright (C) 2016 The Android Open Source Project
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package com.android.tools.build.apkzlib.sign;
|
||||||
|
|
||||||
|
import com.android.tools.build.apkzlib.utils.CachedSupplier;
|
||||||
|
import com.android.tools.build.apkzlib.utils.IOExceptionRunnable;
|
||||||
|
import com.android.tools.build.apkzlib.utils.IOExceptionWrapper;
|
||||||
|
import com.android.tools.build.apkzlib.zfile.ManifestAttributes;
|
||||||
|
import com.android.tools.build.apkzlib.zip.StoredEntry;
|
||||||
|
import com.android.tools.build.apkzlib.zip.ZFile;
|
||||||
|
import com.android.tools.build.apkzlib.zip.ZFileExtension;
|
||||||
|
import com.google.common.base.Preconditions;
|
||||||
|
import com.google.common.base.Verify;
|
||||||
|
import java.io.ByteArrayInputStream;
|
||||||
|
import java.io.ByteArrayOutputStream;
|
||||||
|
import java.io.IOException;
|
||||||
|
import java.util.jar.Attributes;
|
||||||
|
import java.util.jar.Manifest;
|
||||||
|
import javax.annotation.Nullable;
|
||||||
|
|
||||||
|
/**
 * Extension to {@link ZFile} that will generate a manifest. The extension will register
 * automatically with the {@link ZFile}.
 *
 * <p>Creating this extension will ensure a manifest for the zip exists. This extension will
 * generate a manifest if one does not exist and will update an existing manifest, if one does
 * exist. The extension will also provide access to the manifest so that others may update the
 * manifest.
 *
 * <p>Apart from standard manifest elements, this extension does not handle any particular manifest
 * features such as signing or adding custom attributes. It simply generates a plain manifest and
 * provides infrastructure so that other extensions can add data in the manifest.
 *
 * <p>The manifest itself will only be written when the {@link ZFileExtension#beforeUpdate()}
 * notification is received, meaning all manifest manipulation is done in-memory.
 */
public class ManifestGenerationExtension {

  /** Name of META-INF directory. */
  private static final String META_INF_DIR = "META-INF";

  /** Name of the manifest file. */
  static final String MANIFEST_NAME = META_INF_DIR + "/MANIFEST.MF";

  /** Who should be reported as the manifest builder (Built-By attribute). */
  private final String builtBy;

  /** Who should be reported as the manifest creator (Created-By attribute). */
  private final String createdBy;

  /** The file this extension is attached to. {@code null} if not yet registered. */
  @Nullable private ZFile zFile;

  /** The zip file's manifest. */
  private final Manifest manifest;

  /**
   * Byte representation of the manifest. There is no guarantee that two writes of the java's {@code
   * Manifest} object will yield the same byte array (there is no guaranteed order of entries in the
   * manifest).
   *
   * <p>Because we need the byte representation of the manifest to be stable if there are no changes
   * to the manifest, we cannot rely on {@code Manifest} to generate the byte representation every
   * time we need the byte representation.
   *
   * <p>This cache will ensure that we will request one byte generation from the {@code Manifest}
   * and will cache it. All further requests of the manifest's byte representation will receive the
   * same byte array.
   */
  private final CachedSupplier<byte[]> manifestBytes;

  /**
   * Has the current manifest been changed and not yet flushed? If {@link #dirty} is {@code true},
   * then {@link #manifestBytes} should not be valid. This means that marking the manifest as dirty
   * should also invalidate {@link #manifestBytes}. To avoid breaking the invariant, instead of
   * setting {@link #dirty}, {@link #markDirty()} should be called.
   */
  private boolean dirty;

  /** The extension to register with the {@link ZFile}. {@code null} if not registered. */
  @Nullable private ZFileExtension extension;

  /**
   * Creates a new extension. This will not register the extension with the provided {@link ZFile}.
   * Until {@link #register(ZFile)} is invoked, this extension is not used.
   *
   * @param builtBy who built the manifest?
   * @param createdBy who created the manifest?
   */
  public ManifestGenerationExtension(String builtBy, String createdBy) {
    this.builtBy = builtBy;
    this.createdBy = createdBy;
    manifest = new Manifest();
    dirty = false;
    // Serialize the manifest lazily, exactly once per change, and cache the resulting bytes so
    // repeated reads are byte-for-byte stable (see manifestBytes field doc).
    manifestBytes =
        new CachedSupplier<>(
            () -> {
              ByteArrayOutputStream outBytes = new ByteArrayOutputStream();
              try {
                manifest.write(outBytes);
              } catch (IOException e) {
                // The supplier cannot throw checked exceptions; wrap and rethrow unchecked.
                throw new IOExceptionWrapper(e);
              }

              return outBytes.toByteArray();
            });
  }

  /**
   * Marks the manifest as being dirty, <i>i.e.</i>, its data has changed since it was last read
   * and/or written. Also invalidates the cached byte representation to preserve the invariant
   * documented on {@link #dirty}.
   */
  private void markDirty() {
    dirty = true;
    manifestBytes.reset();
  }

  /**
   * Registers the extension with the {@link ZFile} provided in the constructor.
   *
   * @param zFile the zip file to add the extension to
   * @throws IOException failed to analyze the zip
   */
  public void register(ZFile zFile) throws IOException {
    Preconditions.checkState(extension == null, "register() has already been invoked.");
    this.zFile = zFile;

    // Load or create the in-memory manifest immediately; writing is deferred to beforeUpdate().
    rebuildManifest();

    extension =
        new ZFileExtension() {
          @Nullable
          @Override
          public IOExceptionRunnable beforeUpdate() {
            return ManifestGenerationExtension.this::updateManifest;
          }
        };

    this.zFile.addZFileExtension(extension);
  }

  /** Rebuilds the zip file's manifest, if it needs changes. */
  private void rebuildManifest() throws IOException {
    Verify.verifyNotNull(zFile, "zFile == null");

    StoredEntry manifestEntry = zFile.get(MANIFEST_NAME);

    if (manifestEntry != null) {
      /*
       * Read the manifest entry in the zip file. Make sure we store these byte sequence
       * because writing the manifest may not generate the same byte sequence, which may
       * trigger an unnecessary re-sign of the jar.
       */
      manifest.clear();
      byte[] manifestBytes = manifestEntry.read();
      manifest.read(new ByteArrayInputStream(manifestBytes));
      this.manifestBytes.precomputed(manifestBytes);
    }

    Attributes mainAttributes = manifest.getMainAttributes();
    String currentVersion = mainAttributes.getValue(ManifestAttributes.MANIFEST_VERSION);
    if (currentVersion == null) {
      setMainAttribute(
          ManifestAttributes.MANIFEST_VERSION, ManifestAttributes.CURRENT_MANIFEST_VERSION);
    } else {
      if (!currentVersion.equals(ManifestAttributes.CURRENT_MANIFEST_VERSION)) {
        throw new IOException("Unsupported manifest version: " + currentVersion + ".");
      }
    }

    /*
     * We "blindly" override all other main attributes.
     */
    setMainAttribute(ManifestAttributes.BUILT_BY, builtBy);
    setMainAttribute(ManifestAttributes.CREATED_BY, createdBy);
  }

  /**
   * Sets the value of a main attribute. Only marks the manifest dirty if the value actually
   * changes, so an unchanged manifest keeps its cached byte representation.
   *
   * @param attribute the attribute
   * @param value the value
   */
  private void setMainAttribute(String attribute, String value) {
    Attributes mainAttributes = manifest.getMainAttributes();
    String current = mainAttributes.getValue(attribute);
    if (!value.equals(current)) {
      mainAttributes.putValue(attribute, value);
      markDirty();
    }
  }

  /**
   * Updates the manifest in the zip file, if it has been changed.
   *
   * @throws IOException failed to update the manifest
   */
  private void updateManifest() throws IOException {
    Verify.verifyNotNull(zFile, "zFile == null");

    if (!dirty) {
      return;
    }

    zFile.add(MANIFEST_NAME, new ByteArrayInputStream(manifestBytes.get()));
    dirty = false;
  }
}
|
||||||
|
|
@ -0,0 +1,94 @@
|
||||||
|
/*
|
||||||
|
* Copyright (C) 2016 The Android Open Source Project
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package com.android.tools.build.apkzlib.sign;
|
||||||
|
|
||||||
|
import java.security.NoSuchAlgorithmException;
|
||||||
|
|
||||||
|
/** Signature algorithm. */
|
||||||
|
public enum SignatureAlgorithm {
|
||||||
|
/** RSA algorithm. */
|
||||||
|
RSA("RSA", 1, "withRSA"),
|
||||||
|
|
||||||
|
/** ECDSA algorithm. */
|
||||||
|
ECDSA("EC", 18, "withECDSA"),
|
||||||
|
|
||||||
|
/** DSA algorithm. */
|
||||||
|
DSA("DSA", 1, "withDSA");
|
||||||
|
|
||||||
|
/** Name of the private key as reported by {@code PrivateKey}. */
|
||||||
|
public final String keyAlgorithm;
|
||||||
|
|
||||||
|
/** Minimum SDK version that allows this signature. */
|
||||||
|
public final int minSdkVersion;
|
||||||
|
|
||||||
|
/** Suffix appended to digest algorithm to obtain signature algorithm. */
|
||||||
|
public final String signatureAlgorithmSuffix;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Creates a new signature algorithm.
|
||||||
|
*
|
||||||
|
* @param keyAlgorithm the name as reported by {@code PrivateKey}
|
||||||
|
* @param minSdkVersion minimum SDK version that allows this signature
|
||||||
|
* @param signatureAlgorithmSuffix suffix for signature name with used with a digest
|
||||||
|
*/
|
||||||
|
SignatureAlgorithm(String keyAlgorithm, int minSdkVersion, String signatureAlgorithmSuffix) {
|
||||||
|
this.keyAlgorithm = keyAlgorithm;
|
||||||
|
this.minSdkVersion = minSdkVersion;
|
||||||
|
this.signatureAlgorithmSuffix = signatureAlgorithmSuffix;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Obtains the signature algorithm that corresponds to a private key name applicable to a SDK
|
||||||
|
* version.
|
||||||
|
*
|
||||||
|
* @param keyAlgorithm the named referred in the {@code PrivateKey}
|
||||||
|
* @param minSdkVersion minimum SDK version to run
|
||||||
|
* @return the algorithm that has {@link #keyAlgorithm} equal to {@code keyAlgorithm}
|
||||||
|
* @throws NoSuchAlgorithmException if no algorithm was found for the given private key; an
|
||||||
|
* algorithm was found but is not applicable to the given SDK version
|
||||||
|
*/
|
||||||
|
public static SignatureAlgorithm fromKeyAlgorithm(String keyAlgorithm, int minSdkVersion)
|
||||||
|
throws NoSuchAlgorithmException {
|
||||||
|
for (SignatureAlgorithm alg : values()) {
|
||||||
|
if (alg.keyAlgorithm.equalsIgnoreCase(keyAlgorithm)) {
|
||||||
|
if (alg.minSdkVersion > minSdkVersion) {
|
||||||
|
throw new NoSuchAlgorithmException(
|
||||||
|
"Signatures with "
|
||||||
|
+ keyAlgorithm
|
||||||
|
+ " keys are not supported on minSdkVersion "
|
||||||
|
+ minSdkVersion
|
||||||
|
+ ". They are supported only for minSdkVersion >= "
|
||||||
|
+ alg.minSdkVersion);
|
||||||
|
}
|
||||||
|
|
||||||
|
return alg;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
throw new NoSuchAlgorithmException("Signing with " + keyAlgorithm + " keys is not supported");
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Obtains the name of the signature algorithm when used with a digest algorithm.
|
||||||
|
*
|
||||||
|
* @param digestAlgorithm the digest algorithm to use
|
||||||
|
* @return the name of the signature algorithm
|
||||||
|
*/
|
||||||
|
public String signatureAlgorithmName(DigestAlgorithm digestAlgorithm) {
|
||||||
|
return digestAlgorithm.messageDigestName.replace("-", "") + signatureAlgorithmSuffix;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
@ -0,0 +1,437 @@
|
||||||
|
/*
|
||||||
|
* Copyright (C) 2016 The Android Open Source Project
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
package com.android.tools.build.apkzlib.sign;
|
||||||
|
|
||||||
|
import com.android.apksig.ApkSignerEngine;
|
||||||
|
import com.android.apksig.ApkVerifier;
|
||||||
|
import com.android.apksig.DefaultApkSignerEngine;
|
||||||
|
import com.android.apksig.apk.ApkFormatException;
|
||||||
|
import com.android.apksig.internal.apk.ApkSigningBlockUtils;
|
||||||
|
import com.android.apksig.util.DataSink;
|
||||||
|
import com.android.apksig.util.DataSource;
|
||||||
|
import com.android.apksig.util.DataSources;
|
||||||
|
import com.android.tools.build.apkzlib.utils.IOExceptionRunnable;
|
||||||
|
import com.android.tools.build.apkzlib.utils.SigningBlockUtils;
|
||||||
|
import com.android.tools.build.apkzlib.zip.StoredEntry;
|
||||||
|
import com.android.tools.build.apkzlib.zip.ZFile;
|
||||||
|
import com.android.tools.build.apkzlib.zip.ZFileExtension;
|
||||||
|
import com.google.common.base.Preconditions;
|
||||||
|
import com.google.common.base.Supplier;
|
||||||
|
import com.google.common.base.Suppliers;
|
||||||
|
import com.google.common.collect.ImmutableList;
|
||||||
|
import com.google.common.collect.ImmutableSet;
|
||||||
|
import com.google.common.collect.Iterables;
|
||||||
|
import com.google.common.primitives.Bytes;
|
||||||
|
import java.io.BufferedInputStream;
|
||||||
|
import java.io.ByteArrayInputStream;
|
||||||
|
import java.io.IOException;
|
||||||
|
import java.io.InputStream;
|
||||||
|
import java.nio.ByteBuffer;
|
||||||
|
import java.security.InvalidKeyException;
|
||||||
|
import java.security.NoSuchAlgorithmException;
|
||||||
|
import java.security.SignatureException;
|
||||||
|
import java.security.cert.CertificateEncodingException;
|
||||||
|
import java.security.cert.X509Certificate;
|
||||||
|
import java.util.ArrayList;
|
||||||
|
import java.util.Arrays;
|
||||||
|
import java.util.HashSet;
|
||||||
|
import java.util.List;
|
||||||
|
import java.util.Set;
|
||||||
|
import javax.annotation.Nullable;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* {@link ZFile} extension which signs the APK.
|
||||||
|
*
|
||||||
|
* <p>This extension is capable of signing the APK using JAR signing (aka v1 scheme) and APK
|
||||||
|
* Signature Scheme v2 (aka v2 scheme). Which schemes are actually used is specified by parameters
|
||||||
|
* to this extension's constructor.
|
||||||
|
*/
|
||||||
|
public class SigningExtension {
|
||||||
|
private static final int MAX_READ_CHUNK_SIZE = 65536;
|
||||||
|
|
||||||
|
// IMPLEMENTATION NOTE: Most of the heavy lifting is performed by the ApkSignerEngine primitive
|
||||||
|
// from apksig library. This class is an adapter between ZFile extension and ApkSignerEngine.
|
||||||
|
// This class takes care of invoking the right methods on ApkSignerEngine in response to ZFile
|
||||||
|
// extension events/callbacks.
|
||||||
|
//
|
||||||
|
// The main issue leading to additional complexity in this class is that the current build
|
||||||
|
// pipeline does not reuse ApkSignerEngine instances (or ZFile extension instances for that
|
||||||
|
// matter) for incremental builds. Thus:
|
||||||
|
// * ZFile extension receives no events for JAR entries already in the APK whereas
|
||||||
|
// ApkSignerEngine needs to know about all JAR entries to be covered by signature. Thus, this
|
||||||
|
// class, during "beforeUpdate" ZFile event, notifies ApkSignerEngine about JAR entries
|
||||||
|
// already in the APK which ApkSignerEngine hasn't yet been told about -- these are the JAR
|
||||||
|
// entries which the incremental build session did not touch.
|
||||||
|
// * The build pipeline expects the APK not to change if no JAR entry was added to it or removed
|
||||||
|
// from it whereas ApkSignerEngine produces no output only if it has already produced a signed
|
||||||
|
// APK and no changes have since been made to it. This class addresses this issue by checking
|
||||||
|
// in its "register" method whether the APK is correctly signed and, only if that's the case,
|
||||||
|
// doesn't modify the APK unless a JAR entry is added to it or removed from it after
|
||||||
|
// "register".
|
||||||
|
|
||||||
|
/** APK signer which performs most of the heavy lifting. */
|
||||||
|
private final ApkSignerEngine signer;
|
||||||
|
|
||||||
|
/** Names of APK entries which have been processed by {@link #signer}. */
|
||||||
|
private final Set<String> signerProcessedOutputEntryNames = new HashSet<>();
|
||||||
|
|
||||||
|
/** Signing block Id for SDK dependency block. */
|
||||||
|
static final int DEPENDENCY_INFO_BLOCK_ID = 0x504b4453;
|
||||||
|
|
||||||
|
/** SDK dependencies of the APK */
|
||||||
|
@Nullable private byte[] sdkDependencyData;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Cached contents of the most recently output APK Signing Block or {@code null} if the block
|
||||||
|
* hasn't yet been output.
|
||||||
|
*/
|
||||||
|
@Nullable private byte[] cachedApkSigningBlock;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* {@code true} if signatures may need to be output, {@code false} if there's no need to output
|
||||||
|
* signatures. This is used in an optimization where we don't modify the APK if it's already
|
||||||
|
* signed and if no JAR entries have been added to or removed from the file.
|
||||||
|
*/
|
||||||
|
private boolean dirty;
|
||||||
|
|
||||||
|
/** The extension registered with the {@link ZFile}. {@code null} if not registered. */
|
||||||
|
@Nullable private ZFileExtension extension;
|
||||||
|
|
||||||
|
/** The file this extension is attached to. {@code null} if not yet registered. */
|
||||||
|
@Nullable private ZFile zFile;
|
||||||
|
|
||||||
|
/** A buffer used to read data from entries to feed to digests */
|
||||||
|
private final Supplier<byte[]> digestBuffer =
|
||||||
|
Suppliers.memoize(() -> new byte[MAX_READ_CHUNK_SIZE]);
|
||||||
|
|
||||||
|
/** An object that has all necessary information to sign the zip file and verify its signature */
|
||||||
|
private final SigningOptions options;
|
||||||
|
|
||||||
|
/**
 * Creates the signing extension configured by {@code opts}. The extension does nothing until
 * {@link #register(ZFile)} is invoked.
 *
 * <p>Configures the underlying {@link DefaultApkSignerEngine} with the key, certificates, minimum
 * SDK version and enabled v1/v2 schemes from {@code opts}; v3 signing is always disabled here.
 *
 * @param opts signing options; optional SDK dependency data and executor are forwarded if present
 * @throws InvalidKeyException if the engine cannot be built with the provided signing key
 */
public SigningExtension(SigningOptions opts) throws InvalidKeyException {
  DefaultApkSignerEngine.SignerConfig signerConfig =
      new DefaultApkSignerEngine.SignerConfig.Builder(
              "CERT", opts.getKey(), opts.getCertificates())
          .build();
  signer =
      new DefaultApkSignerEngine.Builder(ImmutableList.of(signerConfig), opts.getMinSdkVersion())
          .setOtherSignersSignaturesPreserved(false)
          .setV1SigningEnabled(opts.isV1SigningEnabled())
          .setV2SigningEnabled(opts.isV2SigningEnabled())
          .setV3SigningEnabled(false)
          .setCreatedBy("1.0 (Android)")
          .build();
  if (opts.getSdkDependencyData() != null) {
    sdkDependencyData = opts.getSdkDependencyData();
  }
  if (opts.getExecutor() != null) {
    signer.setExecutor(opts.getExecutor());
  }
  this.options = opts;
}
|
||||||
|
|
||||||
|
/**
 * Registers the extension with the given {@link ZFile} and, based on the configured validation
 * mode, decides whether the APK needs to be re-signed ({@link #dirty}).
 *
 * @param zFile the zip file to attach to
 * @throws NoSuchAlgorithmException signature verification needed an unavailable algorithm
 * @throws IOException failed to read the zip
 */
public void register(ZFile zFile) throws NoSuchAlgorithmException, IOException {
  Preconditions.checkState(extension == null, "register() already invoked");
  this.zFile = zFile;
  switch (options.getValidation()) {
    case ALWAYS_VALIDATE:
      // Verify the existing signature now; re-sign only if it doesn't match what was requested.
      dirty = !isCurrentSignatureAsRequested();
      break;
    case ASSUME_VALID:
      if (options.isV1SigningEnabled()) {
        // Seed the signer engine with the existing manifest and the full entry list so it knows
        // about entries this (incremental) build session does not touch.
        Set<String> entryNames =
            ImmutableSet.copyOf(
                Iterables.transform(
                    zFile.entries(), e -> e.getCentralDirectoryHeader().getName()));
        StoredEntry manifestEntry = zFile.get(ManifestGenerationExtension.MANIFEST_NAME);

        Preconditions.checkNotNull(
            manifestEntry,
            "No manifest found in apk for incremental build with enabled v1 signature");
        signerProcessedOutputEntryNames.addAll(
            this.signer.initWith(manifestEntry.read(), entryNames));
      }

      dirty = false;
      break;
    case ASSUME_INVALID:
      dirty = true;
      break;
  }
  extension =
      new ZFileExtension() {
        @Override
        public IOExceptionRunnable added(StoredEntry entry, @Nullable StoredEntry replaced) {
          return () -> onZipEntryOutput(entry);
        }

        @Override
        public IOExceptionRunnable removed(StoredEntry entry) {
          // Read the name eagerly; the runnable executes later.
          String entryName = entry.getCentralDirectoryHeader().getName();
          return () -> onZipEntryRemovedFromOutput(entryName);
        }

        @Override
        public IOExceptionRunnable beforeUpdate() throws IOException {
          return () -> onOutputZipReadyForUpdate();
        }

        @Override
        public void entriesWritten() throws IOException {
          onOutputZipEntriesWritten();
        }

        @Override
        public void closed() {
          onOutputClosed();
        }
      };
  this.zFile.addZFileExtension(extension);
}
|
||||||
|
|
||||||
|
/**
 * Returns {@code true} if the APK's signatures are as requested by parameters to this signing
 * extension: it verifies, uses exactly the requested v1/v2 schemes, and is signed by exactly the
 * configured certificate.
 *
 * @throws IOException failed to read the APK
 * @throws NoSuchAlgorithmException verification needed an unavailable algorithm
 */
private boolean isCurrentSignatureAsRequested() throws IOException, NoSuchAlgorithmException {
  ApkVerifier.Result result;
  try {
    result =
        new ApkVerifier.Builder(zFile.asDataSource())
            .setMinCheckedPlatformVersion(options.getMinSdkVersion())
            .build()
            .verify();
  } catch (ApkFormatException e) {
    // Malformed APK -- treat as "not signed as requested" rather than failing.
    return false;
  }

  if (!result.isVerified()) {
    // Signature(s) did not verify
    return false;
  }

  if ((result.isVerifiedUsingV1Scheme() != options.isV1SigningEnabled())
      || (result.isVerifiedUsingV2Scheme() != options.isV2SigningEnabled())) {
    // APK isn't signed with exactly the schemes we want it to be signed
    return false;
  }

  List<X509Certificate> verifiedSignerCerts = result.getSignerCertificates();
  if (verifiedSignerCerts.size() != 1) {
    // APK is not signed by exactly one signer
    return false;
  }

  byte[] expectedEncodedCert;
  byte[] actualEncodedCert;
  try {
    // Compare certificates by encoded form, not object equality.
    expectedEncodedCert = options.getCertificates().get(0).getEncoded();
    actualEncodedCert = verifiedSignerCerts.get(0).getEncoded();
  } catch (CertificateEncodingException e) {
    // Failed to encode signing certificates
    return false;
  }

  if (!Arrays.equals(expectedEncodedCert, actualEncodedCert)) {
    // APK is signed by a wrong signer
    return false;
  }

  // APK is signed the way we want it to be signed
  return true;
}
|
||||||
|
|
||||||
|
/**
 * Reacts to an entry being written to the output zip: marks the APK dirty, notifies the signer
 * engine, and feeds the engine the entry's contents if it asks to inspect them.
 *
 * @param entry the entry that was output
 * @throws IOException failed to read the entry's contents
 */
private void onZipEntryOutput(StoredEntry entry) throws IOException {
  // Mark dirty before the isDeleted check: the output event itself invalidates the signature.
  setDirty();
  String entryName = entry.getCentralDirectoryHeader().getName();
  // This event may arrive after the entry has already been deleted. In that case, we don't
  // report the addition of the entry to ApkSignerEngine.
  if (entry.isDeleted()) {
    return;
  }
  ApkSignerEngine.InspectJarEntryRequest inspectEntryRequest = signer.outputJarEntry(entryName);
  signerProcessedOutputEntryNames.add(entryName);
  if (inspectEntryRequest != null) {
    // The engine asked to inspect this entry's contents; stream them into its sink.
    try (InputStream inputStream = new BufferedInputStream(entry.open())) {
      copyStreamToDataSink(inputStream, inspectEntryRequest.getDataSink());
    }
    inspectEntryRequest.done();
  }
}
|
||||||
|
|
||||||
|
private void copyStreamToDataSink(InputStream inputStream, DataSink dataSink) throws IOException {
|
||||||
|
int bytesRead;
|
||||||
|
byte[] buffer = digestBuffer.get();
|
||||||
|
while ((bytesRead = inputStream.read(buffer)) > 0) {
|
||||||
|
dataSink.consume(buffer, 0, bytesRead);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
 * Invoked when a zip entry has been removed from the output. Marks the archive as dirty and
 * tells the signer engine the entry is gone so its digest is dropped from the signature.
 *
 * @param entryName the name of the removed entry
 */
private void onZipEntryRemovedFromOutput(String entryName) {
  setDirty();
  signer.outputJarEntryRemoved(entryName);
  signerProcessedOutputEntryNames.remove(entryName);
}
|
||||||
|
|
||||||
|
/**
 * Invoked before the zip's entries are flushed to disk. Reconciles the signer engine's view of
 * the archive with the archive's actual contents and, if the engine requests it, adds the JAR
 * entries that make up the v1 signature (MANIFEST.MF, *.SF, signature block files).
 *
 * <p>No-op when the archive has not changed since it was last signed ({@code dirty} is false).
 *
 * @throws IOException failed to update the archive or to generate the v1 signature
 */
private void onOutputZipReadyForUpdate() throws IOException {
  if (!dirty) {
    return;
  }

  // Notify signer engine about ZIP entries that have appeared in the output without the
  // engine knowing. Also identify ZIP entries which disappeared from the output without the
  // engine knowing.
  Set<String> unprocessedRemovedEntryNames = new HashSet<>(signerProcessedOutputEntryNames);
  for (StoredEntry entry : zFile.entries()) {
    String entryName = entry.getCentralDirectoryHeader().getName();
    unprocessedRemovedEntryNames.remove(entryName);
    if (!signerProcessedOutputEntryNames.contains(entryName)) {
      // Signer engine is not yet aware that this entry is in the output
      onZipEntryOutput(entry);
    }
  }

  // Notify signer engine about entries which disappeared from the output without the engine
  // knowing
  for (String entryName : unprocessedRemovedEntryNames) {
    onZipEntryRemovedFromOutput(entryName);
  }

  // Check whether we need to output additional JAR entries which comprise the v1 signature
  ApkSignerEngine.OutputJarSignatureRequest addV1SignatureRequest;
  try {
    addV1SignatureRequest = signer.outputJarEntries();
  } catch (Exception e) {
    throw new IOException("Failed to generate v1 signature", e);
  }
  if (addV1SignatureRequest == null) {
    // Engine says no v1 signature entries need to be (re)written.
    return;
  }

  // We need to output additional JAR entries which comprise the v1 signature
  List<ApkSignerEngine.OutputJarSignatureRequest.JarEntry> v1SignatureEntries =
      new ArrayList<>(addV1SignatureRequest.getAdditionalJarEntries());

  // Reorder the JAR entries comprising the v1 signature so that MANIFEST.MF is the first
  // entry. This ensures that it cleanly overwrites the existing MANIFEST.MF output by
  // ManifestGenerationExtension.
  for (int i = 0; i < v1SignatureEntries.size(); i++) {
    ApkSignerEngine.OutputJarSignatureRequest.JarEntry entry = v1SignatureEntries.get(i);
    String name = entry.getName();
    if (!ManifestGenerationExtension.MANIFEST_NAME.equals(name)) {
      continue;
    }
    if (i != 0) {
      // Move the manifest to the front; the rest of the list keeps its relative order.
      v1SignatureEntries.remove(i);
      v1SignatureEntries.add(0, entry);
    }
    break;
  }

  // Output the JAR entries comprising the v1 signature
  for (ApkSignerEngine.OutputJarSignatureRequest.JarEntry entry : v1SignatureEntries) {
    String name = entry.getName();
    byte[] data = entry.getData();
    zFile.add(name, new ByteArrayInputStream(data));
  }

  addV1SignatureRequest.done();
}
|
||||||
|
|
||||||
|
/**
 * Invoked after all zip entries have been written but before the central directory is. Builds
 * (or reuses a cached copy of) the APK Signing Block -- containing the v2 signature and,
 * optionally, the SDK dependency block -- and inserts it between the zip entries and the central
 * directory.
 *
 * <p>No-op when the archive has not changed since it was last signed ({@code dirty} is false).
 *
 * @throws IOException failed to read the archive or to generate the v2 signature
 */
private void onOutputZipEntriesWritten() throws IOException {
  if (!dirty) {
    return;
  }

  // Check whether we should output an APK Signing Block which contains v2 signatures
  byte[] apkSigningBlock;
  byte[] centralDirBytes = zFile.getCentralDirectoryBytes();
  byte[] eocdBytes = zFile.getEocdBytes();
  ApkSignerEngine.OutputApkSigningBlockRequest2 addV2SignatureRequest;
  // This event may arrive a second time -- after we write out the APK Signing Block. Thus, we
  // cache the block to speed things up. The cached block is invalidated by any changes to the
  // file (as reported to this extension).
  if (cachedApkSigningBlock != null) {
    apkSigningBlock = cachedApkSigningBlock;
    addV2SignatureRequest = null;
  } else {
    DataSource centralDir = DataSources.asDataSource(ByteBuffer.wrap(centralDirBytes));
    DataSource eocd = DataSources.asDataSource(ByteBuffer.wrap(eocdBytes));
    // Entries span from the start of the file up to the central directory, minus any extra
    // directory offset (the space reserved for a previously written signing block).
    long zipEntriesSizeBytes =
        zFile.getCentralDirectoryOffset() - zFile.getExtraDirectoryOffset();
    DataSource zipEntries = zFile.asDataSource(0, zipEntriesSizeBytes);
    try {
      addV2SignatureRequest = signer.outputZipSections2(zipEntries, centralDir, eocd);
    } catch (NoSuchAlgorithmException
        | InvalidKeyException
        | SignatureException
        | ApkFormatException
        | IOException e) {
      throw new IOException("Failed to generate v2 signature", e);
    }

    if (addV2SignatureRequest != null) {
      apkSigningBlock = addV2SignatureRequest.getApkSigningBlock();
      if (sdkDependencyData != null) {
        apkSigningBlock =
            SigningBlockUtils.addToSigningBlock(
                apkSigningBlock, sdkDependencyData, DEPENDENCY_INFO_BLOCK_ID);
      }
      // Prepend the padding the engine requires before the signing block.
      apkSigningBlock =
          Bytes.concat(
              new byte[addV2SignatureRequest.getPaddingSizeBeforeApkSigningBlock()],
              apkSigningBlock);
    } else {
      // No v2 signature requested; a signing block is still needed if we must carry the SDK
      // dependency data.
      apkSigningBlock = new byte[0];
      if (sdkDependencyData != null) {
        apkSigningBlock =
            SigningBlockUtils.addToSigningBlock(
                apkSigningBlock, sdkDependencyData, DEPENDENCY_INFO_BLOCK_ID);
        int paddingSize =
            ApkSigningBlockUtils.generateApkSigningBlockPadding(
                    zipEntries, /* apkSigningBlockPaddingSupported */ true)
                .getSecond();
        apkSigningBlock = Bytes.concat(new byte[paddingSize], apkSigningBlock);
      }
    }
    cachedApkSigningBlock = apkSigningBlock;
  }

  // Insert the APK Signing Block into the output right before the ZIP Central Directory and
  // accordingly update the start offset of ZIP Central Directory in ZIP End of Central
  // Directory.
  zFile.directWrite(
      zFile.getCentralDirectoryOffset() - zFile.getExtraDirectoryOffset(), apkSigningBlock);
  zFile.setExtraDirectoryOffset(apkSigningBlock.length);

  if (addV2SignatureRequest != null) {
    addV2SignatureRequest.done();
  }
}
|
||||||
|
|
||||||
|
/**
 * Invoked after the output zip has been fully written. Notifies the signer engine that the
 * output is complete and clears the dirty flag so a subsequent unchanged flush is a no-op.
 */
private void onOutputClosed() {
  if (!dirty) {
    return;
  }
  signer.outputDone();
  dirty = false;
}
|
||||||
|
|
||||||
|
/**
 * Marks the archive as changed since it was last signed and invalidates the cached APK Signing
 * Block so it is recomputed on the next update.
 */
private void setDirty() {
  dirty = true;
  cachedApkSigningBlock = null;
}
|
||||||
|
}
|
||||||
|
|
@ -0,0 +1,102 @@
|
||||||
|
/*
|
||||||
|
* Copyright (C) 2018 The Android Open Source Project
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package com.android.tools.build.apkzlib.sign;
|
||||||
|
|
||||||
|
import com.android.apksig.util.RunnablesExecutor;
|
||||||
|
import com.google.auto.value.AutoValue;
|
||||||
|
import com.google.common.base.Preconditions;
|
||||||
|
import com.google.common.collect.ImmutableList;
|
||||||
|
import java.security.PrivateKey;
|
||||||
|
import java.security.cert.X509Certificate;
|
||||||
|
import javax.annotation.Nonnull;
|
||||||
|
import javax.annotation.Nullable;
|
||||||
|
|
||||||
|
/** A class that contains data to initialize SigningExtension. */
@AutoValue
public abstract class SigningOptions {

  /** An implementation of builder pattern to create a {@link SigningOptions} object. */
  @AutoValue.Builder
  public abstract static class Builder {
    public abstract Builder setKey(@Nonnull PrivateKey key);

    public abstract Builder setCertificates(@Nonnull ImmutableList<X509Certificate> certs);

    public abstract Builder setCertificates(X509Certificate... certs);

    public abstract Builder setV1SigningEnabled(boolean enabled);

    public abstract Builder setV2SigningEnabled(boolean enabled);

    public abstract Builder setMinSdkVersion(int version);

    public abstract Builder setValidation(@Nonnull Validation validation);

    public abstract Builder setExecutor(@Nullable RunnablesExecutor executor);

    public abstract Builder setSdkDependencyData(@Nullable byte[] sdkDependencyData);

    // AutoValue-generated build step; build() below wraps it with validation.
    abstract SigningOptions autoBuild();

    /**
     * Builds the {@link SigningOptions}, checking that the minimum SDK version is non-negative
     * and that at least one certificate was provided.
     *
     * @throws IllegalArgumentException if either precondition fails
     */
    public SigningOptions build() {
      SigningOptions options = autoBuild();
      Preconditions.checkArgument(options.getMinSdkVersion() >= 0, "minSdkVersion < 0");
      Preconditions.checkArgument(
          !options.getCertificates().isEmpty(),
          "There should be at least one certificate in SigningOptions");
      return options;
    }
  }

  /**
   * Returns a new builder with defaults: v1 and v2 signing disabled and validation set to
   * {@link Validation#ALWAYS_VALIDATE}.
   */
  public static Builder builder() {
    return new AutoValue_SigningOptions.Builder()
        .setV1SigningEnabled(false)
        .setV2SigningEnabled(false)
        .setValidation(Validation.ALWAYS_VALIDATE);
  }

  /** {@link PrivateKey} used to sign the archive. */
  public abstract PrivateKey getKey();

  /**
   * A list of the {@link X509Certificate}s to embed in the signed APKs. The first
   * element of the list must be the certificate associated with the private key.
   */
  public abstract ImmutableList<X509Certificate> getCertificates();

  /** Shows whether signing with JAR Signature Scheme (aka v1 signing) is enabled. */
  public abstract boolean isV1SigningEnabled();

  /** Shows whether signing with APK Signature Scheme v2 (aka v2 signing) is enabled. */
  public abstract boolean isV2SigningEnabled();

  /** Minimum SDK version supported. */
  public abstract int getMinSdkVersion();

  /** Strategy of package signature validation. */
  public abstract Validation getValidation();

  /** Optional executor for signing work; {@code null} means the signer picks its default. */
  @Nullable
  public abstract RunnablesExecutor getExecutor();

  /** SDK dependencies of the APK. */
  @Nullable
  public abstract byte[] getSdkDependencyData();

  /** Strategies for deciding whether an existing package signature can be trusted. */
  public enum Validation {
    /** Always perform signature validation */
    ALWAYS_VALIDATE,
    /**
     * Assume the signature is valid without validation i.e. don't resign if no files changed
     */
    ASSUME_VALID,
    /** Assume the signature is invalid without validation i.e. unconditionally resign */
    ASSUME_INVALID,
  }
}
|
||||||
|
|
@ -0,0 +1,157 @@
|
||||||
|
/*
|
||||||
|
* Copyright (C) 2016 The Android Open Source Project
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
/**
|
||||||
|
* The {@code sign} package provides extensions for the {@code zip} package that allow:
|
||||||
|
*
|
||||||
|
* <ul>
|
||||||
|
* <li>Adding a {@code MANIFEST.MF} file to a zip making a jar.
|
||||||
|
* <li>Signing a jar.
|
||||||
|
* <li>Fully signing a jar using v2 apk signature.
|
||||||
|
* </ul>
|
||||||
|
*
|
||||||
|
* <p>Because the {@code zip} package is completely independent of the {@code sign} package, the
|
||||||
|
* actual coordination between the two is complex. The {@code sign} package works by registering
|
||||||
|
* extensions with the {@code zip} package. These extensions are notified in changes made in the zip
|
||||||
|
* and will change the zip file itself.
|
||||||
|
*
|
||||||
|
* <p>The {@link com.android.tools.build.apkzlib.sign.ManifestGenerationExtension} extension will
* ensure the zip has a manifest file and is, therefore, a valid jar. The {@link
* com.android.tools.build.apkzlib.sign.SigningExtension} extension will ensure the jar is signed.
|
||||||
|
*
|
||||||
|
* <p>The extension mechanism used is the one provided in the {@code zip} package (see {@link
* com.android.tools.build.apkzlib.zip.ZFile} and {@link
* com.android.tools.build.apkzlib.zip.ZFileExtension}). Building the
|
||||||
|
* zip and then operating the extensions is not done sequentially, as we don't want to build a zip
|
||||||
|
* and then sign it. We want to build a zip that is automatically signed. Extension are basically
|
||||||
|
* observers that register on the zip and are notified when things happen in the zip. They will then
|
||||||
|
* modify the zip accordingly.
|
||||||
|
*
|
||||||
|
* <p>The zip file notifies extensions in 4 critical moments: when a file is added or removed from
|
||||||
|
* the zip, when the zip is about to be flushed to disk and when the zip's entries have been flushed
|
||||||
|
* but the central directory not. At these moments, the extensions can act to update the zip in any
|
||||||
|
* way they need.
|
||||||
|
*
|
||||||
|
* <p>To see how this works, consider the manifest generation extension: when the extension is
|
||||||
|
* created, it checks the zip file to see if there is a manifest. If a manifest exists and does not
|
||||||
|
* need updating, it does not change anything, otherwise it generates a new manifest for the zip
|
||||||
|
* file. At this point, the extension could write the manifest to the zip, but we opted not to. It
|
||||||
|
* would be irrelevant anyway as the zip will only be written when flushed.
|
||||||
|
*
|
||||||
|
* <p>Now, when the {@code ZFile} notifies the extension that it is about to start writing the zip
|
||||||
|
* file, the manifest extension, if it has noted that the manifest needs to be rewritten, will --
|
||||||
|
* before the {@code ZFile} actually writes anything -- modify the zip and add or replace the
|
||||||
|
* existing manifest file. So, process-wise, the zip is written only once with the correct manifest.
|
||||||
|
* The flow is as follows (if only the manifest generation extension was added to the {@code
|
||||||
|
* ZFile}):
|
||||||
|
*
|
||||||
|
* <ol>
|
||||||
|
* <li>{@code ZFile.update()} is called.
|
||||||
|
* <li>{@code ZFile} calls {@code beforeUpdate()} for all {@code ZFileExtensions} registered, in
|
||||||
|
* this case, only the instance of the anonymous inner class generated in the {@code
|
||||||
|
* ManifestGenerationExtension} constructor is invoked.
|
||||||
|
* <li>{@code ManifestGenerationExtension.updateManifest()} is called.
|
||||||
|
* <li>If the manifest does not need to be updated, {@code updateManifest()} returns immediately.
|
||||||
|
* <li>If the manifest needs updating, {@code ZFile.add()} is invoked to add or replace the
|
||||||
|
* manifest.
|
||||||
|
* <li>{@code ManifestGenerationExtension.updateManifest()} returns.
|
||||||
|
* <li>{@code ZFile.update()} continues and writes the zip file, containing the manifest.
|
||||||
|
* <li>The zip is finally written with an updated manifest.
|
||||||
|
* </ol>
|
||||||
|
*
|
||||||
|
* <p>To generate a signed apk, we need to add a second extension, the {@code SigningExtension}.
|
||||||
|
* This extension will also register listeners with the {@code ZFile}.
|
||||||
|
*
|
||||||
|
* <p>In this case the flow would be (starting a bit earlier for clarity and assuming a package task
|
||||||
|
* in the build process):
|
||||||
|
*
|
||||||
|
* <ol>
|
||||||
|
* <li>Package task creates a {@code ZFile} on the target apk (or non-existing file, if there is
|
||||||
|
* no target apk in the output directory).
|
||||||
|
* <li>Package task configures the {@code ZFile} with alignment rules.
|
||||||
|
* <li>Package task creates a {@code ManifestGenerationExtension}.
|
||||||
|
* <li>Package task registers the {@code ManifestGenerationExtension} with the {@code ZFile}.
|
||||||
|
* <li>The {@code ManifestGenerationExtension} looks at the {@code ZFile} to see if there is valid
|
||||||
|
* manifest. No changes are done to the {@code ZFile}.
|
||||||
|
* <li>Package task creates a {@code SigningExtension}.
|
||||||
|
* <li>Package task registers the {@code SigningExtension} with the {@code ZFile}.
|
||||||
|
* <li>The {@code SigningExtension} registers a {@code ZFileExtension} with the {@code ZFile} and
|
||||||
|
* look at the {@code ZFile} to see if there is a valid signature file.
|
||||||
|
* <li>If there are changes to the digital signature file needed, these are marked internally in
|
||||||
|
* the extension. If there are changes needed to the digests, the manifest is updated (by
|
||||||
|
* calling {@code ManifestGenerationExtension}.<br>
|
||||||
|
* <em>(note that this point, the apk file, if any existed, has not been touched, the manifest
|
||||||
|
* is only updated in memory and the digests of all files in the apk, if any, have been
|
||||||
|
* computed and stored in memory only; the digital signature of the {@code SF} file has not
|
||||||
|
* been computed.) </em>
|
||||||
|
* <li>The Package task now adds all files to the {@code ZFile}.
|
||||||
|
* <li>For each file that is added (*), {@code ZFile} calls the added {@code ZFileExtension.added}
|
||||||
|
* method of all registered extensions.
|
||||||
|
* <li>The {@code ManifestGenerationExtension} ignores added invocations.
|
||||||
|
* <li>The {@code SigningExtension} computes the digest for the added file and stores them in the
|
||||||
|
* manifest.<br>
|
||||||
|
* <em>(when all files are added to the apk, all digests are computed and the manifest is
|
||||||
|
* updated but only in memory; the apk file has not been touched; also note that {@code ZFile}
|
||||||
|
* has not actually written anything to disk at this point, all files added are kept in
|
||||||
|
* memory).</em>
|
||||||
|
* <li>Package task calls {@code ZFile.update()} to update the apk.
|
||||||
|
* <li>{@code ZFile} calls {@code before()} for all {@code ZFileExtensions} registered. This is
|
||||||
|
* done before anything is written. In this case both the {@code ManifestGenerationExtension}
|
||||||
|
* and {@code SigningExtension} are invoked.
|
||||||
|
* <li>The {@code ManifestGenerationExtension} will update the {@code ZFile} with the new
|
||||||
|
* manifest, unless nothing has changed, in which case it does nothing.
|
||||||
|
* <li>The {@code SigningExtension} will add the SF file (unless nothing has changed), will
|
||||||
|
* compute the digital signature of the SF file and write it to the {@code ZFile}.<br>
|
||||||
|
* <em>(note that the order by which the {@code ManifestGenerationExtension} and {@code
|
||||||
|
* SigningExtension} are called is non-deterministic; however, this is not a problem because
|
||||||
|
* the manifest is already computed by the {@code ManifestGenerationExtension} at this time
|
||||||
|
* and the {@code SigningExtension} will obtain the manifest data from the {@code
|
||||||
|
* ManifestGenerationExtension} and not from the {@code ZFile}; this means that the {@code SF}
|
||||||
|
* file may be added to the {@code ZFile} before the {@code MF} file, but that is
|
||||||
|
* irrelevant.)</em>
|
||||||
|
* <li>Once both extensions have finished doing the {@code beforeUpdate()} method, the {@code
|
||||||
|
* ZFile.update()} method continues.
|
||||||
|
* <li>{@code ZFile.update()} writes all changes and new entries to the zip file.
|
||||||
|
* <li>{@code ZFile.update()} calls {@code ZFileExtension.entriesWritten()} for all registered
|
||||||
|
* extensions. {@code SigningExtension} will kick in at this point, if v2 signature has
|
||||||
|
* changed.
|
||||||
|
* <li>{@code ZFile} writes the central directory and EOCD.
|
||||||
|
* <li>{@code ZFile.update()} returns control to the package task.
|
||||||
|
* <li>The package task finishes.
|
||||||
|
* </ol>
|
||||||
|
*
|
||||||
|
* <em>(*) There is a number of optimizations if we're adding files from another {@code ZFile},
|
||||||
|
* which is the case when we add the output of aapt to the apk. In particular, files from the aapt
|
||||||
|
* are ignored if they are already in the apk (same name, same CRC32) and also files copied from the
|
||||||
|
* aapt's output are not recompressed (the binary compressed data is directly copied to the
|
||||||
|
* zip).</em>
|
||||||
|
*
|
||||||
|
* <p>If there are no changes to the {@code ZFile} made by the package task and the file's manifest
|
||||||
|
* and v1 signatures are correct, neither the {@code ManifestGenerationExtension} nor the {@code
|
||||||
|
* SigningExtension} will do anything on the {@code beforeUpdate()} and the {@code ZFile} won't
|
||||||
|
* even be open for writing.
|
||||||
|
*
|
||||||
|
* <p>This implementation provides perfect incremental updates.
|
||||||
|
*
|
||||||
|
* <p>Additionally, by adding/removing extensions we can configure what type of apk we want:
|
||||||
|
*
|
||||||
|
* <ul>
|
||||||
|
* <li>No SigningExtension ⇒ Aligned, unsigned apk.
|
||||||
|
* <li>SigningExtension ⇒ Aligned, signed apk.
|
||||||
|
* </ul>
|
||||||
|
*
|
||||||
|
* So, by configuring which extensions to add, the package task can decide what type of apk we want.
|
||||||
|
*/
|
||||||
|
package com.android.tools.build.apkzlib.sign;
|
||||||
|
|
@ -0,0 +1,38 @@
|
||||||
|
/*
|
||||||
|
* Copyright (C) 2016 The Android Open Source Project
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package com.android.tools.build.apkzlib.utils;
|
||||||
|
|
||||||
|
/** Pair implementation to use with the {@code apkzlib} library. */
public class ApkZLibPair<T1, T2> {

  /** First value. */
  public T1 v1;

  /** Second value. */
  public T2 v2;

  /**
   * Creates a new pair holding the two given values.
   *
   * @param v1 the first value
   * @param v2 the second value
   */
  public ApkZLibPair(T1 v1, T2 v2) {
    this.v2 = v2;
    this.v1 = v1;
  }
}
|
||||||
|
|
@ -0,0 +1,160 @@
|
||||||
|
/*
|
||||||
|
* Copyright (C) 2016 The Android Open Source Project
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package com.android.tools.build.apkzlib.utils;
|
||||||
|
|
||||||
|
import com.google.common.base.Objects;
|
||||||
|
import com.google.common.hash.HashCode;
|
||||||
|
import com.google.common.hash.Hashing;
|
||||||
|
import com.google.common.io.Files;
|
||||||
|
import java.io.File;
|
||||||
|
import java.io.IOException;
|
||||||
|
import javax.annotation.Nullable;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* A cache for file contents. The cache allows closing a file and saving in memory its contents (or
|
||||||
|
* some related information). It can then be used to check if the contents are still valid at some
|
||||||
|
* later time. Typical usage flow is:
|
||||||
|
*
|
||||||
|
* <p>
|
||||||
|
*
|
||||||
|
* <pre>{@code
|
||||||
|
* Object fileRepresentation = // ...
|
||||||
|
* File toWrite = // ...
|
||||||
|
* // Write file contents and update in memory representation
|
||||||
|
* CachedFileContents<Object> contents = new CachedFileContents<Object>(toWrite);
|
||||||
|
* contents.closed(fileRepresentation);
|
||||||
|
*
|
||||||
|
* // Later, when data is needed:
|
||||||
|
* if (contents.isValid()) {
|
||||||
|
* fileRepresentation = contents.getCache();
|
||||||
|
* } else {
|
||||||
|
* // Re-read the file and recreate the file representation
|
||||||
|
* }
|
||||||
|
* }</pre>
|
||||||
|
*
|
||||||
|
* @param <T> the type of cached contents
|
||||||
|
*/
|
||||||
|
public class CachedFileContents<T> {
|
||||||
|
|
||||||
|
/** The file. */
|
||||||
|
private final File file;
|
||||||
|
|
||||||
|
/** Time when last closed (time when {@link #closed(Object)} was invoked). */
|
||||||
|
private long lastClosed;
|
||||||
|
|
||||||
|
/** Size of the file when last closed. */
|
||||||
|
private long size;
|
||||||
|
|
||||||
|
/** Hash of the file when closed. {@code null} if hashing failed for some reason. */
|
||||||
|
@Nullable private HashCode hash;
|
||||||
|
|
||||||
|
/** Cached data associated with the file. */
|
||||||
|
@Nullable private T cache;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Creates a new contents. When the file is written, {@link #closed(Object)} should be invoked to
|
||||||
|
* set the cache.
|
||||||
|
*
|
||||||
|
* @param file the file
|
||||||
|
*/
|
||||||
|
public CachedFileContents(File file) {
|
||||||
|
this.file = file;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Should be called when the file's contents are set and the file closed. This will save the cache
|
||||||
|
* and register the file's timestamp to later detect if it has been modified.
|
||||||
|
*
|
||||||
|
* <p>This method can be called as many times as the file has been written.
|
||||||
|
*
|
||||||
|
* @param cache an optional cache to save
|
||||||
|
*/
|
||||||
|
public void closed(@Nullable T cache) {
|
||||||
|
this.cache = cache;
|
||||||
|
lastClosed = file.lastModified();
|
||||||
|
size = file.length();
|
||||||
|
hash = hashFile();
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Are the cached contents still valid? If this method determines that the file has been modified
|
||||||
|
* since the last time {@link #closed(Object)} was invoked.
|
||||||
|
*
|
||||||
|
* @return are the cached contents still valid? If this method returns {@code false}, the cache is
|
||||||
|
* cleared
|
||||||
|
*/
|
||||||
|
public boolean isValid() {
|
||||||
|
boolean valid = true;
|
||||||
|
|
||||||
|
if (!file.exists()) {
|
||||||
|
valid = false;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (valid && file.lastModified() != lastClosed) {
|
||||||
|
valid = false;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (valid && file.length() != size) {
|
||||||
|
valid = false;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (valid && !Objects.equal(hash, hashFile())) {
|
||||||
|
valid = false;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (!valid) {
|
||||||
|
cache = null;
|
||||||
|
}
|
||||||
|
|
||||||
|
return valid;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Obtains the cached data set with {@link #closed(Object)} if the file has not been modified
|
||||||
|
* since {@link #closed(Object)} was invoked.
|
||||||
|
*
|
||||||
|
* @return the last cached data or {@code null} if the file has been modified since {@link
|
||||||
|
* #closed(Object)} has been invoked
|
||||||
|
*/
|
||||||
|
@Nullable
|
||||||
|
public T getCache() {
|
||||||
|
return cache;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Computes the hashcode of the cached file.
|
||||||
|
*
|
||||||
|
* @return the hash code
|
||||||
|
*/
|
||||||
|
@Nullable
|
||||||
|
private HashCode hashFile() {
|
||||||
|
try {
|
||||||
|
return Files.asByteSource(file).hash(Hashing.crc32());
|
||||||
|
} catch (IOException e) {
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Obtains the file used for caching.
|
||||||
|
*
|
||||||
|
* @return the file; this file always exists and contains the old (cached) contents of the file
|
||||||
|
*/
|
||||||
|
public File getFile() {
|
||||||
|
return file;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
@ -0,0 +1,109 @@
|
||||||
|
/*
|
||||||
|
* Copyright (C) 2016 The Android Open Source Project
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package com.android.tools.build.apkzlib.utils;
|
||||||
|
|
||||||
|
import com.google.common.base.Supplier;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Supplier that will cache a computed value and always supply the same value. It can be used to
|
||||||
|
* lazily compute data. For example:
|
||||||
|
*
|
||||||
|
* <pre>{@code
|
||||||
|
* CachedSupplier<Integer> value = new CachedSupplier<>(() -> {
|
||||||
|
* Integer result;
|
||||||
|
* // Do some expensive computation.
|
||||||
|
* return result;
|
||||||
|
* });
|
||||||
|
*
|
||||||
|
* if (a) {
|
||||||
|
* // We need the result of the expensive computation.
|
||||||
|
* Integer r = value.get();
|
||||||
|
* }
|
||||||
|
*
|
||||||
|
* if (b) {
|
||||||
|
* // We also need the result of the expensive computation.
|
||||||
|
* Integer r = value.get();
|
||||||
|
* }
|
||||||
|
*
|
||||||
|
* // If neither a nor b are true, we avoid doing the computation at all.
|
||||||
|
* }</pre>
|
||||||
|
*/
|
||||||
|
public class CachedSupplier<T> {
|
||||||
|
|
||||||
|
/**
|
||||||
|
* The cached data, {@code null} if computation resulted in {@code null}. It is also {@code null}
|
||||||
|
* if the cached data has not yet been computed.
|
||||||
|
*/
|
||||||
|
private T cached;
|
||||||
|
|
||||||
|
/** Is the current data in {@link #cached} valid? */
|
||||||
|
private boolean valid;
|
||||||
|
|
||||||
|
/** Actual supplier of data, if computation is needed. */
|
||||||
|
private final Supplier<T> supplier;
|
||||||
|
|
||||||
|
/** Creates a new supplier. */
|
||||||
|
public CachedSupplier(Supplier<T> supplier) {
|
||||||
|
valid = false;
|
||||||
|
this.supplier = supplier;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Obtains the value.
|
||||||
|
*
|
||||||
|
* @return the value, either cached (if one exists) or computed
|
||||||
|
*/
|
||||||
|
public synchronized T get() {
|
||||||
|
if (!valid) {
|
||||||
|
cached = supplier.get();
|
||||||
|
valid = true;
|
||||||
|
}
|
||||||
|
|
||||||
|
return cached;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Resets the cache forcing a {@code get()} on the supplier next time {@link #get()} is invoked.
|
||||||
|
*/
|
||||||
|
public synchronized void reset() {
|
||||||
|
cached = null;
|
||||||
|
valid = false;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* In some cases, we may be able to precompute the cache value (or load it from somewhere we had
|
||||||
|
* previously stored it). This method allows the cache value to be loaded.
|
||||||
|
*
|
||||||
|
* <p>If this method is invoked, then an invocation of {@link #get()} will not trigger an
|
||||||
|
* invocation of the supplier provided in the constructor.
|
||||||
|
*
|
||||||
|
* @param t the new cache contents; will replace any currently cache content, if one exists
|
||||||
|
*/
|
||||||
|
public synchronized void precomputed(T t) {
|
||||||
|
cached = t;
|
||||||
|
valid = true;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Checks if the contents of the cache are valid.
|
||||||
|
*
|
||||||
|
* @return are there valid contents in the cache?
|
||||||
|
*/
|
||||||
|
public synchronized boolean isValid() {
|
||||||
|
return valid;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
@ -0,0 +1,31 @@
|
||||||
|
/*
|
||||||
|
* Copyright (C) 2016 The Android Open Source Project
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package com.android.tools.build.apkzlib.utils;
|
||||||
|
|
||||||
|
import java.io.IOException;
|
||||||
|
import javax.annotation.Nullable;
|
||||||
|
|
||||||
|
/** Consumer that can throw an {@link IOException}. */
|
||||||
|
public interface IOExceptionConsumer<T> {
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Performs an operation on the given input.
|
||||||
|
*
|
||||||
|
* @param input the input
|
||||||
|
*/
|
||||||
|
void accept(@Nullable T input) throws IOException;
|
||||||
|
}
|
||||||
|
|
@ -0,0 +1,49 @@
|
||||||
|
/*
|
||||||
|
* Copyright (C) 2016 The Android Open Source Project
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package com.android.tools.build.apkzlib.utils;
|
||||||
|
|
||||||
|
import com.google.common.base.Function;
|
||||||
|
import java.io.IOException;
|
||||||
|
import javax.annotation.Nullable;
|
||||||
|
|
||||||
|
/** Function that can throw an I/O Exception */
|
||||||
|
public interface IOExceptionFunction<F, T> {
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Applies the function to the given input.
|
||||||
|
*
|
||||||
|
* @param input the input
|
||||||
|
* @return the function result
|
||||||
|
*/
|
||||||
|
@Nullable
|
||||||
|
T apply(@Nullable F input) throws IOException;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Wraps a function that may throw an IO Exception throwing an {@link IOExceptionWrapper}.
|
||||||
|
*
|
||||||
|
* @param f the function
|
||||||
|
*/
|
||||||
|
static <F, T> Function<F, T> asFunction(IOExceptionFunction<F, T> f) {
|
||||||
|
return i -> {
|
||||||
|
try {
|
||||||
|
return f.apply(i);
|
||||||
|
} catch (IOException e) {
|
||||||
|
throw new IOExceptionWrapper(e);
|
||||||
|
}
|
||||||
|
};
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
@ -0,0 +1,45 @@
|
||||||
|
/*
|
||||||
|
* Copyright (C) 2016 The Android Open Source Project
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package com.android.tools.build.apkzlib.utils;
|
||||||
|
|
||||||
|
import java.io.IOException;
|
||||||
|
|
||||||
|
/** Runnable that can throw I/O exceptions. */
|
||||||
|
public interface IOExceptionRunnable {
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Runs the runnable.
|
||||||
|
*
|
||||||
|
* @throws IOException failed to run
|
||||||
|
*/
|
||||||
|
void run() throws IOException;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Wraps a runnable that may throw an IO Exception throwing an {@code UncheckedIOException}.
|
||||||
|
*
|
||||||
|
* @param r the runnable
|
||||||
|
*/
|
||||||
|
static Runnable asRunnable(IOExceptionRunnable r) {
|
||||||
|
return () -> {
|
||||||
|
try {
|
||||||
|
r.run();
|
||||||
|
} catch (IOException e) {
|
||||||
|
throw new IOExceptionWrapper(e);
|
||||||
|
}
|
||||||
|
};
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
@ -0,0 +1,40 @@
|
||||||
|
/*
|
||||||
|
* Copyright (C) 2016 The Android Open Source Project
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package com.android.tools.build.apkzlib.utils;
|
||||||
|
|
||||||
|
import java.io.IOException;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Runtime exception used to encapsulate an IO Exception. This is used to allow throwing I/O
|
||||||
|
* exceptions in functional interfaces that do not allow it and catching the exception afterwards.
|
||||||
|
*/
|
||||||
|
public class IOExceptionWrapper extends RuntimeException {
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Creates a new exception.
|
||||||
|
*
|
||||||
|
* @param e the I/O exception to encapsulate
|
||||||
|
*/
|
||||||
|
public IOExceptionWrapper(IOException e) {
|
||||||
|
super(e);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public IOException getCause() {
|
||||||
|
return (IOException) super.getCause();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
@ -0,0 +1,185 @@
|
||||||
|
/*
|
||||||
|
* Copyright (C) 2019 The Android Open Source Project
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package com.android.tools.build.apkzlib.utils;
|
||||||
|
|
||||||
|
import static java.nio.ByteOrder.LITTLE_ENDIAN;
|
||||||
|
|
||||||
|
import com.android.apksig.apk.ApkSigningBlockNotFoundException;
|
||||||
|
import com.android.apksig.apk.ApkUtils;
|
||||||
|
import com.android.apksig.apk.ApkUtils.ApkSigningBlock;
|
||||||
|
import com.android.apksig.internal.apk.ApkSigningBlockUtils;
|
||||||
|
import com.android.apksig.internal.util.Pair;
|
||||||
|
import com.android.apksig.util.DataSource;
|
||||||
|
import com.android.apksig.util.DataSources;
|
||||||
|
import com.android.apksig.zip.ZipFormatException;
|
||||||
|
import com.google.common.collect.ImmutableList;
|
||||||
|
import com.google.common.primitives.Ints;
|
||||||
|
import java.io.File;
|
||||||
|
import java.io.IOException;
|
||||||
|
import java.io.RandomAccessFile;
|
||||||
|
import java.nio.ByteBuffer;
|
||||||
|
import java.nio.ByteOrder;
|
||||||
|
import javax.annotation.Nullable;
|
||||||
|
|
||||||
|
/** Generates and appends a new block to APK v2 Signature block. */
public final class SigningBlockUtils {

  /** Size, in bytes, of the magic value at the end of the APK signing block. */
  private static final int MAGIC_NUM_BYTES = 16;
  /** Size, in bytes, of the length prefix of each entry inside the signing block. */
  private static final int BLOCK_LENGTH_NUM_BYTES = 8;
  /** Size, in bytes, of the "size of block" fields at the start and end of the signing block. */
  static final int SIZE_OF_BLOCK_NUM_BYTES = 8;
  /** Size, in bytes, of a block entry's ID field. */
  static final int BLOCK_ID_NUM_BYTES = 4;

  static final int ANDROID_COMMON_PAGE_ALIGNMENT_NUM_BYTES = 4096;
  /** ID of the verity padding block ("wrev" in little-endian ASCII); skipped when copying entries. */
  static final int VERITY_PADDING_BLOCK_ID = 0x42726577;

  /**
   * Generates a new block with the given block value and block id, and appends it to the signing
   * block.
   *
   * @param signingBlock Block containing v2 signature and (optionally) padding block or null.
   * @param blockValue byte array containing block value of the new block or null.
   * @param blockId block id of the new block.
   * @return APK v2 block with signatures and the new block. If {@code blockValue} is null the
   *     {@code signingBlock} is returned without any modification. If {@code signingBlock} is null,
   *     a new signature block is created containing the new block and, optionally, padding block.
   * @throws IOException failed to read the existing signing block
   */
  public static byte[] addToSigningBlock(byte[] signingBlock, byte[] blockValue, int blockId)
      throws IOException {
    if (blockValue == null || blockValue.length == 0) {
      // Nothing to add; hand the input back unchanged (may be null).
      return signingBlock;
    }
    if (signingBlock == null || signingBlock.length == 0) {
      return createSigningBlock(blockValue, blockId);
    }
    return appendToSigningBlock(signingBlock, blockValue, blockId);
  }

  /**
   * Adds a new block to the signature block and a padding block, if required.
   *
   * @param signingBlock APK v2 signing block containing : length prefix, signers (can include
   *     padding block), length postfix and APK sig v2 block magic.
   * @param blockValue byte array containing block value of the new block.
   * @param blockId block id of the new block.
   * @return APK v2 signing block containing : length prefix, signers including the new block (may
   *     include padding block as well), length postfix and APK sig v2 block magic.
   * @throws IOException failed to read the existing signing block
   */
  private static byte[] appendToSigningBlock(byte[] signingBlock, byte[] blockValue, int blockId)
      throws IOException {
    // Re-serialize all existing entries (minus any padding block, which apksig regenerates as
    // needed) plus the new entry.
    ImmutableList<Pair<byte[], Integer>> entries =
        ImmutableList.<Pair<byte[], Integer>>builder()
            .addAll(extractAllSigners(DataSources.asDataSource(ByteBuffer.wrap(signingBlock))))
            .add(Pair.of(blockValue, blockId))
            .build();
    return ApkSigningBlockUtils.generateApkSigningBlock(entries);
  }

  /**
   * Generate APK sig v2 block containing a block composed of the provided block value and id, and
   * (optionally) padding block.
   */
  private static byte[] createSigningBlock(byte[] blockValue, int blockId) {
    return ApkSigningBlockUtils.generateApkSigningBlock(
        ImmutableList.of(Pair.of(blockValue, blockId)));
  }

  /**
   * Extracts all signing block entries except padding block.
   *
   * @param signingBlock APK v2 signing block containing: length prefix, signers (can include
   *     padding block), length postfix and APK sig v2 block magic.
   * @return list of block entry value and block entry id pairs.
   * @throws IOException failed to read from the data source
   */
  private static ImmutableList<Pair<byte[], Integer>> extractAllSigners(DataSource signingBlock)
      throws IOException {
    long wholeBlockSize = signingBlock.size();
    // Take the segment of the existing signing block without the length prefix (8 bytes)
    // at the beginning and the length and magic (24 bytes) at the end, so it is just the sequence
    // of length prefix id value pairs.
    DataSource lengthPrefixedIdValuePairsSource =
        signingBlock.slice(
            SIZE_OF_BLOCK_NUM_BYTES,
            wholeBlockSize - 2 * SIZE_OF_BLOCK_NUM_BYTES - MAGIC_NUM_BYTES);
    final int lengthAndIdByteCount = BLOCK_LENGTH_NUM_BYTES + BLOCK_ID_NUM_BYTES;
    // All multi-byte fields in the APK signing block are little-endian.
    ByteBuffer lengthAndId = ByteBuffer.allocate(lengthAndIdByteCount).order(LITTLE_ENDIAN);
    ImmutableList.Builder<Pair<byte[], Integer>> idValuePairs = ImmutableList.builder();

    // Walk entry by entry: each iteration reads one (length, id) header and advances by the
    // entry's full size (length prefix + declared length).
    for (int index = 0; index <= lengthPrefixedIdValuePairsSource.size() - lengthAndIdByteCount; ) {
      lengthPrefixedIdValuePairsSource.copyTo(index, lengthAndIdByteCount, lengthAndId);
      lengthAndId.flip();
      // checkedCast guards against a corrupt/oversized length field (> Integer.MAX_VALUE).
      int blockLength = Ints.checkedCast(lengthAndId.getLong());
      int id = lengthAndId.getInt();
      lengthAndId.clear();

      if (id != VERITY_PADDING_BLOCK_ID) {
        // The declared length covers the 4-byte id plus the value; strip the id.
        int blockValueSize = blockLength - BLOCK_ID_NUM_BYTES;
        ByteBuffer blockValue = ByteBuffer.allocate(blockValueSize);
        lengthPrefixedIdValuePairsSource.copyTo(
            index + BLOCK_LENGTH_NUM_BYTES + BLOCK_ID_NUM_BYTES, blockValueSize, blockValue);
        idValuePairs.add(Pair.of(blockValue.array(), id));
      }

      index += blockLength + BLOCK_LENGTH_NUM_BYTES;
    }
    return idValuePairs.build();
  }

  /**
   * Extract a block with the given id from the APK. If there is more than one block with the same
   * ID, the first block will be returned. If there are no block with the give id, {@code null} will
   * be returned.
   *
   * @param apk APK file
   * @param blockId id of the block to be extracted.
   * @return the block's value (without the id), or {@code null} if no block has the given id
   * @throws IOException failed to read the APK
   * @throws ZipFormatException the APK is not a valid zip
   * @throws ApkSigningBlockNotFoundException the APK has no v2 signing block
   */
  @Nullable
  public static ByteBuffer extractBlock(File apk, int blockId)
      throws IOException, ZipFormatException, ApkSigningBlockNotFoundException {
    try (RandomAccessFile file = new RandomAccessFile(apk, "r")) {
      DataSource apkDataSource = DataSources.asDataSource(file);
      ApkSigningBlock signingBlockInfo =
          ApkUtils.findApkSigningBlock(apkDataSource, ApkUtils.findZipSections(apkDataSource));

      // Strip the leading size field and the trailing magic, leaving the entry sequence.
      // (The trailing size field is included in MAGIC handling below per the slice arithmetic.)
      DataSource wholeV2Block = signingBlockInfo.getContents();
      final int lengthAndIdByteCount = BLOCK_LENGTH_NUM_BYTES + BLOCK_ID_NUM_BYTES;
      DataSource signingBlock =
          wholeV2Block.slice(
              SIZE_OF_BLOCK_NUM_BYTES,
              wholeV2Block.size() - SIZE_OF_BLOCK_NUM_BYTES - MAGIC_NUM_BYTES);
      ByteBuffer lengthAndId =
          ByteBuffer.allocate(lengthAndIdByteCount).order(ByteOrder.LITTLE_ENDIAN);
      // Same entry walk as extractAllSigners(), stopping at the first matching id.
      for (int index = 0; index <= signingBlock.size() - lengthAndIdByteCount; ) {
        signingBlock.copyTo(index, lengthAndIdByteCount, lengthAndId);
        lengthAndId.flip();
        // NOTE(review): plain (int) cast here silently truncates a corrupt length field, unlike
        // Ints.checkedCast used in extractAllSigners() -- consider making these consistent.
        int blockLength = (int) lengthAndId.getLong();
        int id = lengthAndId.getInt();
        lengthAndId.flip();
        if (id == blockId) {
          ByteBuffer block = ByteBuffer.allocate(blockLength - BLOCK_ID_NUM_BYTES);
          signingBlock.copyTo(
              index + lengthAndIdByteCount, blockLength - BLOCK_ID_NUM_BYTES, block);
          block.flip();
          return block;
        }
        index += blockLength + BLOCK_LENGTH_NUM_BYTES;
      }
      return null;
    }
  }

  private SigningBlockUtils() {}
}
|
||||||
|
|
@ -0,0 +1,18 @@
|
||||||
|
/*
|
||||||
|
* Copyright (C) 2017 The Android Open Source Project
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
/** Utilities to work with {@code apkzlib}. */
|
||||||
|
package com.android.tools.build.apkzlib.utils;
|
||||||
|
|
@ -0,0 +1,66 @@
|
||||||
|
/*
|
||||||
|
* Copyright (C) 2016 The Android Open Source Project
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package com.android.tools.build.apkzlib.zfile;
|
||||||
|
|
||||||
|
import com.google.common.base.Function;
|
||||||
|
import com.google.common.base.Predicate;
|
||||||
|
import java.io.Closeable;
|
||||||
|
import java.io.File;
|
||||||
|
import java.io.IOException;
|
||||||
|
import javax.annotation.Nullable;
|
||||||
|
|
||||||
|
/** Creates or updates APKs based on provided entries. */
public interface ApkCreator extends Closeable {

  /**
   * Copies the content of a Jar/Zip archive into the receiver archive.
   *
   * <p>An optional predicate allows to selectively choose which files to copy over and an optional
   * function allows renaming the files as they are copied.
   *
   * @param zip the zip to copy data from
   * @param transform an optional transform to apply to file names before copying them
   * @param isIgnored an optional filter or {@code null} to mark which out files should not be
   *     added, even though they are on the zip; if {@code transform} is specified, then this
   *     predicate applies after transformation
   * @throws IOException I/O error
   */
  void writeZip(
      File zip, @Nullable Function<String, String> transform, @Nullable Predicate<String> isIgnored)
      throws IOException;

  /**
   * Writes a new {@link File} into the archive. If a file already existed with the given path, it
   * should be replaced.
   *
   * @param inputFile the {@link File} to write.
   * @param apkPath the filepath inside the archive.
   * @throws IOException I/O error
   */
  void writeFile(File inputFile, String apkPath) throws IOException;

  /**
   * Deletes a file in a given path.
   *
   * @param apkPath the path to remove
   * @throws IOException failed to remove the entry
   */
  void deleteFile(String apkPath) throws IOException;

  /**
   * Returns true if the APK will be rewritten on close.
   *
   * @throws IOException failed to determine whether changes are pending
   */
  boolean hasPendingChangesWithWait() throws IOException;
}
|
||||||
|
|
@ -0,0 +1,128 @@
|
||||||
|
/*
|
||||||
|
* Copyright (C) 2016 The Android Open Source Project
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package com.android.tools.build.apkzlib.zfile;
|
||||||
|
|
||||||
|
import com.android.tools.build.apkzlib.sign.SigningOptions;
|
||||||
|
import com.google.auto.value.AutoValue;
|
||||||
|
import com.google.common.base.Optional;
|
||||||
|
import com.google.common.base.Preconditions;
|
||||||
|
import com.google.common.base.Predicate;
|
||||||
|
import java.io.File;
|
||||||
|
import javax.annotation.Nonnull;
|
||||||
|
import javax.annotation.Nullable;
|
||||||
|
|
||||||
|
/** Factory that creates instances of {@link ApkCreator}. */
public interface ApkCreatorFactory {

  /**
   * Creates an {@link ApkCreator} with a given output location, and signing information.
   *
   * @param creationData the information to create the APK
   */
  ApkCreator make(CreationData creationData);

  /**
   * Data structure with the required information to initiate the creation of an APK. See {@link
   * ApkCreatorFactory#make(CreationData)}.
   */
  @AutoValue
  abstract class CreationData {

    /** An implementation of builder pattern to create a {@link CreationData} object. */
    @AutoValue.Builder
    public abstract static class Builder {
      public abstract Builder setApkPath(@Nonnull File apkPath);

      public abstract Builder setSigningOptions(@Nonnull SigningOptions signingOptions);

      public abstract Builder setBuiltBy(@Nullable String buildBy);

      public abstract Builder setCreatedBy(@Nullable String createdBy);

      public abstract Builder setNativeLibrariesPackagingMode(
          NativeLibrariesPackagingMode packagingMode);

      public abstract Builder setNoCompressPredicate(Predicate<String> predicate);

      public abstract Builder setIncremental(boolean incremental);

      // Generated by AutoValue; build() below layers validation on top of it.
      abstract CreationData autoBuild();

      /** Builds the {@link CreationData}, validating that the output APK path was set. */
      public CreationData build() {
        CreationData data = autoBuild();
        Preconditions.checkArgument(data.getApkPath() != null, "Output apk path is not set");
        return data;
      }
    }

    /**
     * Creates a builder pre-populated with defaults: no built-by/created-by text, nothing marked
     * as uncompressed, and a non-incremental build.
     */
    public static Builder builder() {
      return new AutoValue_ApkCreatorFactory_CreationData.Builder()
          .setBuiltBy(null)
          .setCreatedBy(null)
          .setNoCompressPredicate(s -> false)
          .setIncremental(false);
    }

    /**
     * Obtains the path where the APK should be located. If the path already exists, then the APK
     * may be updated instead of re-created.
     *
     * @return the path that may already exist or not
     */
    public abstract File getApkPath();

    /**
     * Obtains the data used to sign the APK.
     *
     * @return the signing options, if any were set
     */
    @Nonnull
    public abstract Optional<SigningOptions> getSigningOptions();

    /**
     * Obtains the "built-by" text for the APK.
     *
     * @return the text or {@code null} if the default should be used
     */
    @Nullable
    public abstract String getBuiltBy();

    /**
     * Obtains the "created-by" text for the APK.
     *
     * @return the text or {@code null} if the default should be used
     */
    @Nullable
    public abstract String getCreatedBy();

    /** Returns the packaging policy that the {@link ApkCreator} should use for native libraries. */
    public abstract NativeLibrariesPackagingMode getNativeLibrariesPackagingMode();

    /** Returns the predicate to decide which file paths should be uncompressed. */
    public abstract Predicate<String> getNoCompressPredicate();

    /**
     * Returns if this apk build is incremental.
     *
     * <p>As mentioned in the {@link #getApkPath()} description, we may already have an existing
     * apk in place. This is the case when e.g. building APK via build system and this is not the
     * first build. In that case the build is called incremental and internal APK data might be
     * reused speeding the build up.
     */
    public abstract boolean isIncremental();
  }
}
|
||||||
|
|
@ -0,0 +1,176 @@
|
||||||
|
/*
|
||||||
|
* Copyright (C) 2016 The Android Open Source Project
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package com.android.tools.build.apkzlib.zfile;
|
||||||
|
|
||||||
|
import com.android.tools.build.apkzlib.zip.AlignmentRule;
|
||||||
|
import com.android.tools.build.apkzlib.zip.AlignmentRules;
|
||||||
|
import com.android.tools.build.apkzlib.zip.StoredEntry;
|
||||||
|
import com.android.tools.build.apkzlib.zip.ZFile;
|
||||||
|
import com.android.tools.build.apkzlib.zip.ZFileOptions;
|
||||||
|
import com.google.common.base.Function;
|
||||||
|
import com.google.common.base.Preconditions;
|
||||||
|
import com.google.common.base.Predicate;
|
||||||
|
import com.google.common.io.Closer;
|
||||||
|
import java.io.File;
|
||||||
|
import java.io.FileInputStream;
|
||||||
|
import java.io.IOException;
|
||||||
|
import java.io.InputStream;
|
||||||
|
import javax.annotation.Nullable;
|
||||||
|
|
||||||
|
/** {@link ApkCreator} that uses {@link ZFileOptions} to generate the APK. */
|
||||||
|
class ApkZFileCreator implements ApkCreator {
|
||||||
|
|
||||||
|
/** Suffix for native libraries. */
|
||||||
|
private static final String NATIVE_LIBRARIES_SUFFIX = ".so";
|
||||||
|
|
||||||
|
/** Shared libraries are alignment at 4096 boundaries. */
|
||||||
|
private static final AlignmentRule SO_RULE =
|
||||||
|
AlignmentRules.constantForSuffix(NATIVE_LIBRARIES_SUFFIX, 4096);
|
||||||
|
|
||||||
|
/** The zip file. */
|
||||||
|
private final ZFile zip;
|
||||||
|
|
||||||
|
/** Has the zip file been closed? */
|
||||||
|
private boolean closed;
|
||||||
|
|
||||||
|
/** Predicate defining which files should not be compressed. */
|
||||||
|
private final Predicate<String> noCompressPredicate;
|
||||||
|
|
||||||
|
/**
 * Creates a new creator.
 *
 * @param creationData the data needed to create the APK
 * @param options zip file options
 * @throws IOException failed to create the zip
 */
ApkZFileCreator(ApkCreatorFactory.CreationData creationData, ZFileOptions options)
    throws IOException {

  switch (creationData.getNativeLibrariesPackagingMode()) {
    case COMPRESSED:
      // Native libraries are treated like any other entry; honor only the caller's predicate.
      noCompressPredicate = creationData.getNoCompressPredicate();
      break;
    case UNCOMPRESSED_AND_ALIGNED:
      // Store .so files uncompressed and align them (SO_RULE: 4096-byte boundaries) -- presumably
      // so the platform can use them directly from the APK; confirm against packaging-mode docs.
      Predicate<String> baseNoCompressPredicate = creationData.getNoCompressPredicate();
      noCompressPredicate =
          name -> baseNoCompressPredicate.apply(name) || name.endsWith(NATIVE_LIBRARIES_SUFFIX);
      options.setAlignmentRule(AlignmentRules.compose(SO_RULE, options.getAlignmentRule()));
      break;
    default:
      // Unreachable unless a new packaging mode is added without updating this switch.
      throw new AssertionError();
  }
  // In case of incremental build we can skip validation since we generated the previous apk and
  // we trust ourselves
  options.setSkipValidation(creationData.isIncremental());

  zip =
      ZFiles.apk(
          creationData.getApkPath(),
          options,
          creationData.getSigningOptions(),
          creationData.getBuiltBy(),
          creationData.getCreatedBy());
  closed = false;
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public void writeZip(
|
||||||
|
File zip, @Nullable Function<String, String> transform, @Nullable Predicate<String> isIgnored)
|
||||||
|
throws IOException {
|
||||||
|
Preconditions.checkState(!closed, "closed == true");
|
||||||
|
Preconditions.checkArgument(zip.isFile(), "!zip.isFile()");
|
||||||
|
|
||||||
|
Closer closer = Closer.create();
|
||||||
|
try {
|
||||||
|
ZFile toMerge = closer.register(ZFile.openReadWrite(zip));
|
||||||
|
|
||||||
|
Predicate<String> ignorePredicate;
|
||||||
|
if (isIgnored == null) {
|
||||||
|
ignorePredicate = s -> false;
|
||||||
|
} else {
|
||||||
|
ignorePredicate = isIgnored;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Files that *must* be uncompressed in the result should not be merged and should be
|
||||||
|
// added after. This is just very slightly less efficient than ignoring just the ones
|
||||||
|
// that were compressed and must be uncompressed, but it is a lot simpler :)
|
||||||
|
Predicate<String> noMergePredicate =
|
||||||
|
v -> ignorePredicate.apply(v) || noCompressPredicate.apply(v);
|
||||||
|
|
||||||
|
this.zip.mergeFrom(toMerge, noMergePredicate);
|
||||||
|
|
||||||
|
for (StoredEntry toMergeEntry : toMerge.entries()) {
|
||||||
|
String path = toMergeEntry.getCentralDirectoryHeader().getName();
|
||||||
|
if (noCompressPredicate.apply(path) && !ignorePredicate.apply(path)) {
|
||||||
|
// This entry *must* be uncompressed so it was ignored in the merge and should
|
||||||
|
// now be added to the apk.
|
||||||
|
try (InputStream ignoredData = toMergeEntry.open()) {
|
||||||
|
this.zip.add(path, ignoredData, false);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} catch (Throwable t) {
|
||||||
|
throw closer.rethrow(t);
|
||||||
|
} finally {
|
||||||
|
closer.close();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public void writeFile(File inputFile, String apkPath) throws IOException {
|
||||||
|
Preconditions.checkState(!closed, "closed == true");
|
||||||
|
|
||||||
|
boolean mayCompress = !noCompressPredicate.apply(apkPath);
|
||||||
|
|
||||||
|
Closer closer = Closer.create();
|
||||||
|
try {
|
||||||
|
FileInputStream inputFileStream = closer.register(new FileInputStream(inputFile));
|
||||||
|
zip.add(apkPath, inputFileStream, mayCompress);
|
||||||
|
} catch (IOException e) {
|
||||||
|
throw closer.rethrow(e, IOException.class);
|
||||||
|
} catch (Throwable t) {
|
||||||
|
throw closer.rethrow(t);
|
||||||
|
} finally {
|
||||||
|
closer.close();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public void deleteFile(String apkPath) throws IOException {
|
||||||
|
Preconditions.checkState(!closed, "closed == true");
|
||||||
|
|
||||||
|
StoredEntry entry = zip.get(apkPath);
|
||||||
|
if (entry != null) {
|
||||||
|
entry.delete();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public boolean hasPendingChangesWithWait() throws IOException {
|
||||||
|
return zip.hasPendingChangesWithWait();
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public void close() throws IOException {
|
||||||
|
if (closed) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
zip.close();
|
||||||
|
closed = true;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
@ -0,0 +1,46 @@
|
||||||
|
/*
|
||||||
|
* Copyright (C) 2016 The Android Open Source Project
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package com.android.tools.build.apkzlib.zfile;
|
||||||
|
|
||||||
|
import com.android.tools.build.apkzlib.utils.IOExceptionWrapper;
|
||||||
|
import com.android.tools.build.apkzlib.zip.ZFileOptions;
|
||||||
|
import java.io.IOException;
|
||||||
|
|
||||||
|
/** Creates instances of {@link ApkZFileCreator}. */
|
||||||
|
public class ApkZFileCreatorFactory implements ApkCreatorFactory {
|
||||||
|
|
||||||
|
/** Options for the {@link ZFileOptions} to use in all APKs. */
|
||||||
|
private final ZFileOptions options;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Creates a new factory.
|
||||||
|
*
|
||||||
|
* @param options the options to use for all instances created
|
||||||
|
*/
|
||||||
|
public ApkZFileCreatorFactory(ZFileOptions options) {
|
||||||
|
this.options = options;
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public ApkCreator make(CreationData creationData) {
|
||||||
|
try {
|
||||||
|
return new ApkZFileCreator(creationData, options);
|
||||||
|
} catch (IOException e) {
|
||||||
|
throw new IOExceptionWrapper(e);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
@ -0,0 +1,32 @@
|
||||||
|
/*
|
||||||
|
* Copyright (C) 2016 The Android Open Source Project
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package com.android.tools.build.apkzlib.zfile;
|
||||||
|
|
||||||
|
/** Java manifest attributes and some default values. */
public interface ManifestAttributes {

  /** Manifest attribute holding the "built by" information. */
  String BUILT_BY = "Built-By";

  /** Manifest attribute holding the "created by" information. */
  String CREATED_BY = "Created-By";

  /** Manifest attribute holding the manifest version. */
  String MANIFEST_VERSION = "Manifest-Version";

  /** Value written for the {@link #MANIFEST_VERSION} attribute. */
  String CURRENT_MANIFEST_VERSION = "1.0";
}
|
||||||
|
|
@ -0,0 +1,32 @@
|
||||||
|
/*
|
||||||
|
* Copyright (C) 2016 The Android Open Source Project
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package com.android.tools.build.apkzlib.zfile;
|
||||||
|
|
||||||
|
/** Describes how native libs should be packaged. */
public enum NativeLibrariesPackagingMode {

  /** Native libs are packaged like any other file (may be compressed). */
  COMPRESSED,

  /**
   * Native libs are packaged uncompressed and page-aligned, so they can be mapped into memory
   * at runtime.
   *
   * <p>Support for this mode was added in Android 23; it only works if the {@code
   * extractNativeLibs} attribute is set in the manifest.
   */
  UNCOMPRESSED_AND_ALIGNED;
}
|
||||||
|
|
@ -0,0 +1,139 @@
|
||||||
|
/*
|
||||||
|
* Copyright (C) 2016 The Android Open Source Project
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package com.android.tools.build.apkzlib.zfile;
|
||||||
|
|
||||||
|
import com.android.tools.build.apkzlib.sign.ManifestGenerationExtension;
|
||||||
|
import com.android.tools.build.apkzlib.sign.SigningExtension;
|
||||||
|
import com.android.tools.build.apkzlib.sign.SigningOptions;
|
||||||
|
import com.android.tools.build.apkzlib.zip.AlignmentRule;
|
||||||
|
import com.android.tools.build.apkzlib.zip.AlignmentRules;
|
||||||
|
import com.android.tools.build.apkzlib.zip.ZFile;
|
||||||
|
import com.android.tools.build.apkzlib.zip.ZFileOptions;
|
||||||
|
import com.google.common.base.Optional;
|
||||||
|
import java.io.File;
|
||||||
|
import java.io.IOException;
|
||||||
|
import java.security.InvalidKeyException;
|
||||||
|
import java.security.NoSuchAlgorithmException;
|
||||||
|
import javax.annotation.Nullable;
|
||||||
|
|
||||||
|
/** Factory for {@link ZFile}s that are specifically configured to be APKs, AARs, ... */
|
||||||
|
public class ZFiles {
|
||||||
|
|
||||||
|
/** By default all non-compressed files are alignment at 4 byte boundaries.. */
|
||||||
|
private static final AlignmentRule APK_DEFAULT_RULE = AlignmentRules.constant(4);
|
||||||
|
|
||||||
|
/** Default build by string. */
|
||||||
|
private static final String DEFAULT_BUILD_BY = "Generated-by-ADT";
|
||||||
|
|
||||||
|
/** Default created by string. */
|
||||||
|
private static final String DEFAULT_CREATED_BY = "Generated-by-ADT";
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Creates a new zip file configured as an apk, based on a given file.
|
||||||
|
*
|
||||||
|
* @param f the file, if this path does not represent an existing path, will create a {@link
|
||||||
|
* ZFile} based on an non-existing path (a zip will be created when {@link ZFile#close()} is
|
||||||
|
* invoked)
|
||||||
|
* @param options the options to create the {@link ZFile}
|
||||||
|
* @return the zip file
|
||||||
|
* @throws IOException failed to create the zip file
|
||||||
|
*/
|
||||||
|
public static ZFile apk(File f, ZFileOptions options) throws IOException {
|
||||||
|
options.setAlignmentRule(AlignmentRules.compose(options.getAlignmentRule(), APK_DEFAULT_RULE));
|
||||||
|
return ZFile.openReadWrite(f, options);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Creates a new zip file configured as an apk, based on a given file.
|
||||||
|
*
|
||||||
|
* @param f the file, if this path does not represent an existing path, will create a {@link
|
||||||
|
* ZFile} based on an non-existing path (a zip will be created when {@link ZFile#close()} is
|
||||||
|
* invoked)
|
||||||
|
* @param options the options to create the {@link ZFile}
|
||||||
|
* @param signingOptions the options to sign the apk
|
||||||
|
* @param builtBy who to mark as builder in the manifest
|
||||||
|
* @param createdBy who to mark as creator in the manifest
|
||||||
|
* @return the zip file
|
||||||
|
* @throws IOException failed to create the zip file
|
||||||
|
*/
|
||||||
|
public static ZFile apk(
|
||||||
|
File f,
|
||||||
|
ZFileOptions options,
|
||||||
|
Optional<SigningOptions> signingOptions,
|
||||||
|
@Nullable String builtBy,
|
||||||
|
@Nullable String createdBy)
|
||||||
|
throws IOException {
|
||||||
|
return apk(
|
||||||
|
f, options, signingOptions, builtBy, createdBy, options.getAlwaysGenerateJarManifest());
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Creates a new zip file configured as an apk, based on a given file.
|
||||||
|
*
|
||||||
|
* @param f the file, if this path does not represent an existing path, will create a {@link
|
||||||
|
* ZFile} based on an non-existing path (a zip will be created when {@link ZFile#close()} is
|
||||||
|
* invoked)
|
||||||
|
* @param options the options to create the {@link ZFile}
|
||||||
|
* @param signingOptions the options to sign the apk
|
||||||
|
* @param builtBy who to mark as builder in the manifest
|
||||||
|
* @param createdBy who to mark as creator in the manifest
|
||||||
|
* @param writeManifest a migration parameter that forces keeping (useless) manifest.mf file in
|
||||||
|
* apk file in order to prevent breaking changes. Clients of the previous interface will still
|
||||||
|
* get apk with manifest.mf because the flag is true by default
|
||||||
|
* @return the zip file
|
||||||
|
* @throws IOException failed to create the zip file
|
||||||
|
* @deprecated Use ZFileOptions.setAlwaysGenerateJarManifest() instead.
|
||||||
|
*/
|
||||||
|
@Deprecated
|
||||||
|
// This method can be removed once ZFileOptions.getAlwaysGenerateJarManifest() is on Maven.
|
||||||
|
public static ZFile apk(
|
||||||
|
File f,
|
||||||
|
ZFileOptions options,
|
||||||
|
Optional<SigningOptions> signingOptions,
|
||||||
|
@Nullable String builtBy,
|
||||||
|
@Nullable String createdBy,
|
||||||
|
boolean writeManifest)
|
||||||
|
throws IOException {
|
||||||
|
ZFile zfile = apk(f, options);
|
||||||
|
|
||||||
|
if ((signingOptions.isPresent() && signingOptions.get().isV1SigningEnabled())
|
||||||
|
|| writeManifest) {
|
||||||
|
if (builtBy == null) {
|
||||||
|
builtBy = DEFAULT_BUILD_BY;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (createdBy == null) {
|
||||||
|
createdBy = DEFAULT_CREATED_BY;
|
||||||
|
}
|
||||||
|
ManifestGenerationExtension manifestExt = new ManifestGenerationExtension(builtBy, createdBy);
|
||||||
|
manifestExt.register(zfile);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (signingOptions.isPresent()) {
|
||||||
|
SigningOptions signOptions = signingOptions.get();
|
||||||
|
try {
|
||||||
|
new SigningExtension(signOptions).register(zfile);
|
||||||
|
} catch (NoSuchAlgorithmException | InvalidKeyException e) {
|
||||||
|
throw new IOException("Failed to create signature extensions", e);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return zfile;
|
||||||
|
}
|
||||||
|
|
||||||
|
private ZFiles() {}
|
||||||
|
}
|
||||||
|
|
@ -0,0 +1,18 @@
|
||||||
|
/*
|
||||||
|
* Copyright (C) 2017 The Android Open Source Project
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
/** The {@code zfile} package contains high-level factories and utilities for creating zip-based files such as APKs. */
|
||||||
|
package com.android.tools.build.apkzlib.zfile;
|
||||||
|
|
@ -0,0 +1,34 @@
|
||||||
|
/*
|
||||||
|
* Copyright (C) 2016 The Android Open Source Project
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package com.android.tools.build.apkzlib.zip;
|
||||||
|
|
||||||
|
|
||||||
|
/** An alignment rule defines how a file should be aligned in a zip, based on its name. */
public interface AlignmentRule {

  /** Alignment value used for files that do not require any alignment. */
  int NO_ALIGNMENT = 1;

  /**
   * Obtains the alignment this rule computes for a given path.
   *
   * @param path the path in the zip file
   * @return the alignment value, always greater than {@code 0}; if this rule places no
   *     restrictions on the provided path, then {@link AlignmentRule#NO_ALIGNMENT} is returned
   */
  int alignment(String path);
}
|
||||||
|
|
@ -0,0 +1,74 @@
|
||||||
|
/*
|
||||||
|
* Copyright (C) 2015 The Android Open Source Project
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package com.android.tools.build.apkzlib.zip;
|
||||||
|
|
||||||
|
import com.google.common.base.Preconditions;
|
||||||
|
|
||||||
|
/** Factory for instances of {@link AlignmentRule}. */
|
||||||
|
public final class AlignmentRules {
|
||||||
|
|
||||||
|
private AlignmentRules() {}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* A rule that defines a constant alignment for all files.
|
||||||
|
*
|
||||||
|
* @param alignment the alignment
|
||||||
|
* @return the rule
|
||||||
|
*/
|
||||||
|
public static AlignmentRule constant(int alignment) {
|
||||||
|
Preconditions.checkArgument(alignment > 0, "alignment <= 0");
|
||||||
|
|
||||||
|
return (String path) -> alignment;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* A rule that defines constant alignment for all files with a certain suffix, placing no
|
||||||
|
* restrictions on other files.
|
||||||
|
*
|
||||||
|
* @param suffix the suffix
|
||||||
|
* @param alignment the alignment for paths that match the provided suffix
|
||||||
|
* @return the rule
|
||||||
|
*/
|
||||||
|
public static AlignmentRule constantForSuffix(String suffix, int alignment) {
|
||||||
|
Preconditions.checkArgument(!suffix.isEmpty(), "suffix.isEmpty()");
|
||||||
|
Preconditions.checkArgument(alignment > 0, "alignment <= 0");
|
||||||
|
|
||||||
|
return (String path) -> path.endsWith(suffix) ? alignment : AlignmentRule.NO_ALIGNMENT;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* A rule that applies other rules in order.
|
||||||
|
*
|
||||||
|
* @param rules all rules to be tried; the first rule that does not return {@link
|
||||||
|
* AlignmentRule#NO_ALIGNMENT} will define the alignment for a path; if there are no rules
|
||||||
|
* that return a value different from {@link AlignmentRule#NO_ALIGNMENT}, then {@link
|
||||||
|
* AlignmentRule#NO_ALIGNMENT} is returned
|
||||||
|
* @return the composition rule
|
||||||
|
*/
|
||||||
|
public static AlignmentRule compose(AlignmentRule... rules) {
|
||||||
|
return (String path) -> {
|
||||||
|
for (AlignmentRule r : rules) {
|
||||||
|
int align = r.alignment(path);
|
||||||
|
if (align != AlignmentRule.NO_ALIGNMENT) {
|
||||||
|
return align;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return AlignmentRule.NO_ALIGNMENT;
|
||||||
|
};
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
@ -0,0 +1,482 @@
|
||||||
|
/*
|
||||||
|
* Copyright (C) 2015 The Android Open Source Project
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package com.android.tools.build.apkzlib.zip;
|
||||||
|
|
||||||
|
import com.android.tools.build.apkzlib.bytestorage.ByteStorage;
|
||||||
|
import com.android.tools.build.apkzlib.utils.CachedSupplier;
|
||||||
|
import com.android.tools.build.apkzlib.utils.IOExceptionWrapper;
|
||||||
|
import com.android.tools.build.apkzlib.zip.utils.MsDosDateTimeUtils;
|
||||||
|
import com.google.common.annotations.VisibleForTesting;
|
||||||
|
import com.google.common.base.Preconditions;
|
||||||
|
import com.google.common.collect.ImmutableMap;
|
||||||
|
import com.google.common.collect.Lists;
|
||||||
|
import com.google.common.collect.Maps;
|
||||||
|
import com.google.common.primitives.Ints;
|
||||||
|
import com.google.common.util.concurrent.Futures;
|
||||||
|
import com.google.common.util.concurrent.ListenableFuture;
|
||||||
|
import java.io.IOException;
|
||||||
|
import java.io.UncheckedIOException;
|
||||||
|
import java.nio.ByteBuffer;
|
||||||
|
import java.util.Collections;
|
||||||
|
import java.util.List;
|
||||||
|
import java.util.Map;
|
||||||
|
import java.util.Set;
|
||||||
|
|
||||||
|
/** Representation of the central directory of a zip archive. */
|
||||||
|
class CentralDirectory {
|
||||||
|
|
||||||
|
/** Field in the central directory with the central directory signature. */
|
||||||
|
private static final ZipField.F4 F_SIGNATURE = new ZipField.F4(0, 0x02014b50, "Signature");
|
||||||
|
|
||||||
|
/** Field in the central directory with the "made by" code. */
|
||||||
|
private static final ZipField.F2 F_MADE_BY =
|
||||||
|
new ZipField.F2(F_SIGNATURE.endOffset(), "Made by", new ZipFieldInvariantNonNegative());
|
||||||
|
|
||||||
|
/** Field in the central directory with the minimum version required to extract the entry. */
|
||||||
|
@VisibleForTesting
|
||||||
|
static final ZipField.F2 F_VERSION_EXTRACT =
|
||||||
|
new ZipField.F2(
|
||||||
|
F_MADE_BY.endOffset(), "Version to extract", new ZipFieldInvariantNonNegative());
|
||||||
|
|
||||||
|
/** Field in the central directory with the GP bit flag. */
|
||||||
|
private static final ZipField.F2 F_GP_BIT =
|
||||||
|
new ZipField.F2(F_VERSION_EXTRACT.endOffset(), "GP bit");
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Field in the central directory with the code of the compression method. See {@link
|
||||||
|
* CompressionMethod#fromCode(long)}.
|
||||||
|
*/
|
||||||
|
private static final ZipField.F2 F_METHOD = new ZipField.F2(F_GP_BIT.endOffset(), "Method");
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Field in the central directory with the last modification time in MS-DOS format (see {@link
|
||||||
|
* MsDosDateTimeUtils#packTime(long)}).
|
||||||
|
*/
|
||||||
|
private static final ZipField.F2 F_LAST_MOD_TIME =
|
||||||
|
new ZipField.F2(F_METHOD.endOffset(), "Last modification time");
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Field in the central directory with the last modification date in MS-DOS format. See {@link
|
||||||
|
* MsDosDateTimeUtils#packDate(long)}.
|
||||||
|
*/
|
||||||
|
private static final ZipField.F2 F_LAST_MOD_DATE =
|
||||||
|
new ZipField.F2(F_LAST_MOD_TIME.endOffset(), "Last modification date");
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Field in the central directory with the CRC32 checksum of the entry. This will be zero for
|
||||||
|
* directories and files with no content.
|
||||||
|
*/
|
||||||
|
private static final ZipField.F4 F_CRC32 = new ZipField.F4(F_LAST_MOD_DATE.endOffset(), "CRC32");
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Field in the central directory with the entry's compressed size, <em>i.e.</em>, the file on the
|
||||||
|
* archive. This will be the same as the uncompressed size if the method is {@link
|
||||||
|
* CompressionMethod#STORE}.
|
||||||
|
*/
|
||||||
|
private static final ZipField.F4 F_COMPRESSED_SIZE =
|
||||||
|
new ZipField.F4(F_CRC32.endOffset(), "Compressed size", new ZipFieldInvariantNonNegative());
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Field in the central directory with the entry's uncompressed size, <em>i.e.</em>, the size the
|
||||||
|
* file will have when extracted from the zip. This will be zero for directories and empty files
|
||||||
|
* and will be the same as the compressed size if the method is {@link CompressionMethod#STORE}.
|
||||||
|
*/
|
||||||
|
private static final ZipField.F4 F_UNCOMPRESSED_SIZE =
|
||||||
|
new ZipField.F4(
|
||||||
|
F_COMPRESSED_SIZE.endOffset(), "Uncompressed size", new ZipFieldInvariantNonNegative());
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Field in the central directory with the length of the file name. The file name is stored after
|
||||||
|
* the offset field ({@link #F_OFFSET}). The number of characters in the file name are stored in
|
||||||
|
* this field.
|
||||||
|
*/
|
||||||
|
private static final ZipField.F2 F_FILE_NAME_LENGTH =
|
||||||
|
new ZipField.F2(
|
||||||
|
F_UNCOMPRESSED_SIZE.endOffset(), "File name length", new ZipFieldInvariantNonNegative());
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Field in the central directory with the length of the extra field. The extra field is stored
|
||||||
|
* after the file name ({@link #F_FILE_NAME_LENGTH}). The contents of this field are partially
|
||||||
|
* defined in the zip specification but we do not parse it.
|
||||||
|
*/
|
||||||
|
private static final ZipField.F2 F_EXTRA_FIELD_LENGTH =
|
||||||
|
new ZipField.F2(
|
||||||
|
F_FILE_NAME_LENGTH.endOffset(), "Extra field length", new ZipFieldInvariantNonNegative());
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Field in the central directory with the length of the comment. The comment is stored after the
|
||||||
|
* extra field ({@link #F_EXTRA_FIELD_LENGTH}). We do not parse the comment.
|
||||||
|
*/
|
||||||
|
private static final ZipField.F2 F_COMMENT_LENGTH =
|
||||||
|
new ZipField.F2(
|
||||||
|
F_EXTRA_FIELD_LENGTH.endOffset(), "Comment length", new ZipFieldInvariantNonNegative());
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Number of the disk where the central directory starts. Because we do not support multi-file
|
||||||
|
* archives, this field has to have value {@code 0}.
|
||||||
|
*/
|
||||||
|
private static final ZipField.F2 F_DISK_NUMBER_START =
|
||||||
|
new ZipField.F2(F_COMMENT_LENGTH.endOffset(), 0, "Disk start");
|
||||||
|
|
||||||
|
/** Internal attributes. This field can only contain one bit set, the {@link #ASCII_BIT}. */
|
||||||
|
private static final ZipField.F2 F_INTERNAL_ATTRIBUTES =
|
||||||
|
new ZipField.F2(F_DISK_NUMBER_START.endOffset(), "Int attributes");
|
||||||
|
|
||||||
|
/** External attributes. This field is ignored. */
|
||||||
|
private static final ZipField.F4 F_EXTERNAL_ATTRIBUTES =
|
||||||
|
new ZipField.F4(F_INTERNAL_ATTRIBUTES.endOffset(), "Ext attributes");
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Offset into the archive where the entry starts. This is the offset to the local header (see
|
||||||
|
* {@link StoredEntry} for information on the local header), not to the file data itself. The file
|
||||||
|
* data, if there is any, will be stored after the local header.
|
||||||
|
*/
|
||||||
|
private static final ZipField.F4 F_OFFSET =
|
||||||
|
new ZipField.F4(
|
||||||
|
F_EXTERNAL_ATTRIBUTES.endOffset(), "Offset", new ZipFieldInvariantNonNegative());
|
||||||
|
|
||||||
|
/** Maximum supported version to extract. */
|
||||||
|
private static final int MAX_VERSION_TO_EXTRACT = 20;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Bit that can be set on the internal attributes stating that the file is an ASCII file. We don't
|
||||||
|
* do anything with this information, but we check that nothing unexpected appears in the internal
|
||||||
|
* attributes.
|
||||||
|
*/
|
||||||
|
private static final int ASCII_BIT = 1;
|
||||||
|
|
||||||
|
/** Contains all entries in the directory mapped from their names. */
|
||||||
|
private final Map<String, StoredEntry> entries;
|
||||||
|
|
||||||
|
/** The file where this directory belongs to. */
|
||||||
|
private final ZFile file;
|
||||||
|
|
||||||
|
/** Supplier that provides a byte representation of the central directory. */
|
||||||
|
private final CachedSupplier<byte[]> bytesSupplier;
|
||||||
|
|
||||||
|
/** Verify log for the central directory. */
|
||||||
|
private final VerifyLog verifyLog;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Creates a new, empty, central directory, for a given zip file.
|
||||||
|
*
|
||||||
|
* @param file the file
|
||||||
|
*/
|
||||||
|
CentralDirectory(ZFile file) {
|
||||||
|
entries = Maps.newHashMap();
|
||||||
|
this.file = file;
|
||||||
|
bytesSupplier = new CachedSupplier<>(this::computeByteRepresentation);
|
||||||
|
verifyLog = file.getVerifyLog();
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Reads the central directory data from a zip file, parses it, and creates the in-memory
|
||||||
|
* structure representing the directory.
|
||||||
|
*
|
||||||
|
* @param bytes the data of the central directory; the directory is read from the buffer's current
|
||||||
|
* position; when this method terminates, the buffer's position is the first byte after the
|
||||||
|
* directory
|
||||||
|
* @param count the number of entries expected in the central directory (usually read from the
|
||||||
|
* {@link Eocd}).
|
||||||
|
* @param file the zip file this central directory belongs to
|
||||||
|
* @param storage the storage used to generate sources with entry data
|
||||||
|
* @return the central directory
|
||||||
|
* @throws IOException failed to read data from the zip, or the central directory is corrupted or
|
||||||
|
* has unsupported features
|
||||||
|
*/
|
||||||
|
static CentralDirectory makeFromData(ByteBuffer bytes, long count, ZFile file, ByteStorage storage)
|
||||||
|
throws IOException {
|
||||||
|
Preconditions.checkNotNull(bytes, "bytes == null");
|
||||||
|
Preconditions.checkArgument(count >= 0, "count < 0");
|
||||||
|
|
||||||
|
CentralDirectory directory = new CentralDirectory(file);
|
||||||
|
|
||||||
|
for (long i = 0; i < count; i++) {
|
||||||
|
try {
|
||||||
|
directory.readEntry(bytes, storage);
|
||||||
|
} catch (IOException e) {
|
||||||
|
throw new IOException(
|
||||||
|
"Failed to read directory entry index "
|
||||||
|
+ i
|
||||||
|
+ " (total "
|
||||||
|
+ "directory bytes read: "
|
||||||
|
+ bytes.position()
|
||||||
|
+ ").",
|
||||||
|
e);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return directory;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Creates a new central directory from the entries. This is used to build a new central directory
|
||||||
|
* from entries in the zip file.
|
||||||
|
*
|
||||||
|
* @param entries the entries in the zip file
|
||||||
|
* @param file the zip file itself
|
||||||
|
* @return the created central directory
|
||||||
|
*/
|
||||||
|
static CentralDirectory makeFromEntries(Set<StoredEntry> entries, ZFile file) {
|
||||||
|
CentralDirectory directory = new CentralDirectory(file);
|
||||||
|
for (StoredEntry entry : entries) {
|
||||||
|
CentralDirectoryHeader cdr = entry.getCentralDirectoryHeader();
|
||||||
|
Preconditions.checkArgument(
|
||||||
|
!directory.entries.containsKey(cdr.getName()), "Duplicate filename");
|
||||||
|
directory.entries.put(cdr.getName(), entry);
|
||||||
|
}
|
||||||
|
|
||||||
|
return directory;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Reads the next entry from the central directory and adds it to {@link #entries}.
|
||||||
|
*
|
||||||
|
* @param bytes the central directory's data, positioned starting at the beginning of the next
|
||||||
|
* entry to read; when finished, the buffer's position will be at the first byte after the
|
||||||
|
* entry
|
||||||
|
* @param storage the storage used to generate sources to store entry data
|
||||||
|
* @throws IOException failed to read the directory entry, either because of an I/O error, because
|
||||||
|
* it is corrupt or contains unsupported features
|
||||||
|
*/
|
||||||
|
private void readEntry(ByteBuffer bytes, ByteStorage storage) throws IOException {
|
||||||
|
F_SIGNATURE.verify(bytes);
|
||||||
|
long madeBy = F_MADE_BY.read(bytes);
|
||||||
|
|
||||||
|
long versionNeededToExtract = F_VERSION_EXTRACT.read(bytes);
|
||||||
|
verifyLog.verify(
|
||||||
|
versionNeededToExtract <= MAX_VERSION_TO_EXTRACT,
|
||||||
|
"Ignored unknown version needed to extract in zip directory entry: %s.",
|
||||||
|
versionNeededToExtract);
|
||||||
|
|
||||||
|
long gpBit = F_GP_BIT.read(bytes);
|
||||||
|
GPFlags flags = GPFlags.from(gpBit);
|
||||||
|
|
||||||
|
long methodCode = F_METHOD.read(bytes);
|
||||||
|
CompressionMethod method = CompressionMethod.fromCode(methodCode);
|
||||||
|
verifyLog.verify(method != null, "Unknown method in zip directory entry: %s.", methodCode);
|
||||||
|
|
||||||
|
long lastModTime;
|
||||||
|
long lastModDate;
|
||||||
|
if (file.areTimestampsIgnored()) {
|
||||||
|
lastModTime = 0;
|
||||||
|
lastModDate = 0;
|
||||||
|
F_LAST_MOD_TIME.skip(bytes);
|
||||||
|
F_LAST_MOD_DATE.skip(bytes);
|
||||||
|
} else {
|
||||||
|
lastModTime = F_LAST_MOD_TIME.read(bytes);
|
||||||
|
lastModDate = F_LAST_MOD_DATE.read(bytes);
|
||||||
|
}
|
||||||
|
|
||||||
|
long crc32 = F_CRC32.read(bytes);
|
||||||
|
long compressedSize = F_COMPRESSED_SIZE.read(bytes);
|
||||||
|
long uncompressedSize = F_UNCOMPRESSED_SIZE.read(bytes);
|
||||||
|
int fileNameLength = Ints.checkedCast(F_FILE_NAME_LENGTH.read(bytes));
|
||||||
|
int extraFieldLength = Ints.checkedCast(F_EXTRA_FIELD_LENGTH.read(bytes));
|
||||||
|
int fileCommentLength = Ints.checkedCast(F_COMMENT_LENGTH.read(bytes));
|
||||||
|
|
||||||
|
F_DISK_NUMBER_START.verify(bytes, verifyLog);
|
||||||
|
long internalAttributes = F_INTERNAL_ATTRIBUTES.read(bytes);
|
||||||
|
verifyLog.verify(
|
||||||
|
(internalAttributes & ~ASCII_BIT) == 0,
|
||||||
|
"Ignored invalid internal attributes: %s.",
|
||||||
|
internalAttributes);
|
||||||
|
|
||||||
|
long externalAttributes = F_EXTERNAL_ATTRIBUTES.read(bytes);
|
||||||
|
long entryOffset = F_OFFSET.read(bytes);
|
||||||
|
|
||||||
|
long remainingSize = (long) fileNameLength + extraFieldLength + fileCommentLength;
|
||||||
|
|
||||||
|
if (bytes.remaining() < fileNameLength + extraFieldLength + fileCommentLength) {
|
||||||
|
throw new IOException(
|
||||||
|
"Directory entry should have "
|
||||||
|
+ remainingSize
|
||||||
|
+ " bytes remaining (name = "
|
||||||
|
+ fileNameLength
|
||||||
|
+ ", extra = "
|
||||||
|
+ extraFieldLength
|
||||||
|
+ ", comment = "
|
||||||
|
+ fileCommentLength
|
||||||
|
+ "), but it has "
|
||||||
|
+ bytes.remaining()
|
||||||
|
+ ".");
|
||||||
|
}
|
||||||
|
|
||||||
|
byte[] encodedFileName = new byte[fileNameLength];
|
||||||
|
bytes.get(encodedFileName);
|
||||||
|
String fileName = EncodeUtils.decode(encodedFileName, flags);
|
||||||
|
|
||||||
|
byte[] extraField = new byte[extraFieldLength];
|
||||||
|
bytes.get(extraField);
|
||||||
|
|
||||||
|
byte[] fileCommentField = new byte[fileCommentLength];
|
||||||
|
bytes.get(fileCommentField);
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Tricky: to create a CentralDirectoryHeader we need the future that will hold the result
|
||||||
|
* of the compress information. But, to actually create the result of the compress
|
||||||
|
* information we need the CentralDirectoryHeader
|
||||||
|
*/
|
||||||
|
ListenableFuture<CentralDirectoryHeaderCompressInfo> compressInfo =
|
||||||
|
Futures.immediateFuture(
|
||||||
|
new CentralDirectoryHeaderCompressInfo(method, compressedSize, versionNeededToExtract));
|
||||||
|
CentralDirectoryHeader centralDirectoryHeader =
|
||||||
|
new CentralDirectoryHeader(
|
||||||
|
fileName,
|
||||||
|
encodedFileName,
|
||||||
|
uncompressedSize,
|
||||||
|
compressInfo,
|
||||||
|
flags,
|
||||||
|
file,
|
||||||
|
lastModTime,
|
||||||
|
lastModDate);
|
||||||
|
centralDirectoryHeader.setMadeBy(madeBy);
|
||||||
|
centralDirectoryHeader.setLastModTime(lastModTime);
|
||||||
|
centralDirectoryHeader.setLastModDate(lastModDate);
|
||||||
|
centralDirectoryHeader.setCrc32(crc32);
|
||||||
|
centralDirectoryHeader.setInternalAttributes(internalAttributes);
|
||||||
|
centralDirectoryHeader.setExternalAttributes(externalAttributes);
|
||||||
|
centralDirectoryHeader.setOffset(entryOffset);
|
||||||
|
centralDirectoryHeader.setExtraFieldNoNotify(new ExtraField(extraField));
|
||||||
|
centralDirectoryHeader.setComment(fileCommentField);
|
||||||
|
|
||||||
|
StoredEntry entry;
|
||||||
|
|
||||||
|
try {
|
||||||
|
entry = new StoredEntry(centralDirectoryHeader, file, null, storage);
|
||||||
|
} catch (IOException e) {
|
||||||
|
throw new IOException("Failed to read stored entry '" + fileName + "'.", e);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (entries.containsKey(fileName)) {
|
||||||
|
verifyLog.log("File file contains duplicate file '" + fileName + "'.");
|
||||||
|
}
|
||||||
|
|
||||||
|
entries.put(fileName, entry);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Obtains all the entries in the central directory.
|
||||||
|
*
|
||||||
|
* @return all entries on a non-modifiable map
|
||||||
|
*/
|
||||||
|
Map<String, StoredEntry> getEntries() {
|
||||||
|
return ImmutableMap.copyOf(entries);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
 * Obtains whether the Central Directory contains any files with Zip64 file extensions.
 *
 * <p>At the present time, files in the Zip64 format are not supported, so this method always
 * returns false.
 *
 * @return false, as Zip64 formatted files are not supported
 */
boolean containsZip64Files() {
  // Hard-coded: Zip64 entries are rejected/unsupported elsewhere, so none can be present.
  return false;
}
|
||||||
|
|
||||||
|
/**
 * Obtains the byte representation of the central directory.
 *
 * @return a byte array containing the whole central directory
 * @throws IOException failed to write the byte array
 */
byte[] toBytes() throws IOException {
  // Delegates to bytesSupplier, which presumably caches the result of
  // computeByteRepresentation() -- TODO confirm against the supplier's declaration.
  return bytesSupplier.get();
}
|
||||||
|
|
||||||
|
/**
 * Computes the byte representation of the central directory.
 *
 * @return a byte array containing the whole central directory
 * @throws UncheckedIOException failed to write the byte array
 */
private byte[] computeByteRepresentation() {

  // Entries are serialized sorted by name so the output is deterministic regardless of the
  // order in which entries were added.
  List<StoredEntry> sorted = Lists.newArrayList(entries.values());
  Collections.sort(sorted, StoredEntry.COMPARE_BY_NAME);

  // Per-entry data gathered in the first pass, indexed by position in 'sorted'.
  CentralDirectoryHeader[] cdhs = new CentralDirectoryHeader[entries.size()];
  CentralDirectoryHeaderCompressInfo[] compressInfos =
      new CentralDirectoryHeaderCompressInfo[entries.size()];
  byte[][] encodedFileNames = new byte[entries.size()][];
  byte[][] extraFields = new byte[entries.size()][];
  byte[][] comments = new byte[entries.size()][];

  try {
    /*
     * First collect all the data and compute the total size of the central directory.
     */
    int idx = 0;
    int total = 0;
    for (StoredEntry entry : sorted) {
      cdhs[idx] = entry.getCentralDirectoryHeader();
      // May block until the entry's lazy compression finishes.
      compressInfos[idx] = cdhs[idx].getCompressionInfoWithWait();
      encodedFileNames[idx] = cdhs[idx].getEncodedFileName();
      extraFields[idx] = new byte[cdhs[idx].getExtraField().size()];
      cdhs[idx].getExtraField().write(ByteBuffer.wrap(extraFields[idx]));
      comments[idx] = cdhs[idx].getComment();

      // F_OFFSET is the last fixed-size field, so its end offset equals the size of the
      // fixed portion of one central directory record.
      total +=
          F_OFFSET.endOffset()
              + encodedFileNames[idx].length
              + extraFields[idx].length
              + comments[idx].length;
      idx++;
    }

    ByteBuffer out = ByteBuffer.allocate(total);

    // Second pass: write each record -- fixed-size fields first, then the variable-size
    // name / extra-field / comment tail. Field order must match the zip specification.
    for (idx = 0; idx < entries.size(); idx++) {
      F_SIGNATURE.write(out);
      F_MADE_BY.write(out, cdhs[idx].getMadeBy());
      F_VERSION_EXTRACT.write(out, compressInfos[idx].getVersionExtract());
      F_GP_BIT.write(out, cdhs[idx].getGpBit().getValue());
      F_METHOD.write(out, compressInfos[idx].getMethod().methodCode);

      if (file.areTimestampsIgnored()) {
        // Zero timestamps for reproducible output.
        F_LAST_MOD_TIME.write(out, 0);
        F_LAST_MOD_DATE.write(out, 0);
      } else {
        F_LAST_MOD_TIME.write(out, cdhs[idx].getLastModTime());
        F_LAST_MOD_DATE.write(out, cdhs[idx].getLastModDate());
      }

      F_CRC32.write(out, cdhs[idx].getCrc32());
      F_COMPRESSED_SIZE.write(out, compressInfos[idx].getCompressedSize());
      F_UNCOMPRESSED_SIZE.write(out, cdhs[idx].getUncompressedSize());

      F_FILE_NAME_LENGTH.write(out, cdhs[idx].getEncodedFileName().length);
      F_EXTRA_FIELD_LENGTH.write(out, cdhs[idx].getExtraField().size());
      F_COMMENT_LENGTH.write(out, cdhs[idx].getComment().length);
      F_DISK_NUMBER_START.write(out);
      F_INTERNAL_ATTRIBUTES.write(out, cdhs[idx].getInternalAttributes());
      F_EXTERNAL_ATTRIBUTES.write(out, cdhs[idx].getExternalAttributes());
      F_OFFSET.write(out, cdhs[idx].getOffset());

      out.put(encodedFileNames[idx]);
      out.put(extraFields[idx]);
      out.put(comments[idx]);
    }

    return out.array();
  } catch (IOException e) {
    // This method feeds a Supplier-like contract that cannot throw checked exceptions.
    throw new IOExceptionWrapper(e);
  }
}
|
||||||
|
}
|
||||||
|
|
@ -0,0 +1,414 @@
|
||||||
|
/*
|
||||||
|
* Copyright (C) 2015 The Android Open Source Project
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package com.android.tools.build.apkzlib.zip;
|
||||||
|
|
||||||
|
import com.android.tools.build.apkzlib.zip.utils.MsDosDateTimeUtils;
|
||||||
|
import com.google.common.base.Verify;
|
||||||
|
import java.io.IOException;
|
||||||
|
import java.util.Arrays;
|
||||||
|
import java.util.concurrent.ExecutionException;
|
||||||
|
import java.util.concurrent.Future;
|
||||||
|
|
||||||
|
/**
 * The Central Directory Header contains information about files stored in the zip. Instances of
 * this class contain information for files that already are in the zip and, for which the data was
 * read from the Central Directory. But some instances of this class are used for new files. Because
 * instances of this class can refer to files not yet on the zip, some of the fields may not be
 * filled in, or may be filled in with default values.
 *
 * <p>Because compression decision is done lazily, some data is stored with futures.
 */
public class CentralDirectoryHeader implements Cloneable {

  /**
   * Default "version made by" field: upper byte needs to be 0 to set to MS-DOS compatibility. Lower
   * byte can be anything, really. We use 18 because aapt uses 17 :)
   */
  private static final int DEFAULT_VERSION_MADE_BY = 0x0018;

  /** Shared empty array used as the comment of headers that have no comment. */
  private static final byte[] EMPTY_COMMENT = new byte[0];

  /** Name of the file. */
  private final String name;

  /** CRC32 of the data. 0 if not yet computed. */
  private long crc32;

  /** Size of the file uncompressed. 0 if the file has no data. */
  private long uncompressedSize;

  /** Code of the program that made the zip. We actually don't care about this. */
  private long madeBy;

  /** General-purpose bit flag. */
  private GPFlags gpBit;

  /** Last modification time in MS-DOS format (see {@link MsDosDateTimeUtils#packTime(long)}). */
  private long lastModTime;

  /** Last modification date in MS-DOS format (see {@link MsDosDateTimeUtils#packDate(long)}). */
  private long lastModDate;

  /**
   * Extra data field contents. This field follows a specific structure according to the
   * specification.
   */
  private ExtraField extraField;

  /** File comment. */
  private byte[] comment;

  /** File internal attributes. */
  private long internalAttributes;

  /** File external attributes. */
  private long externalAttributes;

  /**
   * Offset in the file where the data is located. This will be -1 if the header corresponds to a
   * new file that is not yet written in the zip and, therefore, has no written data.
   */
  private long offset;

  /** Encoded file name. */
  private byte[] encodedFileName;

  /** Compress information that may not have been computed yet due to lazy compression. */
  private final Future<CentralDirectoryHeaderCompressInfo> compressInfo;

  /** The file this header belongs to. */
  private final ZFile file;

  /**
   * Creates data for a file, stamped with the current time and date as its modification time.
   *
   * @param name the file name
   * @param encodedFileName the encoded file name, this array will be owned by the header
   * @param uncompressedSize the uncompressed file size
   * @param compressInfo computation that defines the compression information
   * @param flags flags used in the entry
   * @param zFile the file this header belongs to
   */
  CentralDirectoryHeader(
      String name,
      byte[] encodedFileName,
      long uncompressedSize,
      Future<CentralDirectoryHeaderCompressInfo> compressInfo,
      GPFlags flags,
      ZFile zFile) {
    this(
        name,
        encodedFileName,
        uncompressedSize,
        compressInfo,
        flags,
        zFile,
        MsDosDateTimeUtils.packCurrentTime(),
        MsDosDateTimeUtils.packCurrentDate());
  }

  /**
   * Creates data for a file with an explicit modification time and date.
   *
   * @param name the file name
   * @param encodedFileName the encoded file name, this array will be owned by the header
   * @param uncompressedSize the uncompressed file size
   * @param compressInfo computation that defines the compression information
   * @param flags flags used in the entry
   * @param zFile the file this header belongs to
   * @param currentTime last modification time in MS-DOS format
   * @param currentDate last modification date in MS-DOS format
   */
  CentralDirectoryHeader(
      String name,
      byte[] encodedFileName,
      long uncompressedSize,
      Future<CentralDirectoryHeaderCompressInfo> compressInfo,
      GPFlags flags,
      ZFile zFile,
      long currentTime,
      long currentDate) {
    this.name = name;
    this.uncompressedSize = uncompressedSize;
    crc32 = 0;

    /*
     * Set sensible defaults for the rest.
     */
    madeBy = DEFAULT_VERSION_MADE_BY;

    gpBit = flags;
    lastModTime = currentTime;
    lastModDate = currentDate;
    extraField = ExtraField.EMPTY;
    comment = EMPTY_COMMENT;
    internalAttributes = 0;
    externalAttributes = 0;
    offset = -1;
    this.encodedFileName = encodedFileName;
    this.compressInfo = compressInfo;
    file = zFile;
  }

  /**
   * Obtains the name of the file.
   *
   * @return the name
   */
  public String getName() {
    return name;
  }

  /**
   * Obtains the size of the uncompressed file.
   *
   * @return the size of the file
   */
  public long getUncompressedSize() {
    return uncompressedSize;
  }

  /**
   * Obtains the CRC32 of the data.
   *
   * @return the CRC32, 0 if not yet computed
   */
  public long getCrc32() {
    return crc32;
  }

  /**
   * Sets the CRC32 of the data.
   *
   * @param crc32 the CRC 32
   */
  void setCrc32(long crc32) {
    this.crc32 = crc32;
  }

  /**
   * Obtains the code of the program that made the zip.
   *
   * @return the code
   */
  public long getMadeBy() {
    return madeBy;
  }

  /**
   * Sets the code of the program that made the zip.
   *
   * @param madeBy the code
   */
  void setMadeBy(long madeBy) {
    this.madeBy = madeBy;
  }

  /**
   * Obtains the general-purpose bit flag.
   *
   * @return the bit flag
   */
  public GPFlags getGpBit() {
    return gpBit;
  }

  /**
   * Obtains the last modification time of the entry.
   *
   * @return the last modification time in MS-DOS format (see {@link
   *     MsDosDateTimeUtils#packTime(long)})
   */
  public long getLastModTime() {
    return lastModTime;
  }

  /**
   * Sets the last modification time of the entry.
   *
   * @param lastModTime the last modification time in MS-DOS format (see {@link
   *     MsDosDateTimeUtils#packTime(long)})
   */
  void setLastModTime(long lastModTime) {
    this.lastModTime = lastModTime;
  }

  /**
   * Obtains the last modification date of the entry.
   *
   * @return the last modification date in MS-DOS format (see {@link
   *     MsDosDateTimeUtils#packDate(long)})
   */
  public long getLastModDate() {
    return lastModDate;
  }

  /**
   * Sets the last modification date of the entry.
   *
   * @param lastModDate the last modification date in MS-DOS format (see {@link
   *     MsDosDateTimeUtils#packDate(long)})
   */
  void setLastModDate(long lastModDate) {
    this.lastModDate = lastModDate;
  }

  /**
   * Obtains the data in the extra field.
   *
   * @return the data (returns an empty field if there is none)
   */
  public ExtraField getExtraField() {
    return extraField;
  }

  /**
   * Sets the data in the extra field and notifies the owning {@link ZFile} that the central
   * directory changed.
   *
   * @param extraField the data to set
   */
  public void setExtraField(ExtraField extraField) {
    setExtraFieldNoNotify(extraField);
    file.centralDirectoryChanged();
  }

  /**
   * Sets the data in the extra field, but does not notify {@link ZFile}. This method is invoked
   * when the {@link ZFile} knows the extra field is being set.
   *
   * @param extraField the data to set
   */
  void setExtraFieldNoNotify(ExtraField extraField) {
    this.extraField = extraField;
  }

  /**
   * Obtains the entry's comment.
   *
   * <p>NOTE(review): returns the internal array without copying -- callers must not mutate it.
   *
   * @return the comment (returns an empty array if there is no comment)
   */
  public byte[] getComment() {
    return comment;
  }

  /**
   * Sets the entry's comment.
   *
   * @param comment the comment; the array is owned by the header after this call
   */
  void setComment(byte[] comment) {
    this.comment = comment;
  }

  /**
   * Obtains the entry's internal attributes.
   *
   * @return the entry's internal attributes
   */
  public long getInternalAttributes() {
    return internalAttributes;
  }

  /**
   * Sets the entry's internal attributes.
   *
   * @param internalAttributes the entry's internal attributes
   */
  void setInternalAttributes(long internalAttributes) {
    this.internalAttributes = internalAttributes;
  }

  /**
   * Obtains the entry's external attributes.
   *
   * @return the entry's external attributes
   */
  public long getExternalAttributes() {
    return externalAttributes;
  }

  /**
   * Sets the entry's external attributes.
   *
   * @param externalAttributes the entry's external attributes
   */
  void setExternalAttributes(long externalAttributes) {
    this.externalAttributes = externalAttributes;
  }

  /**
   * Obtains the offset in the zip file where this entry's data is.
   *
   * @return the offset or {@code -1} if the file has no data in the zip and, therefore, data is
   *     stored in memory
   */
  public long getOffset() {
    return offset;
  }

  /**
   * Sets the offset in the zip file where this entry's data is.
   *
   * @param offset the offset or {@code -1} if the file is new and has no data in the zip yet
   */
  void setOffset(long offset) {
    this.offset = offset;
  }

  /**
   * Obtains the encoded file name.
   *
   * <p>NOTE(review): returns the internal array without copying -- callers must not mutate it.
   *
   * @return the encoded file name
   */
  public byte[] getEncodedFileName() {
    return encodedFileName;
  }

  /** Resets the deferred CRC flag in the GP flags. */
  void resetDeferredCrc() {
    /*
     * We actually create a new set of flags. Since the only information we care about is the
     * UTF-8 encoding, we'll just create a brand new object.
     */
    gpBit = GPFlags.make(gpBit.isUtf8FileName());
  }

  /**
   * Clones this header. Mutable byte arrays (comment, encoded file name) are deep-copied; the
   * extra field reference is shared -- presumably {@link ExtraField} is immutable, TODO confirm.
   *
   * @return the cloned header
   * @throws CloneNotSupportedException never thrown in practice ({@link Cloneable} is implemented)
   */
  @Override
  protected CentralDirectoryHeader clone() throws CloneNotSupportedException {
    CentralDirectoryHeader cdr = (CentralDirectoryHeader) super.clone();
    cdr.extraField = extraField;
    cdr.comment = Arrays.copyOf(comment, comment.length);
    cdr.encodedFileName = Arrays.copyOf(encodedFileName, encodedFileName.length);
    return cdr;
  }

  /**
   * Obtains the future with the compression information.
   *
   * @return the information
   */
  public Future<CentralDirectoryHeaderCompressInfo> getCompressionInfo() {
    return compressInfo;
  }

  /**
   * Equivalent to {@code getCompressionInfo().get()} but masking the possible exceptions and
   * guaranteeing non-{@code null} return.
   *
   * @return the result of the future
   * @throws IOException failed to get the information
   */
  public CentralDirectoryHeaderCompressInfo getCompressionInfoWithWait() throws IOException {
    try {
      CentralDirectoryHeaderCompressInfo info = getCompressionInfo().get();
      Verify.verifyNotNull(info, "info == null");
      return info;
    } catch (InterruptedException e) {
      // NOTE(review): the interrupt flag is not restored here -- consider
      // Thread.currentThread().interrupt() before wrapping; confirm callers' expectations.
      throw new IOException("Interrupted while waiting for compression information.", e);
    } catch (ExecutionException e) {
      throw new IOException("Execution of compression failed.", e);
    }
  }
}
|
||||||
|
|
@ -0,0 +1,110 @@
|
||||||
|
/*
|
||||||
|
* Copyright (C) 2016 The Android Open Source Project
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package com.android.tools.build.apkzlib.zip;
|
||||||
|
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Information stored in the {@link CentralDirectoryHeader} that is related to compression and may
|
||||||
|
* need to be computed lazily.
|
||||||
|
*/
|
||||||
|
public class CentralDirectoryHeaderCompressInfo {
|
||||||
|
|
||||||
|
/** Version of zip file that only supports stored files. */
|
||||||
|
public static final long VERSION_WITH_STORE_FILES_ONLY = 10L;
|
||||||
|
|
||||||
|
/** Version of zip file that only supports directories and deflated files. */
|
||||||
|
public static final long VERSION_WITH_DIRECTORIES_AND_DEFLATE = 20L;
|
||||||
|
|
||||||
|
/** Version of zip file that only supports ZIP64 format extensions */
|
||||||
|
public static final long VERSION_WITH_ZIP64_EXTENSIONS = 45L;
|
||||||
|
|
||||||
|
/** Version of zip file that uses central file encryption and version 2 of the Zip64 EOCD */
|
||||||
|
public static final long VERSION_WITH_CENTRAL_FILE_ENCRYPTION = 62L;
|
||||||
|
|
||||||
|
/** The compression method. */
|
||||||
|
private final CompressionMethod method;
|
||||||
|
|
||||||
|
/** Size of the file compressed. 0 if the file has no data. */
|
||||||
|
private final long compressedSize;
|
||||||
|
|
||||||
|
/** Version needed to extract the zip. */
|
||||||
|
private final long versionExtract;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Creates new compression information for the central directory header.
|
||||||
|
*
|
||||||
|
* @param method the compression method
|
||||||
|
* @param compressedSize the compressed size
|
||||||
|
* @param versionToExtract minimum version to extract (typically {@link
|
||||||
|
* #VERSION_WITH_STORE_FILES_ONLY} or {@link #VERSION_WITH_DIRECTORIES_AND_DEFLATE})
|
||||||
|
*/
|
||||||
|
public CentralDirectoryHeaderCompressInfo(
|
||||||
|
CompressionMethod method, long compressedSize, long versionToExtract) {
|
||||||
|
this.method = method;
|
||||||
|
this.compressedSize = compressedSize;
|
||||||
|
versionExtract = versionToExtract;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Creates new compression information for the central directory header.
|
||||||
|
*
|
||||||
|
* @param header the header this information relates to
|
||||||
|
* @param method the compression method
|
||||||
|
* @param compressedSize the compressed size
|
||||||
|
*/
|
||||||
|
public CentralDirectoryHeaderCompressInfo(
|
||||||
|
CentralDirectoryHeader header, CompressionMethod method, long compressedSize) {
|
||||||
|
this.method = method;
|
||||||
|
this.compressedSize = compressedSize;
|
||||||
|
|
||||||
|
if (header.getName().endsWith("/") || method == CompressionMethod.DEFLATE) {
|
||||||
|
/*
|
||||||
|
* Directories and compressed files only in version 2.0.
|
||||||
|
*/
|
||||||
|
versionExtract = VERSION_WITH_DIRECTORIES_AND_DEFLATE;
|
||||||
|
} else {
|
||||||
|
versionExtract = VERSION_WITH_STORE_FILES_ONLY;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Obtains the compression data size.
|
||||||
|
*
|
||||||
|
* @return the compressed data size
|
||||||
|
*/
|
||||||
|
public long getCompressedSize() {
|
||||||
|
return compressedSize;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Obtains the compression method.
|
||||||
|
*
|
||||||
|
* @return the compression method
|
||||||
|
*/
|
||||||
|
public CompressionMethod getMethod() {
|
||||||
|
return method;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Obtains the minimum version for extract.
|
||||||
|
*
|
||||||
|
* @return the minimum version
|
||||||
|
*/
|
||||||
|
long getVersionExtract() {
|
||||||
|
return versionExtract;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
@ -0,0 +1,57 @@
|
||||||
|
/*
|
||||||
|
* Copyright (C) 2015 The Android Open Source Project
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package com.android.tools.build.apkzlib.zip;
|
||||||
|
|
||||||
|
import javax.annotation.Nullable;
|
||||||
|
|
||||||
|
/** Enumeration with all known compression methods. */
|
||||||
|
public enum CompressionMethod {
|
||||||
|
/** STORE method: data is stored without any compression. */
|
||||||
|
STORE(0),
|
||||||
|
|
||||||
|
/** DEFLATE method: data is stored compressed using the DEFLATE algorithm. */
|
||||||
|
DEFLATE(8);
|
||||||
|
|
||||||
|
/** Code, within the zip file, that identifies this compression method. */
|
||||||
|
int methodCode;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Creates a new compression method.
|
||||||
|
*
|
||||||
|
* @param methodCode the code used in the zip file that identifies the compression method
|
||||||
|
*/
|
||||||
|
CompressionMethod(int methodCode) {
|
||||||
|
this.methodCode = methodCode;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Obtains the compression method that corresponds to the provided code.
|
||||||
|
*
|
||||||
|
* @param code the code
|
||||||
|
* @return the method or {@code null} if no method has the provided code
|
||||||
|
*/
|
||||||
|
@Nullable
|
||||||
|
static CompressionMethod fromCode(long code) {
|
||||||
|
for (CompressionMethod method : values()) {
|
||||||
|
if (method.methodCode == code) {
|
||||||
|
return method;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
@ -0,0 +1,74 @@
|
||||||
|
/*
|
||||||
|
* Copyright (C) 2016 The Android Open Source Project
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package com.android.tools.build.apkzlib.zip;
|
||||||
|
|
||||||
|
import com.android.tools.build.apkzlib.zip.utils.CloseableByteSource;
|
||||||
|
|
||||||
|
/** Result of compressing data. */
|
||||||
|
public class CompressionResult {
|
||||||
|
|
||||||
|
/** The compression method used. */
|
||||||
|
private final CompressionMethod compressionMethod;
|
||||||
|
|
||||||
|
/** The resulting data. */
|
||||||
|
private final CloseableByteSource source;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Size of the compressed source. Kept because {@code source.size()} can throw {@code
|
||||||
|
* IOException}.
|
||||||
|
*/
|
||||||
|
private final long mSize;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Creates a new compression result.
|
||||||
|
*
|
||||||
|
* @param source the data source
|
||||||
|
* @param method the compression method
|
||||||
|
*/
|
||||||
|
public CompressionResult(CloseableByteSource source, CompressionMethod method, long size) {
|
||||||
|
compressionMethod = method;
|
||||||
|
this.source = source;
|
||||||
|
mSize = size;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Obtains the compression method.
|
||||||
|
*
|
||||||
|
* @return the compression method
|
||||||
|
*/
|
||||||
|
public CompressionMethod getCompressionMethod() {
|
||||||
|
return compressionMethod;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Obtains the compressed data.
|
||||||
|
*
|
||||||
|
* @return the data, the resulting array should not be modified
|
||||||
|
*/
|
||||||
|
public CloseableByteSource getSource() {
|
||||||
|
return source;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Obtains the size of the compression result.
|
||||||
|
*
|
||||||
|
* @return the size
|
||||||
|
*/
|
||||||
|
public long getSize() {
|
||||||
|
return mSize;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
@ -0,0 +1,38 @@
|
||||||
|
/*
|
||||||
|
* Copyright (C) 2016 The Android Open Source Project
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package com.android.tools.build.apkzlib.zip;
|
||||||
|
|
||||||
|
import com.android.tools.build.apkzlib.bytestorage.ByteStorage;
|
||||||
|
import com.android.tools.build.apkzlib.zip.utils.CloseableByteSource;
|
||||||
|
import com.google.common.util.concurrent.ListenableFuture;
|
||||||
|
|
||||||
|
/**
 * A compressor is capable of, well, compressing data. Data is read from an {@code ByteSource}.
 * Compressors are asynchronous: compressing results in a {@code ListenableFuture} that will contain
 * the compression result.
 */
public interface Compressor {

  /**
   * Compresses an entry source.
   *
   * <p>This call may return before compression has finished; the work completes (or fails)
   * through the returned future.
   *
   * @param source the source to compress
   * @param storage a byte storage from where the compressor can obtain byte sources to work
   * @return a future that will eventually contain the compression result
   */
  ListenableFuture<CompressionResult> compress(CloseableByteSource source, ByteStorage storage);
}
|
||||||
|
|
@ -0,0 +1,49 @@
|
||||||
|
/*
|
||||||
|
* Copyright (C) 2015 The Android Open Source Project
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package com.android.tools.build.apkzlib.zip;
|
||||||
|
|
||||||
|
/**
 * Type of data descriptor that an entry has. Data descriptors are used if the CRC and sizing data
 * is not known when the data is being written and cannot be placed in the file's local header. In
 * those cases, after the file data itself, a data descriptor is placed after the entry's contents.
 *
 * <p>While the zip specification says the data descriptor should be used but it is optional. We
 * record also whether the data descriptor contained the 4-byte signature at the start of the block
 * or not.
 */
public enum DataDescriptorType {
  /** The entry has no data descriptor. */
  NO_DATA_DESCRIPTOR(0),

  /** The entry has a data descriptor that does not contain a signature. */
  DATA_DESCRIPTOR_WITHOUT_SIGNATURE(12),

  /** The entry has a data descriptor that contains a signature. */
  DATA_DESCRIPTOR_WITH_SIGNATURE(16);

  /**
   * The number of bytes the data descriptor spans. Final: enum constants are shared singletons,
   * so a public mutable field would allow global corruption of the constant.
   */
  public final int size;

  /**
   * Creates a new data descriptor.
   *
   * @param size the number of bytes the data descriptor spans
   */
  DataDescriptorType(int size) {
    this.size = size;
  }
}
|
||||||
|
|
@ -0,0 +1,136 @@
|
||||||
|
/*
|
||||||
|
* Copyright (C) 2016 The Android Open Source Project
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package com.android.tools.build.apkzlib.zip;
|
||||||
|
|
||||||
|
import static java.nio.charset.StandardCharsets.US_ASCII;
|
||||||
|
import static java.nio.charset.StandardCharsets.UTF_8;
|
||||||
|
|
||||||
|
import java.io.IOException;
|
||||||
|
import java.nio.ByteBuffer;
|
||||||
|
import java.nio.charset.CharacterCodingException;
|
||||||
|
import java.nio.charset.Charset;
|
||||||
|
import java.nio.charset.CodingErrorAction;
|
||||||
|
|
||||||
|
/** Utilities to encode and decode file names in zips. */
|
||||||
|
public class EncodeUtils {
|
||||||
|
|
||||||
|
/** Utility class: no constructor. */
|
||||||
|
private EncodeUtils() {
|
||||||
|
/*
|
||||||
|
* Nothing to do.
|
||||||
|
*/
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Decodes a file name.
|
||||||
|
*
|
||||||
|
* @param bytes the raw data buffer to read from
|
||||||
|
* @param length the number of bytes in the raw data buffer containing the string to decode
|
||||||
|
* @param flags the zip entry flags
|
||||||
|
* @return the decode file name
|
||||||
|
*/
|
||||||
|
public static String decode(ByteBuffer bytes, int length, GPFlags flags) throws IOException {
|
||||||
|
if (bytes.remaining() < length) {
|
||||||
|
throw new IOException(
|
||||||
|
"Only "
|
||||||
|
+ bytes.remaining()
|
||||||
|
+ " bytes exist in the buffer, but "
|
||||||
|
+ "length is "
|
||||||
|
+ length
|
||||||
|
+ ".");
|
||||||
|
}
|
||||||
|
|
||||||
|
byte[] stringBytes = new byte[length];
|
||||||
|
bytes.get(stringBytes);
|
||||||
|
return decode(stringBytes, flags);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Decodes a file name.
|
||||||
|
*
|
||||||
|
* @param data the raw data
|
||||||
|
* @param flags the zip entry flags
|
||||||
|
* @return the decode file name
|
||||||
|
*/
|
||||||
|
public static String decode(byte[] data, GPFlags flags) {
|
||||||
|
return decode(data, flagsCharset(flags));
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Decodes a file name.
|
||||||
|
*
|
||||||
|
* @param data the raw data
|
||||||
|
* @param charset the charset to use
|
||||||
|
* @return the decode file name
|
||||||
|
*/
|
||||||
|
private static String decode(byte[] data, Charset charset) {
|
||||||
|
try {
|
||||||
|
return charset
|
||||||
|
.newDecoder()
|
||||||
|
.onMalformedInput(CodingErrorAction.REPORT)
|
||||||
|
.decode(ByteBuffer.wrap(data))
|
||||||
|
.toString();
|
||||||
|
} catch (CharacterCodingException e) {
|
||||||
|
// If we're trying to decode ASCII, try UTF-8. Otherwise, revert to the default
|
||||||
|
// behavior (usually replacing invalid characters).
|
||||||
|
if (charset.equals(US_ASCII)) {
|
||||||
|
return decode(data, UTF_8);
|
||||||
|
} else {
|
||||||
|
return charset.decode(ByteBuffer.wrap(data)).toString();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Encodes a file name.
|
||||||
|
*
|
||||||
|
* @param name the name to encode
|
||||||
|
* @param flags the zip entry flags
|
||||||
|
* @return the encoded file name
|
||||||
|
*/
|
||||||
|
public static byte[] encode(String name, GPFlags flags) {
|
||||||
|
Charset charset = flagsCharset(flags);
|
||||||
|
ByteBuffer bytes = charset.encode(name);
|
||||||
|
byte[] result = new byte[bytes.remaining()];
|
||||||
|
bytes.get(result);
|
||||||
|
return result;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Obtains the charset to encode and decode zip entries, given a set of flags.
|
||||||
|
*
|
||||||
|
* @param flags the flags
|
||||||
|
* @return the charset to use
|
||||||
|
*/
|
||||||
|
private static Charset flagsCharset(GPFlags flags) {
|
||||||
|
if (flags.isUtf8FileName()) {
|
||||||
|
return UTF_8;
|
||||||
|
} else {
|
||||||
|
return US_ASCII;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Checks if some text may be encoded using ASCII.
|
||||||
|
*
|
||||||
|
* @param text the text to check
|
||||||
|
* @return can it be encoded using ASCII?
|
||||||
|
*/
|
||||||
|
public static boolean canAsciiEncode(String text) {
|
||||||
|
return US_ASCII.newEncoder().canEncode(text);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
@ -0,0 +1,277 @@
|
||||||
|
/*
|
||||||
|
* Copyright (C) 2015 The Android Open Source Project
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package com.android.tools.build.apkzlib.zip;
|
||||||
|
|
||||||
|
import com.android.tools.build.apkzlib.utils.CachedSupplier;
|
||||||
|
import com.android.tools.build.apkzlib.utils.IOExceptionWrapper;
|
||||||
|
import com.google.common.annotations.VisibleForTesting;
|
||||||
|
import com.google.common.base.Preconditions;
|
||||||
|
import com.google.common.base.Verify;
|
||||||
|
import com.google.common.primitives.Ints;
|
||||||
|
import java.io.IOException;
|
||||||
|
import java.io.UncheckedIOException;
|
||||||
|
import java.nio.ByteBuffer;
|
||||||
|
|
||||||
|
/**
 * End Of Central Directory record in a zip file.
 *
 * <p>Instances are immutable once constructed; the byte representation is computed lazily and
 * cached.
 */
class Eocd {

  /** Max total records that can be specified by the standard EOCD. */
  static final long MAX_TOTAL_RECORDS = 0xFFFFL;

  /** Max size of the Central Directory that can be specified by the standard EOCD. */
  static final long MAX_CD_SIZE = 0xFFFFFFFFL;

  /** Max offset of the Central Directory that can be specified by the standard EOCD. */
  static final long MAX_CD_OFFSET = 0xFFFFFFFFL;

  /** Field in the record: the record signature, fixed at this value by the specification. */
  private static final ZipField.F4 F_SIGNATURE = new ZipField.F4(0, 0x06054b50, "EOCD signature");

  /**
   * Field in the record: the number of the disk where the EOCD is located. It has to be zero
   * because we do not support multi-file archives.
   */
  private static final ZipField.F2 F_NUMBER_OF_DISK =
      new ZipField.F2(F_SIGNATURE.endOffset(), 0, "Number of this disk");

  /**
   * Field in the record: the number of the disk where the Central Directory starts. Has to be zero
   * because we do not support multi-file archives.
   */
  private static final ZipField.F2 F_DISK_CD_START =
      new ZipField.F2(F_NUMBER_OF_DISK.endOffset(), 0, "Disk where CD starts");

  /**
   * Field in the record: the number of entries in the Central Directory on this disk. Because we do
   * not support multi-file archives, this is the same as {@link #F_RECORDS_TOTAL}.
   */
  private static final ZipField.F2 F_RECORDS_DISK =
      new ZipField.F2(
          F_DISK_CD_START.endOffset(), "Record on disk count", new ZipFieldInvariantNonNegative());

  /**
   * Field in the record: the total number of entries in the Central Directory. This value will be
   * {@link #MAX_TOTAL_RECORDS} if the file is in the Zip64 format, and the Central Directory holds
   * at least {@link #MAX_TOTAL_RECORDS} entries.
   */
  private static final ZipField.F2 F_RECORDS_TOTAL =
      new ZipField.F2(
          F_RECORDS_DISK.endOffset(),
          "Total records",
          new ZipFieldInvariantNonNegative(),
          new ZipFieldInvariantMaxValue(Integer.MAX_VALUE));

  /**
   * Field in the record: number of bytes of the Central Directory. This is not private because it
   * is required in unit tests. This value will be {@link #MAX_CD_SIZE} if the file is in the Zip64
   * format, and the Central Directory is at least {@link #MAX_CD_SIZE} bytes.
   */
  @VisibleForTesting
  static final ZipField.F4 F_CD_SIZE =
      new ZipField.F4(
          F_RECORDS_TOTAL.endOffset(), "Directory size", new ZipFieldInvariantNonNegative());

  /**
   * Field in the record: offset, from the archive start, where the Central Directory starts. This
   * is not private because it is required in unit tests. This value will be {@link #MAX_CD_OFFSET}
   * if the file is in the Zip64 format, and the Central Directory is at least
   * {@link #MAX_CD_OFFSET} bytes.
   */
  @VisibleForTesting
  static final ZipField.F4 F_CD_OFFSET =
      new ZipField.F4(
          F_CD_SIZE.endOffset(), "Directory offset", new ZipFieldInvariantNonNegative());

  /**
   * Field in the record: number of bytes of the file comment (located at the end of the EOCD
   * record).
   */
  private static final ZipField.F2 F_COMMENT_SIZE =
      new ZipField.F2(
          F_CD_OFFSET.endOffset(), "File comment size", new ZipFieldInvariantNonNegative());

  /** Number of entries in the central directory. */
  private final long totalRecords;

  /** Offset from the beginning of the archive where the Central Directory is located. */
  private final long directoryOffset;

  /** Number of bytes of the Central Directory. */
  private final long directorySize;

  /** Contents of the EOCD comment. */
  private final byte[] comment;

  /** Supplier of the byte representation of the EOCD; caches the result of the first call. */
  private final CachedSupplier<byte[]> byteSupplier;

  /**
   * Creates a new EOCD, reading it from a byte source. This method will parse the byte source and
   * obtain the EOCD. It will check that the byte source starts with the EOCD signature.
   *
   * @param bytes the byte buffer with the EOCD data; when this method finishes, the byte buffer's
   *     position will have moved to the end of the EOCD
   * @throws IOException failed to read information or the EOCD data is corrupt or invalid
   */
  Eocd(ByteBuffer bytes) throws IOException {

    /*
     * Read the EOCD record. Fields must be read in declaration order: each read advances the
     * buffer position past that field.
     */
    F_SIGNATURE.verify(bytes);
    F_NUMBER_OF_DISK.verify(bytes);
    F_DISK_CD_START.verify(bytes);
    long totalRecords1 = F_RECORDS_DISK.read(bytes);
    long totalRecords2 = F_RECORDS_TOTAL.read(bytes);
    long directorySize = F_CD_SIZE.read(bytes);
    long directoryOffset = F_CD_OFFSET.read(bytes);
    int commentSize = Ints.checkedCast(F_COMMENT_SIZE.read(bytes));

    /*
     * Some sanity checks. The per-disk and total record counts must agree because multi-disk
     * archives are not supported.
     */
    if (totalRecords1 != totalRecords2) {
      throw new IOException(
          "Zip states records split in multiple disks, which is not " + "supported.");
    }

    Verify.verify(totalRecords1 <= Integer.MAX_VALUE);

    totalRecords = Ints.checkedCast(totalRecords1);
    this.directorySize = directorySize;
    this.directoryOffset = directoryOffset;

    if (bytes.remaining() < commentSize) {
      throw new IOException(
          "Corrupt EOCD record: not enough data for comment (comment "
              + "size is "
              + commentSize
              + ").");
    }

    comment = new byte[commentSize];
    bytes.get(comment);
    byteSupplier = new CachedSupplier<>(this::computeByteRepresentation);
  }

  /**
   * Creates a new EOCD. This is used when generating an EOCD for an Central Directory that has just
   * been generated. The EOCD will be generated without any comment.
   *
   * @param totalRecords total number of records in the directory
   * @param directoryOffset offset, since beginning of archive, where the Central Directory is
   *     located
   * @param directorySize number of bytes of the Central Directory
   * @param comment the EOCD comment; NOTE(review): stored without a defensive copy, so the caller
   *     must not modify the array afterwards
   */
  Eocd(long totalRecords, long directoryOffset, long directorySize, byte[] comment) {
    Preconditions.checkArgument(totalRecords >= 0, "totalRecords < 0");
    Preconditions.checkArgument(directoryOffset >= 0, "directoryOffset < 0");
    Preconditions.checkArgument(directorySize >= 0, "directorySize < 0");

    this.totalRecords = totalRecords;
    this.directoryOffset = directoryOffset;
    this.directorySize = directorySize;
    this.comment = comment;
    byteSupplier = new CachedSupplier<>(this::computeByteRepresentation);
  }

  /**
   * Obtains the number of records in the Central Directory.
   *
   * @return the number of records
   */
  long getTotalRecords() {
    return totalRecords;
  }

  /**
   * Obtains the offset since the beginning of the zip archive where the Central Directory is
   * located.
   *
   * @return the offset where the Central Directory is located
   */
  long getDirectoryOffset() {
    return directoryOffset;
  }

  /**
   * Obtains the size of the Central Directory.
   *
   * @return the number of bytes that make up the Central Directory
   */
  long getDirectorySize() {
    return directorySize;
  }

  /**
   * Obtains the size of the EOCD.
   *
   * @return the size, in bytes, of the EOCD: the fixed-size fields plus the variable-length comment
   */
  long getEocdSize() {
    return (long) F_COMMENT_SIZE.endOffset() + comment.length;
  }

  /**
   * Generates the EOCD data.
   *
   * @return a byte representation of the EOCD that has exactly {@link #getEocdSize()} bytes
   * @throws IOException failed to generate the EOCD data
   */
  byte[] toBytes() throws IOException {
    return byteSupplier.get();
  }

  /**
   * Obtains the comment in the EOCD.
   *
   * @return a copy of the comment exactly as it is represented in the file (no encoding conversion
   *     is done); modifying the returned array does not affect this EOCD
   */
  byte[] getComment() {
    byte[] commentCopy = new byte[comment.length];
    System.arraycopy(comment, 0, commentCopy, 0, comment.length);
    return commentCopy;
  }

  /**
   * Computes the byte representation of the EOCD. Fields are written in the same order as they are
   * declared (and read), so offsets line up with the {@code F_*} field definitions.
   *
   * @return a byte representation of the EOCD that has exactly {@link #getEocdSize()} bytes
   * @throws UncheckedIOException failed to generate the EOCD data
   */
  private byte[] computeByteRepresentation() {
    ByteBuffer out = ByteBuffer.allocate(F_COMMENT_SIZE.endOffset() + comment.length);

    try {
      F_SIGNATURE.write(out);
      F_NUMBER_OF_DISK.write(out);
      F_DISK_CD_START.write(out);
      F_RECORDS_DISK.write(out, totalRecords);
      F_RECORDS_TOTAL.write(out, totalRecords);
      F_CD_SIZE.write(out, directorySize);
      F_CD_OFFSET.write(out, directoryOffset);
      F_COMMENT_SIZE.write(out, comment.length);
      out.put(comment);

      return out.array();
    } catch (IOException e) {
      // Wrap: this runs inside a Supplier, which cannot throw checked exceptions.
      throw new IOExceptionWrapper(e);
    }
  }
}
|
||||||
|
|
@ -0,0 +1,696 @@
|
||||||
|
/*
|
||||||
|
* Copyright (C) 2018 The Android Open Source Project
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package com.android.tools.build.apkzlib.zip;
|
||||||
|
|
||||||
|
import com.android.tools.build.apkzlib.utils.IOExceptionWrapper;
|
||||||
|
import com.android.tools.build.apkzlib.zip.utils.LittleEndianUtils;
|
||||||
|
import com.google.common.annotations.VisibleForTesting;
|
||||||
|
import com.google.common.base.Preconditions;
|
||||||
|
import com.google.common.base.Verify;
|
||||||
|
import com.google.common.primitives.Ints;
|
||||||
|
import java.io.IOException;
|
||||||
|
import java.nio.ByteBuffer;
|
||||||
|
import javax.annotation.Nullable;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* The collection of all data stored in all End of Central Directory records in the zip file. The
|
||||||
|
* {@code EOCDGroup} is meant to collect and manage all the information about the {@link Eocd},
|
||||||
|
* {@link Zip64EocdLocator}, and the {@link Zip64Eocd} in one place.
|
||||||
|
*/
|
||||||
|
public class EocdGroup {
|
||||||
|
|
||||||
|
/** Minimum size the EOCD can have. */
|
||||||
|
private static final int MIN_EOCD_SIZE = 22;
|
||||||
|
|
||||||
|
/** Maximum size for the EOCD. */
|
||||||
|
private static final int MAX_EOCD_COMMENT_SIZE = 65535;
|
||||||
|
|
||||||
|
/** How many bytes to look back from the end of the file to look for the EOCD signature. */
|
||||||
|
private static final int LAST_BYTES_TO_READ = MIN_EOCD_SIZE + MAX_EOCD_COMMENT_SIZE;
|
||||||
|
|
||||||
|
/** Signature of the Zip64 EOCD locator record. */
|
||||||
|
private static final int ZIP64_EOCD_LOCATOR_SIGNATURE = 0x07064b50;
|
||||||
|
|
||||||
|
/** Signature of the EOCD record. */
|
||||||
|
private static final long EOCD_SIGNATURE = 0x06054b50;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* The EOCD entry. Will be {@code null} if there is no EOCD (because the zip is new) or the one
|
||||||
|
* that exists on disk is no longer valid (because the zip has been changed).
|
||||||
|
*
|
||||||
|
* <p>If the EOCD is deleted because the zip has been changed and the old EOCD was no longer
|
||||||
|
* valid, then {@link #eocdComment} will contain the comment saved from the EOCD.
|
||||||
|
*/
|
||||||
|
@Nullable
|
||||||
|
private FileUseMapEntry<Eocd> eocdEntry;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* The EOCD locator entry. Will be {@code null} if there is no EOCD (because the zip is new),
|
||||||
|
* the EOCD on disk is no longer valid (because the zip has been changed), or the zip file is not
|
||||||
|
* in Zip64 format (There are no values in the EOCD that overflow or any files with Zip64
|
||||||
|
* extended information.)
|
||||||
|
*
|
||||||
|
* <p> If this value is {@code nonnull} then the EOCD exists and is in Zip64 format (<i>i.e.</i>
|
||||||
|
* both {@link #eocdEntry} and {@link #eocd64Entry} will be {@code nonnull}).
|
||||||
|
*/
|
||||||
|
@Nullable
|
||||||
|
private FileUseMapEntry<Zip64EocdLocator> eocd64Locator;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* The Zip64 EOCD entry. Will be {@code null} if there is no EOCD (because the zip is new),
|
||||||
|
* the EOCD on disk is no longer valid (because the zip has been changed), or the zip file is not
|
||||||
|
* in Zip64 format (There are no values in the EOCD that overflow or any files with Zip64
|
||||||
|
* extended information.)
|
||||||
|
*
|
||||||
|
* <p> If this value is {@code nonnull} then the EOCD exists and is in Zip64 format (<i>i.e.</i>
|
||||||
|
* both {@link #eocdEntry} and {@link #eocd64Locator} will be {@code nonnull}).
|
||||||
|
*/
|
||||||
|
@Nullable
|
||||||
|
private FileUseMapEntry<Zip64Eocd> eocd64Entry;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* This field contains the comment in the zip's EOCD if there is no in-memory EOCD structure. This
|
||||||
|
* may happen, for example, if the zip has been changed and the Central Directory and EOCD have
|
||||||
|
* been deleted (in-memory). In that case, this field will save the comment to place on the EOCD
|
||||||
|
* once it is created.
|
||||||
|
*
|
||||||
|
* <p>This field will only be non-{@code null} if there is no in-memory EOCD structure
|
||||||
|
* (<i>i.e.</i>, {@link #eocdEntry} is {@code null}, If there is an {@link #eocdEntry}, then the
|
||||||
|
* comment will be there instead of being in this field.
|
||||||
|
*/
|
||||||
|
@Nullable
|
||||||
|
private byte[] eocdComment;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* This field contains the extensible data sector in the zip's Zip64 EOCD if there is no EOCD
|
||||||
|
* in-memory. This may happen if the zip has been modified and the Central Directory and EOCD have
|
||||||
|
* been deleted (in-memory). In that case, this field will save the data sector to place in the
|
||||||
|
* Zip64 EOCD once it is created.
|
||||||
|
*
|
||||||
|
* <p>This field will only be non-{@code null} if there is no in-memory EOCD structure
|
||||||
|
* (<i>i.e.</i>, {@link #eocdEntry} is {@code null}, If there is an {@link #eocdEntry}, then the
|
||||||
|
* data sector will be in the {@link #eocd64Entry} instead of being in this field.
|
||||||
|
*/
|
||||||
|
@Nullable
|
||||||
|
private Zip64ExtensibleDataSector eocdDataSector;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Specifies whether the Zip64 Eocd will be in Version 2 or Version 1 format when it is
|
||||||
|
* constructed.
|
||||||
|
*/
|
||||||
|
private boolean useVersion2Header;
|
||||||
|
|
||||||
|
/** The zip file to which this EOCD record belongs. */
|
||||||
|
private final ZFile file;
|
||||||
|
|
||||||
|
/** The in-memory map of the pieces of the zip-file. */
|
||||||
|
private final FileUseMap map;
|
||||||
|
|
||||||
|
/** The zip file's log. */
|
||||||
|
private final VerifyLog verifyLog;
|
||||||
|
|
||||||
|
  /**
   * Constructs an empty EOCD group, which will have no in-memory EOCD structure: all entries start
   * {@code null}, with an empty comment and a fresh (empty) Zip64 extensible data sector, and the
   * version 1 Zip64 header format selected.
   *
   * @param file The zip file to which this EOCD record belongs.
   * @param map The in-memory map of the zip file.
   */
  EocdGroup(ZFile file, FileUseMap map) {

    eocd64Entry = null;
    eocd64Locator = null;
    eocdEntry = null;
    eocdComment = new byte[0];
    eocdDataSector = new Zip64ExtensibleDataSector();
    this.file = file;
    this.map = map;
    this.verifyLog = file.getVerifyLog();
    useVersion2Header = false;
  }
|
||||||
|
|
||||||
|
/**
 * Attempts to read the EOCD record into the {@link EocdGroup} from disk specified by
 * {@link #file}. It will populate the in-memory EOCD structure (<i>i.e.</i> {@link #eocdEntry}),
 * including the Zip64 EOCD record and locator if applicable.
 *
 * @param fileLength The length of the file on disk, used to help find the EOCD record.
 * @throws IOException Failed to read the EOCD.
 */
void readRecord(long fileLength) throws IOException {
  /*
   * Read the last part of the zip into memory. If we don't find the EOCD signature by then,
   * the file is corrupt.
   */
  int lastToRead = LAST_BYTES_TO_READ;
  if (lastToRead > fileLength) {
    lastToRead = Ints.checkedCast(fileLength);
  }

  byte[] last = new byte[lastToRead];
  file.directFullyRead(fileLength - lastToRead, last);

  /*
   * Start endIdx at the first possible location where the signature can be located and then
   * move backwards. Because the EOCD must have at least MIN_EOCD size, the first byte of the
   * signature (and first byte of the EOCD) must be located at last.length - MIN_EOCD_SIZE.
   *
   * Because the EOCD signature may exist in the file comment, when we find a signature we
   * will try to read the Eocd. If we fail, we continue searching for the signature. However,
   * we will keep the last exception in case we don't find any signature.
   */
  Eocd eocd = null;
  int foundEocdSignatureIdx = -1;
  IOException errorFindingSignature = null;
  long eocdStart = -1;

  for (int endIdx = last.length - MIN_EOCD_SIZE;
      endIdx >= 0 && foundEocdSignatureIdx == -1;
      endIdx--) {

    // Signatures are 4 bytes, stored little-endian.
    ByteBuffer potentialLocator = ByteBuffer.wrap(last, endIdx, 4);
    if (LittleEndianUtils.readUnsigned4Le(potentialLocator) == EOCD_SIGNATURE) {

      /*
       * We found a signature. Try to read the EOCD record.
       */
      foundEocdSignatureIdx = endIdx;
      ByteBuffer eocdBytes =
          ByteBuffer.wrap(last, foundEocdSignatureIdx, last.length - foundEocdSignatureIdx);

      try {
        eocd = new Eocd(eocdBytes);
        // Translate the in-buffer index back into an absolute file offset.
        eocdStart = fileLength - lastToRead + foundEocdSignatureIdx;

        /*
         * Make sure the EOCD takes the whole file up to the end. Log an error if it
         * doesn't.
         */
        if (eocdStart + eocd.getEocdSize() != fileLength) {
          verifyLog.log(
              "EOCD starts at "
                  + eocdStart
                  + " and has "
                  + eocd.getEocdSize()
                  + " bytes, but file ends at "
                  + fileLength
                  + ".");
        }
      } catch (IOException e) {
        if (errorFindingSignature != null) {
          e.addSuppressed(errorFindingSignature);
        }

        // Remember the most recent failure and resume scanning backwards.
        errorFindingSignature = e;
        foundEocdSignatureIdx = -1;
        eocd = null;
      }
    }
  }

  if (foundEocdSignatureIdx == -1) {
    throw new IOException(
        "EOCD signature not found in the last " + lastToRead + " bytes of the file.",
        errorFindingSignature);
  }

  Verify.verify(eocdStart >= 0);
  eocdEntry = map.add(eocdStart, eocdStart + eocd.getEocdSize(), eocd);

  /*
   * Look for the Zip64 central directory locator. If we find it, then this file is a Zip64
   * file and we need to read both the Zip64 EOCD locator and Zip64 EOCD
   */
  long zip64LocatorStart = eocdStart - Zip64EocdLocator.LOCATOR_SIZE;
  if (zip64LocatorStart >= 0) {
    byte[] possibleZip64Locator = new byte[Zip64EocdLocator.LOCATOR_SIZE];
    file.directFullyRead(zip64LocatorStart, possibleZip64Locator);
    if (LittleEndianUtils.readUnsigned4Le(ByteBuffer.wrap(possibleZip64Locator))
        == ZIP64_EOCD_LOCATOR_SIGNATURE) {

      /* found the locator. Read it into memory. */
      Zip64EocdLocator locator = new Zip64EocdLocator(ByteBuffer.wrap(possibleZip64Locator));
      eocd64Locator = map.add(
          zip64LocatorStart, zip64LocatorStart + locator.getSize(), locator);

      /* Find the size of the Zip64 EOCD by reading its size field */
      byte[] zip64EocdSizeHolder = new byte[8];
      file.directFullyRead(
          locator.getZ64EocdOffset() + Zip64Eocd.SIZE_OFFSET, zip64EocdSizeHolder);
      long zip64EocdSize =
          LittleEndianUtils.readUnsigned8Le(ByteBuffer.wrap(zip64EocdSizeHolder))
              + Zip64Eocd.TRUE_SIZE_DIFFERENCE;

      /* read the Zip64 EOCD into memory */
      byte[] zip64EocdBytes = new byte[Ints.checkedCast(zip64EocdSize)];
      file.directFullyRead(locator.getZ64EocdOffset(), zip64EocdBytes);
      Zip64Eocd zip64Eocd = new Zip64Eocd(ByteBuffer.wrap(zip64EocdBytes));
      // A "version needed to extract" at or above the central-file-encryption version
      // implies the record was written with the Version 2 Zip64 header.
      useVersion2Header =
          zip64Eocd.getVersionToExtract()
              >= CentralDirectoryHeaderCompressInfo.VERSION_WITH_CENTRAL_FILE_ENCRYPTION;

      long zip64EocdEnd = locator.getZ64EocdOffset() + zip64EocdSize;
      if (zip64EocdEnd != zip64LocatorStart) {
        String msg =
            "Zip64 EOCD record is stored in ["
                + locator.getZ64EocdOffset()
                + " - "
                + zip64EocdEnd
                + "] and EOCD starts at "
                + zip64LocatorStart
                + ".";

        /*
         * If there is an empty space between the Zip64 EOCD and the EOCD locator, we proceed
         * logging an error. If the Zip64 EOCD ends after the start of the EOCD locator (and
         * therefore, they overlap), throw an exception.
         */
        if (zip64EocdEnd > zip64LocatorStart) {
          throw new IOException(msg);
        } else {
          verifyLog.log(msg);
        }
      }

      eocd64Entry = map.add(
          locator.getZ64EocdOffset(), zip64EocdEnd, zip64Eocd);
    }
  }

}
|
||||||
|
|
||||||
|
/**
 * Computes the EOCD record from the given Central Directory entry in memory. This will populate
 * the EOCD in-memory and possibly the Zip64 EOCD and Locator if applicable.
 *
 * @param directoryEntry The entry to create the EOCD record from.
 * @param extraDirectoryOffset The offset between the last local entry and the Central Directory.
 *     This will be preserved by the EOCD if the Central Directory is empty.
 * @throws IOException Failed to create the EOCD record.
 */
void computeRecord(
    @Nullable FileUseMapEntry<CentralDirectory> directoryEntry,
    long extraDirectoryOffset) throws IOException {

  long dirStart;
  long dirSize;
  long dirNumEntries;

  if (directoryEntry != null) {
    dirStart = directoryEntry.getStart();
    dirSize = directoryEntry.getSize();
    dirNumEntries = directoryEntry.getStore().getEntries().size();
  } else {
    // if we do not have a directory, then we must leave any required offset.
    dirStart = extraDirectoryOffset;
    dirSize = 0;
    dirNumEntries = 0;
  }

  /*
   * We need a Zip64 EOCD if any value overflows or if Zip64 file extensions are used as stated
   * in the Zip Specification.
   */
  boolean useZip64Eocd =
      dirStart > Eocd.MAX_CD_OFFSET ||
      dirSize > Eocd.MAX_CD_SIZE ||
      dirNumEntries > Eocd.MAX_TOTAL_RECORDS ||
      (directoryEntry != null && directoryEntry.getStore().containsZip64Files());

  /* construct the Zip64 EOCD and locator first, as they come before the standard EOCD */
  if (useZip64Eocd) {
    Verify.verify(eocdDataSector != null);
    Zip64Eocd zip64Eocd =
        new Zip64Eocd(dirNumEntries, dirStart, dirSize, useVersion2Header, eocdDataSector);
    // The data sector is now owned by the record; deleteRecord() recovers it.
    eocdDataSector = null;
    byte[] zip64EocdBytes = zip64Eocd.toBytes();
    // Place the record at the current end of the map, then grow the map to fit it.
    long zip64Offset = map.size();
    map.extend(zip64Offset + zip64EocdBytes.length);
    eocd64Entry = map.add(zip64Offset, zip64Offset + zip64EocdBytes.length, zip64Eocd);

    Zip64EocdLocator locator = new Zip64EocdLocator(eocd64Entry.getStart());
    byte[] locatorBytes = locator.toBytes();
    long locatorOffset = map.size();
    map.extend(locatorOffset + locatorBytes.length);
    eocd64Locator = map.add(locatorOffset, locatorOffset + locatorBytes.length, locator);
  }

  /* add the EOCD to the end of the file */
  Verify.verify(eocdComment != null);
  // Overflowed values are clamped to their max sentinel, signalling "see the Zip64 EOCD".
  Eocd eocd = new Eocd(
      Math.min(dirNumEntries, Eocd.MAX_TOTAL_RECORDS),
      Math.min(dirStart, Eocd.MAX_CD_OFFSET),
      Math.min(dirSize, Eocd.MAX_CD_SIZE),
      eocdComment);
  // The comment is now owned by the record; deleteRecord() recovers it.
  eocdComment = null;
  byte[] eocdBytes = eocd.toBytes();
  long eocdOffset = map.size();
  map.extend(eocdOffset + eocdBytes.length);
  eocdEntry = map.add(eocdOffset, eocdOffset + eocdBytes.length, eocd);
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Writes the entire EOCD record to the end of the file. The EOCDGroup must <i>not</i> be empty
|
||||||
|
* ({@link #isEmpty()}) by being populated by a call to
|
||||||
|
* {@link #computeRecord(FileUseMapEntry, long)}, and the Central Directory must already be
|
||||||
|
* written to the file. If the CentralDirectory has not written, then {@link #file} should have
|
||||||
|
* no entries.
|
||||||
|
*
|
||||||
|
* @throws IOException Failed to write the EOCD record.
|
||||||
|
*/
|
||||||
|
void appendToFile() throws IOException {
|
||||||
|
Preconditions.checkNotNull(eocdEntry, "eocdEntry == null");
|
||||||
|
|
||||||
|
if (eocd64Entry != null) {
|
||||||
|
Zip64Eocd zip64Eocd = eocd64Entry.getStore();
|
||||||
|
Preconditions.checkNotNull(zip64Eocd);
|
||||||
|
Zip64EocdLocator locator = eocd64Locator.getStore();
|
||||||
|
Preconditions.checkNotNull(locator);
|
||||||
|
|
||||||
|
file.directWrite(eocd64Entry.getStart(), zip64Eocd.toBytes());
|
||||||
|
file.directWrite(eocd64Locator.getStart(), locator.toBytes());
|
||||||
|
}
|
||||||
|
|
||||||
|
Eocd eocd = eocdEntry.getStore();
|
||||||
|
Preconditions.checkNotNull(eocd, "eocd == null");
|
||||||
|
|
||||||
|
byte[] eocdBytes = eocd.toBytes();
|
||||||
|
long eocdOffset = eocdEntry.getStart();
|
||||||
|
|
||||||
|
file.directWrite(eocdOffset, eocdBytes);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Obtains the byte array representation of the EOCD. The EOCD must have already been computed for
|
||||||
|
* this method to be invoked.
|
||||||
|
*
|
||||||
|
* @return The byte representation of the EOCD.
|
||||||
|
* @throws IOException Failed to obtain the byte representation of the EOCD.
|
||||||
|
*/
|
||||||
|
byte[] getEocdBytes() throws IOException {
|
||||||
|
Preconditions.checkNotNull(eocdEntry, "eocdEntry == null");
|
||||||
|
|
||||||
|
Eocd eocd = eocdEntry.getStore();
|
||||||
|
Preconditions.checkNotNull(eocd, "eocd == null");
|
||||||
|
|
||||||
|
return eocd.toBytes();
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Obtains the byte array representation of the Zip64 EOCD Locator. The EOCD record must already
|
||||||
|
* have been computed for this method to be invoked.
|
||||||
|
*
|
||||||
|
* @return The byte representation of the Zip64 EOCD Locator, or null if the EOCD record is not
|
||||||
|
* in Zip64 format.
|
||||||
|
* @throws IOException Failed to obtain the byte representation of the EOCD Locator.
|
||||||
|
*/
|
||||||
|
@VisibleForTesting
|
||||||
|
@Nullable
|
||||||
|
byte[] getEocdLocatorBytes() throws IOException {
|
||||||
|
Preconditions.checkNotNull(eocdEntry);
|
||||||
|
|
||||||
|
if (eocd64Locator == null) {
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
|
||||||
|
return eocd64Locator.getStore().toBytes();
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Obtains the byte array representation of the Zip64 EOCD. The EOCD record must already
|
||||||
|
* have been computed for this method to be invoked.
|
||||||
|
*
|
||||||
|
* @return The byte representation of the Zip64 EOCD, or null if the EOCD record is not
|
||||||
|
* in Zip64 format.
|
||||||
|
* @throws IOException Failed to obtain the byte representation of the Zip64 EOCD.
|
||||||
|
*/
|
||||||
|
@VisibleForTesting
|
||||||
|
@Nullable
|
||||||
|
byte[] getZ64EocdBytes() throws IOException {
|
||||||
|
Preconditions.checkNotNull(eocdEntry);
|
||||||
|
|
||||||
|
if (eocd64Entry == null) {
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
|
||||||
|
return eocd64Entry.getStore().toBytes();
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
 * Checks whether the EOCD record is absent from memory. (The EOCD is in-memory when it was either
 * read from disk and is still valid, or has been computed from the Central Directory.)
 *
 * @return True iff the EOCD record is <i>not</i> in-memory.
 */
boolean isEmpty() {
  return eocdEntry == null;
}

/**
 * Sets whether or not the EOCD record should use the Version 1 or Version 2 of the Zip64 EOCD
 * (iff the file needs a Zip64 record). The EOCD record should not be in-memory when trying to set
 * this value, and the EOCD will need to be recomputed to have any effect.
 *
 * @param useVersion2Header True if the Version 2 header is to be used, and false for the Version
 *     1 header.
 */
void setUseVersion2Header(boolean useVersion2Header) {
  // Changing the version while a record is in-memory would leave a stale record; log it.
  verifyLog.verify(eocdEntry == null, "eocdEntry != null");

  this.useVersion2Header = useVersion2Header;
}

/**
 * Specifies if the EOCD Group will be using a Version 2 Zip64 EOCD record or a Version 1 record
 * if the file needs to be in Zip64 format.
 *
 * @return True if the Version 2 record will be used, and false if the Version 1 record will be
 *     used.
 */
boolean usingVersion2Header() {
  return useVersion2Header;
}
|
||||||
|
|
||||||
|
/**
 * Removes the EOCD record from memory, releasing its space in the file map. The EOCD comment and
 * the Zip64 extensible data sector are preserved so a subsequent
 * {@link #computeRecord(FileUseMapEntry, long)} call carries them over.
 */
void deleteRecord() {
  if (eocdEntry != null) {
    map.remove(eocdEntry);

    // Recover the comment so a recomputed EOCD keeps it.
    Eocd eocd = eocdEntry.getStore();
    Verify.verify(eocd != null);
    eocdComment = eocd.getComment();
    eocdEntry = null;
  }

  if (eocd64Locator != null) {
    // Locator and Zip64 EOCD always exist as a pair.
    Verify.verify(eocd64Entry != null);
    // Recover the extensible data sector for the next computeRecord() call.
    eocdDataSector = eocd64Entry.getStore().getExtraFields();
    map.remove(eocd64Locator);
    map.remove(eocd64Entry);
    eocd64Locator = null;
    eocd64Entry = null;
  } else {
    eocdDataSector = new Zip64ExtensibleDataSector();
  }
}
|
||||||
|
|
||||||
|
/**
 * Sets the EOCD comment.
 *
 * @param comment The new comment; no conversion is done, these exact bytes will be placed in the
 *     EOCD comment.
 * @throws IllegalArgumentException If the comment corrupts the ZipFile by having a valid EOCD
 *     record in it.
 */
void setEocdComment(byte[] comment) {
  if (comment.length > MAX_EOCD_COMMENT_SIZE) {
    throw new IllegalArgumentException(
        "EOCD comment size ("
            + comment.length
            + ") is larger than the maximum allowed ("
            + MAX_EOCD_COMMENT_SIZE
            + ")");
  }

  // Check if the EOCD signature appears anywhere in the comment we need to check if it
  // is valid.
  for (int i = 0; i < comment.length - MIN_EOCD_SIZE; i++) {
    // Remember: little endian...
    ByteBuffer potentialSignature = ByteBuffer.wrap(comment, i, 4);
    try {
      if (LittleEndianUtils.readUnsigned4Le(potentialSignature) == EOCD_SIGNATURE) {
        // We found a possible EOCD signature at position i. Try to read it.
        ByteBuffer bytes = ByteBuffer.wrap(comment, i, comment.length - i);
        try {
          new Eocd(bytes);
          // If a valid record is found in the comment then this corrupts the Zip file record
          // as we look for the EOCD at the back of the file (where the comment is) first.
          throw new IllegalArgumentException(
              "Position " + i + " of the comment contains a valid EOCD record.");
        } catch (IOException e) {
          // Fine, this is an invalid record. Move along...
        }
      }
    } catch (IOException e) {
      // Reading 4 bytes from an in-memory buffer should not fail; surface it unchecked.
      throw new IOExceptionWrapper(e);
    }
  }

  // Drop the current record (if any) and stage the new comment; a later computeRecord()
  // will embed it in the rebuilt EOCD.
  deleteRecord();
  eocdComment = new byte[comment.length];
  System.arraycopy(comment, 0, eocdComment, 0, comment.length);
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Returns the start of the EOCD record location in the file or -1 if the EOCD is not in memory.
|
||||||
|
*
|
||||||
|
* @return The start of the record.
|
||||||
|
*/
|
||||||
|
long getOffset() {
|
||||||
|
if (eocdEntry == null) {
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
return getRecordStart();
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Gets the comment in the EOCD.
|
||||||
|
*
|
||||||
|
* @return The comment exactly as it was encoded in the EOCD, no encoding is done.
|
||||||
|
*/
|
||||||
|
byte[] getEocdComment() {
|
||||||
|
if (eocdEntry == null) {
|
||||||
|
Verify.verify(eocdComment != null);
|
||||||
|
byte[] eocdCommentCopy = eocdComment.clone();
|
||||||
|
return eocdCommentCopy;
|
||||||
|
}
|
||||||
|
|
||||||
|
Eocd eocd = eocdEntry.getStore();
|
||||||
|
Verify.verify(eocd != null);
|
||||||
|
return eocd.getComment();
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Gets the size of the central directory as specified from the EOCD record. The EOCD must be in
|
||||||
|
* memory before this method is invoked.
|
||||||
|
*
|
||||||
|
* @return The directory's size.
|
||||||
|
*/
|
||||||
|
long getDirectorySize() {
|
||||||
|
Preconditions.checkNotNull(eocdEntry, "eocdEntry == null");
|
||||||
|
|
||||||
|
Eocd eocd = eocdEntry.getStore();
|
||||||
|
|
||||||
|
if (eocd64Entry != null && eocd.getDirectorySize() == Eocd.MAX_CD_SIZE) {
|
||||||
|
return eocd64Entry.getStore().getDirectorySize();
|
||||||
|
} else {
|
||||||
|
return eocd.getDirectorySize();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Gets the offset of the Central Directory from the start of the archive as specified from the
|
||||||
|
* EOCD record. The EOCD must be in memory before this method is invoked.
|
||||||
|
*
|
||||||
|
* @return The offset of the start of the Central Directory.
|
||||||
|
*/
|
||||||
|
long getDirectoryOffset() {
|
||||||
|
Preconditions.checkNotNull(eocdEntry, "eocdEntry == null");
|
||||||
|
|
||||||
|
Eocd eocd = eocdEntry.getStore();
|
||||||
|
|
||||||
|
if (eocd64Entry != null && eocd.getDirectoryOffset() == Eocd.MAX_CD_OFFSET) {
|
||||||
|
return eocd64Entry.getStore().getDirectoryOffset();
|
||||||
|
} else {
|
||||||
|
return eocd.getDirectoryOffset();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Gets the total number of entries in the Central Directory as specified from the EOCD record.
|
||||||
|
* The EOCD must be in memory before this method is invoked.
|
||||||
|
*
|
||||||
|
* @return The total number of records in the Central Directory.
|
||||||
|
*/
|
||||||
|
long getTotalDirectoryRecords() {
|
||||||
|
Preconditions.checkNotNull(eocdEntry, "eocdEntry == null");
|
||||||
|
|
||||||
|
Eocd eocd = eocdEntry.getStore();
|
||||||
|
if (eocd64Entry != null && eocd.getTotalRecords() == Eocd.MAX_TOTAL_RECORDS) {
|
||||||
|
return eocd64Entry.getStore().getTotalRecords();
|
||||||
|
}
|
||||||
|
|
||||||
|
return eocd.getTotalRecords();
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Returns the start of the EOCD record from the start of the archive. This will be the same as
|
||||||
|
* the start of the standard EOCD in a Zip32 file or in a Zip64 file will be the start of the
|
||||||
|
* Zip64 Eocd record. The EOCD must be in memory for this method to be invoked.
|
||||||
|
*
|
||||||
|
* @return The start of the entire EOCD record.
|
||||||
|
*/
|
||||||
|
long getRecordStart() {
|
||||||
|
Verify.verify(eocdEntry != null, "eocdEntry == null");
|
||||||
|
if (eocd64Entry != null) {
|
||||||
|
return eocd64Entry.getStart();
|
||||||
|
}
|
||||||
|
return eocdEntry.getStart();
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Returns the total size of the EOCD record. This will be the same as the standard EOCD size for
|
||||||
|
* a Zip32 file or in a Zip64 file will be the start of the Zip64 record to the end of the
|
||||||
|
* standard EOCD. the EOCD must be in memory for this method to be invoked.
|
||||||
|
*
|
||||||
|
* @return The total size of the EOCD record.
|
||||||
|
*/
|
||||||
|
public long getRecordSize() {
|
||||||
|
if (eocd64Entry != null) {
|
||||||
|
Verify.verify(eocdEntry != null);
|
||||||
|
return eocdEntry.getEnd() - eocd64Entry.getStart();
|
||||||
|
}
|
||||||
|
if (eocdEntry == null) {
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
|
||||||
|
return eocdEntry.getSize();
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Returns the Zip64 Extensible Data Sector, or {@code null} if the EOCD record is not in the
|
||||||
|
* Zip64 format. The EOCD must be in memory for this method to be invoked.
|
||||||
|
*
|
||||||
|
* @return The Extensible data sector, or {@code null} if none exists.
|
||||||
|
*/
|
||||||
|
@Nullable
|
||||||
|
public Zip64ExtensibleDataSector getExtensibleData() {
|
||||||
|
Verify.verify(eocdEntry != null);
|
||||||
|
if (eocd64Entry != null) {
|
||||||
|
return eocd64Entry.getStore().getExtraFields();
|
||||||
|
}
|
||||||
|
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
@ -0,0 +1,385 @@
|
||||||
|
/*
|
||||||
|
* Copyright (C) 2016 The Android Open Source Project
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package com.android.tools.build.apkzlib.zip;
|
||||||
|
|
||||||
|
import com.android.tools.build.apkzlib.zip.utils.LittleEndianUtils;
|
||||||
|
import com.google.common.base.Preconditions;
|
||||||
|
import com.google.common.collect.ImmutableList;
|
||||||
|
import java.io.IOException;
|
||||||
|
import java.nio.ByteBuffer;
|
||||||
|
import java.util.ArrayList;
|
||||||
|
import java.util.List;
|
||||||
|
import javax.annotation.Nullable;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Contains an extra field.
|
||||||
|
*
|
||||||
|
* <p>According to the zip specification, the extra field is composed of a sequence of fields. This
|
||||||
|
* class provides a way to access, parse and modify that information.
|
||||||
|
*
|
||||||
|
* <p>The zip specification calls fields to the fields inside the extra field. Because this
|
||||||
|
* terminology is confusing, we use <i>segment</i> to refer to a part of the extra field. Each
|
||||||
|
* segment is represented by an instance of {@link Segment} and contains a header ID and data.
|
||||||
|
*
|
||||||
|
* <p>Each instance of {@link ExtraField} is immutable. The extra field of a particular entry can be
|
||||||
|
* changed by creating a new instanceof {@link ExtraField} and pass it to {@link
|
||||||
|
* StoredEntry#setLocalExtra(ExtraField)}.
|
||||||
|
*
|
||||||
|
* <p>Instances of {@link ExtraField} can be created directly from the list of segments in it or
|
||||||
|
* from the raw byte data. If created from the raw byte data, the data will only be parsed on
|
||||||
|
* demand. So, if neither {@link #getSegments()} nor {@link #getSingleSegment(int)} is invoked, the
|
||||||
|
* extra field will not be parsed. This guarantees low performance impact of the using the extra
|
||||||
|
* field unless its contents are needed.
|
||||||
|
*/
|
||||||
|
public class ExtraField {
|
||||||
|
public static final ExtraField EMPTY = new ExtraField();
|
||||||
|
|
||||||
|
/** Header ID for field with zip alignment. */
|
||||||
|
static final int ALIGNMENT_ZIP_EXTRA_DATA_FIELD_HEADER_ID = 0xd935;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* The field's raw data, if it is known. Either this variable or {@link #segments} must be
|
||||||
|
* non-{@code null}.
|
||||||
|
*/
|
||||||
|
@Nullable private final byte[] rawData;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* The list of field's segments. Will be populated if the extra field is created based on a list
|
||||||
|
* of segments; will also be populated after parsing if the extra field is created based on the
|
||||||
|
* raw bytes.
|
||||||
|
*/
|
||||||
|
@Nullable private ImmutableList<Segment> segments;
|
||||||
|
|
||||||
|
/**
 * Creates an extra field backed by existing raw bytes.
 *
 * <p>The bytes are kept as-is and only parsed into segments when requested.
 *
 * @param rawData the raw data; will not be parsed unless needed
 */
public ExtraField(byte[] rawData) {
  segments = null;
  this.rawData = rawData;
}

/** Creates a new, empty extra field containing no segments. */
public ExtraField() {
  this.rawData = null;
  this.segments = ImmutableList.of();
}

/**
 * Creates a new extra field made of the provided segments.
 *
 * @param segments the segments
 */
public ExtraField(ImmutableList<Segment> segments) {
  this.segments = segments;
  this.rawData = null;
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Obtains all segments in the extra field.
|
||||||
|
*
|
||||||
|
* @return all segments
|
||||||
|
* @throws IOException failed to parse the extra field
|
||||||
|
*/
|
||||||
|
public ImmutableList<Segment> getSegments() throws IOException {
|
||||||
|
if (segments == null) {
|
||||||
|
parseSegments();
|
||||||
|
}
|
||||||
|
|
||||||
|
Preconditions.checkNotNull(segments);
|
||||||
|
return segments;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Obtains the only segment with the provided header ID.
|
||||||
|
*
|
||||||
|
* @param headerId the header ID
|
||||||
|
* @return the segment found or {@code null} if no segment contains the provided header ID
|
||||||
|
* @throws IOException there is more than one header with the provided header ID
|
||||||
|
*/
|
||||||
|
@Nullable
|
||||||
|
public Segment getSingleSegment(int headerId) throws IOException {
|
||||||
|
List<Segment> found = new ArrayList<>();
|
||||||
|
for (Segment s : getSegments()) {
|
||||||
|
if (s.getHeaderId() == headerId) {
|
||||||
|
found.add(s);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if (found.isEmpty()) {
|
||||||
|
return null;
|
||||||
|
} else if (found.size() == 1) {
|
||||||
|
return found.get(0);
|
||||||
|
} else {
|
||||||
|
throw new IOException(found.size() + " segments with header ID " + headerId + "found");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
 * Parses the raw data and generates all segments in {@link #segments}.
 *
 * <p>May only be called once, and only when this extra field was built from raw bytes.
 *
 * @throws IOException failed to parse the data
 */
private void parseSegments() throws IOException {
  Preconditions.checkNotNull(rawData);
  Preconditions.checkState(segments == null);

  List<Segment> segments = new ArrayList<>();
  ByteBuffer buffer = ByteBuffer.wrap(rawData);

  // Each segment is: header ID (2 bytes LE), data size (2 bytes LE), then the data itself.
  while (buffer.remaining() > 0) {
    int headerId = LittleEndianUtils.readUnsigned2Le(buffer);
    int dataSize = LittleEndianUtils.readUnsigned2Le(buffer);
    if (dataSize < 0) {
      throw new IOException(
          "Invalid data size for extra field segment with header ID "
              + headerId
              + ": "
              + dataSize);
    }

    byte[] data = new byte[dataSize];
    // The declared size must not run past the end of the raw data.
    if (buffer.remaining() < dataSize) {
      throw new IOException(
          "Invalid data size for extra field segment with header ID "
              + headerId
              + ": "
              + dataSize
              + " (only "
              + buffer.remaining()
              + " bytes are available)");
    }
    buffer.get(data);

    // Alignment segments get a dedicated type; all others are kept as raw data.
    SegmentFactory factory = identifySegmentFactory(headerId);
    Segment seg = factory.make(headerId, data);
    segments.add(seg);
  }

  // Publish only after the whole buffer has parsed successfully.
  this.segments = ImmutableList.copyOf(segments);
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Obtains the size of the extra field.
|
||||||
|
*
|
||||||
|
* @return the size
|
||||||
|
*/
|
||||||
|
public int size() {
|
||||||
|
if (rawData != null) {
|
||||||
|
return rawData.length;
|
||||||
|
} else {
|
||||||
|
Preconditions.checkNotNull(segments);
|
||||||
|
int sz = 0;
|
||||||
|
for (Segment s : segments) {
|
||||||
|
sz += s.size();
|
||||||
|
}
|
||||||
|
|
||||||
|
return sz;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Writes the extra field to the given output buffer.
|
||||||
|
*
|
||||||
|
* @param out the output buffer to write the field; exactly {@link #size()} bytes will be written
|
||||||
|
* @throws IOException failed to write the extra fields
|
||||||
|
*/
|
||||||
|
public void write(ByteBuffer out) throws IOException {
|
||||||
|
if (rawData != null) {
|
||||||
|
out.put(rawData);
|
||||||
|
} else {
|
||||||
|
Preconditions.checkNotNull(segments);
|
||||||
|
for (Segment s : segments) {
|
||||||
|
s.write(out);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Identifies the factory to create the segment with the provided header ID.
|
||||||
|
*
|
||||||
|
* @param headerId the header ID
|
||||||
|
* @return the segmnet factory that creates segments with the given header
|
||||||
|
*/
|
||||||
|
private static SegmentFactory identifySegmentFactory(int headerId) {
|
||||||
|
if (headerId == ALIGNMENT_ZIP_EXTRA_DATA_FIELD_HEADER_ID) {
|
||||||
|
return AlignmentSegment::new;
|
||||||
|
}
|
||||||
|
|
||||||
|
return RawDataSegment::new;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
 * Field inside the extra field. A segment contains a header ID and data. Specific types of
 * segments implement this interface.
 */
public interface Segment {

  /**
   * Obtains the segment's header ID.
   *
   * @return the segment's header ID
   */
  int getHeaderId();

  /**
   * Obtains the size of the segment including the header ID and the size field.
   *
   * @return the number of bytes needed to write the segment
   */
  int size();

  /**
   * Writes the segment to a buffer.
   *
   * @param out the buffer where to write the segment to; exactly {@link #size()} bytes will be
   *     written
   * @throws IOException failed to write segment data
   */
  void write(ByteBuffer out) throws IOException;
}

/** Factory that creates a segment from its header ID and data bytes. */
interface SegmentFactory {

  /**
   * Creates a new segment.
   *
   * @param headerId the header ID
   * @param data the segment's data
   * @return the created segment
   * @throws IOException failed to create the segment from the data
   */
  Segment make(int headerId, byte[] data) throws IOException;
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Segment of raw data: this class represents a general segment containing an array of bytes as
|
||||||
|
* data.
|
||||||
|
*/
|
||||||
|
public static class RawDataSegment implements Segment {
|
||||||
|
|
||||||
|
/** Header ID. */
|
||||||
|
private final int headerId;
|
||||||
|
|
||||||
|
/** Data in the segment. */
|
||||||
|
private final byte[] data;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Creates a new raw data segment.
|
||||||
|
*
|
||||||
|
* @param headerId the header ID
|
||||||
|
* @param data the segment data
|
||||||
|
*/
|
||||||
|
RawDataSegment(int headerId, byte[] data) {
|
||||||
|
this.headerId = headerId;
|
||||||
|
this.data = data;
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public int getHeaderId() {
|
||||||
|
return headerId;
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public void write(ByteBuffer out) throws IOException {
|
||||||
|
LittleEndianUtils.writeUnsigned2Le(out, headerId);
|
||||||
|
LittleEndianUtils.writeUnsigned2Le(out, data.length);
|
||||||
|
out.put(data);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public int size() {
|
||||||
|
return 4 + data.length;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Segment with information on an alignment: this segment contains information on how an entry
|
||||||
|
* should be aligned and contains zero-filled data to force alignment.
|
||||||
|
*
|
||||||
|
* <p>An alignment segment contains the header ID, the size of the data, the alignment value and
|
||||||
|
* zero bytes to pad
|
||||||
|
*/
|
||||||
|
public static class AlignmentSegment implements Segment {
|
||||||
|
|
||||||
|
/** Minimum size for an alignment segment. */
|
||||||
|
public static final int MINIMUM_SIZE = 6;
|
||||||
|
|
||||||
|
/** The alignment value. */
|
||||||
|
private int alignment;
|
||||||
|
|
||||||
|
/** How many bytes of padding are in this segment? */
|
||||||
|
private int padding;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Creates a new alignment segment.
|
||||||
|
*
|
||||||
|
* @param alignment the alignment value
|
||||||
|
* @param totalSize how many bytes should this segment take?
|
||||||
|
*/
|
||||||
|
public AlignmentSegment(int alignment, int totalSize) {
|
||||||
|
Preconditions.checkArgument(alignment > 0, "alignment <= 0");
|
||||||
|
Preconditions.checkArgument(totalSize >= MINIMUM_SIZE, "totalSize < MINIMUM_SIZE");
|
||||||
|
|
||||||
|
/*
|
||||||
|
* We have 6 bytes of fixed data: header ID (2 bytes), data size (2 bytes), alignment
|
||||||
|
* value (2 bytes).
|
||||||
|
*/
|
||||||
|
this.alignment = alignment;
|
||||||
|
padding = totalSize - MINIMUM_SIZE;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Creates a new alignment segment from extra data.
|
||||||
|
*
|
||||||
|
* @param headerId the header ID
|
||||||
|
* @param data the segment data
|
||||||
|
* @throws IOException failed to create the segment from the data
|
||||||
|
*/
|
||||||
|
public AlignmentSegment(int headerId, byte[] data) throws IOException {
|
||||||
|
Preconditions.checkArgument(headerId == ALIGNMENT_ZIP_EXTRA_DATA_FIELD_HEADER_ID);
|
||||||
|
|
||||||
|
ByteBuffer dataBuffer = ByteBuffer.wrap(data);
|
||||||
|
alignment = LittleEndianUtils.readUnsigned2Le(dataBuffer);
|
||||||
|
if (alignment <= 0) {
|
||||||
|
throw new IOException("Invalid alignment in alignment field: " + alignment);
|
||||||
|
}
|
||||||
|
|
||||||
|
padding = data.length - 2;
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public void write(ByteBuffer out) throws IOException {
|
||||||
|
LittleEndianUtils.writeUnsigned2Le(out, ALIGNMENT_ZIP_EXTRA_DATA_FIELD_HEADER_ID);
|
||||||
|
LittleEndianUtils.writeUnsigned2Le(out, padding + 2);
|
||||||
|
LittleEndianUtils.writeUnsigned2Le(out, alignment);
|
||||||
|
out.put(new byte[padding]);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public int size() {
|
||||||
|
return padding + 6;
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public int getHeaderId() {
|
||||||
|
return ALIGNMENT_ZIP_EXTRA_DATA_FIELD_HEADER_ID;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
@ -0,0 +1,598 @@
|
||||||
|
/*
|
||||||
|
* Copyright (C) 2015 The Android Open Source Project
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package com.android.tools.build.apkzlib.zip;
|
||||||
|
|
||||||
|
import com.google.common.base.Preconditions;
|
||||||
|
import com.google.common.base.Verify;
|
||||||
|
import com.google.common.collect.Lists;
|
||||||
|
import com.google.common.collect.Sets;
|
||||||
|
import com.google.common.primitives.Ints;
|
||||||
|
import java.util.List;
|
||||||
|
import java.util.Set;
|
||||||
|
import java.util.SortedSet;
|
||||||
|
import java.util.TreeSet;
|
||||||
|
import javax.annotation.Nullable;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* The file use map keeps track of which parts of the zip file are used which parts are not. It
|
||||||
|
* essentially maintains an ordered set of entries ({@link FileUseMapEntry}). Each entry either has
|
||||||
|
* some data (an entry, the Central Directory, the EOCD) or is a free entry.
|
||||||
|
*
|
||||||
|
* <p>For example: [0-95, "foo/"][95-260, "xpto"][260-310, free][310-360, Central Directory]
|
||||||
|
* [360-390,EOCD]
|
||||||
|
*
|
||||||
|
* <p>There are a few invariants in this structure:
|
||||||
|
*
|
||||||
|
* <ul>
|
||||||
|
* <li>there are no gaps between map entries;
|
||||||
|
* <li>the map is fully covered up to its size;
|
||||||
|
* <li>there are no two free entries next to each other; this is guaranteed by coalescing the
|
||||||
|
* entries upon removal (see {@link #coalesce(FileUseMapEntry)});
|
||||||
|
* <li>all free entries have a minimum size defined in the constructor, with the possible
|
||||||
|
* exception of the last one
|
||||||
|
* </ul>
|
||||||
|
*/
|
||||||
|
class FileUseMap {
|
||||||
|
/**
|
||||||
|
* Size of the file according to the map. This should always match the last entry in {@code #map}.
|
||||||
|
*/
|
||||||
|
private long size;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Tree with all intervals ordered by position. Contains coverage from 0 up to {@link #size}. If
|
||||||
|
* {@link #size} is zero then this set is empty. This is the only situation in which the map will
|
||||||
|
* be empty.
|
||||||
|
*/
|
||||||
|
private final TreeSet<FileUseMapEntry<?>> map;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Tree with all free blocks ordered by size. This is essentially a view over {@link #map}
|
||||||
|
* containing only the free blocks, but in a different order.
|
||||||
|
*/
|
||||||
|
private final TreeSet<FileUseMapEntry<?>> freeBySize;
|
||||||
|
private final TreeSet<FileUseMapEntry<?>> freeByStart;
|
||||||
|
|
||||||
|
/** If defined, defines the minimum size for a free entry. */
|
||||||
|
private int mMinFreeSize;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Creates a new, empty file map.
|
||||||
|
*
|
||||||
|
* @param size the size of the file
|
||||||
|
* @param minFreeSize minimum size of a free entry
|
||||||
|
*/
|
||||||
|
FileUseMap(long size, int minFreeSize) {
|
||||||
|
Preconditions.checkArgument(size >= 0, "size < 0");
|
||||||
|
Preconditions.checkArgument(minFreeSize >= 0, "minFreeSize < 0");
|
||||||
|
|
||||||
|
this.size = size;
|
||||||
|
map = new TreeSet<>(FileUseMapEntry.COMPARE_BY_START);
|
||||||
|
freeBySize = new TreeSet<>(FileUseMapEntry.COMPARE_BY_SIZE);
|
||||||
|
freeByStart = new TreeSet<>(FileUseMapEntry.COMPARE_BY_START);
|
||||||
|
mMinFreeSize = minFreeSize;
|
||||||
|
|
||||||
|
if (size > 0) {
|
||||||
|
internalAdd(FileUseMapEntry.makeFree(0, size));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Adds an entry to the internal structures.
|
||||||
|
*
|
||||||
|
* @param entry the entry to add
|
||||||
|
*/
|
||||||
|
private void internalAdd(FileUseMapEntry<?> entry) {
|
||||||
|
map.add(entry);
|
||||||
|
|
||||||
|
if (entry.isFree()) {
|
||||||
|
freeBySize.add(entry);
|
||||||
|
freeByStart.add(entry);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Removes an entry from the internal structures.
|
||||||
|
*
|
||||||
|
* @param entry the entry to remove
|
||||||
|
*/
|
||||||
|
private void internalRemove(FileUseMapEntry<?> entry) {
|
||||||
|
boolean wasRemoved = map.remove(entry);
|
||||||
|
Preconditions.checkState(wasRemoved, "entry not in map");
|
||||||
|
|
||||||
|
if (entry.isFree()) {
|
||||||
|
freeBySize.remove(entry);
|
||||||
|
freeByStart.remove(entry);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Adds a new file to the map. The interval specified by {@code entry} must fit inside an empty
|
||||||
|
* entry in the map. That entry will be replaced by entry and additional free entries will be
|
||||||
|
* added before and after if needed to make sure no spaces exist on the map.
|
||||||
|
*
|
||||||
|
* @param entry the entry to add
|
||||||
|
*/
|
||||||
|
private void add(FileUseMapEntry<?> entry) {
|
||||||
|
Preconditions.checkArgument(entry.getStart() < size, "entry.getStart() >= size");
|
||||||
|
Preconditions.checkArgument(entry.getEnd() <= size, "entry.getEnd() > size");
|
||||||
|
Preconditions.checkArgument(!entry.isFree(), "entry.isFree()");
|
||||||
|
|
||||||
|
FileUseMapEntry<?> container = findContainer(entry);
|
||||||
|
Verify.verify(container.isFree(), "!container.isFree()");
|
||||||
|
|
||||||
|
Set<FileUseMapEntry<?>> replacements = split(container, entry);
|
||||||
|
internalRemove(container);
|
||||||
|
for (FileUseMapEntry<?> r : replacements) {
|
||||||
|
internalAdd(r);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Adds a new file to the map. The interval specified by ({@code start}, {@code end}) must fit
|
||||||
|
* inside an empty entry in the map. That entry will be replaced by entry and additional free
|
||||||
|
* entries will be added before and after if needed to make sure no spaces exist on the map.
|
||||||
|
*
|
||||||
|
* <p>The entry cannot extend beyong the end of the map. If necessary, extend the map using {@link
|
||||||
|
* #extend(long)}.
|
||||||
|
*
|
||||||
|
* @param start the start of this entry
|
||||||
|
* @param end the end of the entry
|
||||||
|
* @param store extra data to store with the entry
|
||||||
|
* @param <T> the type of data to store in the entry
|
||||||
|
* @return the new entry
|
||||||
|
*/
|
||||||
|
<T> FileUseMapEntry<T> add(long start, long end, T store) {
|
||||||
|
Preconditions.checkArgument(start >= 0, "start < 0");
|
||||||
|
Preconditions.checkArgument(end > start, "end < start");
|
||||||
|
|
||||||
|
FileUseMapEntry<T> entry = FileUseMapEntry.makeUsed(start, end, store);
|
||||||
|
add(entry);
|
||||||
|
return entry;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Removes a file from the map, replacing it with an empty one that is then coalesced with
|
||||||
|
* neighbors (if the neighbors are free).
|
||||||
|
*
|
||||||
|
* @param entry the entry
|
||||||
|
*/
|
||||||
|
void remove(FileUseMapEntry<?> entry) {
|
||||||
|
Preconditions.checkState(map.contains(entry), "!map.contains(entry)");
|
||||||
|
Preconditions.checkArgument(!entry.isFree(), "entry.isFree()");
|
||||||
|
|
||||||
|
internalRemove(entry);
|
||||||
|
|
||||||
|
FileUseMapEntry<?> replacement = FileUseMapEntry.makeFree(entry.getStart(), entry.getEnd());
|
||||||
|
internalAdd(replacement);
|
||||||
|
coalesce(replacement);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Finds the entry that fully contains the given one. It is assumed that one exists.
|
||||||
|
*
|
||||||
|
* @param entry the entry whose container we're looking for
|
||||||
|
* @return the container
|
||||||
|
*/
|
||||||
|
private FileUseMapEntry<?> findContainer(FileUseMapEntry<?> entry) {
|
||||||
|
FileUseMapEntry container = map.floor(entry);
|
||||||
|
Verify.verifyNotNull(container);
|
||||||
|
Verify.verify(container.getStart() <= entry.getStart());
|
||||||
|
Verify.verify(container.getEnd() >= entry.getEnd());
|
||||||
|
|
||||||
|
return container;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Splits a container to add an entry, adding new free entries before and after the provided entry
|
||||||
|
* if needed.
|
||||||
|
*
|
||||||
|
* @param container the container entry, a free entry that is in {@link #map} that that encloses
|
||||||
|
* {@code entry}
|
||||||
|
* @param entry the entry that will be used to split {@code container}
|
||||||
|
* @return a set of non-overlapping entries that completely covers {@code container} and that
|
||||||
|
* includes {@code entry}
|
||||||
|
*/
|
||||||
|
private static Set<FileUseMapEntry<?>> split(
|
||||||
|
FileUseMapEntry<?> container, FileUseMapEntry<?> entry) {
|
||||||
|
Preconditions.checkArgument(container.isFree(), "!container.isFree()");
|
||||||
|
|
||||||
|
long farStart = container.getStart();
|
||||||
|
long start = entry.getStart();
|
||||||
|
long end = entry.getEnd();
|
||||||
|
long farEnd = container.getEnd();
|
||||||
|
|
||||||
|
Verify.verify(farStart <= start, "farStart > start");
|
||||||
|
Verify.verify(start < end, "start >= end");
|
||||||
|
Verify.verify(farEnd >= end, "farEnd < end");
|
||||||
|
|
||||||
|
Set<FileUseMapEntry<?>> result = Sets.newHashSet();
|
||||||
|
if (farStart < start) {
|
||||||
|
result.add(FileUseMapEntry.makeFree(farStart, start));
|
||||||
|
}
|
||||||
|
|
||||||
|
result.add(entry);
|
||||||
|
|
||||||
|
if (end < farEnd) {
|
||||||
|
result.add(FileUseMapEntry.makeFree(end, farEnd));
|
||||||
|
}
|
||||||
|
|
||||||
|
return result;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Coalesces a free entry replacing it and neighboring free entries with a single, larger entry.
|
||||||
|
* This method does nothing if {@code entry} does not have free neighbors.
|
||||||
|
*
|
||||||
|
* @param entry the free entry to coalesce with neighbors
|
||||||
|
*/
|
||||||
|
private void coalesce(FileUseMapEntry<?> entry) {
|
||||||
|
Preconditions.checkArgument(entry.isFree(), "!entry.isFree()");
|
||||||
|
|
||||||
|
FileUseMapEntry<?> prevToMerge = null;
|
||||||
|
long start = entry.getStart();
|
||||||
|
if (start > 0) {
|
||||||
|
/*
|
||||||
|
* See if we have a previous entry to merge with this one.
|
||||||
|
*/
|
||||||
|
prevToMerge = map.floor(FileUseMapEntry.makeFree(start - 1, start));
|
||||||
|
Verify.verifyNotNull(prevToMerge);
|
||||||
|
if (!prevToMerge.isFree()) {
|
||||||
|
prevToMerge = null;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
FileUseMapEntry<?> nextToMerge = null;
|
||||||
|
long end = entry.getEnd();
|
||||||
|
if (end < size) {
|
||||||
|
/*
|
||||||
|
* See if we have a next entry to merge with this one.
|
||||||
|
*/
|
||||||
|
nextToMerge = map.ceiling(FileUseMapEntry.makeFree(end, end + 1));
|
||||||
|
Verify.verifyNotNull(nextToMerge);
|
||||||
|
if (!nextToMerge.isFree()) {
|
||||||
|
nextToMerge = null;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if (prevToMerge == null && nextToMerge == null) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
long newStart = start;
|
||||||
|
if (prevToMerge != null) {
|
||||||
|
newStart = prevToMerge.getStart();
|
||||||
|
internalRemove(prevToMerge);
|
||||||
|
}
|
||||||
|
|
||||||
|
long newEnd = end;
|
||||||
|
if (nextToMerge != null) {
|
||||||
|
newEnd = nextToMerge.getEnd();
|
||||||
|
internalRemove(nextToMerge);
|
||||||
|
}
|
||||||
|
|
||||||
|
internalRemove(entry);
|
||||||
|
internalAdd(FileUseMapEntry.makeFree(newStart, newEnd));
|
||||||
|
}
|
||||||
|
|
||||||
|
/** Truncates map removing the top entry if it is free and reducing the map's size. */
|
||||||
|
void truncate() {
|
||||||
|
if (size == 0) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Find the last entry.
|
||||||
|
*/
|
||||||
|
FileUseMapEntry<?> last = map.last();
|
||||||
|
Verify.verifyNotNull(last, "last == null");
|
||||||
|
if (last.isFree()) {
|
||||||
|
internalRemove(last);
|
||||||
|
size = last.getStart();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Obtains the size of the map.
|
||||||
|
*
|
||||||
|
* @return the size
|
||||||
|
*/
|
||||||
|
long size() {
|
||||||
|
return size;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Obtains the largest used offset in the map. This will be size of the map after truncation.
|
||||||
|
*
|
||||||
|
* @return the size of the file discounting the last block if it is empty
|
||||||
|
*/
|
||||||
|
long usedSize() {
|
||||||
|
if (size == 0) {
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Find the last entry to see if it is an empty entry. If it is, we need to remove its size
|
||||||
|
* from the returned value.
|
||||||
|
*/
|
||||||
|
FileUseMapEntry<?> last = map.last();
|
||||||
|
Verify.verifyNotNull(last, "last == null");
|
||||||
|
if (last.isFree()) {
|
||||||
|
return last.getStart();
|
||||||
|
} else {
|
||||||
|
Verify.verify(last.getEnd() == size);
|
||||||
|
return size;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Extends the map to guarantee it has at least {@code size} bytes. If the current size is as
|
||||||
|
* large as {@code size}, this method does nothing.
|
||||||
|
*
|
||||||
|
* @param size the new size of the map that cannot be smaller that the current size
|
||||||
|
*/
|
||||||
|
void extend(long size) {
|
||||||
|
Preconditions.checkArgument(size >= this.size, "size < size");
|
||||||
|
|
||||||
|
if (this.size == size) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
FileUseMapEntry<?> newBlock = FileUseMapEntry.makeFree(this.size, size);
|
||||||
|
internalAdd(newBlock);
|
||||||
|
|
||||||
|
this.size = size;
|
||||||
|
|
||||||
|
coalesce(newBlock);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Locates a free area in the map with at least {@code size} bytes such that {@code ((start +
|
||||||
|
* alignOffset) % align == 0} and such that the free space before {@code start} is not smaller
|
||||||
|
* than the minimum free entry size. This method will follow the algorithm specified by {@code
|
||||||
|
* alg}.
|
||||||
|
*
|
||||||
|
* <p>If no free contiguous block exists in the map that can hold the provided size then the first
|
||||||
|
* free index at the end of the map is provided. This means that the map may need to be extended
|
||||||
|
* before data can be added.
|
||||||
|
*
|
||||||
|
* @param size the size of the contiguous area requested
|
||||||
|
* @param alignOffset an offset to which alignment needs to be computed (see method description)
|
||||||
|
* @param align alignment at the offset (see method description)
|
||||||
|
* @param alg which algorithm to use
|
||||||
|
* @return the location of the contiguous area; this may be located at the end of the map
|
||||||
|
*/
|
||||||
|
long locateFree(long size, long alignOffset, long align, PositionAlgorithm alg) {
|
||||||
|
Preconditions.checkArgument(size > 0, "size <= 0");
|
||||||
|
|
||||||
|
FileUseMapEntry<?> minimumSizedEntry = FileUseMapEntry.makeFree(0, size);
|
||||||
|
SortedSet<FileUseMapEntry<?>> matches;
|
||||||
|
|
||||||
|
switch (alg) {
|
||||||
|
case BEST_FIT:
|
||||||
|
matches = freeBySize.tailSet(minimumSizedEntry);
|
||||||
|
break;
|
||||||
|
case FIRST_FIT:
|
||||||
|
matches = freeByStart;
|
||||||
|
break;
|
||||||
|
default:
|
||||||
|
throw new AssertionError();
|
||||||
|
}
|
||||||
|
|
||||||
|
FileUseMapEntry<?> best = null;
|
||||||
|
long bestExtraSize = 0;
|
||||||
|
for (FileUseMapEntry<?> curr : matches) {
|
||||||
|
/*
|
||||||
|
* We don't care about blocks that aren't free.
|
||||||
|
*/
|
||||||
|
if (!curr.isFree()) {
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Compute any extra size we need in this block to make sure we verify the alignment.
|
||||||
|
* There must be a better to do this...
|
||||||
|
*/
|
||||||
|
long extraSize;
|
||||||
|
if (align == 0) {
|
||||||
|
extraSize = 0;
|
||||||
|
} else {
|
||||||
|
extraSize = (align - ((curr.getStart() + alignOffset) % align)) % align;
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* We can't leave than mMinFreeSize before. So if the extraSize is less than
|
||||||
|
* mMinFreeSize, we have to increase it by 'align' as many times as needed. For
|
||||||
|
* example, if mMinFreeSize is 20, align 4 and extraSize is 5. We need to increase it
|
||||||
|
* to 21 (5 + 4 * 4)
|
||||||
|
*/
|
||||||
|
if (extraSize > 0 && extraSize < mMinFreeSize) {
|
||||||
|
int addAlignBlocks = Ints.checkedCast((mMinFreeSize - extraSize + align - 1) / align);
|
||||||
|
extraSize += addAlignBlocks * align;
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* We don't care about blocks where we don't fit in.
|
||||||
|
*/
|
||||||
|
if (curr.getSize() < (size + extraSize)) {
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* We don't care about blocks that leave less than the minimum size after. There are
|
||||||
|
* two exceptions: (1) this is the last block and (2) the next block is free in which
|
||||||
|
* case, after coalescing, the free block with have at least the minimum size.
|
||||||
|
*/
|
||||||
|
long emptySpaceLeft = curr.getSize() - (size + extraSize);
|
||||||
|
if (emptySpaceLeft > 0 && emptySpaceLeft < mMinFreeSize) {
|
||||||
|
FileUseMapEntry<?> next = map.higher(curr);
|
||||||
|
if (next != null && !next.isFree()) {
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* We don't care about blocks that are bigger than the best so far (otherwise this
|
||||||
|
* wouldn't be a best-fit algorithm).
|
||||||
|
*/
|
||||||
|
if (best != null && best.getSize() < curr.getSize()) {
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
|
||||||
|
best = curr;
|
||||||
|
bestExtraSize = extraSize;
|
||||||
|
|
||||||
|
/*
|
||||||
|
* If we're doing first fit, we don't want to search for a better one :)
|
||||||
|
*/
|
||||||
|
if (alg == PositionAlgorithm.FIRST_FIT) {
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* If no entry that could hold size is found, get the first free byte.
|
||||||
|
*/
|
||||||
|
long firstFree = this.size;
|
||||||
|
if (best == null && !map.isEmpty()) {
|
||||||
|
FileUseMapEntry<?> last = map.last();
|
||||||
|
if (last.isFree()) {
|
||||||
|
firstFree = last.getStart();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* We're done: either we found something or we didn't, in which the new entry needs to
|
||||||
|
* be added to the end of the map.
|
||||||
|
*/
|
||||||
|
if (best == null) {
|
||||||
|
long extra = (align - ((firstFree + alignOffset) % align)) % align;
|
||||||
|
|
||||||
|
/*
|
||||||
|
* If adding this entry at the end would create a space smaller than the minimum,
|
||||||
|
* push it for 'align' bytes forward.
|
||||||
|
*/
|
||||||
|
if (extra > 0) {
|
||||||
|
if (extra < mMinFreeSize) {
|
||||||
|
extra += align * (((mMinFreeSize - extra) + (align - 1)) / align);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return firstFree + extra;
|
||||||
|
} else {
|
||||||
|
return best.getStart() + bestExtraSize;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Obtains all free areas of the map, excluding any trailing free area.
|
||||||
|
*
|
||||||
|
* @return all free areas, an empty set if there are no free areas; the areas are returned in file
|
||||||
|
* order, that is, if area {@code x} starts before area {@code y}, then area {@code x} will be
|
||||||
|
* stored before area {@code y} in the list
|
||||||
|
*/
|
||||||
|
List<FileUseMapEntry<?>> getFreeAreas() {
|
||||||
|
List<FileUseMapEntry<?>> freeAreas = Lists.newArrayList();
|
||||||
|
|
||||||
|
for (FileUseMapEntry<?> area : map) {
|
||||||
|
if (area.isFree() && area.getEnd() != size) {
|
||||||
|
freeAreas.add(area);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return freeAreas;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Obtains the entry that is located before the one provided.
|
||||||
|
*
|
||||||
|
* @param entry the map entry to get the previous one for; must belong to the map
|
||||||
|
* @return the entry before the provided one, {@code null} if {@code entry} is the first entry in
|
||||||
|
* the map
|
||||||
|
*/
|
||||||
|
@Nullable
|
||||||
|
FileUseMapEntry<?> before(FileUseMapEntry<?> entry) {
|
||||||
|
Preconditions.checkNotNull(entry, "entry == null");
|
||||||
|
|
||||||
|
return map.lower(entry);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Obtains the entry that is located after the one provided.
|
||||||
|
*
|
||||||
|
* @param entry the map entry to get the next one for; must belong to the map
|
||||||
|
* @return the entry after the provided one, {@code null} if {@code entry} is the last entry in
|
||||||
|
* the map
|
||||||
|
*/
|
||||||
|
@Nullable
|
||||||
|
FileUseMapEntry<?> after(FileUseMapEntry<?> entry) {
|
||||||
|
Preconditions.checkNotNull(entry, "entry == null");
|
||||||
|
|
||||||
|
return map.higher(entry);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Obtains the entry at the given offset.
|
||||||
|
*
|
||||||
|
* @param offset the offset to look for
|
||||||
|
* @return the entry found or {@code null} if there is no entry (not even a free one) at the given
|
||||||
|
* offset
|
||||||
|
*/
|
||||||
|
@Nullable
|
||||||
|
FileUseMapEntry<?> at(long offset) {
|
||||||
|
Preconditions.checkArgument(offset >= 0, "offset < 0");
|
||||||
|
Preconditions.checkArgument(offset < size, "offset >= size");
|
||||||
|
|
||||||
|
FileUseMapEntry<?> entry = map.floor(FileUseMapEntry.makeFree(offset, offset + 1));
|
||||||
|
if (entry == null) {
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
|
||||||
|
Verify.verify(entry.getStart() <= offset);
|
||||||
|
Verify.verify(entry.getEnd() > offset);
|
||||||
|
|
||||||
|
return entry;
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public String toString() {
|
||||||
|
StringBuilder builder = new StringBuilder();
|
||||||
|
boolean first = true;
|
||||||
|
for (FileUseMapEntry<?> entry : map) {
|
||||||
|
if (first) {
|
||||||
|
first = false;
|
||||||
|
} else {
|
||||||
|
builder.append(", ");
|
||||||
|
}
|
||||||
|
|
||||||
|
builder.append(entry.getStart());
|
||||||
|
builder.append(" - ");
|
||||||
|
builder.append(entry.getEnd());
|
||||||
|
builder.append(": ");
|
||||||
|
builder.append(entry.getStore());
|
||||||
|
}
|
||||||
|
return builder.toString();
|
||||||
|
}
|
||||||
|
|
||||||
|
/** Algorithms used to position entries in blocks. */
|
||||||
|
public enum PositionAlgorithm {
|
||||||
|
/** Best fit: finds the smallest free block that can receive the entry. */
|
||||||
|
BEST_FIT,
|
||||||
|
|
||||||
|
/** First fit: finds the first free block that can receive the entry. */
|
||||||
|
FIRST_FIT
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
@ -0,0 +1,151 @@
|
||||||
|
/*
|
||||||
|
* Copyright (C) 2015 The Android Open Source Project
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package com.android.tools.build.apkzlib.zip;
|
||||||
|
|
||||||
|
import com.google.common.base.MoreObjects;
|
||||||
|
import com.google.common.base.Preconditions;
|
||||||
|
import com.google.common.primitives.Ints;
|
||||||
|
import java.util.Comparator;
|
||||||
|
import javax.annotation.Nullable;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Represents an entry in the {@link FileUseMap}. Each entry contains an interval of bytes. The end
|
||||||
|
* of the interval is exclusive.
|
||||||
|
*
|
||||||
|
* <p>Entries can either be free or used. Used entries <em>must</em> store an object. Free entries
|
||||||
|
* do not store anything.
|
||||||
|
*
|
||||||
|
* <p>File map entries are used to keep track of which parts of a file map are used and not.
|
||||||
|
*
|
||||||
|
* @param <T> the type of data stored
|
||||||
|
*/
|
||||||
|
class FileUseMapEntry<T> {
|
||||||
|
|
||||||
|
/** Comparator that compares entries by their start date. */
|
||||||
|
public static final Comparator<FileUseMapEntry<?>> COMPARE_BY_START =
|
||||||
|
(o1, o2) -> Ints.saturatedCast(o1.getStart() - o2.getStart());
|
||||||
|
|
||||||
|
/** Comparator that compares entries by their size. */
|
||||||
|
public static final Comparator<FileUseMapEntry<?>> COMPARE_BY_SIZE =
|
||||||
|
(o1, o2) -> Ints.saturatedCast(o1.getSize() - o2.getSize());
|
||||||
|
|
||||||
|
/** The first byte in the entry. */
|
||||||
|
private final long start;
|
||||||
|
|
||||||
|
/** The first byte no longer in the entry. */
|
||||||
|
private final long end;
|
||||||
|
|
||||||
|
/** The stored data. If {@code null} then this entry represents a free entry. */
|
||||||
|
@Nullable private final T store;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Creates a new map entry.
|
||||||
|
*
|
||||||
|
* @param start the start of the entry
|
||||||
|
* @param end the end of the entry (first byte no longer in the entry)
|
||||||
|
* @param store the data to store in the entry or {@code null} if this is a free entry
|
||||||
|
*/
|
||||||
|
private FileUseMapEntry(long start, long end, @Nullable T store) {
|
||||||
|
Preconditions.checkArgument(start >= 0, "start < 0");
|
||||||
|
Preconditions.checkArgument(end > start, "end <= start");
|
||||||
|
|
||||||
|
this.start = start;
|
||||||
|
this.end = end;
|
||||||
|
this.store = store;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Creates a new free entry.
|
||||||
|
*
|
||||||
|
* @param start the start of the entry
|
||||||
|
* @param end the end of the entry (first byte no longer in the entry)
|
||||||
|
* @return the entry
|
||||||
|
*/
|
||||||
|
public static FileUseMapEntry<Object> makeFree(long start, long end) {
|
||||||
|
return new FileUseMapEntry<>(start, end, null);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Creates a new used entry.
|
||||||
|
*
|
||||||
|
* @param start the start of the entry
|
||||||
|
* @param end the end of the entry (first byte no longer in the entry)
|
||||||
|
* @param store the data to store in the entry
|
||||||
|
* @param <T> the type of data to store in the entry
|
||||||
|
* @return the entry
|
||||||
|
*/
|
||||||
|
public static <T> FileUseMapEntry<T> makeUsed(long start, long end, T store) {
|
||||||
|
Preconditions.checkNotNull(store, "store == null");
|
||||||
|
return new FileUseMapEntry<>(start, end, store);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Obtains the first byte in the entry.
|
||||||
|
*
|
||||||
|
* @return the first byte in the entry (if the same value as {@link #getEnd()} then the entry is
|
||||||
|
* empty and contains no data)
|
||||||
|
*/
|
||||||
|
long getStart() {
|
||||||
|
return start;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Obtains the first byte no longer in the entry.
|
||||||
|
*
|
||||||
|
* @return the first byte no longer in the entry
|
||||||
|
*/
|
||||||
|
long getEnd() {
|
||||||
|
return end;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Obtains the size of the entry.
|
||||||
|
*
|
||||||
|
* @return the number of bytes contained in the entry
|
||||||
|
*/
|
||||||
|
long getSize() {
|
||||||
|
return end - start;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Determines if this is a free entry.
|
||||||
|
*
|
||||||
|
* @return is this entry free?
|
||||||
|
*/
|
||||||
|
boolean isFree() {
|
||||||
|
return store == null;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
 * Obtains the data stored in the entry.
 *
 * @return the data stored or {@code null} if this entry is a free entry
 */
@Nullable
T getStore() {
  return store;
}
|
||||||
|
|
||||||
|
/** Returns a debug representation with the start, end, and stored data of the entry. */
@Override
public String toString() {
  return MoreObjects.toStringHelper(this)
      .add("start", start)
      .add("end", end)
      .add("store", store)
      .toString();
}
|
||||||
|
}
|
||||||
|
|
@ -0,0 +1,159 @@
|
||||||
|
/*
|
||||||
|
* Copyright (C) 2015 The Android Open Source Project
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package com.android.tools.build.apkzlib.zip;
|
||||||
|
|
||||||
|
import java.io.IOException;
|
||||||
|
|
||||||
|
/**
 * General purpose bit flags. Contains the encoding of the zip's general purpose bits.
 *
 * <p>We don't really care about the method bit(s). These are bits 1 and 2. Here are the values:
 *
 * <ul>
 *   <li>0 (00): Normal (-en) compression option was used.
 *   <li>1 (01): Maximum (-exx/-ex) compression option was used.
 *   <li>2 (10): Fast (-ef) compression option was used.
 *   <li>3 (11): Super Fast (-es) compression option was used.
 * </ul>
 */
class GPFlags {

  /** Is the entry encrypted? */
  private static final int BIT_ENCRYPTION = 1;

  /** Has CRC computation been deferred and, therefore, does a data description block exist? */
  private static final int BIT_DEFERRED_CRC = (1 << 3);

  /** Is enhanced deflating used? */
  private static final int BIT_ENHANCED_DEFLATING = (1 << 4);

  /** Does the entry contain patched data? */
  private static final int BIT_PATCHED_DATA = (1 << 5);

  /** Is strong encryption used? */
  private static final int BIT_STRONG_ENCRYPTION = (1 << 6) | (1 << 13);

  /**
   * If this bit is set the filename and comment fields for this file must be encoded using UTF-8.
   */
  private static final int BIT_EFS = (1 << 11);

  /** Unused bits. */
  private static final int BIT_UNUSED =
      (1 << 7) | (1 << 8) | (1 << 9) | (1 << 10) | (1 << 14) | (1 << 15);

  /** Bit flag value. */
  private final long value;

  /** Has the CRC computation been deferred? Derived from {@link #BIT_DEFERRED_CRC}. */
  private final boolean deferredCrc;

  /** Is the file name encoded in UTF-8? Derived from {@link #BIT_EFS}. */
  private final boolean utf8FileName;

  /**
   * Creates a new flags object.
   *
   * @param value the value of the bit mask
   */
  private GPFlags(long value) {
    this.value = value;

    deferredCrc = ((value & BIT_DEFERRED_CRC) != 0);
    utf8FileName = ((value & BIT_EFS) != 0);
  }

  /**
   * Obtains the flags value.
   *
   * @return the value of the bit mask
   */
  public long getValue() {
    return value;
  }

  /**
   * Is the CRC computation deferred?
   *
   * @return is the CRC computation deferred?
   */
  public boolean isDeferredCrc() {
    return deferredCrc;
  }

  /**
   * Is the file name encoded in UTF-8?
   *
   * @return is the file name encoded in UTF-8?
   */
  public boolean isUtf8FileName() {
    return utf8FileName;
  }

  /**
   * Creates a new bit mask.
   *
   * @param utf8Encoding should UTF-8 encoding be used?
   * @return the new bit mask
   */
  static GPFlags make(boolean utf8Encoding) {
    long flags = 0;

    if (utf8Encoding) {
      flags |= BIT_EFS;
    }

    return new GPFlags(flags);
  }

  /**
   * Creates the flag information from a byte. This method will also validate that only supported
   * options are defined in the flag.
   *
   * @param bits the bit mask
   * @return the created flag information
   * @throws IOException unsupported options are used in the bit mask
   */
  static GPFlags from(long bits) throws IOException {
    if ((bits & BIT_ENCRYPTION) != 0) {
      throw new IOException("Zip files with encrypted entries not supported.");
    }

    if ((bits & BIT_ENHANCED_DEFLATING) != 0) {
      throw new IOException("Enhanced deflating not supported.");
    }

    if ((bits & BIT_PATCHED_DATA) != 0) {
      throw new IOException("Compressed patched data not supported.");
    }

    if ((bits & BIT_STRONG_ENCRYPTION) != 0) {
      throw new IOException("Strong encryption not supported.");
    }

    if ((bits & BIT_UNUSED) != 0) {
      throw new IOException(
          "Unused bits set in directory entry. Weird. I don't know what's " + "going on.");
    }

    // The general purpose flag field is only 16 bits in the zip format; anything above 32 bits
    // here means the value was read incorrectly.
    if ((bits & 0xffffffff00000000L) != 0) {
      throw new IOException("Unsupported bits after 32.");
    }

    return new GPFlags(bits);
  }
}
|
||||||
|
|
@ -0,0 +1,61 @@
|
||||||
|
/*
|
||||||
|
* Copyright (C) 2016 The Android Open Source Project
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package com.android.tools.build.apkzlib.zip;
|
||||||
|
|
||||||
|
import com.android.tools.build.apkzlib.zip.utils.CloseableByteSource;
|
||||||
|
import java.io.ByteArrayInputStream;
|
||||||
|
import java.io.IOException;
|
||||||
|
import java.io.InputStream;
|
||||||
|
import java.io.SequenceInputStream;
|
||||||
|
import java.util.zip.Inflater;
|
||||||
|
import java.util.zip.InflaterInputStream;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Byte source that inflates another byte source. It assumed the inner byte source has deflated
|
||||||
|
* data.
|
||||||
|
*/
|
||||||
|
public class InflaterByteSource extends CloseableByteSource {
|
||||||
|
|
||||||
|
/** The stream factory for the deflated data. */
|
||||||
|
private final CloseableByteSource deflatedSource;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Creates a new source.
|
||||||
|
*
|
||||||
|
* @param byteSource the factory for deflated data
|
||||||
|
*/
|
||||||
|
public InflaterByteSource(CloseableByteSource byteSource) {
|
||||||
|
deflatedSource = byteSource;
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public InputStream openStream() throws IOException {
|
||||||
|
/*
|
||||||
|
* The extra byte is a dummy byte required by the inflater. Weirdo.
|
||||||
|
* (see the java.util.Inflater documentation). Looks like a hack...
|
||||||
|
* "Oh, I need an extra dummy byte to allow for some... err... optimizations..."
|
||||||
|
*/
|
||||||
|
ByteArrayInputStream hackByte = new ByteArrayInputStream(new byte[] {0});
|
||||||
|
return new InflaterInputStream(
|
||||||
|
new SequenceInputStream(deflatedSource.openStream(), hackByte), new Inflater(true));
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public void innerClose() throws IOException {
|
||||||
|
deflatedSource.close();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
@ -0,0 +1,153 @@
|
||||||
|
/*
|
||||||
|
* Copyright (C) 2016 The Android Open Source Project
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package com.android.tools.build.apkzlib.zip;
|
||||||
|
|
||||||
|
import com.android.tools.build.apkzlib.zip.utils.CloseableByteSource;
|
||||||
|
import com.google.common.hash.HashCode;
|
||||||
|
import com.google.common.hash.HashFunction;
|
||||||
|
import com.google.common.io.ByteProcessor;
|
||||||
|
import com.google.common.io.ByteSink;
|
||||||
|
import com.google.common.io.ByteSource;
|
||||||
|
import com.google.common.io.CharSource;
|
||||||
|
import com.google.common.util.concurrent.ListenableFuture;
|
||||||
|
import java.io.IOException;
|
||||||
|
import java.io.InputStream;
|
||||||
|
import java.io.OutputStream;
|
||||||
|
import java.nio.charset.Charset;
|
||||||
|
import java.util.concurrent.ExecutionException;
|
||||||
|
|
||||||
|
/**
 * {@code ByteSource} that delegates all operations to another {@code ByteSource}. The other byte
 * source, the <em>delegate</em>, may be computed lazily: it is provided as a future and is only
 * waited upon when the first operation needs it.
 */
public class LazyDelegateByteSource extends CloseableByteSource {

  /** Byte source where we delegate operations to. */
  private final ListenableFuture<CloseableByteSource> delegate;

  /**
   * Creates a new byte source that delegates operations to the provided source.
   *
   * @param delegate the source that will receive all operations
   */
  public LazyDelegateByteSource(ListenableFuture<CloseableByteSource> delegate) {
    this.delegate = delegate;
  }

  /**
   * Obtains the delegate future.
   *
   * @return the delegate future, that may be computed or not
   */
  public ListenableFuture<CloseableByteSource> getDelegate() {
    return delegate;
  }

  /**
   * Obtains the byte source, waiting for the future to be computed.
   *
   * @return the byte source
   * @throws IOException failed to compute the future, the wait was interrupted, or the
   *     computation produced {@code null}
   */
  private CloseableByteSource get() throws IOException {
    try {
      CloseableByteSource r = delegate.get();
      if (r == null) {
        throw new IOException("Delegate byte source computation resulted in null.");
      }

      return r;
    } catch (InterruptedException e) {
      // NOTE(review): the interrupt flag is not restored here (Thread.currentThread()
      // .interrupt()) -- confirm whether callers rely on that before changing.
      throw new IOException("Interrupted while waiting for byte source computation.", e);
    } catch (ExecutionException e) {
      throw new IOException("Failed to compute byte source.", e);
    }
  }

  // asCharSource cannot declare IOException (the overridden signature does not), so a failure
  // obtaining the delegate surfaces as a RuntimeException wrapping the IOException.
  @Override
  public CharSource asCharSource(Charset charset) {
    try {
      return get().asCharSource(charset);
    } catch (IOException e) {
      throw new RuntimeException(e);
    }
  }

  @Override
  public InputStream openBufferedStream() throws IOException {
    return get().openBufferedStream();
  }

  // Like asCharSource, slice cannot declare IOException and wraps it in RuntimeException.
  @Override
  public ByteSource slice(long offset, long length) {
    try {
      return get().slice(offset, length);
    } catch (IOException e) {
      throw new RuntimeException(e);
    }
  }

  @Override
  public boolean isEmpty() throws IOException {
    return get().isEmpty();
  }

  @Override
  public long size() throws IOException {
    return get().size();
  }

  @Override
  public long copyTo(OutputStream output) throws IOException {
    return get().copyTo(output);
  }

  @Override
  public long copyTo(ByteSink sink) throws IOException {
    return get().copyTo(sink);
  }

  @Override
  public byte[] read() throws IOException {
    return get().read();
  }

  @Override
  public <T> T read(ByteProcessor<T> processor) throws IOException {
    return get().read(processor);
  }

  @Override
  public HashCode hash(HashFunction hashFunction) throws IOException {
    return get().hash(hashFunction);
  }

  @Override
  public boolean contentEquals(ByteSource other) throws IOException {
    return get().contentEquals(other);
  }

  @Override
  public InputStream openStream() throws IOException {
    return get().openStream();
  }

  @Override
  public void innerClose() throws IOException {
    get().close();
  }
}
|
||||||
|
|
@ -0,0 +1,77 @@
|
||||||
|
/*
|
||||||
|
* Copyright (C) 2016 The Android Open Source Project
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package com.android.tools.build.apkzlib.zip;
|
||||||
|
|
||||||
|
import com.android.tools.build.apkzlib.zip.utils.CloseableByteSource;
|
||||||
|
import com.google.common.io.Closer;
|
||||||
|
import java.io.Closeable;
|
||||||
|
import java.io.IOException;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Container that has two bytes sources: one representing raw data and another processed data. In
|
||||||
|
* case of compression, the raw data is the compressed data and the processed data is the
|
||||||
|
* uncompressed data. It is valid for a RaP ("Raw-and-Processed") to contain the same byte sources
|
||||||
|
* for both processed and raw data.
|
||||||
|
*/
|
||||||
|
public class ProcessedAndRawByteSources implements Closeable {
|
||||||
|
|
||||||
|
/** The processed byte source. */
|
||||||
|
private final CloseableByteSource processedSource;
|
||||||
|
|
||||||
|
/** The processed raw source. */
|
||||||
|
private final CloseableByteSource rawSource;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Creates a new container.
|
||||||
|
*
|
||||||
|
* @param processedSource the processed source
|
||||||
|
* @param rawSource the raw source
|
||||||
|
*/
|
||||||
|
public ProcessedAndRawByteSources(
|
||||||
|
CloseableByteSource processedSource, CloseableByteSource rawSource) {
|
||||||
|
this.processedSource = processedSource;
|
||||||
|
this.rawSource = rawSource;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Obtains a byte source that read the processed contents of the entry.
|
||||||
|
*
|
||||||
|
* @return a byte source
|
||||||
|
*/
|
||||||
|
public CloseableByteSource getProcessedByteSource() {
|
||||||
|
return processedSource;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Obtains a byte source that reads the raw contents of an entry. This is the data that is
|
||||||
|
* ultimately stored in the file and, in the case of compressed files, is the same data in the
|
||||||
|
* source returned by {@link #getProcessedByteSource()}.
|
||||||
|
*
|
||||||
|
* @return a byte source
|
||||||
|
*/
|
||||||
|
public CloseableByteSource getRawByteSource() {
|
||||||
|
return rawSource;
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public void close() throws IOException {
|
||||||
|
Closer closer = Closer.create();
|
||||||
|
closer.register(processedSource);
|
||||||
|
closer.register(rawSource);
|
||||||
|
closer.close();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
@ -0,0 +1,775 @@
|
||||||
|
/*
|
||||||
|
* Copyright (C) 2015 The Android Open Source Project
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package com.android.tools.build.apkzlib.zip;
|
||||||
|
|
||||||
|
import com.android.tools.build.apkzlib.bytestorage.ByteStorage;
|
||||||
|
import com.android.tools.build.apkzlib.bytestorage.CloseableByteSourceFromOutputStreamBuilder;
|
||||||
|
import com.android.tools.build.apkzlib.utils.IOExceptionWrapper;
|
||||||
|
import com.android.tools.build.apkzlib.zip.utils.CloseableByteSource;
|
||||||
|
import com.google.common.annotations.VisibleForTesting;
|
||||||
|
import com.google.common.base.Preconditions;
|
||||||
|
import com.google.common.base.Supplier;
|
||||||
|
import com.google.common.base.Suppliers;
|
||||||
|
import com.google.common.base.Verify;
|
||||||
|
import com.google.common.io.ByteStreams;
|
||||||
|
import com.google.common.primitives.Ints;
|
||||||
|
import java.io.BufferedInputStream;
|
||||||
|
import java.io.IOException;
|
||||||
|
import java.io.InputStream;
|
||||||
|
import java.nio.ByteBuffer;
|
||||||
|
import java.util.Comparator;
|
||||||
|
import javax.annotation.Nullable;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* A stored entry represents a file in the zip. The entry may or may not be written to the zip file.
|
||||||
|
*
|
||||||
|
* <p>Stored entries provide the operations that are related to the files themselves, not to the
|
||||||
|
* zip. It is through the {@code StoredEntry} class that entries can be deleted ({@link #delete()},
|
||||||
|
* open ({@link #open()}) or realigned ({@link #realign()}).
|
||||||
|
*
|
||||||
|
* <p>Entries are not created directly. They are created using {@link ZFile#add(String, InputStream,
|
||||||
|
* boolean)} and obtained from the zip file using {@link ZFile#get(String)} or {@link
|
||||||
|
* ZFile#entries()}.
|
||||||
|
*
|
||||||
|
* <p>Most of the data in the an entry is in the Central Directory Header. This includes the name,
|
||||||
|
* compression method, file compressed and uncompressed sizes, CRC32 checksum, etc. The CDH can be
|
||||||
|
* obtained using the {@link #getCentralDirectoryHeader()} method.
|
||||||
|
*/
|
||||||
|
public class StoredEntry {
|
||||||
|
|
||||||
|
/** Comparator that compares instances of {@link StoredEntry} by their names. */
|
||||||
|
static final Comparator<StoredEntry> COMPARE_BY_NAME =
|
||||||
|
(o1, o2) -> {
|
||||||
|
if (o1 == null && o2 == null) {
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (o1 == null) {
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (o2 == null) {
|
||||||
|
return 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
String name1 = o1.getCentralDirectoryHeader().getName();
|
||||||
|
String name2 = o2.getCentralDirectoryHeader().getName();
|
||||||
|
return name1.compareTo(name2);
|
||||||
|
};
|
||||||
|
|
||||||
|
/** Signature of the data descriptor. */
|
||||||
|
private static final int DATA_DESC_SIGNATURE = 0x08074b50;
|
||||||
|
|
||||||
|
/** Local header field: signature. */
|
||||||
|
private static final ZipField.F4 F_LOCAL_SIGNATURE = new ZipField.F4(0, 0x04034b50, "Signature");
|
||||||
|
|
||||||
|
/** Local header field: version to extract, should match the CDH's. */
|
||||||
|
@VisibleForTesting
|
||||||
|
static final ZipField.F2 F_VERSION_EXTRACT =
|
||||||
|
new ZipField.F2(
|
||||||
|
F_LOCAL_SIGNATURE.endOffset(), "Version to extract", new ZipFieldInvariantNonNegative());
|
||||||
|
|
||||||
|
/** Local header field: GP bit flag, should match the CDH's. */
|
||||||
|
private static final ZipField.F2 F_GP_BIT =
|
||||||
|
new ZipField.F2(F_VERSION_EXTRACT.endOffset(), "GP bit flag");
|
||||||
|
|
||||||
|
/** Local header field: compression method, should match the CDH's. */
|
||||||
|
private static final ZipField.F2 F_METHOD =
|
||||||
|
new ZipField.F2(
|
||||||
|
F_GP_BIT.endOffset(), "Compression method", new ZipFieldInvariantNonNegative());
|
||||||
|
|
||||||
|
/** Local header field: last modification time, should match the CDH's. */
|
||||||
|
private static final ZipField.F2 F_LAST_MOD_TIME =
|
||||||
|
new ZipField.F2(F_METHOD.endOffset(), "Last modification time");
|
||||||
|
|
||||||
|
/** Local header field: last modification time, should match the CDH's. */
|
||||||
|
private static final ZipField.F2 F_LAST_MOD_DATE =
|
||||||
|
new ZipField.F2(F_LAST_MOD_TIME.endOffset(), "Last modification date");
|
||||||
|
|
||||||
|
/** Local header field: CRC32 checksum, should match the CDH's. 0 if there is no data. */
|
||||||
|
private static final ZipField.F4 F_CRC32 = new ZipField.F4(F_LAST_MOD_DATE.endOffset(), "CRC32");
|
||||||
|
|
||||||
|
/** Local header field: compressed size, size the data takes in the zip file. */
|
||||||
|
private static final ZipField.F4 F_COMPRESSED_SIZE =
|
||||||
|
new ZipField.F4(F_CRC32.endOffset(), "Compressed size", new ZipFieldInvariantNonNegative());
|
||||||
|
|
||||||
|
/** Local header field: uncompressed size, size the data takes after extraction. */
|
||||||
|
private static final ZipField.F4 F_UNCOMPRESSED_SIZE =
|
||||||
|
new ZipField.F4(
|
||||||
|
F_COMPRESSED_SIZE.endOffset(), "Uncompressed size", new ZipFieldInvariantNonNegative());
|
||||||
|
|
||||||
|
/** Local header field: length of the file name. */
|
||||||
|
private static final ZipField.F2 F_FILE_NAME_LENGTH =
|
||||||
|
new ZipField.F2(
|
||||||
|
F_UNCOMPRESSED_SIZE.endOffset(), "@File name length", new ZipFieldInvariantNonNegative());
|
||||||
|
|
||||||
|
/** Local header filed: length of the extra field. */
|
||||||
|
private static final ZipField.F2 F_EXTRA_LENGTH =
|
||||||
|
new ZipField.F2(
|
||||||
|
F_FILE_NAME_LENGTH.endOffset(), "Extra length", new ZipFieldInvariantNonNegative());
|
||||||
|
|
||||||
|
/** Local header size (fixed part, not counting file name or extra field). */
|
||||||
|
static final int FIXED_LOCAL_FILE_HEADER_SIZE = F_EXTRA_LENGTH.endOffset();
|
||||||
|
|
||||||
|
/** Type of entry. */
|
||||||
|
private final StoredEntryType type;
|
||||||
|
|
||||||
|
/** The central directory header with information about the file. */
|
||||||
|
private final CentralDirectoryHeader cdh;
|
||||||
|
|
||||||
|
/** The file this entry is associated with */
|
||||||
|
private final ZFile file;
|
||||||
|
|
||||||
|
/** Has this entry been deleted? */
|
||||||
|
private boolean deleted;
|
||||||
|
|
||||||
|
/** Extra field specified in the local directory. */
|
||||||
|
private ExtraField localExtra;
|
||||||
|
|
||||||
|
/** Type of data descriptor associated with the entry. */
|
||||||
|
private Supplier<DataDescriptorType> dataDescriptorType;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Source for this entry's data. If this entry is a directory, this source has to have zero size.
|
||||||
|
*/
|
||||||
|
private ProcessedAndRawByteSources source;
|
||||||
|
|
||||||
|
/** Verify log for the entry. */
|
||||||
|
private final VerifyLog verifyLog;
|
||||||
|
|
||||||
|
/** Storage used to create buffers when loading storage into memory. */
|
||||||
|
private final ByteStorage storage;
|
||||||
|
|
||||||
|
/**
 * Creates a new stored entry.
 *
 * @param header the header with the entry information; if the header does not contain an offset
 *     it means that this entry is not yet written in the zip file
 * @param file the zip file containing the entry
 * @param source the entry's data source; it can be {@code null} only if the source can be read
 *     from the zip file, that is, if {@code header.getOffset()} is non-negative
 * @param storage storage used to create buffers when loading data into memory
 * @throws IOException failed to create the entry
 */
StoredEntry(
    CentralDirectoryHeader header,
    ZFile file,
    @Nullable ProcessedAndRawByteSources source,
    ByteStorage storage)
    throws IOException {
  cdh = header;
  this.file = file;
  deleted = false;
  verifyLog = file.makeVerifyLog();
  this.storage = storage;

  // A non-negative offset means the entry's contents already exist in the zip file.
  if (header.getOffset() >= 0) {
    readLocalHeader();

    Preconditions.checkArgument(
        source == null, "Source was defined but contents already exist on file.");

    /*
     * Since the file is already in the zip, dynamically create a source that will read
     * the file from the zip when needed. The assignment is not really needed, but we
     * would get a warning because of the @NotNull otherwise.
     */
    this.source = createSourceFromZip(cdh.getOffset());
  } else {
    /*
     * There is no local extra data for new files.
     */
    localExtra = new ExtraField();

    Preconditions.checkNotNull(source, "Source was not defined, but contents are not on file.");
    this.source = source;
  }

  /*
   * It seems that zip utilities store directories as names ending with "/".
   * This seems to be respected by all zip utilities although I could not find there anywhere
   * in the specification.
   */
  if (cdh.getName().endsWith(Character.toString(ZFile.SEPARATOR))) {
    type = StoredEntryType.DIRECTORY;
    // Directories should carry no data; log (rather than fail on) violations.
    verifyLog.verify(
        this.source.getProcessedByteSource().isEmpty(), "Directory source is not empty.");
    verifyLog.verify(cdh.getCrc32() == 0, "Directory has CRC32 = %s.", cdh.getCrc32());
    verifyLog.verify(
        cdh.getUncompressedSize() == 0,
        "Directory has uncompressed size = %s.",
        cdh.getUncompressedSize());

    /*
     * Some clever (OMG!) tools, like jar will actually try to compress the directory
     * contents and generate a 2 byte compressed data. Of course, the uncompressed size is
     * zero and we're just wasting space.
     */
    long compressedSize = cdh.getCompressionInfoWithWait().getCompressedSize();
    verifyLog.verify(
        compressedSize == 0 || compressedSize == 2,
        "Directory has compressed size = %s.",
        compressedSize);
  } else {
    type = StoredEntryType.FILE;
  }

  /*
   * By default we assume there is no data descriptor unless the CRC is marked as deferred
   * in the header's GP Bit.
   */
  dataDescriptorType = Suppliers.ofInstance(DataDescriptorType.NO_DATA_DESCRIPTOR);
  if (header.getGpBit().isDeferredCrc()) {
    /*
     * If the deferred CRC bit exists, then we have an extra descriptor field. This extra
     * field may have a signature. Read it lazily (memoized) because it requires reading
     * from the file.
     */
    Verify.verify(
        header.getOffset() >= 0,
        "Files that are not on disk cannot have the " + "deferred CRC bit set.");

    dataDescriptorType =
        Suppliers.memoize(
            () -> {
              try {
                return readDataDescriptorRecord();
              } catch (IOException e) {
                // Suppliers cannot throw checked exceptions; wrap and rethrow unchecked.
                throw new IOExceptionWrapper(
                    new IOException("Failed to read data descriptor record.", e));
              }
            });
  }
}
|
||||||
|
|
||||||
|
/**
 * Obtains the size of the local header of this entry.
 *
 * @return the local header size in bytes: the fixed part plus the encoded file name plus the
 *     local extra field
 * @throws IllegalStateException if this entry has been deleted
 */
public int getLocalHeaderSize() {
  Preconditions.checkState(!deleted, "deleted");
  return FIXED_LOCAL_FILE_HEADER_SIZE + cdh.getEncodedFileName().length + localExtra.size();
}
|
||||||
|
|
||||||
|
/**
 * Obtains the size of the whole entry on disk, including local header and data descriptor. This
 * method will wait until compression information is complete, if needed.
 *
 * @return the number of bytes
 * @throws IOException failed to get compression information
 * @throws IllegalStateException if this entry has been deleted
 */
long getInFileSize() throws IOException {
  Preconditions.checkState(!deleted, "deleted");
  return cdh.getCompressionInfoWithWait().getCompressedSize()
      + getLocalHeaderSize()
      + dataDescriptorType.get().size;
}
|
||||||
|
|
||||||
|
/**
 * Obtains a stream that allows reading from the entry. The stream yields the processed
 * (uncompressed) contents.
 *
 * @return a stream that will return as many bytes as the uncompressed entry size
 * @throws IOException failed to open the stream
 */
public InputStream open() throws IOException {
  return source.getProcessedByteSource().openStream();
}
|
||||||
|
|
||||||
|
/**
 * Obtains the contents of the file.
 *
 * @return a byte array with the contents of the file (uncompressed if the file was compressed)
 * @throws IOException failed to read the file
 */
public byte[] read() throws IOException {
  try (InputStream is = new BufferedInputStream(open())) {
    return ByteStreams.toByteArray(is);
  }
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Obtains the contents of the file in an existing buffer.
|
||||||
|
*
|
||||||
|
* @param bytes buffer to read the file contents in.
|
||||||
|
* @return the number of bytes read
|
||||||
|
* @throws IOException failed to read the file.
|
||||||
|
*/
|
||||||
|
public int read(byte[] bytes) throws IOException {
|
||||||
|
if (bytes.length < getCentralDirectoryHeader().getUncompressedSize()) {
|
||||||
|
throw new RuntimeException(
|
||||||
|
"Buffer to small while reading {}" + getCentralDirectoryHeader().getName());
|
||||||
|
}
|
||||||
|
try (InputStream is = new BufferedInputStream(open())) {
|
||||||
|
return ByteStreams.read(is, bytes, 0, bytes.length);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
 * Obtains the type of entry (file or directory).
 *
 * @return the type of entry
 * @throws IllegalStateException if the entry has been deleted
 */
public StoredEntryType getType() {
  Preconditions.checkState(!deleted, "deleted");
  return type;
}
|
||||||
|
|
||||||
|
/**
 * Deletes this entry from the zip file. Invoking this method doesn't update the zip itself. To
 * eventually write updates to disk, {@link ZFile#update()} must be called.
 *
 * @throws IOException failed to delete the entry
 * @throws IllegalStateException if the zip file was open in read-only mode
 */
public void delete() throws IOException {
  // Public deletion always notifies listeners; the no-notify path is internal only.
  delete(true);
}
|
||||||
|
|
||||||
|
/**
 * Deletes this entry from the zip file. Invoking this method doesn't update the zip itself. To
 * eventually write updates to disk, {@link ZFile#update()} must be called.
 *
 * @param notify should listeners be notified of the deletion? This will only be {@code false} if
 *     the entry is being removed as part of a replacement
 * @throws IOException failed to delete the entry
 * @throws IllegalStateException if the zip file was open in read-only mode
 */
void delete(boolean notify) throws IOException {
  Preconditions.checkState(!deleted, "deleted");
  // Remove the entry from the containing file's bookkeeping first, then mark this object
  // as deleted and release its data sources.
  file.delete(this, notify);
  deleted = true;
  source.close();
}
|
||||||
|
|
||||||
|
/** Returns {@code true} if this entry has been deleted/replaced. */
public boolean isDeleted() {
  return deleted;
}
|
||||||
|
|
||||||
|
/**
 * Obtains the CDH (Central Directory Header) associated with this entry.
 *
 * @return the CDH
 */
public CentralDirectoryHeader getCentralDirectoryHeader() {
  // Note: unlike most accessors, this one is intentionally usable on deleted entries.
  return cdh;
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Reads the file's local header and verifies that it matches the Central Directory Header
|
||||||
|
* provided in the constructor. This method should only be called if the entry already exists on
|
||||||
|
* disk; new entries do not have local headers.
|
||||||
|
*
|
||||||
|
* <p>This method will define the {@link #localExtra} field that is only defined in the local
|
||||||
|
* descriptor.
|
||||||
|
*
|
||||||
|
* @throws IOException failed to read the local header
|
||||||
|
*/
|
||||||
|
private void readLocalHeader() throws IOException {
|
||||||
|
byte[] localHeader = new byte[FIXED_LOCAL_FILE_HEADER_SIZE];
|
||||||
|
file.directFullyRead(cdh.getOffset(), localHeader);
|
||||||
|
|
||||||
|
CentralDirectoryHeaderCompressInfo compressInfo = cdh.getCompressionInfoWithWait();
|
||||||
|
|
||||||
|
ByteBuffer bytes = ByteBuffer.wrap(localHeader);
|
||||||
|
F_LOCAL_SIGNATURE.verify(bytes);
|
||||||
|
F_VERSION_EXTRACT.verify(bytes, compressInfo.getVersionExtract(), verifyLog);
|
||||||
|
F_GP_BIT.verify(bytes, cdh.getGpBit().getValue(), verifyLog);
|
||||||
|
F_METHOD.verify(bytes, compressInfo.getMethod().methodCode, verifyLog);
|
||||||
|
|
||||||
|
if (file.areTimestampsIgnored()) {
|
||||||
|
F_LAST_MOD_TIME.skip(bytes);
|
||||||
|
F_LAST_MOD_DATE.skip(bytes);
|
||||||
|
} else {
|
||||||
|
F_LAST_MOD_TIME.verify(bytes, cdh.getLastModTime(), verifyLog);
|
||||||
|
F_LAST_MOD_DATE.verify(bytes, cdh.getLastModDate(), verifyLog);
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* If CRC-32, compressed size and uncompressed size are deferred, their values in Local
|
||||||
|
* File Header must be ignored and their actual values must be read from the Data
|
||||||
|
* Descriptor following the contents of this entry. See readDataDescriptorRecord().
|
||||||
|
*/
|
||||||
|
if (cdh.getGpBit().isDeferredCrc()) {
|
||||||
|
F_CRC32.skip(bytes);
|
||||||
|
F_COMPRESSED_SIZE.skip(bytes);
|
||||||
|
F_UNCOMPRESSED_SIZE.skip(bytes);
|
||||||
|
} else {
|
||||||
|
F_CRC32.verify(bytes, cdh.getCrc32(), verifyLog);
|
||||||
|
F_COMPRESSED_SIZE.verify(bytes, compressInfo.getCompressedSize(), verifyLog);
|
||||||
|
F_UNCOMPRESSED_SIZE.verify(bytes, cdh.getUncompressedSize(), verifyLog);
|
||||||
|
}
|
||||||
|
|
||||||
|
F_FILE_NAME_LENGTH.verify(bytes, cdh.getEncodedFileName().length);
|
||||||
|
long extraLength = F_EXTRA_LENGTH.read(bytes);
|
||||||
|
long fileNameStart = cdh.getOffset() + F_EXTRA_LENGTH.endOffset();
|
||||||
|
byte[] fileNameData = new byte[cdh.getEncodedFileName().length];
|
||||||
|
file.directFullyRead(fileNameStart, fileNameData);
|
||||||
|
|
||||||
|
String fileName = EncodeUtils.decode(fileNameData, cdh.getGpBit());
|
||||||
|
if (!fileName.equals(cdh.getName())) {
|
||||||
|
verifyLog.log(
|
||||||
|
String.format(
|
||||||
|
"Central directory reports file as being named '%s' but local header"
|
||||||
|
+ "reports file being named '%s'.",
|
||||||
|
cdh.getName(), fileName));
|
||||||
|
}
|
||||||
|
|
||||||
|
long localExtraStart = fileNameStart + cdh.getEncodedFileName().length;
|
||||||
|
byte[] localExtraRaw = new byte[Ints.checkedCast(extraLength)];
|
||||||
|
file.directFullyRead(localExtraStart, localExtraRaw);
|
||||||
|
localExtra = new ExtraField(localExtraRaw);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Reads the data descriptor record. This method can only be invoked once it is established that a
|
||||||
|
* data descriptor does exist. It will read the data descriptor and check that the data described
|
||||||
|
* there matches the data provided in the Central Directory.
|
||||||
|
*
|
||||||
|
* <p>This method will set the {@link #dataDescriptorType} field to the appropriate type of data
|
||||||
|
* descriptor record.
|
||||||
|
*
|
||||||
|
* @throws IOException failed to read the data descriptor record
|
||||||
|
*/
|
||||||
|
private DataDescriptorType readDataDescriptorRecord() throws IOException {
|
||||||
|
CentralDirectoryHeaderCompressInfo compressInfo = cdh.getCompressionInfoWithWait();
|
||||||
|
|
||||||
|
long ddStart =
|
||||||
|
cdh.getOffset()
|
||||||
|
+ FIXED_LOCAL_FILE_HEADER_SIZE
|
||||||
|
+ cdh.getName().length()
|
||||||
|
+ localExtra.size()
|
||||||
|
+ compressInfo.getCompressedSize();
|
||||||
|
byte[] ddData = new byte[DataDescriptorType.DATA_DESCRIPTOR_WITH_SIGNATURE.size];
|
||||||
|
file.directFullyRead(ddStart, ddData);
|
||||||
|
|
||||||
|
ByteBuffer ddBytes = ByteBuffer.wrap(ddData);
|
||||||
|
|
||||||
|
ZipField.F4 signatureField = new ZipField.F4(0, "Data descriptor signature");
|
||||||
|
int cpos = ddBytes.position();
|
||||||
|
long sig = signatureField.read(ddBytes);
|
||||||
|
DataDescriptorType result;
|
||||||
|
if (sig == DATA_DESC_SIGNATURE) {
|
||||||
|
result = DataDescriptorType.DATA_DESCRIPTOR_WITH_SIGNATURE;
|
||||||
|
} else {
|
||||||
|
result = DataDescriptorType.DATA_DESCRIPTOR_WITHOUT_SIGNATURE;
|
||||||
|
ddBytes.position(cpos);
|
||||||
|
}
|
||||||
|
|
||||||
|
ZipField.F4 crc32Field = new ZipField.F4(0, "CRC32");
|
||||||
|
ZipField.F4 compressedField = new ZipField.F4(crc32Field.endOffset(), "Compressed size");
|
||||||
|
ZipField.F4 uncompressedField =
|
||||||
|
new ZipField.F4(compressedField.endOffset(), "Uncompressed size");
|
||||||
|
|
||||||
|
crc32Field.verify(ddBytes, cdh.getCrc32(), verifyLog);
|
||||||
|
compressedField.verify(ddBytes, compressInfo.getCompressedSize(), verifyLog);
|
||||||
|
uncompressedField.verify(ddBytes, cdh.getUncompressedSize(), verifyLog);
|
||||||
|
return result;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
 * Creates a new source that reads data from the zip.
 *
 * @param zipOffset the offset into the zip file where the data is, must be non-negative
 * @throws IOException failed to close the old source
 * @return the created source
 */
private ProcessedAndRawByteSources createSourceFromZip(final long zipOffset) throws IOException {
  Preconditions.checkArgument(zipOffset >= 0, "zipOffset < 0");

  final CentralDirectoryHeaderCompressInfo compressInfo;
  try {
    compressInfo = cdh.getCompressionInfoWithWait();
  } catch (IOException e) {
    throw new RuntimeException(
        "IOException should never occur here because compression "
            + "information should be immediately available if reading from zip.",
        e);
  }

  /*
   * Create a source that will return whatever is on the zip file.
   */
  CloseableByteSource rawContents =
      new CloseableByteSource() {
        @Override
        public long size() throws IOException {
          return compressInfo.getCompressedSize();
        }

        @Override
        public InputStream openStream() throws IOException {
          // Each open re-reads directly from the zip file; fail fast on deleted entries.
          Preconditions.checkState(!deleted, "deleted");

          // Compressed data starts right after the local header.
          long dataStart = zipOffset + getLocalHeaderSize();
          long dataEnd = dataStart + compressInfo.getCompressedSize();

          file.openReadOnlyIfClosed();
          return file.directOpen(dataStart, dataEnd);
        }

        @Override
        protected void innerClose() throws IOException {
          /*
           * Nothing to do here.
           */
        }
      };

  return createSourcesFromRawContents(rawContents);
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Creates a {@link ProcessedAndRawByteSources} from the raw data source . The processed source
|
||||||
|
* will either inflate or do nothing depending on the compression information that, at this point,
|
||||||
|
* should already be available
|
||||||
|
*
|
||||||
|
* @param rawContents the raw data to create the source from
|
||||||
|
* @return the sources for this entry
|
||||||
|
*/
|
||||||
|
private ProcessedAndRawByteSources createSourcesFromRawContents(CloseableByteSource rawContents) {
|
||||||
|
CentralDirectoryHeaderCompressInfo compressInfo;
|
||||||
|
try {
|
||||||
|
compressInfo = cdh.getCompressionInfoWithWait();
|
||||||
|
} catch (IOException e) {
|
||||||
|
throw new RuntimeException(
|
||||||
|
"IOException should never occur here because compression "
|
||||||
|
+ "information should be immediately available if creating from raw "
|
||||||
|
+ "contents.",
|
||||||
|
e);
|
||||||
|
}
|
||||||
|
|
||||||
|
CloseableByteSource contents;
|
||||||
|
|
||||||
|
/*
|
||||||
|
* If the contents are deflated, wrap that source in an inflater source so we get the
|
||||||
|
* uncompressed data.
|
||||||
|
*/
|
||||||
|
if (compressInfo.getMethod() == CompressionMethod.DEFLATE) {
|
||||||
|
contents = new InflaterByteSource(rawContents);
|
||||||
|
} else {
|
||||||
|
contents = rawContents;
|
||||||
|
}
|
||||||
|
|
||||||
|
return new ProcessedAndRawByteSources(contents, rawContents);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
 * Replaces {@link #source} with one that reads file data from the zip file.
 *
 * @param zipFileOffset the offset in the zip file where data is written; must be non-negative
 * @throws IOException failed to replace the source
 */
void replaceSourceFromZip(long zipFileOffset) throws IOException {
  Preconditions.checkArgument(zipFileOffset >= 0, "zipFileOffset < 0");

  // Swap in the new zip-backed source and record the new offset before closing the old
  // source, so the entry is never left without a usable source.
  ProcessedAndRawByteSources oldSource = source;
  source = createSourceFromZip(zipFileOffset);
  cdh.setOffset(zipFileOffset);
  oldSource.close();
}
|
||||||
|
|
||||||
|
/**
 * Loads all data in memory and replaces {@link #source} with one that contains all the data in
 * memory.
 *
 * <p>If the entry's contents are already in memory, this call does nothing.
 *
 * @throws IOException failed to replace the source
 */
void loadSourceIntoMemory() throws IOException {
  if (cdh.getOffset() == -1) {
    /*
     * No offset in the CDR means data has not been written to disk which, in turn,
     * means data is already loaded into memory.
     */
    return;
  }

  // Copy the raw (still compressed, if applicable) bytes into storage-backed memory.
  CloseableByteSourceFromOutputStreamBuilder rawBuilder = storage.makeBuilder();
  try (InputStream input = source.getRawByteSource().openStream()) {
    ByteStreams.copy(input, rawBuilder);
  }

  CloseableByteSource newRaw = rawBuilder.build();
  ProcessedAndRawByteSources newSources = createSourcesFromRawContents(newRaw);

  // Swap sources, clearing the offset (-1 marks "not on disk"); try-with-resources
  // closes the old zip-backed source once the swap is done.
  try (ProcessedAndRawByteSources oldSource = source) {
    source = newSources;
    cdh.setOffset(-1);
  }
}
|
||||||
|
|
||||||
|
/**
 * Obtains the source data for this entry. This method can only be called for files, it cannot be
 * called for directories.
 *
 * @return the entry source
 */
ProcessedAndRawByteSources getSource() {
  return source;
}
|
||||||
|
|
||||||
|
/**
 * Obtains the type of data descriptor used in the entry.
 *
 * @return the type of data descriptor
 */
public DataDescriptorType getDataDescriptorType() {
  // dataDescriptorType is a supplier so the value can be computed lazily.
  return dataDescriptorType.get();
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Removes the data descriptor, if it has one and resets the data descriptor bit in the central
|
||||||
|
* directory header.
|
||||||
|
*
|
||||||
|
* @return was the data descriptor remove?
|
||||||
|
*/
|
||||||
|
boolean removeDataDescriptor() {
|
||||||
|
if (dataDescriptorType.get() == DataDescriptorType.NO_DATA_DESCRIPTOR) {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
dataDescriptorType = Suppliers.ofInstance(DataDescriptorType.NO_DATA_DESCRIPTOR);
|
||||||
|
cdh.resetDeferredCrc();
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Obtains the local header data.
|
||||||
|
*
|
||||||
|
* @param buffer a buffer to write header data to
|
||||||
|
* @return the header data size
|
||||||
|
* @throws IOException failed to get header byte data
|
||||||
|
*/
|
||||||
|
int toHeaderData(byte[] buffer) throws IOException {
|
||||||
|
Preconditions.checkArgument(
|
||||||
|
buffer.length
|
||||||
|
>= F_EXTRA_LENGTH.endOffset() + cdh.getEncodedFileName().length + localExtra.size(),
|
||||||
|
"Buffer should be at least the header size");
|
||||||
|
|
||||||
|
ByteBuffer out = ByteBuffer.wrap(buffer);
|
||||||
|
writeData(out);
|
||||||
|
return out.position();
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
 * Writes this entry's local file header into {@code out}: the fixed fields in on-disk order,
 * followed by the encoded file name and the local extra field. Waits for compression
 * information if it is not yet available.
 *
 * @param out buffer to write the header to; its position is advanced past the header
 * @throws IOException failed to obtain compression information
 */
private void writeData(ByteBuffer out) throws IOException {
  CentralDirectoryHeaderCompressInfo compressInfo = cdh.getCompressionInfoWithWait();

  // Field order below mirrors the local file header layout and must not be changed.
  F_LOCAL_SIGNATURE.write(out);
  F_VERSION_EXTRACT.write(out, compressInfo.getVersionExtract());
  F_GP_BIT.write(out, cdh.getGpBit().getValue());
  F_METHOD.write(out, compressInfo.getMethod().methodCode);

  if (file.areTimestampsIgnored()) {
    // Zeroed timestamps make output deterministic when timestamps are ignored.
    F_LAST_MOD_TIME.write(out, 0);
    F_LAST_MOD_DATE.write(out, 0);
  } else {
    F_LAST_MOD_TIME.write(out, cdh.getLastModTime());
    F_LAST_MOD_DATE.write(out, cdh.getLastModDate());
  }

  F_CRC32.write(out, cdh.getCrc32());
  F_COMPRESSED_SIZE.write(out, compressInfo.getCompressedSize());
  F_UNCOMPRESSED_SIZE.write(out, cdh.getUncompressedSize());
  F_FILE_NAME_LENGTH.write(out, cdh.getEncodedFileName().length);
  F_EXTRA_LENGTH.write(out, localExtra.size());

  out.put(cdh.getEncodedFileName());
  localExtra.write(out);
}
|
||||||
|
|
||||||
|
/**
 * Requests that this entry be realigned. If this entry is already aligned according to the rules
 * in {@link ZFile} then this method does nothing. Otherwise it will move the file's data into
 * memory and place it in a different area of the zip.
 *
 * @return has this file been changed? Note that if the entry has not yet been written on the
 *     file, realignment does not count as a change as nothing needs to be updated in the file;
 *     also, if the entry has been changed, this object may have been marked as deleted and a new
 *     stored entry may need to be fetched from the file
 * @throws IOException failed to realign the entry; the entry may no longer exist in the zip file
 */
public boolean realign() throws IOException {
  Preconditions.checkState(!deleted, "Entry has been deleted.");

  // Alignment policy lives in ZFile; delegate the actual work there.
  return file.realign(this);
}
|
||||||
|
|
||||||
|
/**
 * Obtains the contents of the local extra field.
 *
 * @return the contents of the local extra field
 */
public ExtraField getLocalExtra() {
  return localExtra;
}
|
||||||
|
|
||||||
|
/**
 * Sets the contents of the local extra field.
 *
 * @param localExtra the contents of the local extra field
 * @throws IOException failed to update the zip file
 */
public void setLocalExtra(ExtraField localExtra) throws IOException {
  // Update the field, then tell the containing file whether the header size changed so it
  // can relocate the entry if needed.
  boolean resized = setLocalExtraNoNotify(localExtra);
  file.localHeaderChanged(this, resized);
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Sets the contents of the local extra field, does not notify the {@link ZFile} of the change.
|
||||||
|
* This is used internally when the {@link ZFile} itself wants to change the local extra and
|
||||||
|
* doesn't need the callback.
|
||||||
|
*
|
||||||
|
* @param localExtra the contents of the local extra field
|
||||||
|
* @return has the local header size changed?
|
||||||
|
* @throws IOException failed to load the file
|
||||||
|
*/
|
||||||
|
boolean setLocalExtraNoNotify(ExtraField localExtra) throws IOException {
|
||||||
|
boolean sizeChanged;
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Make sure we load into memory.
|
||||||
|
*
|
||||||
|
* If we change the size of the local header, the actual start of the file changes
|
||||||
|
* according to our in-memory structures so, if we don't read the file now, we won't be
|
||||||
|
* able to load it later :)
|
||||||
|
*
|
||||||
|
* But, even if the size doesn't change, we need to read it force the entry to be
|
||||||
|
* rewritten otherwise the changes in the local header aren't written. Of course this case
|
||||||
|
* may be optimized with some extra complexity added :)
|
||||||
|
*/
|
||||||
|
loadSourceIntoMemory();
|
||||||
|
|
||||||
|
if (this.localExtra.size() != localExtra.size()) {
|
||||||
|
sizeChanged = true;
|
||||||
|
} else {
|
||||||
|
sizeChanged = false;
|
||||||
|
}
|
||||||
|
|
||||||
|
this.localExtra = localExtra;
|
||||||
|
return sizeChanged;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
 * Obtains the verify log for the entry.
 *
 * @return the verify log
 */
public VerifyLog getVerifyLog() {
  return verifyLog;
}
|
||||||
|
}
|
||||||
|
|
@ -0,0 +1,26 @@
|
||||||
|
/*
|
||||||
|
* Copyright (C) 2015 The Android Open Source Project
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package com.android.tools.build.apkzlib.zip;
|
||||||
|
|
||||||
|
/** Type of stored entry: whether a zip entry represents a file or a directory. */
public enum StoredEntryType {
  /** Entry is a file. */
  FILE,

  /** Entry is a directory. */
  DIRECTORY
}
|
||||||
|
|
@ -0,0 +1,54 @@
|
||||||
|
/*
|
||||||
|
* Copyright (C) 2017 The Android Open Source Project
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package com.android.tools.build.apkzlib.zip;
|
||||||
|
|
||||||
|
import com.google.common.collect.ImmutableList;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* The verify log contains verification messages. It is used to capture validation issues with a zip
|
||||||
|
* file or with parts of a zip file.
|
||||||
|
*/
|
||||||
|
public interface VerifyLog {
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Logs a message.
|
||||||
|
*
|
||||||
|
* @param message the message to verify
|
||||||
|
*/
|
||||||
|
void log(String message);
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Obtains all save logged messages.
|
||||||
|
*
|
||||||
|
* @return the logged messages
|
||||||
|
*/
|
||||||
|
ImmutableList<String> getLogs();
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Performs verification of a non-critical condition, logging a message if the condition is not
|
||||||
|
* verified.
|
||||||
|
*
|
||||||
|
* @param condition the condition
|
||||||
|
* @param message the message to write if {@code condition} is {@code false}.
|
||||||
|
* @param args arguments for formatting {@code message} using {@code String.format}
|
||||||
|
*/
|
||||||
|
default void verify(boolean condition, String message, Object... args) {
|
||||||
|
if (!condition) {
|
||||||
|
log(String.format(message, args));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
@ -0,0 +1,67 @@
|
||||||
|
/*
|
||||||
|
* Copyright (C) 2017 The Android Open Source Project
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package com.android.tools.build.apkzlib.zip;
|
||||||
|
|
||||||
|
import com.google.common.collect.ImmutableList;
|
||||||
|
import java.util.ArrayList;
|
||||||
|
import java.util.List;
|
||||||
|
|
||||||
|
/** Factory for verification logs. */
|
||||||
|
final class VerifyLogs {
|
||||||
|
|
||||||
|
private VerifyLogs() {}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Creates a {@link VerifyLog} that ignores all messages logged.
|
||||||
|
*
|
||||||
|
* @return the log
|
||||||
|
*/
|
||||||
|
static VerifyLog devNull() {
|
||||||
|
return new VerifyLog() {
|
||||||
|
@Override
|
||||||
|
public void log(String message) {}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public ImmutableList<String> getLogs() {
|
||||||
|
return ImmutableList.of();
|
||||||
|
}
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Creates a {@link VerifyLog} that stores all log messages.
|
||||||
|
*
|
||||||
|
* @return the log
|
||||||
|
*/
|
||||||
|
static VerifyLog unlimited() {
|
||||||
|
return new VerifyLog() {
|
||||||
|
|
||||||
|
/** All saved messages. */
|
||||||
|
private final List<String> messages = new ArrayList<>();
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public void log(String message) {
|
||||||
|
messages.add(message);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public ImmutableList<String> getLogs() {
|
||||||
|
return ImmutableList.copyOf(messages);
|
||||||
|
}
|
||||||
|
};
|
||||||
|
}
|
||||||
|
}
|
||||||
File diff suppressed because it is too large
Load Diff
|
|
@ -0,0 +1,140 @@
|
||||||
|
/*
|
||||||
|
* Copyright (C) 2015 The Android Open Source Project
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package com.android.tools.build.apkzlib.zip;
|
||||||
|
|
||||||
|
import com.android.tools.build.apkzlib.utils.IOExceptionRunnable;
|
||||||
|
import java.io.IOException;
|
||||||
|
import javax.annotation.Nullable;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* An extension of a {@link ZFile}. Extensions are notified when files are open, updated, closed and
|
||||||
|
* when files are added or removed from the zip. These notifications are received after the zip has
|
||||||
|
* been updated in memory for open, when files are added or removed and when the zip has been
|
||||||
|
* updated on disk or closed.
|
||||||
|
*
|
||||||
|
* <p>An extension is also notified before the file is updated, allowing it to modify the file
|
||||||
|
* before the update happens. If it does, then all extensions are notified of the changes on the zip
|
||||||
|
* file. Because the order of the notifications is preserved, all extensions are notified in the
|
||||||
|
* same order. For example, if two extensions E1 and E2 are registered and they both add a file at
|
||||||
|
* update time, this would be the flow:
|
||||||
|
*
|
||||||
|
* <ul>
|
||||||
|
* <li>E1 receives {@code beforeUpdate} notification.
|
||||||
|
* <li>E1 adds file F1 to the zip (notifying the addition is suspended because another
|
||||||
|
* notification is in progress).
|
||||||
|
* <li>E2 receives {@code beforeUpdate} notification.
|
||||||
|
* <li>E2 adds file F2 to the zip (notifying the addition is suspended because another
|
||||||
|
* notification is in progress).
|
||||||
|
* <li>E1 is notified F1 was added.
|
||||||
|
* <li>E2 is notified F1 was added.
|
||||||
|
* <li>E1 is notified F2 was added.
|
||||||
|
* <li>E2 is notified F2 was added.
|
||||||
|
* <li>(zip file is updated on disk)
|
||||||
|
* <li>E1 is notified the zip was updated.
|
||||||
|
* <li>E2 is notified the zip was updated.
|
||||||
|
* </ul>
|
||||||
|
*
|
||||||
|
* <p>An extension should not modify the zip file when notified of changes. If allowed, this would
|
||||||
|
* break event notification order in case multiple extensions are registered with the zip file. To
|
||||||
|
* allow performing changes to the zip file, all notification method return a {@code
|
||||||
|
* IOExceptionRunnable} that is invoked when {@link ZFile} has finished notifying all extensions.
|
||||||
|
*/
|
||||||
|
public abstract class ZFileExtension {
|
||||||
|
|
||||||
|
/**
|
||||||
|
* The zip file has been open and the zip's contents have been read. The default implementation
|
||||||
|
* does nothing and returns {@code null}.
|
||||||
|
*
|
||||||
|
* @return an optional runnable to run when notification of all listeners has ended
|
||||||
|
* @throws IOException failed to process the event
|
||||||
|
*/
|
||||||
|
@Nullable
|
||||||
|
public IOExceptionRunnable open() throws IOException {
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* The zip will be updated. This method allows the extension to register changes to the zip file
|
||||||
|
* before the file is written. The default implementation does nothing and returns {@code null}.
|
||||||
|
*
|
||||||
|
* <p>After this notification is received, the extension will receive further {@link
|
||||||
|
* #added(StoredEntry, StoredEntry)} and {@link #removed(StoredEntry)} notifications if it or
|
||||||
|
* other extensions add or remove files before update.
|
||||||
|
*
|
||||||
|
* <p>When no more files are updated, the {@link #entriesWritten()} notification is sent.
|
||||||
|
*
|
||||||
|
* @return an optional runnable to run when notification of all listeners has ended
|
||||||
|
* @throws IOException failed to process the event
|
||||||
|
*/
|
||||||
|
@Nullable
|
||||||
|
public IOExceptionRunnable beforeUpdate() throws IOException {
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* This notification is sent when all entries have been written in the file but the central
|
||||||
|
* directory and the EOCD have not yet been written. No entries should be added, removed or
|
||||||
|
* updated during this notification. If this method forces an update of either the central
|
||||||
|
* directory or EOCD, then this method will be invoked again for all extensions with the new
|
||||||
|
* central directory and EOCD.
|
||||||
|
*
|
||||||
|
* <p>After this notification, {@link #updated()} is sent.
|
||||||
|
*
|
||||||
|
* @throws IOException failed to process the event
|
||||||
|
*/
|
||||||
|
public void entriesWritten() throws IOException {}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* The zip file has been updated on disk. The default implementation does nothing.
|
||||||
|
*
|
||||||
|
* @throws IOException failed to perform update tasks
|
||||||
|
*/
|
||||||
|
public void updated() throws IOException {}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* The zip file has been closed. Note that if {@link ZFile#close()} requires that the zip file be
|
||||||
|
* updated (because it had in-memory changes), {@link #updated()} will be called before this
|
||||||
|
* method. The default implementation does nothing.
|
||||||
|
*/
|
||||||
|
public void closed() {}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* A new entry has been added to the zip, possibly replacing an entry in there. The default
|
||||||
|
* implementation does nothing and returns {@code null}.
|
||||||
|
*
|
||||||
|
* @param entry the entry that was added
|
||||||
|
* @param replaced the entry that was replaced, if any
|
||||||
|
* @return an optional runnable to run when notification of all listeners has ended
|
||||||
|
*/
|
||||||
|
@Nullable
|
||||||
|
public IOExceptionRunnable added(StoredEntry entry, @Nullable StoredEntry replaced) {
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* An entry has been removed from the zip. This method is not invoked for entries that have been
|
||||||
|
* replaced. Those entries are notified using <em>replaced</em> in {@link #added(StoredEntry,
|
||||||
|
* StoredEntry)}. The default implementation does nothing and returns {@code null}.
|
||||||
|
*
|
||||||
|
* @param entry the entry that was deleted
|
||||||
|
* @return an optional runnable to run when notification of all listeners has ended
|
||||||
|
*/
|
||||||
|
@Nullable
public IOExceptionRunnable removed(StoredEntry entry) {
  // Default: no post-notification action for deletions.
  return null;
}
|
||||||
|
}
|
||||||
|
|
@ -0,0 +1,260 @@
|
||||||
|
/*
|
||||||
|
* Copyright (C) 2016 The Android Open Source Project
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package com.android.tools.build.apkzlib.zip;
|
||||||
|
|
||||||
|
import com.android.tools.build.apkzlib.bytestorage.ByteStorageFactory;
|
||||||
|
import com.android.tools.build.apkzlib.bytestorage.ChunkBasedByteStorageFactory;
|
||||||
|
import com.android.tools.build.apkzlib.bytestorage.OverflowToDiskByteStorageFactory;
|
||||||
|
import com.android.tools.build.apkzlib.bytestorage.TemporaryDirectory;
|
||||||
|
import com.android.tools.build.apkzlib.zip.compress.DeflateExecutionCompressor;
|
||||||
|
import com.android.tools.build.apkzlib.zip.utils.ByteTracker;
|
||||||
|
import com.google.common.base.Supplier;
|
||||||
|
import java.util.zip.Deflater;
|
||||||
|
|
||||||
|
/** Options to create a {@link ZFile}. */
|
||||||
|
public class ZFileOptions {
|
||||||
|
|
||||||
|
/** The storage to use. */
|
||||||
|
private ByteStorageFactory storageFactory;
|
||||||
|
|
||||||
|
/** The compressor to use. */
|
||||||
|
private Compressor compressor;
|
||||||
|
|
||||||
|
/** Should timestamps be zeroed? */
|
||||||
|
private boolean noTimestamps;
|
||||||
|
|
||||||
|
/** The alignment rule to use. */
|
||||||
|
private AlignmentRule alignmentRule;
|
||||||
|
|
||||||
|
/** Should the extra field be used to cover empty space? */
|
||||||
|
private boolean coverEmptySpaceUsingExtraField;
|
||||||
|
|
||||||
|
/** Should files be automatically sorted before update? */
|
||||||
|
private boolean autoSortFiles;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Skip expensive validation during {@link ZFile} creation?
|
||||||
|
*
|
||||||
|
* <p>During incremental build we are absolutely sure that the zip file is valid, so we do not
|
||||||
|
* have to spend time verifying different fields (some of these checks are relatively expensive
|
||||||
|
* and should be skipped if possible for performance)
|
||||||
|
*/
|
||||||
|
private boolean skipValidation;
|
||||||
|
|
||||||
|
/** Factory creating verification logs to use. */
|
||||||
|
private Supplier<VerifyLog> verifyLogFactory;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Whether to always generate the MANIFEST.MF file regardless whether the APK will be signed with
|
||||||
|
* v1 signing scheme (i.e. jar signing).
|
||||||
|
*/
|
||||||
|
private boolean alwaysGenerateJarManifest;
|
||||||
|
|
||||||
|
/** Creates a new options object. All options are set to their defaults. */
|
||||||
|
public ZFileOptions() {
|
||||||
|
storageFactory =
|
||||||
|
new ChunkBasedByteStorageFactory(
|
||||||
|
new OverflowToDiskByteStorageFactory(TemporaryDirectory::newSystemTemporaryDirectory));
|
||||||
|
compressor = new DeflateExecutionCompressor(Runnable::run, Deflater.DEFAULT_COMPRESSION);
|
||||||
|
alignmentRule = AlignmentRules.compose();
|
||||||
|
verifyLogFactory = VerifyLogs::devNull;
|
||||||
|
|
||||||
|
// We set this to true because many utilities stream the zip and expect no space between entries
|
||||||
|
// in the zip file.
|
||||||
|
coverEmptySpaceUsingExtraField = true;
|
||||||
|
skipValidation = false;
|
||||||
|
// True by default for backwards compatibility.
|
||||||
|
alwaysGenerateJarManifest = true;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Obtains the ZFile's byte storage factory.
|
||||||
|
*
|
||||||
|
* @return the factory used to create byte storages used to store data
|
||||||
|
*/
|
||||||
|
public ByteStorageFactory getStorageFactory() {
|
||||||
|
return storageFactory;
|
||||||
|
}
|
||||||
|
|
||||||
|
@Deprecated
|
||||||
|
public ByteTracker getTracker() {
|
||||||
|
return new ByteTracker();
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Sets the byte storage factory to use.
|
||||||
|
*
|
||||||
|
* @param storage the factory to use to create storage for new instances of {@link ZFile} created
|
||||||
|
* for these options.
|
||||||
|
*/
|
||||||
|
public ZFileOptions setStorageFactory(ByteStorageFactory storage) {
|
||||||
|
this.storageFactory = storage;
|
||||||
|
return this;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Obtains the compressor to use.
|
||||||
|
*
|
||||||
|
* @return the compressor
|
||||||
|
*/
|
||||||
|
public Compressor getCompressor() {
|
||||||
|
return compressor;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Sets the compressor to use.
|
||||||
|
*
|
||||||
|
* @param compressor the compressor
|
||||||
|
*/
|
||||||
|
public ZFileOptions setCompressor(Compressor compressor) {
|
||||||
|
this.compressor = compressor;
|
||||||
|
return this;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Obtains whether timestamps should be zeroed.
|
||||||
|
*
|
||||||
|
* @return should timestamps be zeroed?
|
||||||
|
*/
|
||||||
|
public boolean getNoTimestamps() {
|
||||||
|
return noTimestamps;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Sets whether timestamps should be zeroed.
|
||||||
|
*
|
||||||
|
* @param noTimestamps should timestamps be zeroed?
|
||||||
|
*/
|
||||||
|
public ZFileOptions setNoTimestamps(boolean noTimestamps) {
|
||||||
|
this.noTimestamps = noTimestamps;
|
||||||
|
return this;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Obtains the alignment rule.
|
||||||
|
*
|
||||||
|
* @return the alignment rule
|
||||||
|
*/
|
||||||
|
public AlignmentRule getAlignmentRule() {
|
||||||
|
return alignmentRule;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Sets the alignment rule.
|
||||||
|
*
|
||||||
|
* @param alignmentRule the alignment rule
|
||||||
|
*/
|
||||||
|
public ZFileOptions setAlignmentRule(AlignmentRule alignmentRule) {
|
||||||
|
this.alignmentRule = alignmentRule;
|
||||||
|
return this;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Obtains whether the extra field should be used to cover empty spaces. See {@link ZFile} for an
|
||||||
|
* explanation on using the extra field for covering empty spaces.
|
||||||
|
*
|
||||||
|
* @return should the extra field be used to cover empty spaces?
|
||||||
|
*/
|
||||||
|
public boolean getCoverEmptySpaceUsingExtraField() {
|
||||||
|
return coverEmptySpaceUsingExtraField;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Sets whether the extra field should be used to cover empty spaces. See {@link ZFile} for an
|
||||||
|
* explanation on using the extra field for covering empty spaces.
|
||||||
|
*
|
||||||
|
* @param coverEmptySpaceUsingExtraField should the extra field be used to cover empty spaces?
|
||||||
|
*/
|
||||||
|
public ZFileOptions setCoverEmptySpaceUsingExtraField(boolean coverEmptySpaceUsingExtraField) {
|
||||||
|
this.coverEmptySpaceUsingExtraField = coverEmptySpaceUsingExtraField;
|
||||||
|
return this;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Obtains whether files should be automatically sorted before updating the zip file. See {@link
|
||||||
|
* ZFile} for an explanation on automatic sorting.
|
||||||
|
*
|
||||||
|
* @return should the file be automatically sorted?
|
||||||
|
*/
|
||||||
|
public boolean getAutoSortFiles() {
|
||||||
|
return autoSortFiles;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Sets whether files should be automatically sorted before updating the zip file. See {@link
|
||||||
|
* ZFile} for an explanation on automatic sorting.
|
||||||
|
*
|
||||||
|
* @param autoSortFiles should the file be automatically sorted?
|
||||||
|
*/
|
||||||
|
public ZFileOptions setAutoSortFiles(boolean autoSortFiles) {
|
||||||
|
this.autoSortFiles = autoSortFiles;
|
||||||
|
return this;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Sets the verification log factory.
|
||||||
|
*
|
||||||
|
* @param verifyLogFactory verification log factory
|
||||||
|
*/
|
||||||
|
public ZFileOptions setVerifyLogFactory(Supplier<VerifyLog> verifyLogFactory) {
|
||||||
|
this.verifyLogFactory = verifyLogFactory;
|
||||||
|
return this;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Obtains the verification log factory. By default, the verification log doesn't store anything
|
||||||
|
* and will always return an empty log.
|
||||||
|
*
|
||||||
|
* @return the verification log factory
|
||||||
|
*/
|
||||||
|
public Supplier<VerifyLog> getVerifyLogFactory() {
|
||||||
|
return verifyLogFactory;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Sets whether expensive validation should be skipped during {@link ZFile} creation
|
||||||
|
*
|
||||||
|
* @param skipValidation during creation?
|
||||||
|
*/
|
||||||
|
public ZFileOptions setSkipValidation(boolean skipValidation) {
|
||||||
|
this.skipValidation = skipValidation;
|
||||||
|
return this;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Gets whether expensive validation should be performed during {@link ZFile} creation
|
||||||
|
*
|
||||||
|
* @return skip verification during creation?
|
||||||
|
*/
|
||||||
|
public boolean getSkipValidation() {
|
||||||
|
return skipValidation;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Sets whether to always generate the MANIFEST.MF file, regardless whether the APK is signed with
|
||||||
|
* v1 signing scheme.
|
||||||
|
*/
|
||||||
|
public ZFileOptions setAlwaysGenerateJarManifest(boolean alwaysGenerateJarManifest) {
|
||||||
|
this.alwaysGenerateJarManifest = alwaysGenerateJarManifest;
|
||||||
|
return this;
|
||||||
|
}
|
||||||
|
|
||||||
|
/** Returns whether the MANIFEST.MF file should always be generated. */
|
||||||
|
public boolean getAlwaysGenerateJarManifest() {
|
||||||
|
return alwaysGenerateJarManifest;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
@ -0,0 +1,408 @@
|
||||||
|
/*
|
||||||
|
* Copyright (C) 2018 The Android Open Source Project
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package com.android.tools.build.apkzlib.zip;
|
||||||
|
|
||||||
|
import com.android.tools.build.apkzlib.utils.CachedSupplier;
|
||||||
|
import com.android.tools.build.apkzlib.utils.IOExceptionWrapper;
|
||||||
|
import com.google.common.primitives.Ints;
|
||||||
|
import java.io.IOException;
|
||||||
|
import java.nio.ByteBuffer;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Zip64 End of Central Directory record in a zip file.
|
||||||
|
*/
|
||||||
|
public class Zip64Eocd {
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Default "version made by" field: upper byte needs to be 0 to set to MS-DOS compatibility. Lower
|
||||||
|
* byte can be anything, really. We use 0x18 because aapt uses 0x17 :)
|
||||||
|
*/
|
||||||
|
private static final int DEFAULT_VERSION_MADE_BY = 0x0018;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Minimum size that can be stored in the {@link #F_EOCD_SIZE} field of the record.
|
||||||
|
*/
|
||||||
|
private static final int MIN_EOCD_SIZE = 44;
|
||||||
|
|
||||||
|
/** Field in the record: the record signature, fixed at this value by the specification */
|
||||||
|
private static final ZipField.F4 F_SIGNATURE =
|
||||||
|
new ZipField.F4(0, 0x06064b50, "Zip64 EOCD signature");
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Field in the record: the size of the central directory record, not including the first 12
|
||||||
|
* bytes of data (the signature and this size information). Therefore this variable should be:
|
||||||
|
*
|
||||||
|
* <code>size = sizeOfFixedFields + sizeOfVariableData - 12</code>
|
||||||
|
*
|
||||||
|
* as specified by the zip specification.
|
||||||
|
*/
|
||||||
|
private static final ZipField.F8 F_EOCD_SIZE =
|
||||||
|
new ZipField.F8(
|
||||||
|
F_SIGNATURE.endOffset(),
|
||||||
|
"Zip64 EOCD size",
|
||||||
|
new ZipFieldInvariantMinValue(MIN_EOCD_SIZE));
|
||||||
|
|
||||||
|
/** Field in the record: ID program that made the zip (we don't actually use this). */
|
||||||
|
private static final ZipField.F2 F_MADE_BY =
|
||||||
|
new ZipField.F2(F_EOCD_SIZE.endOffset(), "Made by", new ZipFieldInvariantNonNegative());
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Field in the record: Version needed to extract the Zip. We expect this value to be at least
|
||||||
|
* {@link CentralDirectoryHeaderCompressInfo#VERSION_WITH_ZIP64_EXTENSIONS}. This value also
|
||||||
|
* determines whether we are using Version 1 or Version 2 of the Zip64 EOCD record.
|
||||||
|
*/
|
||||||
|
private static final ZipField.F2 F_VERSION_EXTRACT =
|
||||||
|
new ZipField.F2(
|
||||||
|
F_MADE_BY.endOffset(),
|
||||||
|
"Version to extract",
|
||||||
|
new ZipFieldInvariantMinValue(
|
||||||
|
CentralDirectoryHeaderCompressInfo.VERSION_WITH_ZIP64_EXTENSIONS));
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Field in the record: the number of disk where the Zip64 EOCD is located. It must be zero
|
||||||
|
* as multi-file archives are not supported.
|
||||||
|
*/
|
||||||
|
private static final ZipField.F4 F_NUMBER_OF_DISK =
|
||||||
|
new ZipField.F4(F_VERSION_EXTRACT.endOffset(), 0, "Number of this disk");
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Field in the record: the number of the disk where the central directory resides. This must be
|
||||||
|
* zero as multi-file archives are not supported.
|
||||||
|
*/
|
||||||
|
private static final ZipField.F4 F_DISK_CD_START =
|
||||||
|
new ZipField.F4(F_NUMBER_OF_DISK.endOffset(), 0, "Disk where CD starts");
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Field in the record: the number of entries in the Central Directory on this disk. Because we do
|
||||||
|
* not support multi-file archives, this is the same as {@link #F_RECORDS_TOTAL}
|
||||||
|
*/
|
||||||
|
private static final ZipField.F8 F_RECORDS_DISK =
|
||||||
|
new ZipField.F8(
|
||||||
|
F_DISK_CD_START.endOffset(),
|
||||||
|
"Record on disk count",
|
||||||
|
new ZipFieldInvariantNonNegative());
|
||||||
|
|
||||||
|
/** Field in the record: the total number of entries in the Central Directory. */
|
||||||
|
private static final ZipField.F8 F_RECORDS_TOTAL =
|
||||||
|
new ZipField.F8(
|
||||||
|
F_RECORDS_DISK.endOffset(),
|
||||||
|
"Total records",
|
||||||
|
new ZipFieldInvariantNonNegative());
|
||||||
|
|
||||||
|
/** Field in the record: number of bytes of the Central Directory. */
|
||||||
|
private static final ZipField.F8 F_CD_SIZE =
|
||||||
|
new ZipField.F8(
|
||||||
|
F_RECORDS_TOTAL.endOffset(), "Directory size", new ZipFieldInvariantNonNegative());
|
||||||
|
|
||||||
|
/** Field in the record: offset, from the archive start, where the Central Directory starts. */
|
||||||
|
private static final ZipField.F8 F_CD_OFFSET =
|
||||||
|
new ZipField.F8(
|
||||||
|
F_CD_SIZE.endOffset(), "Directory offset", new ZipFieldInvariantNonNegative());
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Field in Version 2 of the record: The compression method used for the Central Directory in the
|
||||||
|
* given Zip file. Although we do support version 2 of the Zip64 EOCD, we presently do not support
|
||||||
|
* any compression method, and thus this value must be zero.
|
||||||
|
*/
|
||||||
|
private static final ZipField.F2 F_V2_CD_COMPRESSION_METHOD =
|
||||||
|
new ZipField.F2(
|
||||||
|
F_CD_OFFSET.endOffset(), 0, "Version 2: Directory Compression method");
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Field in Version 2 of the record: The compressed size of the Central Directory. As Compression
|
||||||
|
* is not supported for the CD, this value should always be the same as
|
||||||
|
* {@link #F_V2_CD_UNCOMPRESSED_SIZE}.
|
||||||
|
*/
|
||||||
|
private static final ZipField.F8 F_V2_CD_COMPRESSED_SIZE =
|
||||||
|
new ZipField.F8(
|
||||||
|
F_V2_CD_COMPRESSION_METHOD.endOffset(),
|
||||||
|
"Version 2: Directory Compressed Size",
|
||||||
|
new ZipFieldInvariantNonNegative());
|
||||||
|
|
||||||
|
/** Field in Version 2 of the record: The uncompressed size of the Central Directory. */
|
||||||
|
private static final ZipField.F8 F_V2_CD_UNCOMPRESSED_SIZE =
|
||||||
|
new ZipField.F8(
|
||||||
|
F_V2_CD_COMPRESSED_SIZE.endOffset(),
|
||||||
|
"Version 2: Directory Uncompressed Size",
|
||||||
|
new ZipFieldInvariantNonNegative());
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Field in Version 2 of the record: The ID for the type of encryption used to encrypt the Central
|
||||||
|
* directory. Since Central Directory encryption is not supported, this value has to be zero.
|
||||||
|
*/
|
||||||
|
private static final ZipField.F2 F_V2_CD_ENCRYPTION_ID =
|
||||||
|
new ZipField.F2(
|
||||||
|
F_V2_CD_UNCOMPRESSED_SIZE.endOffset(),
|
||||||
|
0,
|
||||||
|
"Version 2: Directory Encryption");
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Field in Version 2 of the record: The length of the encryption key for the encryption of the
|
||||||
|
* Central Directory given by {@link #F_V2_CD_ENCRYPTION_ID}. Since encryption of the Central
|
||||||
|
* Directory is not supported, this value has to be zero.
|
||||||
|
*/
|
||||||
|
private static final ZipField.F2 F_V2_CD_ENCRYPTION_KEY_LENGTH =
|
||||||
|
new ZipField.F2(
|
||||||
|
F_V2_CD_ENCRYPTION_ID.endOffset(),
|
||||||
|
0,
|
||||||
|
"Version 2: Directory Encryption key length");
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Field in Version 2 of the record: The flags for the encryption method used on the Central
|
||||||
|
* Directory. As encryption of the Central Directory is not supported, this value has to be zero.
|
||||||
|
*/
|
||||||
|
private static final ZipField.F2 F_V2_CD_ENCRYPTION_FLAGS =
|
||||||
|
new ZipField.F2(
|
||||||
|
F_V2_CD_ENCRYPTION_KEY_LENGTH.endOffset(),
|
||||||
|
0,
|
||||||
|
"Version 2: Directory Encryption Flags");
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Field in Version 2 of the record: ID of the algorithm used to hash the Central Directory data.
|
||||||
|
* Hashing of the Central Directory is not supported, so this value has to be zero.
|
||||||
|
*/
|
||||||
|
private static final ZipField.F2 F_V2_HASH_ID =
|
||||||
|
new ZipField.F2(
|
||||||
|
F_V2_CD_ENCRYPTION_FLAGS.endOffset(),
|
||||||
|
0,
|
||||||
|
"Version 2: Hash algorithm ID");
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Field in Version 2 of the record: Length of the data for the hash of the Central Directory.
|
||||||
|
* Hashing of the Central Directory is not supported, so this value has to be zero.
|
||||||
|
*/
|
||||||
|
private static final ZipField.F2 F_V2_HASH_LENGTH =
|
||||||
|
new ZipField.F2(
|
||||||
|
F_V2_HASH_ID.endOffset(),
|
||||||
|
0,
|
||||||
|
"Version 2: Hash length");
|
||||||
|
|
||||||
|
/** The location of the Zip64 size field relative to the start of the Zip64 EOCD. */
|
||||||
|
public static final int SIZE_OFFSET = F_EOCD_SIZE.offset();
|
||||||
|
|
||||||
|
/**
|
||||||
|
* The difference between the size in the size field and the true size of the Zip64 EOCD. The size
|
||||||
|
* field in the EOCD does not consider the size field and the identifier field when calculating
|
||||||
|
* the size of the Zip64 EOCD record.
|
||||||
|
*/
|
||||||
|
public static final int TRUE_SIZE_DIFFERENCE = F_EOCD_SIZE.endOffset();
|
||||||
|
|
||||||
|
/** Code of the program that made the zip. We actually don't care about this. */
|
||||||
|
private final long madeBy;
|
||||||
|
|
||||||
|
/** Version needed to extract the zip. */
|
||||||
|
private final long versionToExtract;
|
||||||
|
|
||||||
|
/** Number of entries in the Central Directory. */
|
||||||
|
private final long totalRecords;
|
||||||
|
|
||||||
|
/** Offset from the beginning of the archive where the Central Directory is located. */
|
||||||
|
private final long directoryOffset;
|
||||||
|
|
||||||
|
/** Number of bytes of the Central Directory. */
|
||||||
|
private final long directorySize;
|
||||||
|
|
||||||
|
/** The variable extra fields at the end of the Zip64 EOCD (in both Version 1 and 2). */
|
||||||
|
private final Zip64ExtensibleDataSector extraFields;
|
||||||
|
|
||||||
|
/** Supplier of the byte representation of the Zip64 EOCD. */
|
||||||
|
private final CachedSupplier<byte[]> byteSupplier;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Creates a Zip64Eocd record from the given information from the central directory record.
|
||||||
|
*
|
||||||
|
* @param totalRecords the number of entries in the central directory.
|
||||||
|
* @param directoryOffset the offset of the central directory from the start of the archive.
|
||||||
|
* @param directorySize the size (in bytes) of the central directory record.
|
||||||
|
* @param useVersion2 whether we want to use Version 2 of the Zip64 EOCD.
|
||||||
|
* @param dataSector the extensible data sector.
|
||||||
|
*/
|
||||||
|
Zip64Eocd(
|
||||||
|
long totalRecords,
|
||||||
|
long directoryOffset,
|
||||||
|
long directorySize,
|
||||||
|
boolean useVersion2,
|
||||||
|
Zip64ExtensibleDataSector dataSector) {
|
||||||
|
this.madeBy = DEFAULT_VERSION_MADE_BY;
|
||||||
|
this.totalRecords = totalRecords;
|
||||||
|
this.directorySize = directorySize;
|
||||||
|
this.directoryOffset = directoryOffset;
|
||||||
|
this.versionToExtract =
|
||||||
|
useVersion2
|
||||||
|
? CentralDirectoryHeaderCompressInfo.VERSION_WITH_CENTRAL_FILE_ENCRYPTION
|
||||||
|
: CentralDirectoryHeaderCompressInfo.VERSION_WITH_ZIP64_EXTENSIONS;
|
||||||
|
extraFields = dataSector;
|
||||||
|
|
||||||
|
byteSupplier = new CachedSupplier<>(this::computeByteRepresentation);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Creates a Zip64 EOCD from the given byte information. It does verify that the record starts
|
||||||
|
* with the correct header information.
|
||||||
|
*
|
||||||
|
* @param bytes the bytes to be read as a Zip64 EOCD
|
||||||
|
* @throws IOException the bytes could not be read as a Zip64 EOCD
|
||||||
|
*/
|
||||||
|
Zip64Eocd(ByteBuffer bytes) throws IOException {
|
||||||
|
|
||||||
|
F_SIGNATURE.verify(bytes);
|
||||||
|
long eocdSize = F_EOCD_SIZE.read(bytes);
|
||||||
|
long madeBy = F_MADE_BY.read(bytes);
|
||||||
|
long versionToExtract = F_VERSION_EXTRACT.read(bytes);
|
||||||
|
F_NUMBER_OF_DISK.verify(bytes);
|
||||||
|
F_DISK_CD_START.verify(bytes);
|
||||||
|
long totalRecords1 = F_RECORDS_DISK.read(bytes);
|
||||||
|
long totalRecords2 = F_RECORDS_TOTAL.read(bytes);
|
||||||
|
long directorySize = F_CD_SIZE.read(bytes);
|
||||||
|
long directoryOffset = F_CD_OFFSET.read(bytes);
|
||||||
|
long sizeOfFixedFields = F_CD_OFFSET.endOffset();
|
||||||
|
|
||||||
|
// sanity checks for Version 1 fields.
|
||||||
|
if (totalRecords1 != totalRecords2) {
|
||||||
|
throw new IOException(
|
||||||
|
"Zip states records split in multiple disks, which is not supported");
|
||||||
|
}
|
||||||
|
|
||||||
|
// read Version 2 fields if necessary
|
||||||
|
if (versionToExtract
|
||||||
|
>= CentralDirectoryHeaderCompressInfo.VERSION_WITH_CENTRAL_FILE_ENCRYPTION) {
|
||||||
|
if (eocdSize < F_V2_HASH_LENGTH.endOffset() - F_EOCD_SIZE.endOffset()) {
|
||||||
|
throw new IOException(
|
||||||
|
"Zip states the size of Zip64 EOCD is too small for version 2 format.");
|
||||||
|
}
|
||||||
|
|
||||||
|
F_V2_CD_COMPRESSION_METHOD.verify(bytes);
|
||||||
|
long compressedSize = F_V2_CD_COMPRESSED_SIZE.read(bytes);
|
||||||
|
long uncompressedSize = F_V2_CD_UNCOMPRESSED_SIZE.read(bytes);
|
||||||
|
F_V2_CD_ENCRYPTION_ID.verify(bytes);
|
||||||
|
F_V2_CD_ENCRYPTION_KEY_LENGTH.verify(bytes);
|
||||||
|
F_V2_CD_ENCRYPTION_FLAGS.verify(bytes);
|
||||||
|
F_V2_HASH_ID.verify(bytes);
|
||||||
|
F_V2_HASH_LENGTH.verify(bytes);
|
||||||
|
sizeOfFixedFields = F_V2_HASH_LENGTH.endOffset();
|
||||||
|
|
||||||
|
// sanity checks for version 2 fields.
|
||||||
|
if (compressedSize != uncompressedSize) {
|
||||||
|
throw new IOException(
|
||||||
|
"Zip states Central Directory Compression is used, which is not supported");
|
||||||
|
}
|
||||||
|
directorySize = uncompressedSize;
|
||||||
|
}
|
||||||
|
|
||||||
|
this.madeBy = madeBy;
|
||||||
|
this.versionToExtract = versionToExtract;
|
||||||
|
this.totalRecords = totalRecords1;
|
||||||
|
this.directorySize = directorySize;
|
||||||
|
this.directoryOffset = directoryOffset;
|
||||||
|
|
||||||
|
long extensibleDataSize = eocdSize - (sizeOfFixedFields - F_EOCD_SIZE.endOffset());
|
||||||
|
|
||||||
|
if (extensibleDataSize > Integer.MAX_VALUE) {
|
||||||
|
throw new IOException("Extensible data of size: " + extensibleDataSize + "not supported");
|
||||||
|
}
|
||||||
|
byte[] rawData = new byte[Ints.checkedCast(extensibleDataSize)];
|
||||||
|
bytes.get(rawData);
|
||||||
|
extraFields = new Zip64ExtensibleDataSector(rawData);
|
||||||
|
byteSupplier = new CachedSupplier<>(this::computeByteRepresentation);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* The size of the fixed field in the Zip64 EOCD. This vaue may be different if we are using a
|
||||||
|
* version 1 or version 2 record.
|
||||||
|
*
|
||||||
|
* @return the size of the fixed fields.
|
||||||
|
*/
|
||||||
|
private int sizeOfFixedFields() {
|
||||||
|
return versionToExtract
|
||||||
|
>= CentralDirectoryHeaderCompressInfo.VERSION_WITH_CENTRAL_FILE_ENCRYPTION
|
||||||
|
? F_V2_HASH_LENGTH.endOffset()
|
||||||
|
: F_CD_OFFSET.endOffset();
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Gets the size (in bytes) of the Zip64 EOCD record.
|
||||||
|
*
|
||||||
|
* @return the size of the record.
|
||||||
|
*/
|
||||||
|
public int size() {
|
||||||
|
return sizeOfFixedFields() + extraFields.size();
|
||||||
|
}
|
||||||
|
|
||||||
|
public long getTotalRecords() {
|
||||||
|
return totalRecords;
|
||||||
|
}
|
||||||
|
|
||||||
|
public long getDirectorySize() {
|
||||||
|
return directorySize;
|
||||||
|
}
|
||||||
|
|
||||||
|
public long getDirectoryOffset() {
|
||||||
|
return directoryOffset;
|
||||||
|
}
|
||||||
|
|
||||||
|
public Zip64ExtensibleDataSector getExtraFields() {
|
||||||
|
return extraFields;
|
||||||
|
}
|
||||||
|
|
||||||
|
public long getVersionToExtract() { return versionToExtract; }
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Gets the byte representation of The Zip64 EOCD record.
|
||||||
|
*
|
||||||
|
* @return the bytes of the EOCD.
|
||||||
|
*/
|
||||||
|
public byte[] toBytes() {
|
||||||
|
return byteSupplier.get();
|
||||||
|
}
|
||||||
|
|
||||||
|
private byte[] computeByteRepresentation() {
|
||||||
|
int size = size();
|
||||||
|
ByteBuffer out = ByteBuffer.allocate(size);
|
||||||
|
|
||||||
|
try {
|
||||||
|
F_SIGNATURE.write(out);
|
||||||
|
F_EOCD_SIZE.write(out, size - F_EOCD_SIZE.endOffset());
|
||||||
|
F_MADE_BY.write(out, madeBy);
|
||||||
|
F_VERSION_EXTRACT.write(out, versionToExtract);
|
||||||
|
F_NUMBER_OF_DISK.write(out);
|
||||||
|
F_DISK_CD_START.write(out);
|
||||||
|
F_RECORDS_DISK.write(out, totalRecords);
|
||||||
|
F_RECORDS_TOTAL.write(out, totalRecords);
|
||||||
|
F_CD_SIZE.write(out, directorySize);
|
||||||
|
F_CD_OFFSET.write(out, directoryOffset);
|
||||||
|
|
||||||
|
// write version 2 fields if necessary.
|
||||||
|
if (versionToExtract
|
||||||
|
>= CentralDirectoryHeaderCompressInfo.VERSION_WITH_CENTRAL_FILE_ENCRYPTION) {
|
||||||
|
F_V2_CD_COMPRESSION_METHOD.write(out);
|
||||||
|
F_V2_CD_COMPRESSED_SIZE.write(out, directorySize);
|
||||||
|
F_V2_CD_UNCOMPRESSED_SIZE.write(out, directorySize);
|
||||||
|
F_V2_CD_ENCRYPTION_ID.write(out);
|
||||||
|
F_V2_CD_ENCRYPTION_KEY_LENGTH.write(out);
|
||||||
|
F_V2_CD_ENCRYPTION_FLAGS.write(out);
|
||||||
|
F_V2_HASH_ID.write(out);
|
||||||
|
F_V2_HASH_LENGTH.write(out);
|
||||||
|
}
|
||||||
|
|
||||||
|
extraFields.write(out);
|
||||||
|
|
||||||
|
return out.array();
|
||||||
|
} catch (IOException e) {
|
||||||
|
throw new IOExceptionWrapper(e);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
@ -0,0 +1,155 @@
|
||||||
|
/*
|
||||||
|
* Copyright (C) 2018 The Android Open Source Project
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package com.android.tools.build.apkzlib.zip;
|
||||||
|
|
||||||
|
import com.android.tools.build.apkzlib.utils.CachedSupplier;
|
||||||
|
import com.android.tools.build.apkzlib.utils.IOExceptionWrapper;
|
||||||
|
import com.google.common.base.Preconditions;
|
||||||
|
import com.google.common.base.Verify;
|
||||||
|
import java.io.IOException;
|
||||||
|
import java.io.UncheckedIOException;
|
||||||
|
import java.nio.ByteBuffer;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Zip64 End of Central Directory Locator. Used to locate the Zip64 EOCD record in
|
||||||
|
* the Zip64 format. This will be located right above the standard EOCD record, if it exists.
|
||||||
|
*/
|
||||||
|
class Zip64EocdLocator {
|
||||||
|
/** Field in the record: the record signature, fixed at this value by the specification. */
private static final ZipField.F4 F_SIGNATURE =
    new ZipField.F4(0, 0x07064b50, "Zip64 EOCD Locator signature");

/**
 * Field in the record: the number of the disk where the Zip64 EOCD is located. This has to be
 * zero because multi-file archives are not supported.
 */
private static final ZipField.F4 F_NUMBER_OF_DISK =
    new ZipField.F4(F_SIGNATURE.endOffset(), 0, "Number of disk with Zip64 EOCD");

/**
 * Field in the record: the location of the zip64 EOCD record on the disk specified by
 * {@link #F_NUMBER_OF_DISK}.
 */
private static final ZipField.F8 F_Z64_EOCD_OFFSET =
    new ZipField.F8(
        F_NUMBER_OF_DISK.endOffset(),
        "Offset of Zip64 EOCD",
        new ZipFieldInvariantNonNegative());

/**
 * Field in the record: the total number of disks in the archive. This has to be zero because
 * multi-file archives are not supported.
 */
private static final ZipField.F4 F_TOTAL_NUMBER_OF_DISKS =
    new ZipField.F4(
        F_Z64_EOCD_OFFSET.endOffset(), 0,"Total number of disks");

/** Fixed size, in bytes, of the locator record: the end offset of its last fixed field. */
public static final int LOCATOR_SIZE = F_TOTAL_NUMBER_OF_DISKS.endOffset();
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Offset from the beginning of the archive to where the Zip64 End of Central Directory record
|
||||||
|
* is located.
|
||||||
|
*/
|
||||||
|
private final long z64EocdOffset;
|
||||||
|
|
||||||
|
/** Supplier of the byte representation of the zip64 Eocd Locator. */
|
||||||
|
private final CachedSupplier<byte[]> byteSupplier;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Creates a new Zip64 EOCD Locator, reading it from a byte source. This method will parse the
|
||||||
|
* byte source and obtain the EOCD Locator. It will check that the byte source starts with the
|
||||||
|
* EOCD Locator signature.
|
||||||
|
*
|
||||||
|
* @param bytes the byte buffer with the Locator data; when this method finishes, the byte buffer
|
||||||
|
* will have its position moved to the end of the Locator (the beginning of the standard EOCD)
|
||||||
|
* @throws IOException failed to read information or the EOCD data is corrupt or invalid.
|
||||||
|
*/
|
||||||
|
Zip64EocdLocator(ByteBuffer bytes) throws IOException {
|
||||||
|
F_SIGNATURE.verify(bytes);
|
||||||
|
F_NUMBER_OF_DISK.verify(bytes);
|
||||||
|
long z64EocdOffset = F_Z64_EOCD_OFFSET.read(bytes);
|
||||||
|
F_TOTAL_NUMBER_OF_DISKS.verify(bytes);
|
||||||
|
|
||||||
|
Verify.verify(z64EocdOffset >= 0);
|
||||||
|
this.z64EocdOffset = z64EocdOffset;
|
||||||
|
byteSupplier = new CachedSupplier<>(this::computeByteRepresentation);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Creates a new Zip64 EOCD Locator. This is used when generating an EOCD Locator for a
|
||||||
|
* Zip64 EOCD that has been generated.
|
||||||
|
*
|
||||||
|
* @param z64EocdOffset offset position of the Zip64 EOCD from the beginning of the archive
|
||||||
|
*/
|
||||||
|
Zip64EocdLocator(long z64EocdOffset) {
|
||||||
|
Preconditions.checkArgument(z64EocdOffset >= 0, "z64EocdOffset < 0");
|
||||||
|
|
||||||
|
this.z64EocdOffset = z64EocdOffset;
|
||||||
|
byteSupplier = new CachedSupplier<>(this::computeByteRepresentation);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Obtains the offset from the beginning of the archive to where the Zip64 EOCD is located.
|
||||||
|
*
|
||||||
|
* @return the Zip64 EOCD offset.
|
||||||
|
*/
|
||||||
|
long getZ64EocdOffset() {
|
||||||
|
return z64EocdOffset;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Obtains the size of the Zip64 EOCD Locator
|
||||||
|
*
|
||||||
|
* @return the size, in bytes, of the EOCD Locator.<em> i.e. </em> 20.
|
||||||
|
*/
|
||||||
|
long getSize() {
|
||||||
|
return F_TOTAL_NUMBER_OF_DISKS.endOffset();
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Generates the EOCD Locator data.
|
||||||
|
*
|
||||||
|
* @return a byte representation of the EOCD Locator that has exactly {@link #getSize()} bytes
|
||||||
|
* @throws IOException failed to generate the EOCD data.
|
||||||
|
*/
|
||||||
|
byte[] toBytes() throws IOException {
|
||||||
|
return byteSupplier.get();
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Computes the byte representation of the EOCD Locator.
|
||||||
|
*
|
||||||
|
* @return a byte representation of the Zip64 EOCD Locator that has exactly {@link #getSize()}
|
||||||
|
* bytes
|
||||||
|
* @throws UncheckedIOException failed to generate the EOCD Locator data
|
||||||
|
*/
|
||||||
|
private byte[] computeByteRepresentation() {
|
||||||
|
ByteBuffer out = ByteBuffer.allocate(F_TOTAL_NUMBER_OF_DISKS.endOffset());
|
||||||
|
|
||||||
|
try {
|
||||||
|
F_SIGNATURE.write(out);
|
||||||
|
F_NUMBER_OF_DISK.write(out);
|
||||||
|
F_Z64_EOCD_OFFSET.write(out, z64EocdOffset);
|
||||||
|
F_TOTAL_NUMBER_OF_DISKS.write(out);
|
||||||
|
|
||||||
|
return out.array();
|
||||||
|
} catch (IOException e) {
|
||||||
|
throw new IOExceptionWrapper(e);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
@ -0,0 +1,219 @@
|
||||||
|
/*
|
||||||
|
* Copyright (C) 2018 The Android Open Source Project
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package com.android.tools.build.apkzlib.zip;
|
||||||
|
|
||||||
|
import com.android.tools.build.apkzlib.zip.utils.LittleEndianUtils;
|
||||||
|
import com.google.common.base.Preconditions;
|
||||||
|
import com.google.common.collect.ImmutableList;
|
||||||
|
import com.google.common.primitives.Ints;
|
||||||
|
import java.io.IOException;
|
||||||
|
import java.nio.ByteBuffer;
|
||||||
|
import java.util.ArrayList;
|
||||||
|
import java.util.List;
|
||||||
|
import javax.annotation.Nullable;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Contains the special purpose data for the Zip64 EOCD record.
|
||||||
|
*
|
||||||
|
* <p>According to the zip specification, the Zip64 EOCD is composed of a sequence of zero or more
|
||||||
|
* Special Purpose Data fields. This class provides a way to access, parse, and modify that
|
||||||
|
* information.
|
||||||
|
*
|
||||||
|
* <p>Each Special Purpose Data is represented by an instance of {@link Z64SpecialPurposeData} and
|
||||||
|
* contains a header ID and data.
|
||||||
|
*/
|
||||||
|
public class Zip64ExtensibleDataSector {
|
||||||
|
|
||||||
|
/**
|
||||||
|
* The extensible data sector's raw data, if it is known. Either this variable or {@link #fields}
|
||||||
|
* must be non-{@code null}.
|
||||||
|
*/
|
||||||
|
@Nullable private final byte[] rawData;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* The list of fields in the data sector. Will be populated if the data sector is created based on
|
||||||
|
* a list of special purpose data; will also be populated after parsing if the Data Sector is
|
||||||
|
* created based on the raw bytes.
|
||||||
|
*/
|
||||||
|
@Nullable private ImmutableList<Z64SpecialPurposeData> fields;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Creates a Zip64 Extensible Data Sector based on existing raw data.
|
||||||
|
*
|
||||||
|
* @param rawData the raw data; will only be parsed if needed.
|
||||||
|
*/
|
||||||
|
public Zip64ExtensibleDataSector(byte[] rawData) {
|
||||||
|
this.rawData = rawData;
|
||||||
|
fields = null;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Creates an Extensible Data Sector with no special purpose data.
|
||||||
|
*/
|
||||||
|
public Zip64ExtensibleDataSector() {
|
||||||
|
rawData = null;
|
||||||
|
fields = ImmutableList.of();
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Creates a Zip64 Extensible Data with the given Special purpose data.
|
||||||
|
*
|
||||||
|
* @param fields all special purpose data.
|
||||||
|
*/
|
||||||
|
public Zip64ExtensibleDataSector(ImmutableList<Z64SpecialPurposeData> fields) {
|
||||||
|
rawData = null;
|
||||||
|
this.fields = fields;
|
||||||
|
}
|
||||||
|
|
||||||
|
int size() {
|
||||||
|
if (rawData != null) {
|
||||||
|
return rawData.length;
|
||||||
|
} else {
|
||||||
|
Preconditions.checkNotNull(fields);
|
||||||
|
int sumSizes = 0;
|
||||||
|
for (Z64SpecialPurposeData data : fields){
|
||||||
|
sumSizes += data.size();
|
||||||
|
}
|
||||||
|
return sumSizes;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
void write(ByteBuffer out) throws IOException {
|
||||||
|
if (rawData != null) {
|
||||||
|
out.put(rawData);
|
||||||
|
} else {
|
||||||
|
Preconditions.checkNotNull(fields);
|
||||||
|
for (Z64SpecialPurposeData data : fields) {
|
||||||
|
data.write(out);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
public ImmutableList<Z64SpecialPurposeData> getFields() throws IOException {
|
||||||
|
if (fields == null) {
|
||||||
|
parseData();
|
||||||
|
}
|
||||||
|
|
||||||
|
Preconditions.checkNotNull(fields);
|
||||||
|
return fields;
|
||||||
|
}
|
||||||
|
|
||||||
|
private void parseData() throws IOException {
|
||||||
|
Preconditions.checkNotNull(rawData);
|
||||||
|
Preconditions.checkState(fields == null);
|
||||||
|
|
||||||
|
List<Z64SpecialPurposeData> fields = new ArrayList<>();
|
||||||
|
ByteBuffer buffer = ByteBuffer.wrap(rawData);
|
||||||
|
|
||||||
|
while (buffer.remaining() > 0) {
|
||||||
|
int headerId = LittleEndianUtils.readUnsigned2Le(buffer);
|
||||||
|
long dataSize = LittleEndianUtils.readUnsigned4Le(buffer);
|
||||||
|
|
||||||
|
byte[] data = new byte[Ints.checkedCast(dataSize)];
|
||||||
|
if (dataSize < 0) {
|
||||||
|
throw new IOException(
|
||||||
|
"Invalid data size for special purpose data with header ID "
|
||||||
|
+ headerId
|
||||||
|
+ ": "
|
||||||
|
+ dataSize);
|
||||||
|
}
|
||||||
|
buffer.get(data);
|
||||||
|
|
||||||
|
SpecialPurposeDataFactory factory = RawSpecialPurposeData::new;
|
||||||
|
Z64SpecialPurposeData spd = factory.make(headerId, data);
|
||||||
|
fields.add(spd);
|
||||||
|
}
|
||||||
|
this.fields = ImmutableList.copyOf(fields);
|
||||||
|
}
|
||||||
|
|
||||||
|
public interface Z64SpecialPurposeData {
|
||||||
|
|
||||||
|
/** Length of header id and the size length fields that comes before the data */
|
||||||
|
int PREFIX_LENGTH = 6;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Obtains the Special purpose data's header id.
|
||||||
|
*
|
||||||
|
* @return the data's header id.
|
||||||
|
*/
|
||||||
|
int getHeaderId();
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Obtains the size of the data in this special purpose data.
|
||||||
|
*
|
||||||
|
* @return the number of bytes needed to write the data.
|
||||||
|
*/
|
||||||
|
int size();
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Writes the special purpose data to the buffer.
|
||||||
|
*
|
||||||
|
* @param out the buffer where to write the data to; exactly {@link #size()} bytes will be
|
||||||
|
* written.
|
||||||
|
* @throws IOException failed to write special purpose data.
|
||||||
|
*/
|
||||||
|
void write(ByteBuffer out) throws IOException;
|
||||||
|
}
|
||||||
|
|
||||||
|
public interface SpecialPurposeDataFactory {
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Creates a new special purpose data.
|
||||||
|
*
|
||||||
|
* @param headerId the header ID
|
||||||
|
* @param data the data in the special purpose data
|
||||||
|
* @return the created special purpose data.
|
||||||
|
* @throws IOException failed to create the special purpose data from the data given.
|
||||||
|
*/
|
||||||
|
Z64SpecialPurposeData make(int headerId, byte[] data) throws IOException;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Special Purpose Data containing raw data: this class represents a general "special purpose
|
||||||
|
* data" containing an array of bytes as data.
|
||||||
|
*/
|
||||||
|
public static class RawSpecialPurposeData implements Z64SpecialPurposeData {
|
||||||
|
|
||||||
|
/** Header ID. */
|
||||||
|
private final int headerId;
|
||||||
|
|
||||||
|
/** Data in the segment */
|
||||||
|
private final byte[] data;
|
||||||
|
|
||||||
|
RawSpecialPurposeData(int headerId, byte[] data) {
|
||||||
|
this.headerId = headerId;
|
||||||
|
this.data = data;
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public int getHeaderId() {
|
||||||
|
return headerId;
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public int size() {
|
||||||
|
return PREFIX_LENGTH + data.length;
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public void write(ByteBuffer out) throws IOException {
|
||||||
|
LittleEndianUtils.writeUnsigned2Le(out, headerId);
|
||||||
|
LittleEndianUtils.writeUnsigned4Le(out, data.length);
|
||||||
|
out.put(data);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
@ -0,0 +1,396 @@
|
||||||
|
/*
|
||||||
|
* Copyright (C) 2015 The Android Open Source Project
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package com.android.tools.build.apkzlib.zip;
|
||||||
|
|
||||||
|
import com.android.tools.build.apkzlib.zip.utils.LittleEndianUtils;
|
||||||
|
import com.google.common.base.Preconditions;
|
||||||
|
import com.google.common.base.Verify;
|
||||||
|
import com.google.common.collect.Sets;
|
||||||
|
import com.google.common.primitives.Ints;
|
||||||
|
import java.io.IOException;
|
||||||
|
import java.nio.ByteBuffer;
|
||||||
|
import java.nio.ByteOrder;
|
||||||
|
import java.util.Set;
|
||||||
|
import javax.annotation.Nullable;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* The ZipField class represents a field in a record in a zip file. Zip files are made with records
|
||||||
|
* that have fields. This class makes it easy to read, write and verify field values.
|
||||||
|
*
|
||||||
|
* <p>There are three main types of fields: 2-byte, 4-byte, and 8-byte fields. We represent each
|
||||||
|
* one as a subclass of {@code ZipField}, {@code F2} for the 2-byte field, {@code F4} for the 4-byte
|
||||||
|
* field and {@code F8} for the 8-byte field. Because Java's {@code int} data type is guaranteed to
|
||||||
|
* be 4-byte, all methods use Java's native {@link int} as data type.
|
||||||
|
*
|
||||||
|
* <p>The {@code F8} subclass is to support the 8-byte fields in the Zip64 specification. Because
|
||||||
|
* Java's 8-byte {@code long} does not support unsigned types, which reduces the support to 8-byte
|
||||||
|
* numbers of the form 2^63-1 or less. As {@code F8} fields refer to file sizes, this should be
|
||||||
|
* sufficient.
|
||||||
|
*
|
||||||
|
* <p>For each field we can either read, write or verify. Verification is used for fields whose
|
||||||
|
* value we know. Some fields, <em>e.g.</em> signature fields, have fixed value. Other fields have
|
||||||
|
* variable values, but in some situations we know which value they have. For example, the last
|
||||||
|
* modification time of a file's local header will have to match the value of the file's
|
||||||
|
* modification time as stored in the central directory.
|
||||||
|
*
|
||||||
|
* <p>Because records are compact, <em>i.e.</em> fields are stored sequentially with no empty
|
||||||
|
* spaces, fields are generally created in the sequence they exist and the end offset of a field is
|
||||||
|
* used as the offset of the next one. The end of a field can be obtained by invoking {@link
|
||||||
|
* #endOffset()}. This allows creating fields in sequence without doing offset computation:
|
||||||
|
*
|
||||||
|
* <pre>
|
||||||
|
* ZipField.F2 firstField = new ZipField.F2(0, "First field");
|
||||||
|
* ZipField.F4 secondField = new ZipField(firstField.endOffset(), "Second field");
|
||||||
|
* ZipField.F8 thirdField = new ZipField(secondField.endOffset(), "Third field");
|
||||||
|
* </pre>
|
||||||
|
*/
|
||||||
|
abstract class ZipField {
|
||||||
|
|
||||||
|
/** Field name. Used for providing (more) useful error messages. */
|
||||||
|
private final String name;
|
||||||
|
|
||||||
|
/** Offset of the file in the record. */
|
||||||
|
protected final int offset;
|
||||||
|
|
||||||
|
/** Size of the field. Only 2, 4, or 8 allowed. */
|
||||||
|
private final int size;
|
||||||
|
|
||||||
|
/** If a fixed value exists for the field, then this attribute will contain that value. */
|
||||||
|
@Nullable private final Long expected;
|
||||||
|
|
||||||
|
/** All invariants that this field must verify. */
|
||||||
|
private final Set<ZipFieldInvariant> invariants;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Creates a new field that does not contain a fixed value.
|
||||||
|
*
|
||||||
|
* @param offset the field's offset in the record
|
||||||
|
* @param size the field size
|
||||||
|
* @param name the field's name
|
||||||
|
* @param invariants the invariants that must be verified by the field
|
||||||
|
*/
|
||||||
|
ZipField(int offset, int size, String name, ZipFieldInvariant... invariants) {
|
||||||
|
Preconditions.checkArgument(offset >= 0, "offset >= 0");
|
||||||
|
Preconditions.checkArgument(
|
||||||
|
size == 2 || size == 4 || size == 8,
|
||||||
|
"size != 2 && size != 4 && size != 8");
|
||||||
|
|
||||||
|
this.name = name;
|
||||||
|
this.offset = offset;
|
||||||
|
this.size = size;
|
||||||
|
expected = null;
|
||||||
|
this.invariants = Sets.newHashSet(invariants);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Creates a new field that contains a fixed value.
|
||||||
|
*
|
||||||
|
* @param offset the field's offset in the record
|
||||||
|
* @param size the field size
|
||||||
|
* @param expected the expected field value
|
||||||
|
* @param name the field's name
|
||||||
|
*/
|
||||||
|
ZipField(int offset, int size, long expected, String name) {
|
||||||
|
Preconditions.checkArgument(offset >= 0, "offset >= 0");
|
||||||
|
Preconditions.checkArgument(
|
||||||
|
size == 2 || size == 4 || size == 8,
|
||||||
|
"size != 2 && size != 4 && size != 8");
|
||||||
|
|
||||||
|
this.name = name;
|
||||||
|
this.offset = offset;
|
||||||
|
this.size = size;
|
||||||
|
this.expected = expected;
|
||||||
|
invariants = Sets.newHashSet();
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Checks whether a value verifies the field's invariants. Nothing happens if the value verifies
|
||||||
|
* the invariants.
|
||||||
|
*
|
||||||
|
* @param value the value
|
||||||
|
* @throws IOException the invariants are not verified
|
||||||
|
*/
|
||||||
|
private void checkVerifiesInvariants(long value) throws IOException {
|
||||||
|
for (ZipFieldInvariant invariant : invariants) {
|
||||||
|
if (!invariant.isValid(value)) {
|
||||||
|
throw new IOException(
|
||||||
|
"Value "
|
||||||
|
+ value
|
||||||
|
+ " of field "
|
||||||
|
+ name
|
||||||
|
+ " is invalid "
|
||||||
|
+ "(fails '"
|
||||||
|
+ invariant.getName()
|
||||||
|
+ "').");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Advances the position in the provided byte buffer by the size of this field.
|
||||||
|
*
|
||||||
|
* @param bytes the byte buffer; at the end of the method its position will be greater by the size
|
||||||
|
* of this field
|
||||||
|
* @throws IOException failed to advance the buffer
|
||||||
|
*/
|
||||||
|
void skip(ByteBuffer bytes) throws IOException {
|
||||||
|
if (bytes.remaining() < size) {
|
||||||
|
throw new IOException(
|
||||||
|
"Cannot skip field "
|
||||||
|
+ name
|
||||||
|
+ " because only "
|
||||||
|
+ bytes.remaining()
|
||||||
|
+ " remain in the buffer.");
|
||||||
|
}
|
||||||
|
|
||||||
|
bytes.position(bytes.position() + size);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Reads a field value.
|
||||||
|
*
|
||||||
|
* @param bytes the byte buffer with the record data; after this method finishes, the buffer will
|
||||||
|
* be positioned at the first byte after the field
|
||||||
|
* @return the value of the field
|
||||||
|
* @throws IOException failed to read the field
|
||||||
|
*/
|
||||||
|
long read(ByteBuffer bytes) throws IOException {
|
||||||
|
if (bytes.remaining() < size) {
|
||||||
|
throw new IOException(
|
||||||
|
"Cannot skip field "
|
||||||
|
+ name
|
||||||
|
+ " because only "
|
||||||
|
+ bytes.remaining()
|
||||||
|
+ " remain in the buffer.");
|
||||||
|
}
|
||||||
|
|
||||||
|
bytes.order(ByteOrder.LITTLE_ENDIAN);
|
||||||
|
|
||||||
|
long r;
|
||||||
|
if (size == 2) {
|
||||||
|
r = LittleEndianUtils.readUnsigned2Le(bytes);
|
||||||
|
} else if (size == 4) {
|
||||||
|
r = LittleEndianUtils.readUnsigned4Le(bytes);
|
||||||
|
} else {
|
||||||
|
r = LittleEndianUtils.readUnsigned8Le(bytes);
|
||||||
|
}
|
||||||
|
|
||||||
|
checkVerifiesInvariants(r);
|
||||||
|
return r;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Verifies that the field at the current buffer position has the expected value. The field must
|
||||||
|
* have been created with the constructor that defines the expected value.
|
||||||
|
*
|
||||||
|
* @param bytes the byte buffer with the record data; after this method finishes, the buffer will
|
||||||
|
* be positioned at the first byte after the field
|
||||||
|
* @throws IOException failed to read the field or the field does not have the expected value
|
||||||
|
*/
|
||||||
|
void verify(ByteBuffer bytes) throws IOException {
|
||||||
|
verify(bytes, null);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Verifies that the field at the current buffer position has the expected value. The field must
|
||||||
|
* have been created with the constructor that defines the expected value.
|
||||||
|
*
|
||||||
|
* @param bytes the byte buffer with the record data; after this method finishes, the buffer will
|
||||||
|
* be positioned at the first byte after the field
|
||||||
|
* @param verifyLog if non-{@code null}, will log the verification error
|
||||||
|
* @throws IOException failed to read the data or the field does not have the expected value; only
|
||||||
|
* thrown if {@code verifyLog} is {@code null}
|
||||||
|
*/
|
||||||
|
void verify(ByteBuffer bytes, @Nullable VerifyLog verifyLog) throws IOException {
|
||||||
|
Preconditions.checkState(expected != null, "expected == null");
|
||||||
|
verify(bytes, expected, verifyLog);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Verifies that the field has an expected value.
|
||||||
|
*
|
||||||
|
* @param bytes the byte buffer with the record data; after this method finishes, the buffer will
|
||||||
|
* be positioned at the first byte after the field
|
||||||
|
* @param expected the value we expect the field to have; if this field has invariants, the value
|
||||||
|
* must verify them
|
||||||
|
* @throws IOException failed to read the data or the field does not have the expected value
|
||||||
|
*/
|
||||||
|
void verify(ByteBuffer bytes, long expected) throws IOException {
|
||||||
|
verify(bytes, expected, null);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Verifies that the field has an expected value.
|
||||||
|
*
|
||||||
|
* @param bytes the byte buffer with the record data; after this method finishes, the buffer will
|
||||||
|
* be positioned at the first byte after the field
|
||||||
|
* @param expected the value we expect the field to have; if this field has invariants, the value
|
||||||
|
* must verify them
|
||||||
|
* @param verifyLog if non-{@code null}, will log the verification error
|
||||||
|
* @throws IOException failed to read the data or the field does not have the expected value; only
|
||||||
|
* thrown if {@code verifyLog} is {@code null}
|
||||||
|
*/
|
||||||
|
void verify(ByteBuffer bytes, long expected, @Nullable VerifyLog verifyLog) throws IOException {
|
||||||
|
checkVerifiesInvariants(expected);
|
||||||
|
long r = read(bytes);
|
||||||
|
if (r != expected) {
|
||||||
|
String error =
|
||||||
|
String.format(
|
||||||
|
"Incorrect value for field '%s': value is %s but %s expected.", name, r, expected);
|
||||||
|
|
||||||
|
if (verifyLog == null) {
|
||||||
|
throw new IOException(error);
|
||||||
|
} else {
|
||||||
|
verifyLog.log(error);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Writes the value of the field.
|
||||||
|
*
|
||||||
|
* @param output where to write the field; the field will be written at the current position of
|
||||||
|
* the buffer
|
||||||
|
* @param value the value to write
|
||||||
|
* @throws IOException failed to write the value in the stream
|
||||||
|
*/
|
||||||
|
void write(ByteBuffer output, long value) throws IOException {
|
||||||
|
checkVerifiesInvariants(value);
|
||||||
|
|
||||||
|
Preconditions.checkArgument(value >= 0, "value (%s) < 0", value);
|
||||||
|
|
||||||
|
if (size == 2) {
|
||||||
|
Preconditions.checkArgument(value <= 0x0000ffff, "value (%s) > 0x0000ffff", value);
|
||||||
|
LittleEndianUtils.writeUnsigned2Le(output, Ints.checkedCast(value));
|
||||||
|
} else if (size == 4) {
|
||||||
|
Preconditions.checkArgument(
|
||||||
|
value <= 0x00000000ffffffffL, "value (%s) > 0x00000000ffffffffL", value);
|
||||||
|
LittleEndianUtils.writeUnsigned4Le(output, value);
|
||||||
|
} else {
|
||||||
|
Verify.verify(size == 8);
|
||||||
|
LittleEndianUtils.writeUnsigned8Le(output, value);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Writes the value of the field. The field must have an expected value set in the constructor.
|
||||||
|
*
|
||||||
|
* @param output where to write the field; the field will be written at the current position of
|
||||||
|
* the buffer
|
||||||
|
* @throws IOException failed to write the value in the stream
|
||||||
|
*/
|
||||||
|
void write(ByteBuffer output) throws IOException {
|
||||||
|
Preconditions.checkState(expected != null, "expected == null");
|
||||||
|
write(output, expected);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Obtains the offset at which the field starts.
|
||||||
|
*
|
||||||
|
* @return the start offset
|
||||||
|
*/
|
||||||
|
int offset() {
|
||||||
|
return offset;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Obtains the offset at which the field ends. This is the exact offset at which the next field
|
||||||
|
* starts.
|
||||||
|
*
|
||||||
|
* @return the end offset
|
||||||
|
*/
|
||||||
|
int endOffset() {
|
||||||
|
return offset + size;
|
||||||
|
}
|
||||||
|
|
||||||
|
/** Concrete implementation of {@link ZipField} that represents a 2-byte field. */
|
||||||
|
static class F2 extends ZipField {
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Creates a new field.
|
||||||
|
*
|
||||||
|
* @param offset the field's offset in the record
|
||||||
|
* @param name the field's name
|
||||||
|
* @param invariants the invariants that must be verified by the field
|
||||||
|
*/
|
||||||
|
F2(int offset, String name, ZipFieldInvariant... invariants) {
|
||||||
|
super(offset, 2, name, invariants);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Creates a new field that contains a fixed value.
|
||||||
|
*
|
||||||
|
* @param offset the field's offset in the record
|
||||||
|
* @param expected the expected field value
|
||||||
|
* @param name the field's name
|
||||||
|
*/
|
||||||
|
F2(int offset, long expected, String name) {
|
||||||
|
super(offset, 2, expected, name);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/** Concrete implementation of {@link ZipField} that represents a 4-byte field. */
|
||||||
|
static class F4 extends ZipField {
|
||||||
|
/**
|
||||||
|
* Creates a new field.
|
||||||
|
*
|
||||||
|
* @param offset the field's offset in the record
|
||||||
|
* @param name the field's name
|
||||||
|
* @param invariants the invariants that must be verified by the field
|
||||||
|
*/
|
||||||
|
F4(int offset, String name, ZipFieldInvariant... invariants) {
|
||||||
|
super(offset, 4, name, invariants);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Creates a new field that contains a fixed value.
|
||||||
|
*
|
||||||
|
* @param offset the field's offset in the record
|
||||||
|
* @param expected the expected field value
|
||||||
|
* @param name the field's name
|
||||||
|
*/
|
||||||
|
F4(int offset, long expected, String name) {
|
||||||
|
super(offset, 4, expected, name);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/** Concrete implementation of {@link ZipField} that represents a 8-byte field. */
|
||||||
|
static class F8 extends ZipField {
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Creates a new field
|
||||||
|
*
|
||||||
|
* @param offset offset the field's offset in the record
|
||||||
|
* @param name the field's name
|
||||||
|
* @param invariants the invariants that must be verified by the field
|
||||||
|
*/
|
||||||
|
F8(int offset, String name, ZipFieldInvariant... invariants) {
|
||||||
|
super(offset, 8, name, invariants);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Creates a new field that contains a fixed value.
|
||||||
|
*
|
||||||
|
* @param offset the field's offset in the record
|
||||||
|
* @param expected the expected field value
|
||||||
|
* @param name the field's name
|
||||||
|
*/
|
||||||
|
F8(int offset, long expected, String name) {
|
||||||
|
super(offset, 8, expected, name);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
@ -0,0 +1,39 @@
|
||||||
|
/*
|
||||||
|
* Copyright (C) 2015 The Android Open Source Project
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package com.android.tools.build.apkzlib.zip;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* A field rule defines an invariant (<em>i.e.</em>, a constraint) that has to be verified by a
|
||||||
|
* field value.
|
||||||
|
*/
|
||||||
|
interface ZipFieldInvariant {

  /**
   * Evaluates the invariant against a value.
   *
   * @param value the value to check the invariant against
   * @return is the invariant valid for the value?
   */
  boolean isValid(long value);

  /**
   * Obtains the name of the invariant. Used for information purposes, e.g. in error messages
   * reporting an invariant violation.
   *
   * @return the name of the invariant
   */
  String getName();
}
|
||||||
|
|
@ -0,0 +1,43 @@
|
||||||
|
/*
|
||||||
|
* Copyright (C) 2015 The Android Open Source Project
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package com.android.tools.build.apkzlib.zip;
|
||||||
|
|
||||||
|
/** Invariant checking a zip field does not exceed a threshold. */
|
||||||
|
class ZipFieldInvariantMaxValue implements ZipFieldInvariant {
|
||||||
|
|
||||||
|
/** The maximum value allowed. */
|
||||||
|
private final long max;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Creates a new invariant.
|
||||||
|
*
|
||||||
|
* @param max the maximum value allowed for the field
|
||||||
|
*/
|
||||||
|
ZipFieldInvariantMaxValue(long max) {
|
||||||
|
this.max = max;
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public boolean isValid(long value) {
|
||||||
|
return value <= max;
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public String getName() {
|
||||||
|
return "Maximum value " + max;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
@ -0,0 +1,43 @@
|
||||||
|
/*
|
||||||
|
* Copyright (C) 2018 The Android Open Source Project
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package com.android.tools.build.apkzlib.zip;
|
||||||
|
|
||||||
|
/** Invariant checking a zip field doesn't go below a given value.*/
|
||||||
|
class ZipFieldInvariantMinValue implements ZipFieldInvariant {
|
||||||
|
|
||||||
|
/** The minimum value allowed. */
|
||||||
|
private final long min;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Creates a new invariant.
|
||||||
|
*
|
||||||
|
* @param min the minimum value allowed for the field
|
||||||
|
*/
|
||||||
|
ZipFieldInvariantMinValue(long min) {
|
||||||
|
this.min = min;
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public boolean isValid(long value) {
|
||||||
|
return value >= min;
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public String getName() {
|
||||||
|
return "Min value " + min;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
@ -0,0 +1,31 @@
|
||||||
|
/*
|
||||||
|
* Copyright (C) 2015 The Android Open Source Project
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package com.android.tools.build.apkzlib.zip;
|
||||||
|
|
||||||
|
/** Invariant that verifies a field's value is not negative. */
|
||||||
|
class ZipFieldInvariantNonNegative implements ZipFieldInvariant {
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public boolean isValid(long value) {
|
||||||
|
return value >= 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public String getName() {
|
||||||
|
return "Is positive";
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
@ -0,0 +1,29 @@
|
||||||
|
/*
|
||||||
|
* Copyright (C) 2015 The Android Open Source Project
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package com.android.tools.build.apkzlib.zip;
|
||||||
|
|
||||||
|
/** State of a {@link ZFile}: closed, open for reading, or open for reading and writing. */
enum ZipFileState {
  /** The zip file is closed. */
  CLOSED,

  /** The zip file is open in read-only mode. */
  OPEN_RO,

  /** The zip file is open in read-write mode. */
  OPEN_RW
}
|
||||||
|
|
@ -0,0 +1,84 @@
|
||||||
|
/*
|
||||||
|
* Copyright (C) 2016 The Android Open Source Project
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package com.android.tools.build.apkzlib.zip.compress;
|
||||||
|
|
||||||
|
import com.android.tools.build.apkzlib.bytestorage.ByteStorage;
|
||||||
|
import com.android.tools.build.apkzlib.zip.CompressionResult;
|
||||||
|
import com.android.tools.build.apkzlib.zip.utils.ByteTracker;
|
||||||
|
import com.android.tools.build.apkzlib.zip.utils.CloseableByteSource;
|
||||||
|
import com.google.common.base.Preconditions;
|
||||||
|
import java.util.concurrent.Executor;
|
||||||
|
import java.util.zip.Deflater;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Compressor that tries both the best and default compression algorithms and picks the default
|
||||||
|
* unless the best is at least a given percentage smaller.
|
||||||
|
*/
|
||||||
|
public class BestAndDefaultDeflateExecutorCompressor extends ExecutorCompressor {
|
||||||
|
|
||||||
|
/** Deflater using the default compression level. */
|
||||||
|
private final DeflateExecutionCompressor defaultDeflater;
|
||||||
|
|
||||||
|
/** Deflater using the best compression level. */
|
||||||
|
private final DeflateExecutionCompressor bestDeflater;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Minimum best compression size / default compression size ratio needed to pick the default
|
||||||
|
* compression size.
|
||||||
|
*/
|
||||||
|
private final double minRatio;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Creates a new compressor.
|
||||||
|
*
|
||||||
|
* @param executor the executor used to perform compression activities.
|
||||||
|
* @param minRatio the minimum best compression size / default compression size needed to pick the
|
||||||
|
* default compression size; if {@code 0.0} then the default compression is always picked, if
|
||||||
|
* {@code 1.0} then the best compression is always picked unless it produces the exact same
|
||||||
|
* size as the default compression.
|
||||||
|
*/
|
||||||
|
public BestAndDefaultDeflateExecutorCompressor(Executor executor, double minRatio) {
|
||||||
|
super(executor);
|
||||||
|
|
||||||
|
Preconditions.checkArgument(minRatio >= 0.0, "minRatio < 0.0");
|
||||||
|
Preconditions.checkArgument(minRatio <= 1.0, "minRatio > 1.0");
|
||||||
|
|
||||||
|
defaultDeflater = new DeflateExecutionCompressor(executor, Deflater.DEFAULT_COMPRESSION);
|
||||||
|
bestDeflater = new DeflateExecutionCompressor(executor, Deflater.BEST_COMPRESSION);
|
||||||
|
this.minRatio = minRatio;
|
||||||
|
}
|
||||||
|
|
||||||
|
@Deprecated
|
||||||
|
public BestAndDefaultDeflateExecutorCompressor(
|
||||||
|
Executor executor, ByteTracker tracker, double minRatio) {
|
||||||
|
this(executor, minRatio);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
protected CompressionResult immediateCompress(CloseableByteSource source, ByteStorage storage)
|
||||||
|
throws Exception {
|
||||||
|
CompressionResult defaultResult = defaultDeflater.immediateCompress(source, storage);
|
||||||
|
CompressionResult bestResult = bestDeflater.immediateCompress(source, storage);
|
||||||
|
|
||||||
|
double sizeRatio = bestResult.getSize() / (double) defaultResult.getSize();
|
||||||
|
if (sizeRatio >= minRatio) {
|
||||||
|
return defaultResult;
|
||||||
|
} else {
|
||||||
|
return bestResult;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
@ -0,0 +1,73 @@
|
||||||
|
/*
|
||||||
|
* Copyright (C) 2016 The Android Open Source Project
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package com.android.tools.build.apkzlib.zip.compress;
|
||||||
|
|
||||||
|
import com.android.tools.build.apkzlib.bytestorage.ByteStorage;
|
||||||
|
import com.android.tools.build.apkzlib.bytestorage.CloseableByteSourceFromOutputStreamBuilder;
|
||||||
|
import com.android.tools.build.apkzlib.zip.CompressionMethod;
|
||||||
|
import com.android.tools.build.apkzlib.zip.CompressionResult;
|
||||||
|
import com.android.tools.build.apkzlib.zip.utils.ByteTracker;
|
||||||
|
import com.android.tools.build.apkzlib.zip.utils.CloseableByteSource;
|
||||||
|
import com.google.common.io.ByteStreams;
|
||||||
|
import java.io.InputStream;
|
||||||
|
import java.util.concurrent.Executor;
|
||||||
|
import java.util.zip.Deflater;
|
||||||
|
import java.util.zip.DeflaterOutputStream;
|
||||||
|
|
||||||
|
/** Compressor that uses deflate with an executor. */
|
||||||
|
public class DeflateExecutionCompressor extends ExecutorCompressor {
|
||||||
|
|
||||||
|
/** Deflate compression level. */
|
||||||
|
private final int level;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Creates a new compressor.
|
||||||
|
*
|
||||||
|
* @param executor the executor to run deflation tasks
|
||||||
|
* @param level the compression level
|
||||||
|
*/
|
||||||
|
public DeflateExecutionCompressor(Executor executor, int level) {
|
||||||
|
super(executor);
|
||||||
|
|
||||||
|
this.level = level;
|
||||||
|
}
|
||||||
|
|
||||||
|
@Deprecated
|
||||||
|
public DeflateExecutionCompressor(Executor executor, ByteTracker tracker, int level) {
|
||||||
|
this(executor, level);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
protected CompressionResult immediateCompress(CloseableByteSource source, ByteStorage storage)
|
||||||
|
throws Exception {
|
||||||
|
Deflater deflater = new Deflater(level, true);
|
||||||
|
CloseableByteSourceFromOutputStreamBuilder resultBuilder = storage.makeBuilder();
|
||||||
|
|
||||||
|
try (InputStream inputStream = source.openBufferedStream();
|
||||||
|
DeflaterOutputStream dos = new DeflaterOutputStream(resultBuilder, deflater)) {
|
||||||
|
ByteStreams.copy(inputStream, dos);
|
||||||
|
}
|
||||||
|
|
||||||
|
CloseableByteSource result = resultBuilder.build();
|
||||||
|
if (result.size() >= source.size()) {
|
||||||
|
result.close();
|
||||||
|
return new CompressionResult(source, CompressionMethod.STORE, source.size());
|
||||||
|
} else {
|
||||||
|
return new CompressionResult(result, CompressionMethod.DEFLATE, result.size());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
@ -0,0 +1,71 @@
|
||||||
|
/*
|
||||||
|
* Copyright (C) 2016 The Android Open Source Project
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package com.android.tools.build.apkzlib.zip.compress;
|
||||||
|
|
||||||
|
import com.android.tools.build.apkzlib.bytestorage.ByteStorage;
|
||||||
|
import com.android.tools.build.apkzlib.zip.CompressionResult;
|
||||||
|
import com.android.tools.build.apkzlib.zip.Compressor;
|
||||||
|
import com.android.tools.build.apkzlib.zip.utils.CloseableByteSource;
|
||||||
|
import com.google.common.util.concurrent.ListenableFuture;
|
||||||
|
import com.google.common.util.concurrent.SettableFuture;
|
||||||
|
import java.util.concurrent.Executor;
|
||||||
|
|
||||||
|
/**
 * Compressor base class that runs {@link #immediateCompress(CloseableByteSource, ByteStorage)}
 * asynchronously on a supplied {@link Executor}.
 *
 * <p>{@link #compress(CloseableByteSource, ByteStorage)} returns immediately with a future that
 * completes once the scheduled task finishes. (NOTE(review): the previous javadoc described this
 * class as a "synchronous compressor" that "never returns an uncomputed future" — the code below
 * does the opposite: it schedules work on the executor and returns a not-yet-completed future.)
 */
public abstract class ExecutorCompressor implements Compressor {

  /** The executor that does the work. */
  private final Executor executor;

  /**
   * Compressor that delegates execution into the given executor.
   *
   * @param executor the executor that will do the compress
   */
  public ExecutorCompressor(Executor executor) {
    this.executor = executor;
  }

  @Override
  public ListenableFuture<CompressionResult> compress(
      CloseableByteSource source, ByteStorage storage) {
    final SettableFuture<CompressionResult> future = SettableFuture.create();
    // Schedule the work; the future is completed (or failed) from the executor's thread.
    executor.execute(
        () -> {
          try {
            future.set(immediateCompress(source, storage));
          } catch (Throwable e) {
            // Propagate every failure through the future so callers never lose the error.
            future.setException(e);
          }
        });

    return future;
  }

  /**
   * Immediately compresses a source.
   *
   * @param source the source to compress
   * @param storage a byte storage where the compressor can obtain data sources from
   * @return the result of compression
   * @throws Exception failed to compress
   */
  protected abstract CompressionResult immediateCompress(
      CloseableByteSource source, ByteStorage storage) throws Exception;
}
|
||||||
|
|
@ -0,0 +1,27 @@
|
||||||
|
/*
|
||||||
|
* Copyright (C) 2017 The Android Open Source Project
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package com.android.tools.build.apkzlib.zip.compress;
|
||||||
|
|
||||||
|
import java.io.IOException;
|
||||||
|
|
||||||
|
/** Exception raised by ZFile when encountering unsupported Zip64 format jar files. */
public class Zip64NotSupportedException extends IOException {

  /**
   * Creates a new exception.
   *
   * @param message description of the unsupported Zip64 feature that was encountered
   */
  public Zip64NotSupportedException(String message) {
    super(message);
  }

  /**
   * Creates a new exception that preserves the underlying cause.
   *
   * @param message description of the unsupported Zip64 feature that was encountered
   * @param cause the lower-level failure that triggered this exception
   */
  public Zip64NotSupportedException(String message, Throwable cause) {
    super(message, cause);
  }
}
|
||||||
|
|
@ -0,0 +1,18 @@
|
||||||
|
/*
|
||||||
|
* Copyright (C) 2017 The Android Open Source Project
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
/** Compressors to use with the {@code zip} package. */
|
||||||
|
package com.android.tools.build.apkzlib.zip.compress;
|
||||||
|
|
@ -0,0 +1,27 @@
|
||||||
|
/*
|
||||||
|
* Copyright (C) 2016 The Android Open Source Project
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package com.android.tools.build.apkzlib.zip.utils;
|
||||||
|
|
||||||
|
|
||||||
|
/**
 * Keeps track of used bytes allowing gauging memory usage.
 *
 * <p>The class is now empty and has no behavior; it remains only so the deprecated constructors
 * that still accept a tracker (and ignore it) stay source-compatible.
 *
 * @deprecated will be removed shortly.
 */
@Deprecated
public class ByteTracker {
}
|
||||||
|
|
@ -0,0 +1,59 @@
|
||||||
|
/*
|
||||||
|
* Copyright (C) 2016 The Android Open Source Project
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package com.android.tools.build.apkzlib.zip.utils;
|
||||||
|
|
||||||
|
import com.google.common.io.ByteSource;
|
||||||
|
import java.io.Closeable;
|
||||||
|
import java.io.IOException;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Byte source that can be closed. Closing a byte source allows releasing any resources associated
|
||||||
|
* with it. This should not be confused with closing streams. For example, {@link ByteTracker} uses
|
||||||
|
* {@code CloseableByteSources} to know when the data associated with the byte source can be
|
||||||
|
* released.
|
||||||
|
*/
|
||||||
|
public abstract class CloseableByteSource extends ByteSource implements Closeable {
|
||||||
|
|
||||||
|
/** Has the source been closed? */
|
||||||
|
private boolean closed;
|
||||||
|
|
||||||
|
/** Creates a new byte source. */
|
||||||
|
public CloseableByteSource() {
|
||||||
|
closed = false;
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public final synchronized void close() throws IOException {
|
||||||
|
if (closed) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
try {
|
||||||
|
innerClose();
|
||||||
|
} finally {
|
||||||
|
closed = true;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Closes the by source. This method is only invoked once, even if {@link #close()} is called
|
||||||
|
* multiple times.
|
||||||
|
*
|
||||||
|
* @throws IOException failed to close
|
||||||
|
*/
|
||||||
|
protected abstract void innerClose() throws IOException;
|
||||||
|
}
|
||||||
|
|
@ -0,0 +1,159 @@
|
||||||
|
/*
|
||||||
|
* Copyright (C) 2016 The Android Open Source Project
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package com.android.tools.build.apkzlib.zip.utils;
|
||||||
|
|
||||||
|
import com.google.common.hash.HashCode;
|
||||||
|
import com.google.common.hash.HashFunction;
|
||||||
|
import com.google.common.io.ByteProcessor;
|
||||||
|
import com.google.common.io.ByteSink;
|
||||||
|
import com.google.common.io.ByteSource;
|
||||||
|
import com.google.common.io.CharSource;
|
||||||
|
import java.io.IOException;
|
||||||
|
import java.io.InputStream;
|
||||||
|
import java.io.OutputStream;
|
||||||
|
import java.nio.charset.Charset;
|
||||||
|
import javax.annotation.Nullable;
|
||||||
|
|
||||||
|
/** Closeable byte source that delegates to another byte source. */
|
||||||
|
public class CloseableDelegateByteSource extends CloseableByteSource {
|
||||||
|
|
||||||
|
/** The byte source we delegate all operations to. {@code null} if disposed. */
|
||||||
|
@Nullable private ByteSource inner;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Size of the byte source. This is the same as {@code inner.size()} (when {@code inner} is not
|
||||||
|
* {@code null}), but we keep it separate to avoid calling {@code inner.size()} because it might
|
||||||
|
* throw {@code IOException}.
|
||||||
|
*/
|
||||||
|
private final long mSize;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Creates a new byte source.
|
||||||
|
*
|
||||||
|
* @param inner the inner byte source
|
||||||
|
* @param size the size of the source
|
||||||
|
*/
|
||||||
|
public CloseableDelegateByteSource(ByteSource inner, long size) {
|
||||||
|
this.inner = inner;
|
||||||
|
mSize = size;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Obtains the inner byte source. Will throw an exception if the inner by byte source has been
|
||||||
|
* disposed of.
|
||||||
|
*
|
||||||
|
* @return the inner byte source
|
||||||
|
*/
|
||||||
|
private synchronized ByteSource get() {
|
||||||
|
if (inner == null) {
|
||||||
|
throw new ByteSourceDisposedException();
|
||||||
|
}
|
||||||
|
|
||||||
|
return inner;
|
||||||
|
}
|
||||||
|
|
||||||
|
/** Mark the byte source as disposed. */
|
||||||
|
@Override
|
||||||
|
protected synchronized void innerClose() throws IOException {
|
||||||
|
if (inner == null) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
inner = null;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Obtains the size of this byte source. Equivalent to {@link #size()} but not throwing {@code
|
||||||
|
* IOException}.
|
||||||
|
*
|
||||||
|
* @return the size of the byte source
|
||||||
|
*/
|
||||||
|
public long sizeNoException() {
|
||||||
|
return mSize;
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public CharSource asCharSource(Charset charset) {
|
||||||
|
return get().asCharSource(charset);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public InputStream openBufferedStream() throws IOException {
|
||||||
|
return get().openBufferedStream();
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public ByteSource slice(long offset, long length) {
|
||||||
|
return get().slice(offset, length);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public boolean isEmpty() throws IOException {
|
||||||
|
return get().isEmpty();
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public long size() throws IOException {
|
||||||
|
return get().size();
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public long copyTo(OutputStream output) throws IOException {
|
||||||
|
return get().copyTo(output);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public long copyTo(ByteSink sink) throws IOException {
|
||||||
|
return get().copyTo(sink);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public byte[] read() throws IOException {
|
||||||
|
return get().read();
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public <T> T read(ByteProcessor<T> processor) throws IOException {
|
||||||
|
return get().read(processor);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public HashCode hash(HashFunction hashFunction) throws IOException {
|
||||||
|
return get().hash(hashFunction);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public boolean contentEquals(ByteSource other) throws IOException {
|
||||||
|
return get().contentEquals(other);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public InputStream openStream() throws IOException {
|
||||||
|
return get().openStream();
|
||||||
|
}
|
||||||
|
|
||||||
|
/** Exception thrown when trying to use a byte source that has been disposed. */
|
||||||
|
private static class ByteSourceDisposedException extends RuntimeException {
|
||||||
|
|
||||||
|
/** Creates a new exception. */
|
||||||
|
private ByteSourceDisposedException() {
|
||||||
|
super(
|
||||||
|
"Byte source was created by a ByteTracker and is now disposed. If you see "
|
||||||
|
+ "this message, then there is a bug.");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
@ -0,0 +1,157 @@
|
||||||
|
/*
|
||||||
|
* Copyright (C) 2015 The Android Open Source Project
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package com.android.tools.build.apkzlib.zip.utils;
|
||||||
|
|
||||||
|
import com.google.common.base.Preconditions;
|
||||||
|
import com.google.common.base.Verify;
|
||||||
|
import java.io.EOFException;
|
||||||
|
import java.io.IOException;
|
||||||
|
import java.nio.ByteBuffer;
|
||||||
|
import java.nio.ByteOrder;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Utilities to read and write 16, 32, and 64 bit integers with support for little-endian encoding,
|
||||||
|
* as used in zip files. Zip files actually use unsigned data types. We use Java's native (signed)
|
||||||
|
* data types but will use long (64 bit) to ensure we can fit the whole range for the 16 and 32
|
||||||
|
* bit fields.
|
||||||
|
*/
|
||||||
|
public class LittleEndianUtils {
|
||||||
|
/** Utility class, no constructor. */
|
||||||
|
private LittleEndianUtils() {}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Reads 8 bytes in little-endian format and converts them into a 64-bit value.
|
||||||
|
*
|
||||||
|
* @param bytes from where should the bytes be read; the first 8 bytes of the source will be read.
|
||||||
|
* @return the 64-bit value
|
||||||
|
* @throws IOException failed to read the value.
|
||||||
|
*/
|
||||||
|
public static long readUnsigned8Le(ByteBuffer bytes) throws IOException {
|
||||||
|
Preconditions.checkNotNull(bytes, "bytes == null");
|
||||||
|
|
||||||
|
if (bytes.remaining() < 8) {
|
||||||
|
throw new EOFException(
|
||||||
|
"Not enough data: 8 bytes expected, " + bytes.remaining() + " available.");
|
||||||
|
}
|
||||||
|
|
||||||
|
ByteOrder order = bytes.order();
|
||||||
|
bytes.order(ByteOrder.LITTLE_ENDIAN);
|
||||||
|
long r = bytes.getLong();
|
||||||
|
bytes.order(order);
|
||||||
|
return r;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Reads 4 bytes in little-endian format and converts them into a 32-bit value.
|
||||||
|
*
|
||||||
|
* @param bytes from where should the bytes be read; the first 4 bytes of the source will be read
|
||||||
|
* @return the 32-bit value
|
||||||
|
* @throws IOException failed to read the value
|
||||||
|
*/
|
||||||
|
public static long readUnsigned4Le(ByteBuffer bytes) throws IOException {
|
||||||
|
Preconditions.checkNotNull(bytes, "bytes == null");
|
||||||
|
|
||||||
|
if (bytes.remaining() < 4) {
|
||||||
|
throw new EOFException(
|
||||||
|
"Not enough data: 4 bytes expected, " + bytes.remaining() + " available.");
|
||||||
|
}
|
||||||
|
|
||||||
|
byte b0 = bytes.get();
|
||||||
|
byte b1 = bytes.get();
|
||||||
|
byte b2 = bytes.get();
|
||||||
|
byte b3 = bytes.get();
|
||||||
|
long r = (b0 & 0xff) | ((b1 & 0xff) << 8) | ((b2 & 0xff) << 16) | ((b3 & 0xffL) << 24);
|
||||||
|
Verify.verify(r >= 0);
|
||||||
|
Verify.verify(r <= 0x00000000ffffffffL);
|
||||||
|
return r;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Reads 2 bytes in little-endian format and converts them into a 16-bit value.
|
||||||
|
*
|
||||||
|
* @param bytes from where should the bytes be read; the first 2 bytes of the source will be read
|
||||||
|
* @return the 16-bit value
|
||||||
|
* @throws IOException failed to read the value
|
||||||
|
*/
|
||||||
|
public static int readUnsigned2Le(ByteBuffer bytes) throws IOException {
|
||||||
|
Preconditions.checkNotNull(bytes, "bytes == null");
|
||||||
|
|
||||||
|
if (bytes.remaining() < 2) {
|
||||||
|
throw new EOFException(
|
||||||
|
"Not enough data: 2 bytes expected, " + bytes.remaining() + " available.");
|
||||||
|
}
|
||||||
|
|
||||||
|
byte b0 = bytes.get();
|
||||||
|
byte b1 = bytes.get();
|
||||||
|
int r = (b0 & 0xff) | ((b1 & 0xff) << 8);
|
||||||
|
|
||||||
|
Verify.verify(r >= 0);
|
||||||
|
Verify.verify(r <= 0x0000ffff);
|
||||||
|
return r;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Writes 8 bytes in little-endian format, converting them from a <em> signed </em> 64-bit value.
|
||||||
|
*
|
||||||
|
* @param output the output stream where the bytes will be written.
|
||||||
|
* @param value the 64-bit value to convert.
|
||||||
|
* @throws IOException failed to write the value data.
|
||||||
|
*/
|
||||||
|
public static void writeUnsigned8Le(ByteBuffer output, long value) throws IOException {
|
||||||
|
Preconditions.checkNotNull(output, "output == null");
|
||||||
|
|
||||||
|
ByteOrder order = output.order();
|
||||||
|
output.order(ByteOrder.LITTLE_ENDIAN);
|
||||||
|
output.putLong(value);
|
||||||
|
output.order(order);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Writes 4 bytes in little-endian format, converting them from a 32-bit value.
|
||||||
|
*
|
||||||
|
* @param output the output stream where the bytes will be written
|
||||||
|
* @param value the 32-bit value to convert
|
||||||
|
* @throws IOException failed to write the value data
|
||||||
|
*/
|
||||||
|
public static void writeUnsigned4Le(ByteBuffer output, long value) throws IOException {
|
||||||
|
Preconditions.checkNotNull(output, "output == null");
|
||||||
|
Preconditions.checkArgument(value >= 0, "value (%s) < 0", value);
|
||||||
|
Preconditions.checkArgument(
|
||||||
|
value <= 0x00000000ffffffffL, "value (%s) > 0x00000000ffffffffL", value);
|
||||||
|
|
||||||
|
output.put((byte) (value & 0xff));
|
||||||
|
output.put((byte) ((value >> 8) & 0xff));
|
||||||
|
output.put((byte) ((value >> 16) & 0xff));
|
||||||
|
output.put((byte) ((value >> 24) & 0xff));
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Writes 2 bytes in little-endian format, converting them from a 16-bit value.
|
||||||
|
*
|
||||||
|
* @param output the output stream where the bytes will be written
|
||||||
|
* @param value the 16-bit value to convert
|
||||||
|
* @throws IOException failed to write the value data
|
||||||
|
*/
|
||||||
|
public static void writeUnsigned2Le(ByteBuffer output, int value) throws IOException {
|
||||||
|
Preconditions.checkNotNull(output, "output == null");
|
||||||
|
Preconditions.checkArgument(value >= 0, "value (%s) < 0", value);
|
||||||
|
Preconditions.checkArgument(value <= 0x0000ffff, "value (%s) > 0x0000ffff", value);
|
||||||
|
|
||||||
|
output.put((byte) (value & 0xff));
|
||||||
|
output.put((byte) ((value >> 8) & 0xff));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
@ -0,0 +1,105 @@
|
||||||
|
/*
|
||||||
|
* Copyright (C) 2015 The Android Open Source Project
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package com.android.tools.build.apkzlib.zip.utils;
|
||||||
|
|
||||||
|
import com.google.common.base.Verify;
import java.time.Instant;
import java.time.LocalDateTime;
import java.time.ZoneId;
import java.util.Calendar;
import java.util.Date;
|
||||||
|
|
||||||
|
/** Yes. This actually refers to MS-DOS in 2015. That's all I have to say about legacy stuff. */
|
||||||
|
public class MsDosDateTimeUtils {
|
||||||
|
/** Utility class: no constructor. */
|
||||||
|
private MsDosDateTimeUtils() {}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Packs java time value into an MS-DOS time value.
|
||||||
|
*
|
||||||
|
* @param time the time value
|
||||||
|
* @return the MS-DOS packed time
|
||||||
|
*/
|
||||||
|
public static int packTime(long time) {
|
||||||
|
Calendar c = Calendar.getInstance();
|
||||||
|
c.setTime(new Date(time));
|
||||||
|
|
||||||
|
int seconds = c.get(Calendar.SECOND);
|
||||||
|
int minutes = c.get(Calendar.MINUTE);
|
||||||
|
int hours = c.get(Calendar.HOUR_OF_DAY);
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Here is how MS-DOS packs a time value:
|
||||||
|
* 0-4: seconds (divided by 2 because we only have 5 bits = 32 different numbers)
|
||||||
|
* 5-10: minutes (6 bits = 64 possible values)
|
||||||
|
* 11-15: hours (5 bits = 32 possible values)
|
||||||
|
*
|
||||||
|
* source: https://msdn.microsoft.com/en-us/library/windows/desktop/ms724247(v=vs.85).aspx
|
||||||
|
*/
|
||||||
|
return (hours << 11) | (minutes << 5) | (seconds / 2);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Packs the current time value into an MS-DOS time value.
|
||||||
|
*
|
||||||
|
* @return the MS-DOS packed time
|
||||||
|
*/
|
||||||
|
public static int packCurrentTime() {
|
||||||
|
return packTime(new Date().getTime());
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Packs java time value into an MS-DOS date value.
|
||||||
|
*
|
||||||
|
* @param time the time value
|
||||||
|
* @return the MS-DOS packed date
|
||||||
|
*/
|
||||||
|
public static int packDate(long time) {
|
||||||
|
Calendar c = Calendar.getInstance();
|
||||||
|
c.setTime(new Date(time));
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Even MS-DOS used 1 for January. Someone wasn't really thinking when they decided on Java
|
||||||
|
* it would start at 0...
|
||||||
|
*/
|
||||||
|
int day = c.get(Calendar.DAY_OF_MONTH);
|
||||||
|
int month = c.get(Calendar.MONTH) + 1;
|
||||||
|
|
||||||
|
/*
|
||||||
|
* MS-DOS counts years starting from 1980. Since its launch date was in 81, it was obviously
|
||||||
|
* not necessary to talk about dates earlier than that.
|
||||||
|
*/
|
||||||
|
int year = c.get(Calendar.YEAR) - 1980;
|
||||||
|
Verify.verify(year >= 0 && year < 128);
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Here is how MS-DOS packs a date value:
|
||||||
|
* 0-4: day (5 bits = 32 values)
|
||||||
|
* 5-8: month (4 bits = 16 values)
|
||||||
|
* 9-15: year (7 bits = 128 values)
|
||||||
|
*
|
||||||
|
* source: https://msdn.microsoft.com/en-us/library/windows/desktop/ms724247(v=vs.85).aspx
|
||||||
|
*/
|
||||||
|
return (year << 9) | (month << 5) | day;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Packs the current time value into an MS-DOS date value.
|
||||||
|
*
|
||||||
|
* @return the MS-DOS packed date
|
||||||
|
*/
|
||||||
|
public static int packCurrentDate() {
|
||||||
|
return packDate(new Date().getTime());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
@ -0,0 +1,55 @@
|
||||||
|
/*
|
||||||
|
* Copyright (C) 2015 The Android Open Source Project
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package com.android.tools.build.apkzlib.zip.utils;
|
||||||
|
|
||||||
|
import java.io.IOException;
|
||||||
|
import java.io.RandomAccessFile;
|
||||||
|
|
||||||
|
/** Utility class with utility methods for random access files. */
|
||||||
|
public final class RandomAccessFileUtils {
|
||||||
|
|
||||||
|
private RandomAccessFileUtils() {}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Reads from an random access file until the provided array is filled. Data is read from the
|
||||||
|
* current position in the file.
|
||||||
|
*
|
||||||
|
* @param raf the file to read data from
|
||||||
|
* @param data the array that will receive the data
|
||||||
|
* @throws IOException failed to read the data
|
||||||
|
*/
|
||||||
|
public static void fullyRead(RandomAccessFile raf, byte[] data) throws IOException {
|
||||||
|
int r;
|
||||||
|
int p = 0;
|
||||||
|
|
||||||
|
while ((r = raf.read(data, p, data.length - p)) > 0) {
|
||||||
|
p += r;
|
||||||
|
if (p == data.length) {
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if (p < data.length) {
|
||||||
|
throw new IOException(
|
||||||
|
"Failed to read "
|
||||||
|
+ data.length
|
||||||
|
+ " bytes from file. Only "
|
||||||
|
+ p
|
||||||
|
+ " bytes could be read.");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
@ -4,12 +4,9 @@ buildscript {
|
||||||
repositories {
|
repositories {
|
||||||
google()
|
google()
|
||||||
mavenCentral()
|
mavenCentral()
|
||||||
maven { url "https://jcenter.bintray.com" }
|
|
||||||
maven { url "https://jitpack.io" }
|
|
||||||
}
|
}
|
||||||
dependencies {
|
dependencies {
|
||||||
classpath 'com.android.tools.build:gradle:7.1.0-alpha10'
|
classpath 'com.android.tools.build:gradle:7.1.0-alpha10'
|
||||||
classpath "org.jetbrains.kotlin:kotlin-gradle-plugin:1.5.21"
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -32,8 +29,6 @@ allprojects {
|
||||||
repositories {
|
repositories {
|
||||||
google()
|
google()
|
||||||
mavenCentral()
|
mavenCentral()
|
||||||
maven { url "https://jcenter.bintray.com" }
|
|
||||||
maven { url "https://jitpack.io" }
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -13,8 +13,8 @@ dependencies {
|
||||||
implementation fileTree(dir: 'libs', include: ['*.jar'])
|
implementation fileTree(dir: 'libs', include: ['*.jar'])
|
||||||
implementation project(':axmlprinter')
|
implementation project(':axmlprinter')
|
||||||
implementation project(':share')
|
implementation project(':share')
|
||||||
implementation 'commons-io:commons-io:2.10.0'
|
implementation project(':apkzlib')
|
||||||
implementation 'com.android.tools.build:apkzlib:4.2.2'
|
implementation 'commons-io:commons-io:2.11.0'
|
||||||
implementation 'com.beust:jcommander:1.81'
|
implementation 'com.beust:jcommander:1.81'
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -40,7 +40,7 @@ jar {
|
||||||
from fileTree(dir: "$rootProject.projectDir/out/so")
|
from fileTree(dir: "$rootProject.projectDir/out/so")
|
||||||
}
|
}
|
||||||
|
|
||||||
exclude 'META-INF/*.SF', 'META-INF/*.DSA', 'META-INF/*.RSA', 'META-INF/*.MF', 'META-INF/*.txt'
|
exclude 'META-INF/*.SF', 'META-INF/*.DSA', 'META-INF/*.RSA', 'META-INF/*.MF', 'META-INF/*.txt', "META-INF/versions/**"
|
||||||
}
|
}
|
||||||
|
|
||||||
tasks.register("buildDebug") {
|
tasks.register("buildDebug") {
|
||||||
|
|
|
||||||
|
|
@ -14,3 +14,4 @@ include ':patch'
|
||||||
include ':axmlprinter'
|
include ':axmlprinter'
|
||||||
include ':share'
|
include ':share'
|
||||||
include ':appstub'
|
include ':appstub'
|
||||||
|
include ':apkzlib'
|
||||||
|
|
|
||||||
Loading…
Reference in New Issue