Thanks to visit codestin.com
Credit goes to github.com

Skip to content

Commit 110c80f

Browse files
bit more cleanup
1 parent 11dac99 commit 110c80f

File tree

3 files changed

+11
-9
lines changed

3 files changed

+11
-9
lines changed

arrow/src/main/java/org/apache/iceberg/arrow/vectorized/VectorizedArrowReader.java

Lines changed: 9 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -628,12 +628,11 @@ public VectorHolder read(VectorHolder reuse, int numValsToRead) {
628628
VectorHolder ids = idReader.read(null, numValsToRead);
629629
BigIntVector vec = allocateBigIntVector(ROW_ID_ARROW_FIELD, numValsToRead);
630630
ArrowBuf dataBuffer = vec.getDataBuffer();
631-
boolean isNullReader = ids.vector() == null;
632631
ArrowVectorAccessor<?, String, ?, ?> idsAccessor =
633-
isNullReader ? null : ArrowVectorAccessors.getVectorAccessor(ids);
632+
ids.vector() == null ? null : ArrowVectorAccessors.getVectorAccessor(ids);
634633
for (int i = 0; i < numValsToRead; i += 1) {
635634
long bufferOffset = (long) i * Long.BYTES;
636-
if (isNullReader || ids.nullabilityHolder().isNullAt(i) == 1) {
635+
if (idsAccessor == null || isNull(ids, i)) {
637636
long rowId = firstRowId + (Long) positions.getObject(i);
638637
dataBuffer.setLong(bufferOffset, rowId);
639638
} else {
@@ -688,14 +687,14 @@ public VectorHolder read(VectorHolder reuse, int numValsToRead) {
688687
BigIntVector vec = allocateBigIntVector(LAST_UPDATED_SEQ, numValsToRead);
689688
ArrowBuf dataBuffer = vec.getDataBuffer();
690689
VectorHolder seqNumbers = seqReader.read(null, numValsToRead);
691-
ArrowVectorAccessor<?, String, ?, ?> accessor =
690+
ArrowVectorAccessor<?, String, ?, ?> seqAccessor =
692691
seqNumbers.vector() == null ? null : ArrowVectorAccessors.getVectorAccessor(seqNumbers);
693692
for (int i = 0; i < numValsToRead; i += 1) {
694693
long bufferOffset = (long) i * Long.BYTES;
695-
if (seqNumbers.vector() == null || seqNumbers.nullabilityHolder().isNullAt(i) == 1) {
694+
if (seqAccessor == null || isNull(seqNumbers, i)) {
696695
dataBuffer.setLong(bufferOffset, lastUpdatedSeq);
697696
} else {
698-
long materializedSeqNumber = accessor.getLong(i);
697+
long materializedSeqNumber = seqAccessor.getLong(i);
699698
dataBuffer.setLong(bufferOffset, materializedSeqNumber);
700699
}
701700
}
@@ -725,6 +724,10 @@ public void close() {
725724
}
726725
}
727726

727+
private static boolean isNull(VectorHolder holder, int index) {
728+
return holder.nullabilityHolder().isNullAt(index) == 1;
729+
}
730+
728731
private static BigIntVector allocateBigIntVector(Field field, int valueCount) {
729732
BigIntVector vector = (BigIntVector) field.createVector(ArrowAllocation.rootAllocator());
730733
vector.allocateNew(valueCount);

spark/v3.5/spark/src/test/java/org/apache/iceberg/spark/data/GenericsHelpers.java

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -77,11 +77,11 @@ public static void assertEqualsBatch(
7777
Iterator<Record> expectedRecords,
7878
ColumnarBatch batch,
7979
Map<Integer, Object> idToConstant,
80-
Integer numRowsAlreadyRead) {
80+
Integer batchFirstRowPos) {
8181
for (int rowPos = 0; rowPos < batch.numRows(); rowPos++) {
8282
InternalRow row = batch.getRow(rowPos);
8383
Record expectedRecord = expectedRecords.next();
84-
assertEqualsUnsafe(struct, expectedRecord, row, idToConstant, numRowsAlreadyRead + rowPos);
84+
assertEqualsUnsafe(struct, expectedRecord, row, idToConstant, batchFirstRowPos + rowPos);
8585
}
8686
}
8787

spark/v3.5/spark/src/test/java/org/apache/iceberg/spark/data/TestHelpers.java

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -50,7 +50,6 @@
5050
import org.apache.iceberg.FileContent;
5151
import org.apache.iceberg.FileScanTask;
5252
import org.apache.iceberg.ManifestFile;
53-
import org.apache.iceberg.MetadataColumns;
5453
import org.apache.iceberg.Schema;
5554
import org.apache.iceberg.Snapshot;
5655
import org.apache.iceberg.Table;

0 commit comments

Comments (0)