+ Map<String, String> additonalArguments = new HashMap<>();
+ if (getTransactionIsolationLevel() != null) {
+ additonalArguments.put(TransactionIsolationLevel.CONF_KEY, getTransactionIsolationLevel());
+ }
+ return additonalArguments;
+ }
+
+ public String getTransactionIsolationLevel() {
+ if (transactionIsolationLevel == null) {
+ return null;
+ }
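+ // Level.valueOf(...) validates the configured name against the supported levels and fails fast on an unknown value.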
+ return TransactionIsolationLevel.Level.valueOf(transactionIsolationLevel).name();
+ }
}
+
diff --git a/database-commons/src/main/java/io/cdap/plugin/db/source/AbstractDBSource.java b/database-commons/src/main/java/io/cdap/plugin/db/source/AbstractDBSource.java
index 987b5cc17..8eeb4a155 100644
--- a/database-commons/src/main/java/io/cdap/plugin/db/source/AbstractDBSource.java
+++ b/database-commons/src/main/java/io/cdap/plugin/db/source/AbstractDBSource.java
@@ -484,7 +484,7 @@ public void validateSchema(Schema actualSchema, FailureCollector collector) {
}
@VisibleForTesting
- static void validateSchema(Schema actualSchema, Schema configSchema, FailureCollector collector) {
+ void validateSchema(Schema actualSchema, Schema configSchema, FailureCollector collector) {
if (configSchema == null) {
collector.addFailure("Schema should not be null or empty.", null)
.withConfigProperty(SCHEMA);
@@ -505,14 +505,20 @@ static void validateSchema(Schema actualSchema, Schema configSchema, FailureColl
Schema expectedFieldSchema = field.getSchema().isNullable() ?
field.getSchema().getNonNullable() : field.getSchema();
- if (actualFieldSchema.getType() != expectedFieldSchema.getType() ||
- actualFieldSchema.getLogicalType() != expectedFieldSchema.getLogicalType()) {
- collector.addFailure(
- String.format("Schema field '%s' has type '%s but found '%s'.",
- field.getName(), expectedFieldSchema.getDisplayName(),
- actualFieldSchema.getDisplayName()), null)
- .withOutputSchemaField(field.getName());
- }
+ validateField(collector, field, actualFieldSchema, expectedFieldSchema);
+ }
+ }
+
+ protected void validateField(FailureCollector collector, Schema.Field field, Schema actualFieldSchema,
+ Schema expectedFieldSchema) {
+ if (actualFieldSchema.getType() != expectedFieldSchema.getType() ||
+ actualFieldSchema.getLogicalType() != expectedFieldSchema.getLogicalType()) {
+ collector.addFailure(
+ String.format("Schema field '%s' is expected to have type '%s but found '%s'.", field.getName(),
+ expectedFieldSchema.getDisplayName(), actualFieldSchema.getDisplayName()),
+ String.format("Change the data type of field %s to %s.", field.getName(),
+ actualFieldSchema.getDisplayName()))
+ .withOutputSchemaField(field.getName());
}
}
diff --git a/database-commons/src/main/java/io/cdap/plugin/db/source/DataDrivenETLDBInputFormat.java b/database-commons/src/main/java/io/cdap/plugin/db/source/DataDrivenETLDBInputFormat.java
index 3416360a9..1c347dd03 100644
--- a/database-commons/src/main/java/io/cdap/plugin/db/source/DataDrivenETLDBInputFormat.java
+++ b/database-commons/src/main/java/io/cdap/plugin/db/source/DataDrivenETLDBInputFormat.java
@@ -28,6 +28,7 @@
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.db.DBConfiguration;
import org.apache.hadoop.mapreduce.lib.db.DBInputFormat;
+import org.apache.hadoop.mapreduce.lib.db.DBSplitter;
import org.apache.hadoop.mapreduce.lib.db.DBWritable;
import org.apache.hadoop.mapreduce.lib.db.DataDrivenDBInputFormat;
import org.slf4j.Logger;
@@ -39,6 +40,7 @@
import java.sql.DriverManager;
import java.sql.SQLException;
import java.sql.Statement;
+import java.sql.Types;
import java.util.Properties;
/**
@@ -128,6 +130,15 @@ public Connection createConnection() {
return getConnection();
}
+ @Override
+ protected DBSplitter getSplitter(int sqlDataType) {
+ // Use SafeBigDecimalSplitter for high-precision DECIMAL and NUMERIC columns
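+ // The default BigDecimalSplitter can round the computed split size down to 0 and fall back to its tiny
+ // MIN_INCREMENT, producing an excessive number of splits; see the SafeBigDecimalSplitter javadoc below.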
+ if (sqlDataType == Types.NUMERIC || sqlDataType == Types.DECIMAL) {
+ return new SafeBigDecimalSplitter();
+ }
+ return super.getSplitter(sqlDataType);
+ }
+
@Override
public RecordReader createDBRecordReader(DBInputSplit split, Configuration conf) throws IOException {
final RecordReader dbRecordReader = super.createDBRecordReader(split, conf);
diff --git a/database-commons/src/main/java/io/cdap/plugin/db/source/SafeBigDecimalSplitter.java b/database-commons/src/main/java/io/cdap/plugin/db/source/SafeBigDecimalSplitter.java
new file mode 100644
index 000000000..8649515e8
--- /dev/null
+++ b/database-commons/src/main/java/io/cdap/plugin/db/source/SafeBigDecimalSplitter.java
@@ -0,0 +1,55 @@
+/*
+ * Copyright © 2025 Cask Data, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy of
+ * the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package io.cdap.plugin.db.source;
+
+import org.apache.hadoop.mapreduce.lib.db.BigDecimalSplitter;
+import java.math.BigDecimal;
+import java.math.RoundingMode;
+
+/**
+ * Safe implementation of {@link BigDecimalSplitter} to ensure precise division of BigDecimal values while calculating
+ * split points for NUMERIC and DECIMAL types.
+ *
+ * Problem: The default {@link BigDecimalSplitter} implementation may return 0 when the numerator is smaller than the
+ * denominator (e.g., 1 / 4 = 0), due to the lack of a defined scale for division. Since the result (0) is smaller than
+ * {@link BigDecimalSplitter#MIN_INCREMENT} (i.e. {@code 10000 * Double.MIN_VALUE}), the split size defaults to
+ * {@code MIN_INCREMENT}, leading to an excessive number of splits (~10M) and potential OOM errors.
+ *
+ * Fix: This implementation derives the division scale from the scales of the numerator and denominator, adds a
+ * buffer of 5 decimal places, and uses {@link RoundingMode#HALF_UP} as the rounding mode.
+ * Note: This class is used by {@link DataDrivenETLDBInputFormat}.
+ */
+public class SafeBigDecimalSplitter extends BigDecimalSplitter {
+
+ /* An additional buffer of +5 digits is applied to preserve accuracy during division. */
+ public static final int SCALE_BUFFER = 5;
+ /**
+ * Performs safe division with correct scale handling.
+ *
+ * @param numerator the dividend (BigDecimal)
+ * @param denominator the divisor (BigDecimal)
+ * @return quotient with derived scale
+ * @throws ArithmeticException if denominator is zero
+ */
+ @Override
+ protected BigDecimal tryDivide(BigDecimal numerator, BigDecimal denominator) {
+ // Determine the required scale for the division and add a buffer to ensure accuracy
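+ // For example, 1 / 4 uses effectiveScale = max(0, 0) + 5 = 5 and yields 0.25000 instead of 0.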
+ int effectiveScale = Math.max(numerator.scale(), denominator.scale()) + SCALE_BUFFER;
+ return numerator.divide(denominator, effectiveScale, RoundingMode.HALF_UP);
+ }
+}
diff --git a/database-commons/src/test/java/io/cdap/plugin/db/source/AbstractDBSourceTest.java b/database-commons/src/test/java/io/cdap/plugin/db/source/AbstractDBSourceTest.java
index 3dc7a2d1c..a8be38b46 100644
--- a/database-commons/src/test/java/io/cdap/plugin/db/source/AbstractDBSourceTest.java
+++ b/database-commons/src/test/java/io/cdap/plugin/db/source/AbstractDBSourceTest.java
@@ -43,11 +43,17 @@ public class AbstractDBSourceTest {
Schema.Field.of("double_column", Schema.nullableOf(Schema.of(Schema.Type.DOUBLE))),
Schema.Field.of("boolean_column", Schema.nullableOf(Schema.of(Schema.Type.BOOLEAN)))
);
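+ // validateSchema is now an instance method (see the AbstractDBSource change above), so the tests use a minimal concrete config.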
+ private static final AbstractDBSource.DBSourceConfig TEST_CONFIG = new AbstractDBSource.DBSourceConfig() {
+ @Override
+ public String getConnectionString() {
+ return "";
+ }
+ };
@Test
public void testValidateSourceSchemaCorrectSchema() {
MockFailureCollector collector = new MockFailureCollector(MOCK_STAGE);
- AbstractDBSource.DBSourceConfig.validateSchema(SCHEMA, SCHEMA, collector);
+ TEST_CONFIG.validateSchema(SCHEMA, SCHEMA, collector);
Assert.assertEquals(0, collector.getValidationFailures().size());
}
@@ -65,7 +71,7 @@ public void testValidateSourceSchemaMismatchFields() {
);
MockFailureCollector collector = new MockFailureCollector(MOCK_STAGE);
- AbstractDBSource.DBSourceConfig.validateSchema(actualSchema, SCHEMA, collector);
+ TEST_CONFIG.validateSchema(actualSchema, SCHEMA, collector);
assertPropertyValidationFailed(collector, "boolean_column");
}
@@ -84,7 +90,7 @@ public void testValidateSourceSchemaInvalidFieldType() {
);
MockFailureCollector collector = new MockFailureCollector(MOCK_STAGE);
- AbstractDBSource.DBSourceConfig.validateSchema(actualSchema, SCHEMA, collector);
+ TEST_CONFIG.validateSchema(actualSchema, SCHEMA, collector);
assertPropertyValidationFailed(collector, "boolean_column");
}
diff --git a/database-commons/src/test/java/io/cdap/plugin/db/source/SafeBigDecimalSplitterTest.java b/database-commons/src/test/java/io/cdap/plugin/db/source/SafeBigDecimalSplitterTest.java
new file mode 100644
index 000000000..4aff4eac2
--- /dev/null
+++ b/database-commons/src/test/java/io/cdap/plugin/db/source/SafeBigDecimalSplitterTest.java
@@ -0,0 +1,98 @@
+/*
+ * Copyright © 2025 Cask Data, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy of
+ * the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package io.cdap.plugin.db.source;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.mapreduce.InputSplit;
+import org.apache.hadoop.mapreduce.lib.db.BigDecimalSplitter;
+import org.junit.Test;
+
+import java.math.BigDecimal;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.util.List;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertThrows;
+import static org.junit.Assert.assertTrue;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+/**
+ * Test class for {@link SafeBigDecimalSplitter}
+ */
+public class SafeBigDecimalSplitterTest {
+ private final SafeBigDecimalSplitter splitter = new SafeBigDecimalSplitter();
+
+ @Test
+ public void testSmallRangeDivision() {
+ BigDecimal result = splitter.tryDivide(BigDecimal.ONE, new BigDecimal("4"));
+ assertEquals(new BigDecimal("0.25000"), result);
+ }
+
+ @Test
+ public void testLargePrecision() {
+ BigDecimal numerator = new BigDecimal("1.0000000000000000001");
+ BigDecimal denominator = new BigDecimal("3");
+ BigDecimal result = splitter.tryDivide(numerator, denominator);
+ assertTrue(result.compareTo(BigDecimal.ZERO) > 0);
+ }
+
+ @Test
+ public void testDivisionByZero() {
+ assertThrows(ArithmeticException.class, () ->
+ splitter.tryDivide(BigDecimal.ONE, BigDecimal.ZERO));
+ }
+
+ @Test
+ public void testDivisionWithZeroNumerator() {
+ // when minVal == maxVal
+ BigDecimal result = splitter.tryDivide(BigDecimal.ZERO, BigDecimal.ONE);
+ assertEquals(0, result.compareTo(BigDecimal.ZERO));
+ }
+
+ @Test
+ public void testSplits() throws SQLException {
+ BigDecimal minVal = BigDecimal.valueOf(1);
+ BigDecimal maxVal = BigDecimal.valueOf(2);
+ int numSplits = 4;
+ ResultSet resultSet = mock(ResultSet.class);
+ Configuration conf = mock(Configuration.class);
+ when(conf.getInt("mapreduce.job.maps", 1)).thenReturn(numSplits);
+ when(resultSet.getBigDecimal(1)).thenReturn(minVal);
+ when(resultSet.getBigDecimal(2)).thenReturn(maxVal);
+ BigDecimalSplitter bigDecimalSplitter = new SafeBigDecimalSplitter();
+ List<InputSplit> actualSplits = bigDecimalSplitter.split(conf, resultSet, "id");
+ assertEquals(numSplits, actualSplits.size());
+ }
+
+ @Test
+ public void testSplitsWithMinValueEqualToMaxValue() throws SQLException {
+ // when minVal == maxVal
+ BigDecimal minVal = BigDecimal.valueOf(1);
+ BigDecimal maxVal = BigDecimal.valueOf(1);
+ int numSplits = 1;
+ ResultSet resultSet = mock(ResultSet.class);
+ Configuration conf = mock(Configuration.class);
+ when(conf.getInt("mapreduce.job.maps", 1)).thenReturn(numSplits);
+ when(resultSet.getBigDecimal(1)).thenReturn(minVal);
+ when(resultSet.getBigDecimal(2)).thenReturn(maxVal);
+ BigDecimalSplitter bigDecimalSplitter = new SafeBigDecimalSplitter();
+ List<InputSplit> actualSplits = bigDecimalSplitter.split(conf, resultSet, "id");
+ assertEquals(numSplits, actualSplits.size());
+ }
+}
diff --git a/db2-plugin/pom.xml b/db2-plugin/pom.xml
index a43bcb92e..330794adb 100644
--- a/db2-plugin/pom.xml
+++ b/db2-plugin/pom.xml
@@ -20,7 +20,7 @@
  <parent>
    <artifactId>database-plugins-parent</artifactId>
    <groupId>io.cdap.plugin</groupId>
-   <version>1.11.0-SNAPSHOT</version>
+   <version>1.11.10</version>
  </parent>

  <name>IBM DB2 plugin</name>
@@ -98,12 +98,12 @@
          <configuration>
            <instructions>
              <_exportcontents>
-                 io.cdap.plugin.db2.*;
-                 io.cdap.plugin.db.source.*;
-                 io.cdap.plugin.db.sink.*;
-                 org.apache.commons.lang;
-                 org.apache.commons.logging.*;
-                 org.codehaus.jackson.*
+               io.cdap.plugin.db2.*;
+               io.cdap.plugin.db.source.*;
+               io.cdap.plugin.db.sink.*;
+               org.apache.commons.lang;
+               org.apache.commons.logging.*;
+               org.codehaus.jackson.*
              </_exportcontents>
              <Embed-Dependency>*;inline=false;scope=compile</Embed-Dependency>
              <Embed-Transitive>true</Embed-Transitive>
diff --git a/generic-database-plugin/pom.xml b/generic-database-plugin/pom.xml
index dbcd46d47..98f5a0599 100644
--- a/generic-database-plugin/pom.xml
+++ b/generic-database-plugin/pom.xml
@@ -20,7 +20,7 @@
  <parent>
    <artifactId>database-plugins-parent</artifactId>
    <groupId>io.cdap.plugin</groupId>
-   <version>1.11.0-SNAPSHOT</version>
+   <version>1.11.10</version>
  </parent>

  <name>Generic database plugin</name>
@@ -97,9 +97,9 @@
          <configuration>
            <instructions>
              <_exportcontents>
-                 io.cdap.plugin.jdbc.*;
-                 io.cdap.plugin.db.source.*;
-                 io.cdap.plugin.db.sink.*;
+               io.cdap.plugin.jdbc.*;
+               io.cdap.plugin.db.source.*;
+               io.cdap.plugin.db.sink.*;
              </_exportcontents>
              <Embed-Dependency>*;inline=false;scope=compile</Embed-Dependency>
              <Embed-Transitive>true</Embed-Transitive>
diff --git a/generic-db-argument-setter/pom.xml b/generic-db-argument-setter/pom.xml
index 8a8dcd1c4..654a8fd77 100644
--- a/generic-db-argument-setter/pom.xml
+++ b/generic-db-argument-setter/pom.xml
@@ -20,7 +20,7 @@
  <parent>
    <artifactId>database-plugins-parent</artifactId>
    <groupId>io.cdap.plugin</groupId>
-   <version>1.11.0-SNAPSHOT</version>
+   <version>1.11.10</version>
  </parent>

  <name>Generic database argument setter plugin</name>
@@ -97,12 +97,12 @@
          <configuration>
            <instructions>
              <_exportcontents>
-                 io.cdap.plugin.jdbc.*;
-                 io.cdap.plugin.db.source.*;
-                 io.cdap.plugin.db.sink.*;
-                 org.apache.commons.lang;
-                 org.apache.commons.logging.*;
-                 org.codehaus.jackson.*
+               io.cdap.plugin.jdbc.*;
+               io.cdap.plugin.db.source.*;
+               io.cdap.plugin.db.sink.*;
+               org.apache.commons.lang;
+               org.apache.commons.logging.*;
+               org.codehaus.jackson.*
              </_exportcontents>
              <Embed-Dependency>*;inline=false;scope=compile</Embed-Dependency>
              <Embed-Transitive>true</Embed-Transitive>
diff --git a/mariadb-plugin/docs/Mariadb-batchsink.md b/mariadb-plugin/docs/Mariadb-batchsink.md
index 11176c0db..e4541fe67 100644
--- a/mariadb-plugin/docs/Mariadb-batchsink.md
+++ b/mariadb-plugin/docs/Mariadb-batchsink.md
@@ -60,41 +60,39 @@ connections.
Data Types Mapping
----------
- +--------------------------------+-----------------------+------------------------------------+
- | MariaDB Data Type | CDAP Schema Data Type | Comment |
- +--------------------------------+-----------------------+------------------------------------+
- | TINYINT | int | |
- | BOOLEAN, BOOL | boolean | |
- | SMALLINT | int | |
- | MEDIUMINT | int | |
- | INT, INTEGER | int | |
- | BIGINT | long | |
- | DECIMAL, DEC, NUMERIC, FIXED | decimal | |
- | FLOAT | float | |
- | DOUBLE, DOUBLE PRECISION, REAL | decimal | |
- | BIT | boolean | |
- | CHAR | string | |
- | VARCHAR | string | |
- | BINARY | bytes | |
- | CHAR BYTE | bytes | |
- | VARBINARY | bytes | |
- | TINYBLOB | bytes | |
- | BLOB | bytes | |
- | MEDIUMBLOB | bytes | |
- | LONGBLOB | bytes | |
- | TINYTEXT | string | |
- | TEXT | string | |
- | MEDIUMTEXT | string | |
- | LONGTEXT | string | |
- | JSON | string | In MariaDB it is alias to LONGTEXT |
- | ENUM | string | Mapping to String by default |
- | SET | string | |
- | DATE | date | |
- | TIME | time_micros | |
- | DATETIME | timestamp_micros | |
- | TIMESTAMP | timestamp_micros | |
- | YEAR | date | |
- +--------------------------------+-----------------------+------------------------------------+
+ | MariaDB Data Type | CDAP Schema Data Type | Comment |
+ |--------------------------------|-----------------------|---------------------------------------------------------|
+ | TINYINT | int | |
+ | BOOLEAN, BOOL | boolean | |
+ | SMALLINT | int | |
+ | MEDIUMINT | int | |
+ | INT, INTEGER | int | |
+ | BIGINT | long | |
+ | DECIMAL, DEC, NUMERIC, FIXED | decimal | |
+ | FLOAT | float | |
+ | DOUBLE, DOUBLE PRECISION, REAL | decimal | |
+ | BIT | boolean | |
+ | CHAR | string | |
+ | VARCHAR | string | |
+ | BINARY | bytes | |
+ | CHAR BYTE | bytes | |
+ | VARBINARY | bytes | |
+ | TINYBLOB | bytes | |
+ | BLOB | bytes | |
+ | MEDIUMBLOB | bytes | |
+ | LONGBLOB | bytes | |
+ | TINYTEXT | string | |
+ | TEXT | string | |
+ | MEDIUMTEXT | string | |
+ | LONGTEXT | string | |
+ | JSON | string | In MariaDB it is an alias for LONGTEXT |
+ | ENUM | string | Mapped to string by default |
+ | SET | string | |
+ | DATE | date | |
+ | TIME | time_micros | |
+ | DATETIME | timestamp_micros | |
+ | TIMESTAMP | timestamp_micros | |
+ | YEAR | int | Users can manually set the output schema to map it to date. |
Example
-------
diff --git a/mariadb-plugin/docs/Mariadb-batchsource.md b/mariadb-plugin/docs/Mariadb-batchsource.md
index 2b1fe3944..713af2ee8 100644
--- a/mariadb-plugin/docs/Mariadb-batchsource.md
+++ b/mariadb-plugin/docs/Mariadb-batchsource.md
@@ -78,43 +78,39 @@ with the tradeoff of higher memory usage.
Data Types Mapping
----------
-
- +--------------------------------+-----------------------+------------------------------------+
- | MariaDB Data Type | CDAP Schema Data Type | Comment |
- +--------------------------------+-----------------------+------------------------------------+
- | TINYINT | int | |
- | BOOLEAN, BOOL | boolean | |
- | SMALLINT | int | |
- | MEDIUMINT | int | |
- | INT, INTEGER | int | |
- | BIGINT | long | |
- | DECIMAL, DEC, NUMERIC, FIXED | decimal | |
- | FLOAT | float | |
- | DOUBLE, DOUBLE PRECISION, REAL | decimal | |
- | BIT | boolean | |
- | CHAR | string | |
- | VARCHAR | string | |
- | BINARY | bytes | |
- | CHAR BYTE | bytes | |
- | VARBINARY | bytes | |
- | TINYBLOB | bytes | |
- | BLOB | bytes | |
- | MEDIUMBLOB | bytes | |
- | LONGBLOB | bytes | |
- | TINYTEXT | string | |
- | TEXT | string | |
- | MEDIUMTEXT | string | |
- | LONGTEXT | string | |
- | JSON | string | In MariaDB it is alias to LONGTEXT |
- | ENUM | string | Mapping to String by default |
- | SET | string | |
- | DATE | date | |
- | TIME | time_micros | |
- | DATETIME | timestamp_micros | |
- | TIMESTAMP | timestamp_micros | |
- | YEAR | date | |
- +--------------------------------+-----------------------+------------------------------------+
-
+ | MariaDB Data Type | CDAP Schema Data Type | Comment |
+ |--------------------------------|-----------------------|---------------------------------------------------------|
+ | TINYINT | int | |
+ | BOOLEAN, BOOL | boolean | |
+ | SMALLINT | int | |
+ | MEDIUMINT | int | |
+ | INT, INTEGER | int | |
+ | BIGINT | long | |
+ | DECIMAL, DEC, NUMERIC, FIXED | decimal | |
+ | FLOAT | float | |
+ | DOUBLE, DOUBLE PRECISION, REAL | decimal | |
+ | BIT | boolean | |
+ | CHAR | string | |
+ | VARCHAR | string | |
+ | BINARY | bytes | |
+ | CHAR BYTE | bytes | |
+ | VARBINARY | bytes | |
+ | TINYBLOB | bytes | |
+ | BLOB | bytes | |
+ | MEDIUMBLOB | bytes | |
+ | LONGBLOB | bytes | |
+ | TINYTEXT | string | |
+ | TEXT | string | |
+ | MEDIUMTEXT | string | |
+ | LONGTEXT | string | |
+ | JSON | string | In MariaDB it is an alias for LONGTEXT |
+ | ENUM | string | Mapped to string by default |
+ | SET | string | |
+ | DATE | date | |
+ | TIME | time_micros | |
+ | DATETIME | timestamp_micros | |
+ | TIMESTAMP | timestamp_micros | |
+ | YEAR | int | Users can manually set the output schema to map it to date. |
Example
------
diff --git a/mariadb-plugin/pom.xml b/mariadb-plugin/pom.xml
index 0e9a09e02..c87f59a85 100644
--- a/mariadb-plugin/pom.xml
+++ b/mariadb-plugin/pom.xml
@@ -17,108 +17,113 @@
-    <parent>
-        <artifactId>database-plugins-parent</artifactId>
-        <groupId>io.cdap.plugin</groupId>
-        <version>1.11.0-SNAPSHOT</version>
-    </parent>
+  <parent>
+    <artifactId>database-plugins-parent</artifactId>
+    <groupId>io.cdap.plugin</groupId>
+    <version>1.11.10</version>
+  </parent>
-    <name>Maria DB plugin</name>
-    <artifactId>mariadb-plugin</artifactId>
-    <modelVersion>4.0.0</modelVersion>
+  <name>Maria DB plugin</name>
+  <artifactId>mariadb-plugin</artifactId>
+  <modelVersion>4.0.0</modelVersion>
-    <dependencies>
-        <dependency>
-            <groupId>io.cdap.cdap</groupId>
-            <artifactId>cdap-etl-api</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>io.cdap.plugin</groupId>
-            <artifactId>database-commons</artifactId>
-            <version>${project.version}</version>
-        </dependency>
-        <dependency>
-            <groupId>io.cdap.plugin</groupId>
-            <artifactId>hydrator-common</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>com.google.guava</groupId>
-            <artifactId>guava</artifactId>
-        </dependency>
+  <dependencies>
+    <dependency>
+      <groupId>io.cdap.cdap</groupId>
+      <artifactId>cdap-etl-api</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>io.cdap.plugin</groupId>
+      <artifactId>database-commons</artifactId>
+      <version>${project.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>io.cdap.plugin</groupId>
+      <artifactId>hydrator-common</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>com.google.guava</groupId>
+      <artifactId>guava</artifactId>
+    </dependency>
-
-        <dependency>
-            <groupId>io.cdap.plugin</groupId>
-            <artifactId>database-commons</artifactId>
-            <version>${project.version}</version>
-            <type>test-jar</type>
-            <scope>test</scope>
-        </dependency>
-        <dependency>
-            <groupId>io.cdap.cdap</groupId>
-            <artifactId>hydrator-test</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>io.cdap.cdap</groupId>
-            <artifactId>cdap-data-pipeline3_2.12</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>junit</groupId>
-            <artifactId>junit</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>io.cdap.cdap</groupId>
-            <artifactId>cdap-api</artifactId>
-            <scope>provided</scope>
-        </dependency>
-        <dependency>
-            <groupId>org.mariadb.jdbc</groupId>
-            <artifactId>mariadb-java-client</artifactId>
-            <version>2.7.3</version>
-            <scope>test</scope>
-        </dependency>
-        <dependency>
-            <groupId>org.jetbrains</groupId>
-            <artifactId>annotations</artifactId>
-            <version>RELEASE</version>
-            <scope>compile</scope>
-        </dependency>
-    </dependencies>
-    <build>
-        <plugins>
-            <plugin>
-                <groupId>io.cdap</groupId>
-                <artifactId>cdap-maven-plugin</artifactId>
-            </plugin>
-            <plugin>
-                <groupId>org.apache.felix</groupId>
-                <artifactId>maven-bundle-plugin</artifactId>
-                <version>5.1.2</version>
-                <extensions>true</extensions>
-                <configuration>
-                    <instructions>
-                        <_exportcontents>
-                            io.cdap.plugin.mariadb.*;
-                            io.cdap.plugin.db.source.*;
-                            io.cdap.plugin.db.sink.*;
-                            org.apache.commons.lang;
-                            org.apache.commons.logging.*;
-                            org.codehaus.jackson.*
-                        </_exportcontents>
-                        <Embed-Dependency>*;inline=false;scope=compile</Embed-Dependency>
-                        <Embed-Transitive>true</Embed-Transitive>
-                        <Embed-Directory>lib</Embed-Directory>
-                    </instructions>
-                </configuration>
-                <executions>
-                    <execution>
-                        <phase>package</phase>
-                        <goals>
-                            <goal>bundle</goal>
-                        </goals>
-                    </execution>
-                </executions>
-            </plugin>
-        </plugins>
-    </build>
+
+    <dependency>
+      <groupId>io.cdap.plugin</groupId>
+      <artifactId>database-commons</artifactId>
+      <version>${project.version}</version>
+      <type>test-jar</type>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>io.cdap.cdap</groupId>
+      <artifactId>hydrator-test</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>io.cdap.cdap</groupId>
+      <artifactId>cdap-data-pipeline3_2.12</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>junit</groupId>
+      <artifactId>junit</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>io.cdap.cdap</groupId>
+      <artifactId>cdap-api</artifactId>
+      <scope>provided</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.mariadb.jdbc</groupId>
+      <artifactId>mariadb-java-client</artifactId>
+      <version>2.7.3</version>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.jetbrains</groupId>
+      <artifactId>annotations</artifactId>
+      <version>RELEASE</version>
+      <scope>compile</scope>
+    </dependency>
+    <dependency>
+      <groupId>io.cdap.plugin</groupId>
+      <artifactId>mysql-plugin</artifactId>
+      <version>${project.version}</version>
+    </dependency>
+  </dependencies>
+  <build>
+    <plugins>
+      <plugin>
+        <groupId>io.cdap</groupId>
+        <artifactId>cdap-maven-plugin</artifactId>
+      </plugin>
+      <plugin>
+        <groupId>org.apache.felix</groupId>
+        <artifactId>maven-bundle-plugin</artifactId>
+        <version>5.1.2</version>
+        <extensions>true</extensions>
+        <configuration>
+          <instructions>
+            <_exportcontents>
+              io.cdap.plugin.mariadb.*;
+              io.cdap.plugin.db.source.*;
+              io.cdap.plugin.db.sink.*;
+              org.apache.commons.lang;
+              org.apache.commons.logging.*;
+              org.codehaus.jackson.*
+            </_exportcontents>
+            <Embed-Dependency>*;inline=false;scope=compile</Embed-Dependency>
+            <Embed-Transitive>true</Embed-Transitive>
+            <Embed-Directory>lib</Embed-Directory>
+          </instructions>
+        </configuration>
+        <executions>
+          <execution>
+            <phase>package</phase>
+            <goals>
+              <goal>bundle</goal>
+            </goals>
+          </execution>
+        </executions>
+      </plugin>
+    </plugins>
+  </build>
diff --git a/mariadb-plugin/src/main/java/io/cdap/plugin/mariadb/MariadbDBRecord.java b/mariadb-plugin/src/main/java/io/cdap/plugin/mariadb/MariadbDBRecord.java
new file mode 100644
index 000000000..94498c787
--- /dev/null
+++ b/mariadb-plugin/src/main/java/io/cdap/plugin/mariadb/MariadbDBRecord.java
@@ -0,0 +1,40 @@
+/*
+ * Copyright © 2025 Cask Data, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy of
+ * the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package io.cdap.plugin.mariadb;
+
+import io.cdap.cdap.api.data.format.StructuredRecord;
+import io.cdap.plugin.db.ColumnType;
+import io.cdap.plugin.mysql.MysqlDBRecord;
+import java.util.List;
+
+/**
+ * Writable class for MariaDB Source/Sink.
+ */
+public class MariadbDBRecord extends MysqlDBRecord {
+
+ /**
+ * Used in map-reduce. Do not remove.
+ */
+ @SuppressWarnings("unused")
+ public MariadbDBRecord() {
+ // Required by Hadoop DBRecordReader to create an instance
+ }
+
+ public MariadbDBRecord(StructuredRecord record, List<ColumnType> columnTypes) {
+ super(record, columnTypes);
+ }
+}
diff --git a/mariadb-plugin/src/main/java/io/cdap/plugin/mariadb/MariadbFieldsValidator.java b/mariadb-plugin/src/main/java/io/cdap/plugin/mariadb/MariadbFieldsValidator.java
new file mode 100644
index 000000000..71ccb0d06
--- /dev/null
+++ b/mariadb-plugin/src/main/java/io/cdap/plugin/mariadb/MariadbFieldsValidator.java
@@ -0,0 +1,25 @@
+/*
+ * Copyright © 2025 Cask Data, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy of
+ * the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package io.cdap.plugin.mariadb;
+
+import io.cdap.plugin.mysql.MysqlFieldsValidator;
+
+/**
+ * Field validator for MariaDB.
+ */
+public class MariadbFieldsValidator extends MysqlFieldsValidator {
+}
diff --git a/mariadb-plugin/src/main/java/io/cdap/plugin/mariadb/MariadbSchemaReader.java b/mariadb-plugin/src/main/java/io/cdap/plugin/mariadb/MariadbSchemaReader.java
new file mode 100644
index 000000000..37ac12a93
--- /dev/null
+++ b/mariadb-plugin/src/main/java/io/cdap/plugin/mariadb/MariadbSchemaReader.java
@@ -0,0 +1,36 @@
+/*
+ * Copyright © 2025 Cask Data, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy of
+ * the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package io.cdap.plugin.mariadb;
+
+
+import io.cdap.plugin.mysql.MysqlSchemaReader;
+import java.util.Map;
+
+/**
+ * Schema reader for mapping Maria DB type
+ */
+public class MariadbSchemaReader extends MysqlSchemaReader {
+
+ public MariadbSchemaReader(String sessionID) {
+ super(sessionID);
+ }
+
+ public MariadbSchemaReader(String sessionID, Map<String, String> connectionArguments) {
+ super(sessionID, connectionArguments);
+ }
+
+}
diff --git a/mariadb-plugin/src/main/java/io/cdap/plugin/mariadb/MariadbSink.java b/mariadb-plugin/src/main/java/io/cdap/plugin/mariadb/MariadbSink.java
index ab20f3c5d..57455cbdc 100644
--- a/mariadb-plugin/src/main/java/io/cdap/plugin/mariadb/MariadbSink.java
+++ b/mariadb-plugin/src/main/java/io/cdap/plugin/mariadb/MariadbSink.java
@@ -19,9 +19,13 @@
import io.cdap.cdap.api.annotation.Description;
import io.cdap.cdap.api.annotation.Name;
import io.cdap.cdap.api.annotation.Plugin;
+import io.cdap.cdap.api.data.format.StructuredRecord;
import io.cdap.cdap.etl.api.batch.BatchSink;
+import io.cdap.plugin.db.DBRecord;
+import io.cdap.plugin.db.SchemaReader;
import io.cdap.plugin.db.config.DBSpecificSinkConfig;
import io.cdap.plugin.db.sink.AbstractDBSink;
+import io.cdap.plugin.db.sink.FieldsValidator;
import java.util.Map;
import javax.annotation.Nullable;
@@ -45,6 +49,22 @@ public MariadbSink(MariadbSinkConfig mariadbSinkConfig) {
this.mariadbSinkConfig = mariadbSinkConfig;
}
+ @Override
+ protected DBRecord getDBRecord(StructuredRecord output) {
+ return new MariadbDBRecord(output, columnTypes);
+ }
+
+ @Override
+ protected SchemaReader getSchemaReader() {
+ return new MariadbSchemaReader(null);
+ }
+
+ @Override
+ protected FieldsValidator getFieldsValidator() {
+ return new MariadbFieldsValidator();
+ }
+
/**
* MariaDB Sink Config.
*/
diff --git a/mariadb-plugin/src/main/java/io/cdap/plugin/mariadb/MariadbSource.java b/mariadb-plugin/src/main/java/io/cdap/plugin/mariadb/MariadbSource.java
index d5ffcb290..3a473dca7 100644
--- a/mariadb-plugin/src/main/java/io/cdap/plugin/mariadb/MariadbSource.java
+++ b/mariadb-plugin/src/main/java/io/cdap/plugin/mariadb/MariadbSource.java
@@ -19,10 +19,19 @@
import io.cdap.cdap.api.annotation.Description;
import io.cdap.cdap.api.annotation.Name;
import io.cdap.cdap.api.annotation.Plugin;
+import io.cdap.cdap.api.data.schema.Schema;
+import io.cdap.cdap.etl.api.FailureCollector;
import io.cdap.cdap.etl.api.batch.BatchSource;
+import io.cdap.cdap.etl.api.batch.BatchSourceContext;
+import io.cdap.plugin.common.Asset;
+import io.cdap.plugin.common.LineageRecorder;
+import io.cdap.plugin.db.SchemaReader;
import io.cdap.plugin.db.config.DBSpecificSourceConfig;
import io.cdap.plugin.db.source.AbstractDBSource;
+import io.cdap.plugin.util.DBUtils;
+import org.apache.hadoop.mapreduce.lib.db.DBWritable;
+import java.util.HashMap;
import java.util.List;
import java.util.Map;
import javax.annotation.Nullable;
@@ -53,10 +62,36 @@ protected String createConnectionString() {
mariadbSourceConfig.host, mariadbSourceConfig.port, mariadbSourceConfig.database);
}
+ @Override
+ protected Class<? extends DBWritable> getDBRecordType() {
+ return MariadbDBRecord.class;
+ }
+
+ @Override
+ protected LineageRecorder getLineageRecorder(BatchSourceContext context) {
+ String fqn = DBUtils.constructFQN("mariadb",
+ mariadbSourceConfig.host,
+ mariadbSourceConfig.port,
+ mariadbSourceConfig.database,
+ mariadbSourceConfig.getReferenceName());
+ Asset asset = Asset.builder(mariadbSourceConfig.getReferenceName()).setFqn(fqn).build();
+ return new LineageRecorder(context, asset);
+ }
+
+ @Override
+ protected SchemaReader getSchemaReader() {
+ return new MariadbSchemaReader(null, mariadbSourceConfig.getConnectionArguments());
+ }
+
/**
* MariaDB source config.
*/
public static class MariadbSourceConfig extends DBSpecificSourceConfig {
+ private static final String JDBC_PROPERTY_CONNECT_TIMEOUT = "connectTimeout";
+ private static final String JDBC_PROPERTY_SOCKET_TIMEOUT = "socketTimeout";
+ private static final String JDBC_REWRITE_BATCHED_STATEMENTS = "rewriteBatchedStatements";
+
+ private static final String MARIADB_TINYINT1_IS_BIT = "tinyInt1isBit";
@Name(MariadbConstants.AUTO_RECONNECT)
@Description("Should the driver try to re-establish stale and/or dead connections")
@@ -116,5 +151,43 @@ public Map<String, String> getDBSpecificArguments() {
public List<String> getInitQueries() {
return MariadbUtil.composeDbInitQueries(useAnsiQuotes);
}
+
+ @Override
+ public Map<String, String> getConnectionArguments() {
+ Map<String, String> arguments = new HashMap<>(super.getConnectionArguments());
+ // the unit below is millisecond
+ arguments.putIfAbsent(JDBC_PROPERTY_CONNECT_TIMEOUT, "20000");
+ arguments.putIfAbsent(JDBC_PROPERTY_SOCKET_TIMEOUT, "20000");
+ arguments.putIfAbsent(JDBC_REWRITE_BATCHED_STATEMENTS, "true");
+ // MariaDB property to ensure that TINYINT(1) type data is not converted to MariaDB Bit/Boolean type in the
+ // ResultSet.
+ arguments.putIfAbsent(MARIADB_TINYINT1_IS_BIT, "false");
+ return arguments;
+ }
+
+ @Override
+ protected void validateField(FailureCollector collector,
+ Schema.Field field,
+ Schema actualFieldSchema,
+ Schema expectedFieldSchema) {
+ // Backward compatibility changes to support MySQL YEAR to Date type conversion
+ if (Schema.LogicalType.DATE.equals(expectedFieldSchema.getLogicalType())
+ && Schema.Type.INT.equals(actualFieldSchema.getType())) {
+ return;
+ }
+
+ // Backward compatibility change to support MySQL MEDIUMINT UNSIGNED to Long type conversion
+ if (Schema.Type.LONG.equals(expectedFieldSchema.getType())
+ && Schema.Type.INT.equals(actualFieldSchema.getType())) {
+ return;
+ }
+
+ // Backward compatibility change to support MySQL TINYINT(1) to Bool type conversion
+ if (Schema.Type.BOOLEAN.equals(expectedFieldSchema.getType())
+ && Schema.Type.INT.equals(actualFieldSchema.getType())) {
+ return;
+ }
+ super.validateField(collector, field, actualFieldSchema, expectedFieldSchema);
+ }
}
}
diff --git a/memsql-plugin/pom.xml b/memsql-plugin/pom.xml
index 5c50a857e..981240c53 100644
--- a/memsql-plugin/pom.xml
+++ b/memsql-plugin/pom.xml
@@ -20,7 +20,7 @@
  <parent>
    <artifactId>database-plugins-parent</artifactId>
    <groupId>io.cdap.plugin</groupId>
-   <version>1.11.0-SNAPSHOT</version>
+   <version>1.11.10</version>
  </parent>

  <name>Memsql plugin</name>
@@ -95,12 +95,12 @@
          <configuration>
            <instructions>
              <_exportcontents>
-                 io.cdap.plugin.memsql.*;
-                 io.cdap.plugin.db.source.*;
-                 io.cdap.plugin.db.sink.*;
-                 org.apache.commons.lang;
-                 org.apache.commons.logging.*;
-                 org.codehaus.jackson.*
+               io.cdap.plugin.memsql.*;
+               io.cdap.plugin.db.source.*;
+               io.cdap.plugin.db.sink.*;
+               org.apache.commons.lang;
+               org.apache.commons.logging.*;
+               org.codehaus.jackson.*
              </_exportcontents>
              <Embed-Dependency>*;inline=false;scope=compile</Embed-Dependency>
              <Embed-Transitive>true</Embed-Transitive>
diff --git a/mssql-plugin/docs/SQL Server-connector.md b/mssql-plugin/docs/SQL Server-connector.md
index cb72161f5..6f0038715 100644
--- a/mssql-plugin/docs/SQL Server-connector.md
+++ b/mssql-plugin/docs/SQL Server-connector.md
@@ -22,6 +22,14 @@ authentication. Optional for databases that do not require authentication.
**Password:** Password to use to connect to the specified database.
+**Transaction Isolation Level:** The transaction isolation level of the database connection.
+- TRANSACTION_READ_COMMITTED: No dirty reads. Non-repeatable reads and phantom reads are possible.
+- TRANSACTION_SERIALIZABLE: No dirty reads. Non-repeatable and phantom reads are prevented.
+- TRANSACTION_REPEATABLE_READ: No dirty reads. Prevents non-repeatable reads, but phantom reads are still possible.
+- TRANSACTION_READ_UNCOMMITTED: Allows dirty reads (reading uncommitted changes from other transactions). Non-repeatable reads and phantom reads are possible.
+
+For more details on the Transaction Isolation Levels supported in SQL Server, refer to the [SQL Server documentation](https://learn.microsoft.com/en-us/sql/t-sql/statements/set-transaction-isolation-level-transact-sql?view=sql-server-ver16).
+
**Authentication Type:** Indicates which authentication method will be used for the connection. Use 'SQL Login'. to
connect to a SQL Server using username and password properties. Use 'Active Directory Password' to connect to an Azure
SQL Database/Data Warehouse using an Azure AD principal name and password.
diff --git a/mssql-plugin/docs/SqlServer-batchsink.md b/mssql-plugin/docs/SqlServer-batchsink.md
index 5d10b4bb6..b4ca1cbc5 100644
--- a/mssql-plugin/docs/SqlServer-batchsink.md
+++ b/mssql-plugin/docs/SqlServer-batchsink.md
@@ -46,6 +46,14 @@ an Azure SQL Database/Data Warehouse using an Azure AD principal name and passwo
**Password:** Password to use to connect to the specified database.
+**Transaction Isolation Level:** The transaction isolation level of the database connection.
+- TRANSACTION_READ_COMMITTED: No dirty reads. Non-repeatable reads and phantom reads are possible.
+- TRANSACTION_SERIALIZABLE: No dirty reads. Non-repeatable and phantom reads are prevented.
+- TRANSACTION_REPEATABLE_READ: No dirty reads. Prevents non-repeatable reads, but phantom reads are still possible.
+- TRANSACTION_READ_UNCOMMITTED: Allows dirty reads (reading uncommitted changes from other transactions). Non-repeatable reads and phantom reads are possible.
+
+For more details on the Transaction Isolation Levels supported in SQL Server, refer to the [SQL Server documentation](https://learn.microsoft.com/en-us/sql/t-sql/statements/set-transaction-isolation-level-transact-sql?view=sql-server-ver16).
+
**Instance Name:** SQL Server instance name to connect to. When it is not specified, a
connection is made to the default instance. For the case where both the instanceName and port are specified,
see the notes for port. If you specify a Virtual Network Name in the Server connection property, you cannot
diff --git a/mssql-plugin/docs/SqlServer-batchsource.md b/mssql-plugin/docs/SqlServer-batchsource.md
index c8e30f77e..5c917621c 100644
--- a/mssql-plugin/docs/SqlServer-batchsource.md
+++ b/mssql-plugin/docs/SqlServer-batchsource.md
@@ -56,6 +56,14 @@ an Azure SQL Database/Data Warehouse using an Azure AD principal name and passwo
**Password:** Password to use to connect to the specified database.
+**Transaction Isolation Level:** The transaction isolation level of the database connection.
+- TRANSACTION_READ_COMMITTED: No dirty reads. Non-repeatable reads and phantom reads are possible.
+- TRANSACTION_SERIALIZABLE: No dirty reads. Non-repeatable and phantom reads are prevented.
+- TRANSACTION_REPEATABLE_READ: No dirty reads. Prevents non-repeatable reads, but phantom reads are still possible.
+- TRANSACTION_READ_UNCOMMITTED: Allows dirty reads (reading uncommitted changes from other transactions). Non-repeatable reads and phantom reads are possible.
+
+For more details on the Transaction Isolation Levels supported in SQL Server, refer to the [SQL Server documentation](https://learn.microsoft.com/en-us/sql/t-sql/statements/set-transaction-isolation-level-transact-sql?view=sql-server-ver16).
+
**Instance Name:** SQL Server instance name to connect to. When it is not specified, a
connection is made to the default instance. For the case where both the instanceName and port are specified,
see the notes for port. If you specify a Virtual Network Name in the Server connection property, you cannot
diff --git a/mssql-plugin/pom.xml b/mssql-plugin/pom.xml
index 45e2b9c03..3abf98fbe 100644
--- a/mssql-plugin/pom.xml
+++ b/mssql-plugin/pom.xml
@@ -20,7 +20,7 @@
  <parent>
    <artifactId>database-plugins-parent</artifactId>
    <groupId>io.cdap.plugin</groupId>
-   <version>1.11.0-SNAPSHOT</version>
+   <version>1.11.10</version>
  </parent>

  <name>Microsoft SQL Server plugin</name>
diff --git a/mssql-plugin/src/main/java/io/cdap/plugin/mssql/SqlServerSink.java b/mssql-plugin/src/main/java/io/cdap/plugin/mssql/SqlServerSink.java
index 0fa8991c5..7b749cdc5 100644
--- a/mssql-plugin/src/main/java/io/cdap/plugin/mssql/SqlServerSink.java
+++ b/mssql-plugin/src/main/java/io/cdap/plugin/mssql/SqlServerSink.java
@@ -167,6 +167,11 @@ public Map<String, String> getDBSpecificArguments() {
packetSize, queryTimeout);
}
+ @Override
+ public String getTransactionIsolationLevel() {
+ return connection.getTransactionIsolationLevel();
+ }
+
@Override
public String getConnectionString() {
return String.format(SqlServerConstants.SQL_SERVER_CONNECTION_STRING_FORMAT,
diff --git a/mssql-plugin/src/main/java/io/cdap/plugin/mssql/SqlServerSource.java b/mssql-plugin/src/main/java/io/cdap/plugin/mssql/SqlServerSource.java
index 9603b24db..a76ed732d 100644
--- a/mssql-plugin/src/main/java/io/cdap/plugin/mssql/SqlServerSource.java
+++ b/mssql-plugin/src/main/java/io/cdap/plugin/mssql/SqlServerSource.java
@@ -188,6 +188,11 @@ public List<String> getInitQueries() {
return Collections.emptyList();
}
+ @Override
+ public String getTransactionIsolationLevel() {
+ return connection.getTransactionIsolationLevel();
+ }
+
@Override
public void validate(FailureCollector collector) {
ConfigUtil.validateConnection(this, useConnection, connection, collector);
diff --git a/mssql-plugin/widgets/SQL Server-connector.json b/mssql-plugin/widgets/SQL Server-connector.json
index 171076295..c326cd81d 100644
--- a/mssql-plugin/widgets/SQL Server-connector.json
+++ b/mssql-plugin/widgets/SQL Server-connector.json
@@ -64,6 +64,20 @@
"widget-type": "password",
"label": "Password",
"name": "password"
+ },
+ {
+ "widget-type": "select",
+ "label": "Transaction Isolation Level",
+ "name": "transactionIsolationLevel",
+ "widget-attributes": {
+ "values": [
+ "TRANSACTION_READ_UNCOMMITTED",
+ "TRANSACTION_READ_COMMITTED",
+ "TRANSACTION_REPEATABLE_READ",
+ "TRANSACTION_SERIALIZABLE"
+ ],
+ "default": "TRANSACTION_SERIALIZABLE"
+ }
}
]
},
diff --git a/mssql-plugin/widgets/SqlServer-batchsink.json b/mssql-plugin/widgets/SqlServer-batchsink.json
index 260c66259..fb20cad9d 100644
--- a/mssql-plugin/widgets/SqlServer-batchsink.json
+++ b/mssql-plugin/widgets/SqlServer-batchsink.json
@@ -84,6 +84,20 @@
"label": "Password",
"name": "password"
},
+ {
+ "widget-type": "select",
+ "label": "Transaction Isolation Level",
+ "name": "transactionIsolationLevel",
+ "widget-attributes": {
+ "values": [
+ "TRANSACTION_READ_UNCOMMITTED",
+ "TRANSACTION_READ_COMMITTED",
+ "TRANSACTION_REPEATABLE_READ",
+ "TRANSACTION_SERIALIZABLE"
+ ],
+ "default": "TRANSACTION_SERIALIZABLE"
+ }
+ },
{
"widget-type": "keyvalue",
"label": "Connection Arguments",
@@ -280,6 +294,10 @@
{
"type": "property",
"name": "connectionArguments"
+ },
+ {
+ "type": "property",
+ "name": "transactionIsolationLevel"
}
]
},
diff --git a/mssql-plugin/widgets/SqlServer-batchsource.json b/mssql-plugin/widgets/SqlServer-batchsource.json
index dad5f4708..b3494e485 100644
--- a/mssql-plugin/widgets/SqlServer-batchsource.json
+++ b/mssql-plugin/widgets/SqlServer-batchsource.json
@@ -84,6 +84,20 @@
"label": "Password",
"name": "password"
},
+ {
+ "widget-type": "select",
+ "label": "Transaction Isolation Level",
+ "name": "transactionIsolationLevel",
+ "widget-attributes": {
+ "values": [
+ "TRANSACTION_READ_UNCOMMITTED",
+ "TRANSACTION_READ_COMMITTED",
+ "TRANSACTION_REPEATABLE_READ",
+ "TRANSACTION_SERIALIZABLE"
+ ],
+ "default": "TRANSACTION_SERIALIZABLE"
+ }
+ },
{
"widget-type": "keyvalue",
"label": "Connection Arguments",
@@ -316,6 +330,10 @@
{
"type": "property",
"name": "connectionArguments"
+ },
+ {
+ "type": "property",
+ "name": "transactionIsolationLevel"
}
]
},
diff --git a/mysql-plugin/docs/MySQL-connector.md b/mysql-plugin/docs/MySQL-connector.md
index fb5c1fbb8..f586084c1 100644
--- a/mysql-plugin/docs/MySQL-connector.md
+++ b/mysql-plugin/docs/MySQL-connector.md
@@ -22,6 +22,14 @@ authentication. Optional for databases that do not require authentication.
**Password:** Password to use to connect to the specified database.
+**Transaction Isolation Level:** The transaction isolation level of the database connection.
+- TRANSACTION_READ_COMMITTED: No dirty reads. Non-repeatable reads and phantom reads are possible.
+- TRANSACTION_SERIALIZABLE: No dirty reads. Non-repeatable and phantom reads are prevented.
+- TRANSACTION_REPEATABLE_READ: No dirty reads. Prevents non-repeatable reads, but phantom reads are still possible.
+- TRANSACTION_READ_UNCOMMITTED: Allows dirty reads (reading uncommitted changes from other transactions). Non-repeatable reads and phantom reads are possible.
+
+For more details on the Transaction Isolation Levels supported in MySQL, refer to the [MySQL documentation](https://dev.mysql.com/doc/refman/8.4/en/innodb-transaction-isolation-levels.html).
+
**Connection Arguments:** A list of arbitrary string tag/value pairs as connection arguments. These arguments
will be passed to the JDBC driver, as connection arguments, for JDBC drivers that may need additional configurations.
This is a semicolon-separated list of key-value pairs, where each pair is separated by a equals '=' and specifies
diff --git a/mysql-plugin/docs/Mysql-batchsink.md b/mysql-plugin/docs/Mysql-batchsink.md
index b28a28618..46a763f9d 100644
--- a/mysql-plugin/docs/Mysql-batchsink.md
+++ b/mysql-plugin/docs/Mysql-batchsink.md
@@ -39,6 +39,14 @@ You also can use the macro function ${conn(connection-name)}.
**Password:** Password to use to connect to the specified database.
+**Transaction Isolation Level:** The transaction isolation level of the database connection.
+- TRANSACTION_READ_COMMITTED: No dirty reads. Non-repeatable reads and phantom reads are possible.
+- TRANSACTION_SERIALIZABLE: No dirty reads. Non-repeatable and phantom reads are prevented.
+- TRANSACTION_REPEATABLE_READ: No dirty reads. Prevents non-repeatable reads, but phantom reads are still possible.
+- TRANSACTION_READ_UNCOMMITTED: Allows dirty reads (reading uncommitted changes from other transactions). Non-repeatable reads and phantom reads are possible.
+
+For more details on the Transaction Isolation Levels supported in MySQL, refer to the [MySQL documentation](https://dev.mysql.com/doc/refman/8.4/en/innodb-transaction-isolation-levels.html).
+
**Connection Arguments:** A list of arbitrary string key/value pairs as connection arguments. These arguments
will be passed to the JDBC driver as connection arguments for JDBC drivers that may need additional configurations.
diff --git a/mysql-plugin/docs/Mysql-batchsource.md b/mysql-plugin/docs/Mysql-batchsource.md
index 010e08216..552bb5504 100644
--- a/mysql-plugin/docs/Mysql-batchsource.md
+++ b/mysql-plugin/docs/Mysql-batchsource.md
@@ -49,6 +49,14 @@ For example, 'SELECT MIN(id),MAX(id) FROM table'. Not required if numSplits is s
**Password:** Password to use to connect to the specified database.
+**Transaction Isolation Level:** The transaction isolation level of the database connection.
+- TRANSACTION_READ_COMMITTED: No dirty reads. Non-repeatable reads and phantom reads are possible.
+- TRANSACTION_SERIALIZABLE: No dirty reads. Non-repeatable and phantom reads are prevented.
+- TRANSACTION_REPEATABLE_READ: No dirty reads. Prevents non-repeatable reads, but phantom reads are still possible.
+- TRANSACTION_READ_UNCOMMITTED: Allows dirty reads (reading uncommitted changes from other transactions). Non-repeatable reads and phantom reads are possible.
+
+For more details on the Transaction Isolation Levels supported in MySQL, refer to the [MySQL documentation](https://dev.mysql.com/doc/refman/8.4/en/innodb-transaction-isolation-levels.html).
+
**Connection Arguments:** A list of arbitrary string key/value pairs as connection arguments. These arguments
will be passed to the JDBC driver as connection arguments for JDBC drivers that may need additional configurations.
diff --git a/mysql-plugin/pom.xml b/mysql-plugin/pom.xml
index f691a15f2..9a4f1f8f0 100644
--- a/mysql-plugin/pom.xml
+++ b/mysql-plugin/pom.xml
@@ -20,13 +20,13 @@
  <parent>
    <artifactId>database-plugins-parent</artifactId>
    <groupId>io.cdap.plugin</groupId>
-   <version>1.11.0-SNAPSHOT</version>
+   <version>1.11.10</version>
  </parent>
  <name>Mysql plugin</name>
  <artifactId>mysql-plugin</artifactId>
  <modelVersion>4.0.0</modelVersion>

-    <dependencies>
+  <dependencies>
    <dependency>
      <groupId>io.cdap.cdap</groupId>
index 3dede5d49..e7e935135 100644
--- a/mysql-plugin/src/main/java/io/cdap/plugin/mysql/MysqlConnector.java
+++ b/mysql-plugin/src/main/java/io/cdap/plugin/mysql/MysqlConnector.java
@@ -16,6 +16,7 @@
package io.cdap.plugin.mysql;
+import com.google.common.collect.Maps;
import io.cdap.cdap.api.annotation.Category;
import io.cdap.cdap.api.annotation.Description;
import io.cdap.cdap.api.annotation.Name;
@@ -62,7 +63,7 @@ public boolean supportSchema() {
@Override
protected SchemaReader getSchemaReader(String sessionID) {
- return new MysqlSchemaReader(sessionID);
+ return new MysqlSchemaReader(sessionID, Maps.fromProperties(config.getConnectionArgumentsProperties()));
}
@Override
diff --git a/mysql-plugin/src/main/java/io/cdap/plugin/mysql/MysqlConnectorConfig.java b/mysql-plugin/src/main/java/io/cdap/plugin/mysql/MysqlConnectorConfig.java
index 9b481e4fe..8c20798d3 100644
--- a/mysql-plugin/src/main/java/io/cdap/plugin/mysql/MysqlConnectorConfig.java
+++ b/mysql-plugin/src/main/java/io/cdap/plugin/mysql/MysqlConnectorConfig.java
@@ -57,9 +57,9 @@ public int getDefaultPort() {
public Properties getConnectionArgumentsProperties() {
Properties prop = super.getConnectionArgumentsProperties();
// the unit below is milli-second
- prop.put(JDBC_PROPERTY_CONNECT_TIMEOUT, "20000");
- prop.put(JDBC_PROPERTY_SOCKET_TIMEOUT, "20000");
- prop.put(JDBC_REWRITE_BATCHED_STATEMENTS, "true");
+ prop.putIfAbsent(JDBC_PROPERTY_CONNECT_TIMEOUT, "20000");
+ prop.putIfAbsent(JDBC_PROPERTY_SOCKET_TIMEOUT, "20000");
+ prop.putIfAbsent(JDBC_REWRITE_BATCHED_STATEMENTS, "true");
// MySQL property to ensure that TINYINT(1) type data is not converted to MySQL Bit/Boolean type in the ResultSet.
prop.putIfAbsent(MYSQL_TINYINT1_IS_BIT, "false");
return prop;
diff --git a/mysql-plugin/src/main/java/io/cdap/plugin/mysql/MysqlConstants.java b/mysql-plugin/src/main/java/io/cdap/plugin/mysql/MysqlConstants.java
index 39c0b8d08..54593f580 100644
--- a/mysql-plugin/src/main/java/io/cdap/plugin/mysql/MysqlConstants.java
+++ b/mysql-plugin/src/main/java/io/cdap/plugin/mysql/MysqlConstants.java
@@ -39,6 +39,7 @@ private MysqlConstants() {
public static final String TRUST_CERT_KEYSTORE_PASSWORD = "trustCertificateKeyStorePassword";
public static final String MYSQL_CONNECTION_STRING_FORMAT = "jdbc:mysql://%s:%s/%s";
public static final String USE_CURSOR_FETCH = "useCursorFetch";
+ public static final String ZERO_DATE_TIME_BEHAVIOR = "zeroDateTimeBehavior";
/**
* Query to set SQL_MODE system variable.
diff --git a/mysql-plugin/src/main/java/io/cdap/plugin/mysql/MysqlDBRecord.java b/mysql-plugin/src/main/java/io/cdap/plugin/mysql/MysqlDBRecord.java
index 0560b10c3..94b711786 100644
--- a/mysql-plugin/src/main/java/io/cdap/plugin/mysql/MysqlDBRecord.java
+++ b/mysql-plugin/src/main/java/io/cdap/plugin/mysql/MysqlDBRecord.java
@@ -93,4 +93,13 @@ protected void writeNonNullToDB(PreparedStatement stmt, Schema fieldSchema,
super.writeNonNullToDB(stmt, fieldSchema, fieldName, fieldIndex);
}
+
+ @Override
+ protected void insertOperation(PreparedStatement stmt) throws SQLException {
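+ // Resolve each record field by column name (ignoring case) so fields map to DB columns regardless of casing.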
+ for (int fieldIndex = 0; fieldIndex < columnTypes.size(); fieldIndex++) {
+ ColumnType columnType = columnTypes.get(fieldIndex);
+ Schema.Field field = record.getSchema().getField(columnType.getName(), true);
+ writeToDB(stmt, field, fieldIndex);
+ }
+ }
}
diff --git a/mysql-plugin/src/main/java/io/cdap/plugin/mysql/MysqlSchemaReader.java b/mysql-plugin/src/main/java/io/cdap/plugin/mysql/MysqlSchemaReader.java
index a842ba568..50907c063 100644
--- a/mysql-plugin/src/main/java/io/cdap/plugin/mysql/MysqlSchemaReader.java
+++ b/mysql-plugin/src/main/java/io/cdap/plugin/mysql/MysqlSchemaReader.java
@@ -16,12 +16,16 @@
package io.cdap.plugin.mysql;
+import com.google.common.collect.Lists;
import io.cdap.cdap.api.data.schema.Schema;
import io.cdap.plugin.db.CommonSchemaReader;
+import java.sql.ResultSet;
import java.sql.ResultSetMetaData;
import java.sql.SQLException;
import java.sql.Types;
+import java.util.List;
+import java.util.Map;
/**
* Schema reader for mapping Mysql DB type
@@ -31,12 +35,42 @@ public class MysqlSchemaReader extends CommonSchemaReader {
public static final String YEAR_TYPE_NAME = "YEAR";
public static final String MEDIUMINT_UNSIGNED_TYPE_NAME = "MEDIUMINT UNSIGNED";
private final String sessionID;
+ private boolean zeroDateTimeToNull;
public MysqlSchemaReader(String sessionID) {
super();
this.sessionID = sessionID;
}
+ public MysqlSchemaReader(String sessionID, Map<String, String> connectionArguments) {
+ super();
+ this.sessionID = sessionID;
+ this.zeroDateTimeToNull = MysqlUtil.isZeroDateTimeToNull(connectionArguments);
+ }
+
+ @Override
+ public List<Schema.Field> getSchemaFields(ResultSet resultSet) throws SQLException {
+ List<Schema.Field> schemaFields = Lists.newArrayList();
+ ResultSetMetaData metadata = resultSet.getMetaData();
+ // ResultSetMetadata columns are numbered starting with 1
+ for (int i = 1; i <= metadata.getColumnCount(); i++) {
+ if (shouldIgnoreColumn(metadata, i)) {
+ continue;
+ }
+
+ String columnName = metadata.getColumnName(i);
+ Schema columnSchema = getSchema(metadata, i);
+
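+ // Mark the column nullable if the database reports it nullable, or if zeroDateTimeBehavior converts zero
+ // date-time values to null, since even a non-nullable DATE/DATETIME column can then produce nulls.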
+ if (ResultSetMetaData.columnNullable == metadata.isNullable(i)
+ || (zeroDateTimeToNull && MysqlUtil.isDateTimeLikeType(metadata.getColumnType(i)))) {
+ columnSchema = Schema.nullableOf(columnSchema);
+ }
+ Schema.Field field = Schema.Field.of(columnName, columnSchema);
+ schemaFields.add(field);
+ }
+ return schemaFields;
+ }
+
@Override
public boolean shouldIgnoreColumn(ResultSetMetaData metadata, int index) throws SQLException {
return metadata.getColumnName(index).equals("c_" + sessionID) ||
diff --git a/mysql-plugin/src/main/java/io/cdap/plugin/mysql/MysqlSink.java b/mysql-plugin/src/main/java/io/cdap/plugin/mysql/MysqlSink.java
index c839cb12b..bf3b6fe5b 100644
--- a/mysql-plugin/src/main/java/io/cdap/plugin/mysql/MysqlSink.java
+++ b/mysql-plugin/src/main/java/io/cdap/plugin/mysql/MysqlSink.java
@@ -16,6 +16,7 @@
package io.cdap.plugin.mysql;
+import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Strings;
import io.cdap.cdap.api.annotation.Description;
import io.cdap.cdap.api.annotation.Macro;
@@ -24,6 +25,7 @@
import io.cdap.cdap.api.annotation.Name;
import io.cdap.cdap.api.annotation.Plugin;
import io.cdap.cdap.api.data.format.StructuredRecord;
+import io.cdap.cdap.api.data.schema.Schema;
import io.cdap.cdap.etl.api.FailureCollector;
import io.cdap.cdap.etl.api.batch.BatchSink;
import io.cdap.cdap.etl.api.batch.BatchSinkContext;
@@ -39,9 +41,12 @@
import io.cdap.plugin.db.sink.FieldsValidator;
import io.cdap.plugin.util.DBUtils;
+import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Map;
+import java.util.StringJoiner;
+import java.util.stream.Collectors;
import javax.annotation.Nullable;
/**
@@ -54,6 +59,7 @@
public class MysqlSink extends AbstractDBSink {
private final MysqlSinkConfig mysqlSinkConfig;
+ private static final Character ESCAPE_CHAR = '`';
public MysqlSink(MysqlSinkConfig mysqlSinkConfig) {
super(mysqlSinkConfig);
@@ -85,6 +91,24 @@ protected SchemaReader getSchemaReader() {
return new MysqlSchemaReader(null);
}
+ @Override
+ protected void setColumnsInfo(List<Schema.Field> fields) {
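+ // Wrap each column name in backticks so MySQL reserved words and special characters stay valid in generated SQL.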
+ List<String> columnsList = new ArrayList<>();
+ StringJoiner columnsJoiner = new StringJoiner(",");
+ for (Schema.Field field : fields) {
+ columnsList.add(field.getName());
+ columnsJoiner.add(ESCAPE_CHAR + field.getName() + ESCAPE_CHAR);
+ }
+
+ super.columns = Collections.unmodifiableList(columnsList);
+ super.dbColumns = columnsJoiner.toString();
+ }
+
+ @VisibleForTesting
+ String getDbColumns() {
+ return dbColumns;
+ }
+
/**
* MySQL action configuration.
*/
@@ -160,6 +184,11 @@ public Map<String, String> getDBSpecificArguments() {
trustCertificateKeyStorePassword, false);
}
+ @Override
+ public String getTransactionIsolationLevel() {
+ return connection.getTransactionIsolationLevel();
+ }
+
@Override
public MysqlConnectorConfig getConnection() {
return connection;
diff --git a/mysql-plugin/src/main/java/io/cdap/plugin/mysql/MysqlSource.java b/mysql-plugin/src/main/java/io/cdap/plugin/mysql/MysqlSource.java
index 71f113436..b8fd3975c 100644
--- a/mysql-plugin/src/main/java/io/cdap/plugin/mysql/MysqlSource.java
+++ b/mysql-plugin/src/main/java/io/cdap/plugin/mysql/MysqlSource.java
@@ -81,7 +81,7 @@ protected LineageRecorder getLineageRecorder(BatchSourceContext context) {
@Override
protected SchemaReader getSchemaReader() {
- return new MysqlSchemaReader(null);
+ return new MysqlSchemaReader(null, mysqlSourceConfig.getConnectionArguments());
}
/**
@@ -187,6 +187,11 @@ public MysqlConnectorConfig getConnection() {
return connection;
}
+ @Override
+ public String getTransactionIsolationLevel() {
+ return connection.getTransactionIsolationLevel();
+ }
+
@Override
public void validate(FailureCollector collector) {
ConfigUtil.validateConnection(this, useConnection, connection, collector);
diff --git a/mysql-plugin/src/main/java/io/cdap/plugin/mysql/MysqlUtil.java b/mysql-plugin/src/main/java/io/cdap/plugin/mysql/MysqlUtil.java
index c1c770c06..abb4aa27b 100644
--- a/mysql-plugin/src/main/java/io/cdap/plugin/mysql/MysqlUtil.java
+++ b/mysql-plugin/src/main/java/io/cdap/plugin/mysql/MysqlUtil.java
@@ -18,6 +18,7 @@
import com.google.common.collect.ImmutableMap;
+import java.sql.Types;
import java.util.Map;
/**
@@ -91,4 +92,20 @@ public static Map<String, String> composeDbSpecificArgumentsMap(Boolean autoReco
public static String getConnectionString(String host, Integer port, String database) {
return String.format(MysqlConstants.MYSQL_CONNECTION_STRING_FORMAT, host, port, database);
}
+
+ public static boolean isDateTimeLikeType(int columnType) {
+ int[] dateTimeLikeTypes = new int[]{Types.TIMESTAMP, Types.TIMESTAMP_WITH_TIMEZONE, Types.DATE};
+
+ for (int dttType : dateTimeLikeTypes) {
+ if (dttType == columnType) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ public static boolean isZeroDateTimeToNull(Map<String, String> connectionArguments) {
+ String argValue = connectionArguments.getOrDefault(MysqlConstants.ZERO_DATE_TIME_BEHAVIOR, "");
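+ // Connector/J 8.x spells the value CONVERT_TO_NULL; older 5.x drivers used convertToNull. Accept both.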
+ return argValue.equals("CONVERT_TO_NULL") || argValue.equals("convertToNull");
+ }
}
diff --git a/mysql-plugin/src/test/java/io/cdap/plugin/mysql/MysqlFailedConnectionTest.java b/mysql-plugin/src/test/java/io/cdap/plugin/mysql/MysqlFailedConnectionTest.java
index a1be6a754..5c4f35828 100644
--- a/mysql-plugin/src/test/java/io/cdap/plugin/mysql/MysqlFailedConnectionTest.java
+++ b/mysql-plugin/src/test/java/io/cdap/plugin/mysql/MysqlFailedConnectionTest.java
@@ -31,10 +31,26 @@ public void test() throws ClassNotFoundException, IOException {
new MysqlConnectorConfig("localhost", 3306, "username", "password", "jdbc", ""));
super.test(JDBC_DRIVER_CLASS_NAME, connector, "Failed to create connection to database via connection string: " +
- "jdbc:mysql://localhost:3306 and arguments: {user=username, " +
- "rewriteBatchedStatements=true, " +
- "connectTimeout=20000, tinyInt1isBit=false, " +
- "socketTimeout=20000}. Error: " +
- "ConnectException: Connection refused (Connection refused).");
+ "jdbc:mysql://localhost:3306 and arguments: {user=username, " +
+ "rewriteBatchedStatements=true, " +
+ "connectTimeout=20000, tinyInt1isBit=false, " +
+ "socketTimeout=20000}. Error: " +
+ "ConnectException: Connection refused (Connection refused).");
}
+
+ @Test
+ public void testWithUpdatedConnectionArguments() throws ClassNotFoundException, IOException {
+
+ MysqlConnector connector = new MysqlConnector(
+ new MysqlConnectorConfig("localhost", 3306, "username", "password", "jdbc",
+ "connectTimeout=30000;socketTimeout=30000"));
+
+ super.test(JDBC_DRIVER_CLASS_NAME, connector, "Failed to create connection to database via connection string: " +
+ "jdbc:mysql://localhost:3306 and arguments: {user=username, " +
+ "rewriteBatchedStatements=true, " +
+ "connectTimeout=30000, tinyInt1isBit=false, " +
+ "socketTimeout=30000}. Error: " +
+ "ConnectException: Connection refused (Connection refused).");
+ }
+
}
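
The two expected messages in this test class differ only in the timeout values, which shows that user-supplied connection arguments override the plugin defaults. A minimal sketch of that merge, assuming a simple last-write-wins Properties merge (the plugin's actual merge code lives outside this hunk):

import java.util.Properties;

public class ConnectionArgumentsMergeSketch {
  public static void main(String[] args) {
    Properties merged = new Properties();
    // Plugin defaults are applied first...
    merged.setProperty("connectTimeout", "20000");
    merged.setProperty("socketTimeout", "20000");
    // ...then the user string "connectTimeout=30000;socketTimeout=30000" wins.
    merged.setProperty("connectTimeout", "30000");
    merged.setProperty("socketTimeout", "30000");
    // Both values now read 30000, matching the expected error message above.
    System.out.println(merged.getProperty("connectTimeout")); // 30000
  }
}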
diff --git a/mysql-plugin/src/test/java/io/cdap/plugin/mysql/MysqlSchemaReaderUnitTest.java b/mysql-plugin/src/test/java/io/cdap/plugin/mysql/MysqlSchemaReaderUnitTest.java
index 28582bc3b..fa7029c8f 100644
--- a/mysql-plugin/src/test/java/io/cdap/plugin/mysql/MysqlSchemaReaderUnitTest.java
+++ b/mysql-plugin/src/test/java/io/cdap/plugin/mysql/MysqlSchemaReaderUnitTest.java
@@ -21,9 +21,13 @@
import org.junit.Test;
import org.mockito.Mockito;
+import java.sql.ResultSet;
import java.sql.ResultSetMetaData;
import java.sql.SQLException;
import java.sql.Types;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
public class MysqlSchemaReaderUnitTest {
@@ -37,4 +41,33 @@ public void validateYearTypeToStringTypeConversion() throws SQLException {
Schema schema = schemaReader.getSchema(metadata, 1);
Assert.assertTrue(Schema.of(Schema.Type.INT).equals(schema));
}
+
+ @Test
+ public void validateZeroDateTimeBehavior() throws SQLException {
+ ResultSet resultSet = Mockito.mock(ResultSet.class);
+ ResultSetMetaData metadata = Mockito.mock(ResultSetMetaData.class);
+ Mockito.when(resultSet.getMetaData()).thenReturn(metadata);
+
+ Mockito.when(metadata.getColumnCount()).thenReturn(1);
+ Mockito.when(metadata.getColumnName(Mockito.eq(1))).thenReturn("some_date");
+
+ Mockito.when(metadata.getColumnType(Mockito.eq(1))).thenReturn(Types.DATE);
+ Mockito.when(metadata.getColumnTypeName(Mockito.eq(1))).thenReturn(MysqlSchemaReader.YEAR_TYPE_NAME);
+
+ // non-nullable column
+ Mockito.when(metadata.isNullable(Mockito.eq(1))).thenReturn(0);
+
+ // test that a non-nullable date remains non-nullable when no connection argument is present
+ MysqlSchemaReader schemaReader = new MysqlSchemaReader(null);
+ List<Schema.Field> schemaFields = schemaReader.getSchemaFields(resultSet);
+ Assert.assertFalse(schemaFields.get(0).getSchema().isNullable());
+
+ // test that a non-nullable date column becomes nullable when zeroDateTimeBehavior is CONVERT_TO_NULL
+ Map<String, String> connectionArguments = new HashMap<>();
+ connectionArguments.put("zeroDateTimeBehavior", "CONVERT_TO_NULL");
+
+ schemaReader = new MysqlSchemaReader(null, connectionArguments);
+ schemaFields = schemaReader.getSchemaFields(resultSet);
+ Assert.assertTrue(schemaFields.get(0).getSchema().isNullable());
+ }
}
diff --git a/mysql-plugin/src/test/java/io/cdap/plugin/mysql/MysqlSinkTest.java b/mysql-plugin/src/test/java/io/cdap/plugin/mysql/MysqlSinkTest.java
new file mode 100644
index 000000000..1dd4e809e
--- /dev/null
+++ b/mysql-plugin/src/test/java/io/cdap/plugin/mysql/MysqlSinkTest.java
@@ -0,0 +1,35 @@
+/*
+ * Copyright © 2024 Cask Data, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy of
+ * the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package io.cdap.plugin.mysql;
+
+import io.cdap.cdap.api.data.schema.Schema;
+import org.junit.Assert;
+import org.junit.Test;
+
+public class MysqlSinkTest {
+ @Test
+ public void testSetColumnsInfo() {
+ Schema outputSchema = Schema.recordOf("output",
+ Schema.Field.of("id", Schema.of(Schema.Type.INT)),
+ Schema.Field.of("name", Schema.of(Schema.Type.STRING)),
+ Schema.Field.of("insert", Schema.of(Schema.Type.STRING)));
+ MysqlSink mySQLSink = new MysqlSink(new MysqlSink.MysqlSinkConfig());
+ Assert.assertNotNull(outputSchema.getFields());
+ mySQLSink.setColumnsInfo(outputSchema.getFields());
+ Assert.assertEquals("`id`,`name`,`insert`", mySQLSink.getDbColumns());
+ }
+}
diff --git a/mysql-plugin/src/test/java/io/cdap/plugin/mysql/MysqlUtilUnitTest.java b/mysql-plugin/src/test/java/io/cdap/plugin/mysql/MysqlUtilUnitTest.java
new file mode 100644
index 000000000..9481068f1
--- /dev/null
+++ b/mysql-plugin/src/test/java/io/cdap/plugin/mysql/MysqlUtilUnitTest.java
@@ -0,0 +1,62 @@
+
+/*
+ * Copyright © 2024 Cask Data, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy of
+ * the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package io.cdap.plugin.mysql;
+
+import org.junit.Test;
+
+import java.sql.Types;
+import java.util.HashMap;
+import java.util.Map;
+
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+public class MysqlUtilUnitTest {
+
+ @Test
+ public void testIsZeroDateTimeToNull() {
+ Map<String, String> connArgsMap = new HashMap<>(1);
+
+ connArgsMap.put("zeroDateTimeBehavior", "");
+ assertFalse(MysqlUtil.isZeroDateTimeToNull(connArgsMap));
+
+ connArgsMap.put("zeroDateTimeBehavior", "ROUND");
+ assertFalse(MysqlUtil.isZeroDateTimeToNull(connArgsMap));
+
+ connArgsMap.put("zeroDateTimeBehavior", "CONVERT_TO_NULL");
+ assertTrue(MysqlUtil.isZeroDateTimeToNull(connArgsMap));
+
+ connArgsMap.put("zeroDateTimeBehavior", "convertToNull");
+ assertTrue(MysqlUtil.isZeroDateTimeToNull(connArgsMap));
+ }
+
+ @Test
+ public void testIsDateTimeLikeType() {
+ int dateType = Types.DATE;
+ int timestampType = Types.TIMESTAMP;
+ int timestampWithTimezoneType = Types.TIMESTAMP_WITH_TIMEZONE;
+ int timeType = Types.TIME;
+ int stringType = Types.VARCHAR;
+
+ assertTrue(MysqlUtil.isDateTimeLikeType(dateType));
+ assertTrue(MysqlUtil.isDateTimeLikeType(timestampType));
+ assertTrue(MysqlUtil.isDateTimeLikeType(timestampWithTimezoneType));
+ assertFalse(MysqlUtil.isDateTimeLikeType(timeType));
+ assertFalse(MysqlUtil.isDateTimeLikeType(stringType));
+ }
+}
diff --git a/mysql-plugin/widgets/MySQL-connector.json b/mysql-plugin/widgets/MySQL-connector.json
index 9064d1bf6..f60f5526f 100644
--- a/mysql-plugin/widgets/MySQL-connector.json
+++ b/mysql-plugin/widgets/MySQL-connector.json
@@ -30,6 +30,20 @@
"widget-attributes": {
"default": "3306"
}
+ },
+ {
+ "widget-type": "select",
+ "label": "Transaction Isolation Level",
+ "name": "transactionIsolationLevel",
+ "widget-attributes": {
+ "values": [
+ "TRANSACTION_READ_UNCOMMITTED",
+ "TRANSACTION_READ_COMMITTED",
+ "TRANSACTION_REPEATABLE_READ",
+ "TRANSACTION_SERIALIZABLE"
+ ],
+ "default": "TRANSACTION_SERIALIZABLE"
+ }
}
]
},
diff --git a/mysql-plugin/widgets/Mysql-batchsink.json b/mysql-plugin/widgets/Mysql-batchsink.json
index c525ead40..58596aae2 100644
--- a/mysql-plugin/widgets/Mysql-batchsink.json
+++ b/mysql-plugin/widgets/Mysql-batchsink.json
@@ -65,6 +65,20 @@
"label": "Password",
"name": "password"
},
+ {
+ "widget-type": "select",
+ "label": "Transaction Isolation Level",
+ "name": "transactionIsolationLevel",
+ "widget-attributes": {
+ "values": [
+ "TRANSACTION_READ_UNCOMMITTED",
+ "TRANSACTION_READ_COMMITTED",
+ "TRANSACTION_REPEATABLE_READ",
+ "TRANSACTION_SERIALIZABLE"
+ ],
+ "default": "TRANSACTION_SERIALIZABLE"
+ }
+ },
{
"widget-type": "keyvalue",
"label": "Connection Arguments",
@@ -225,6 +239,10 @@
"type": "property",
"name": "password"
},
+ {
+ "type": "property",
+ "name": "transactionIsolationLevel"
+ },
{
"type": "property",
"name": "host"
diff --git a/mysql-plugin/widgets/Mysql-batchsource.json b/mysql-plugin/widgets/Mysql-batchsource.json
index 9175bd5ed..506e837f7 100644
--- a/mysql-plugin/widgets/Mysql-batchsource.json
+++ b/mysql-plugin/widgets/Mysql-batchsource.json
@@ -65,6 +65,20 @@
"label": "Password",
"name": "password"
},
+ {
+ "widget-type": "select",
+ "label": "Transaction Isolation Level",
+ "name": "transactionIsolationLevel",
+ "widget-attributes": {
+ "values": [
+ "TRANSACTION_READ_UNCOMMITTED",
+ "TRANSACTION_READ_COMMITTED",
+ "TRANSACTION_REPEATABLE_READ",
+ "TRANSACTION_SERIALIZABLE"
+ ],
+ "default": "TRANSACTION_SERIALIZABLE"
+ }
+ },
{
"widget-type": "keyvalue",
"label": "Connection Arguments",
@@ -277,6 +291,10 @@
"type": "property",
"name": "password"
},
+ {
+ "type": "property",
+ "name": "transactionIsolationLevel"
+ },
{
"type": "property",
"name": "host"
diff --git a/netezza-plugin/pom.xml b/netezza-plugin/pom.xml
index 900e430fe..2b70b1f65 100644
--- a/netezza-plugin/pom.xml
+++ b/netezza-plugin/pom.xml
@@ -20,7 +20,7 @@
database-plugins-parent
io.cdap.plugin
- 1.11.0-SNAPSHOT
+ 1.11.10
Netezza plugin
@@ -92,12 +92,12 @@
<_exportcontents>
- io.cdap.plugin.netezza.*;
- io.cdap.plugin.db.source.*;
- io.cdap.plugin.db.sink.*;
- org.apache.commons.lang;
- org.apache.commons.logging.*;
- org.codehaus.jackson.*
+ io.cdap.plugin.netezza.*;
+ io.cdap.plugin.db.source.*;
+ io.cdap.plugin.db.sink.*;
+ org.apache.commons.lang;
+ org.apache.commons.logging.*;
+ org.codehaus.jackson.*
*;inline=false;scope=compile
true
diff --git a/oracle-plugin/pom.xml b/oracle-plugin/pom.xml
index e0ed7ff50..9b3946806 100644
--- a/oracle-plugin/pom.xml
+++ b/oracle-plugin/pom.xml
@@ -20,7 +20,7 @@
database-plugins-parent
io.cdap.plugin
- 1.11.0-SNAPSHOT
+ 1.11.10
Oracle plugin
@@ -113,12 +113,12 @@
<_exportcontents>
- io.cdap.plugin.oracle.*;
- io.cdap.plugin.db.source.*;
- io.cdap.plugin.db.sink.*;
- org.apache.commons.lang;
- org.apache.commons.logging.*;
- org.codehaus.jackson.*
+ io.cdap.plugin.oracle.*;
+ io.cdap.plugin.db.source.*;
+ io.cdap.plugin.db.sink.*;
+ org.apache.commons.lang;
+ org.apache.commons.logging.*;
+ org.codehaus.jackson.*
*;inline=false;scope=compile
true
diff --git a/oracle-plugin/src/e2e-test/features/sink/OracleRunTime.feature b/oracle-plugin/src/e2e-test/features/sink/OracleRunTime.feature
index c2b56e8b7..70b1bdba6 100644
--- a/oracle-plugin/src/e2e-test/features/sink/OracleRunTime.feature
+++ b/oracle-plugin/src/e2e-test/features/sink/OracleRunTime.feature
@@ -117,3 +117,104 @@ Feature: Oracle - Verify data transfer from BigQuery source to Oracle sink
Then Verify the pipeline status is "Succeeded"
Then Validate records transferred to target table with record counts of BigQuery table
Then Validate the values of records transferred to target Oracle table is equal to the values from source BigQuery table
+
+ @BQ_SOURCE_TEST_SMALL_CASE @ORACLE_TEST_TABLE
+ Scenario: To verify data is getting transferred from BigQuery source to Oracle sink successfully when the schema is in lowercase
+ Given Open Datafusion Project to configure pipeline
+ When Expand Plugin group in the LHS plugins list: "Source"
+ When Select plugin: "BigQuery" from the plugins list as: "Source"
+ When Expand Plugin group in the LHS plugins list: "Sink"
+ When Select plugin: "Oracle" from the plugins list as: "Sink"
+ Then Connect plugins: "BigQuery" and "Oracle" to establish connection
+ Then Navigate to the properties page of plugin: "BigQuery"
+ Then Replace input plugin property: "project" with value: "projectId"
+ Then Enter input plugin property: "datasetProject" with value: "projectId"
+ Then Enter input plugin property: "referenceName" with value: "BQReferenceName"
+ Then Enter input plugin property: "dataset" with value: "dataset"
+ Then Enter input plugin property: "table" with value: "bqSourceTable"
+ Then Click on the Get Schema button
+ Then Verify the Output Schema matches the Expected Schema: "bqOutputDatatypesSchemaSmallCase"
+ Then Validate "BigQuery" plugin properties
+ Then Close the Plugin Properties page
+ Then Navigate to the properties page of plugin: "Oracle"
+ Then Select dropdown plugin property: "select-jdbcPluginName" with option value: "driverName"
+ Then Replace input plugin property: "host" with value: "host" for Credentials and Authorization related fields
+ Then Replace input plugin property: "port" with value: "port" for Credentials and Authorization related fields
+ Then Replace input plugin property: "user" with value: "username" for Credentials and Authorization related fields
+ Then Replace input plugin property: "password" with value: "password" for Credentials and Authorization related fields
+ Then Select radio button plugin property: "connectionType" with value: "service"
+ Then Select radio button plugin property: "role" with value: "normal"
+ Then Enter input plugin property: "referenceName" with value: "sourceRef"
+ Then Replace input plugin property: "database" with value: "databaseName"
+ Then Replace input plugin property: "tableName" with value: "targetTable"
+ Then Replace input plugin property: "dbSchemaName" with value: "schema"
+ Then Replace input plugin property: "user" with value: "username" for Credentials and Authorization related fields
+ Then Replace input plugin property: "password" with value: "password" for Credentials and Authorization related fields
+ Then Enter input plugin property: "referenceName" with value: "targetRef"
+ Then Select radio button plugin property: "connectionType" with value: "service"
+ Then Select radio button plugin property: "role" with value: "normal"
+ Then Validate "Oracle" plugin properties
+ Then Close the Plugin Properties page
+ Then Save the pipeline
+ Then Preview and run the pipeline
+ Then Verify the preview of pipeline is "success"
+ Then Click on preview data for Oracle sink
+ Then Close the preview data
+ Then Deploy the pipeline
+ Then Run the Pipeline in Runtime
+ Then Wait till pipeline is in running state
+ Then Open and capture logs
+ Then Verify the pipeline status is "Succeeded"
+ Then Validate records transferred to target table with record counts of BigQuery table
+ Then Validate the values of records transferred to target Oracle table is equal to the values from source BigQuery table with case
+
+
+ @BQ_SOURCE_TEST_DATE @ORACLE_DATE_TABLE
+ Scenario: To verify data is getting transferred from BigQuery source to Oracle sink successfully when the schema has date and timestamp fields
+ Given Open Datafusion Project to configure pipeline
+ When Expand Plugin group in the LHS plugins list: "Source"
+ When Select plugin: "BigQuery" from the plugins list as: "Source"
+ When Expand Plugin group in the LHS plugins list: "Sink"
+ When Select plugin: "Oracle" from the plugins list as: "Sink"
+ Then Connect plugins: "BigQuery" and "Oracle" to establish connection
+ Then Navigate to the properties page of plugin: "BigQuery"
+ Then Replace input plugin property: "project" with value: "projectId"
+ Then Enter input plugin property: "datasetProject" with value: "projectId"
+ Then Enter input plugin property: "referenceName" with value: "BQReferenceName"
+ Then Enter input plugin property: "dataset" with value: "dataset"
+ Then Enter input plugin property: "table" with value: "bqSourceTable"
+ Then Click on the Get Schema button
+ Then Verify the Output Schema matches the Expected Schema: "outputDatatypesDateTimeSchema"
+ Then Validate "BigQuery" plugin properties
+ Then Close the Plugin Properties page
+ Then Navigate to the properties page of plugin: "Oracle"
+ Then Select dropdown plugin property: "select-jdbcPluginName" with option value: "driverName"
+ Then Replace input plugin property: "host" with value: "host" for Credentials and Authorization related fields
+ Then Replace input plugin property: "port" with value: "port" for Credentials and Authorization related fields
+ Then Replace input plugin property: "user" with value: "username" for Credentials and Authorization related fields
+ Then Replace input plugin property: "password" with value: "password" for Credentials and Authorization related fields
+ Then Select radio button plugin property: "connectionType" with value: "service"
+ Then Select radio button plugin property: "role" with value: "normal"
+ Then Enter input plugin property: "referenceName" with value: "sourceRef"
+ Then Replace input plugin property: "database" with value: "databaseName"
+ Then Replace input plugin property: "tableName" with value: "targetTable"
+ Then Replace input plugin property: "dbSchemaName" with value: "schema"
+ Then Replace input plugin property: "user" with value: "username" for Credentials and Authorization related fields
+ Then Replace input plugin property: "password" with value: "password" for Credentials and Authorization related fields
+ Then Enter input plugin property: "referenceName" with value: "targetRef"
+ Then Select radio button plugin property: "connectionType" with value: "service"
+ Then Select radio button plugin property: "role" with value: "normal"
+ Then Validate "Oracle" plugin properties
+ Then Close the Plugin Properties page
+ Then Save the pipeline
+ Then Preview and run the pipeline
+ Then Verify the preview of pipeline is "success"
+ Then Click on preview data for Oracle sink
+ Then Close the preview data
+ Then Deploy the pipeline
+ Then Run the Pipeline in Runtime
+ Then Wait till pipeline is in running state
+ Then Open and capture logs
+ Then Verify the pipeline status is "Succeeded"
+ Then Validate records transferred to target table with record counts of BigQuery table
+ Then Validate the values of records transferred to target Oracle table is equal to the values from source BigQuery table
diff --git a/oracle-plugin/src/e2e-test/java/io.cdap.plugin/BQValidation.java b/oracle-plugin/src/e2e-test/java/io.cdap.plugin/BQValidation.java
index 6edfcc8fd..b5a82e420 100644
--- a/oracle-plugin/src/e2e-test/java/io.cdap.plugin/BQValidation.java
+++ b/oracle-plugin/src/e2e-test/java/io.cdap.plugin/BQValidation.java
@@ -33,7 +33,12 @@
import java.sql.Types;
import java.text.ParseException;
import java.text.SimpleDateFormat;
+import java.time.LocalDateTime;
+import java.time.ZonedDateTime;
+import java.time.format.DateTimeFormatter;
+import java.time.format.DateTimeParseException;
import java.util.ArrayList;
+import java.util.Arrays;
import java.util.Base64;
import java.util.Date;
import java.util.List;
@@ -44,6 +49,13 @@
public class BQValidation {
+ private static final List<SimpleDateFormat> TIMESTAMP_DATE_FORMATS = Arrays.asList(
+ new SimpleDateFormat("yyyy-MM-dd'T'hh:mm:ss"),
+ new SimpleDateFormat("yyyy-MM-dd"));
+ private static final List<DateTimeFormatter> TIMESTAMP_TZ_DATE_FORMATS = Arrays.asList(
+ DateTimeFormatter.ofPattern("yyyy-MM-dd'T'HH:mm:ssXXX"),
+ DateTimeFormatter.ofPattern("yyyy-MM-dd'T'HH:mm:ss.SSSSSSXXX"));
+
/**
* Extracts entire data from source and target tables.
*
@@ -68,11 +80,12 @@ public static boolean validateDBToBQRecordValues(String schema, String sourceTab
ResultSet.HOLD_CURSORS_OVER_COMMIT);
ResultSet rsSource = statement1.executeQuery(getSourceQuery);
- return compareResultSetAndJsonData(rsSource, jsonResponse);
+ return compareResultSetAndJsonData(rsSource, jsonResponse, false);
}
}
- public static boolean validateBQToDBRecordValues(String schema, String sourceTable, String targetTable)
+ public static boolean validateBQToDBRecordValues(String schema, String sourceTable, String targetTable,
+ boolean isSchemaSmallCase)
throws SQLException, ClassNotFoundException, ParseException, IOException, InterruptedException {
List jsonResponse = new ArrayList<>();
List bigQueryRows = new ArrayList<>();
@@ -88,7 +101,7 @@ public static boolean validateBQToDBRecordValues(String schema, String sourceTab
ResultSet.HOLD_CURSORS_OVER_COMMIT);
ResultSet rsTarget = statement1.executeQuery(getTargetQuery);
- return compareResultSetAndJsonData(rsTarget, jsonResponse);
+ return compareResultSetAndJsonData(rsTarget, jsonResponse, isSchemaSmallCase);
}
}
@@ -119,7 +132,8 @@ private static void getBigQueryTableData(String table, List bigQueryRows
* @throws ParseException If an error occurs while parsing the data.
*/
- public static boolean compareResultSetAndJsonData(ResultSet rsSource, List bigQueryData)
+ public static boolean compareResultSetAndJsonData(ResultSet rsSource, List bigQueryData,
+ boolean isSchemaSmallCase)
throws SQLException, ParseException {
ResultSetMetaData mdSource = rsSource.getMetaData();
boolean result = false;
@@ -146,7 +160,8 @@ public static boolean compareResultSetAndJsonData(ResultSet rsSource, List getDBSpecificArguments() {
return ImmutableMap.of(OracleConstants.DEFAULT_BATCH_VALUE, String.valueOf(defaultBatchValue));
diff --git a/oracle-plugin/src/main/java/io/cdap/plugin/oracle/OracleConnector.java b/oracle-plugin/src/main/java/io/cdap/plugin/oracle/OracleConnector.java
index fde72c8ad..3094e7152 100644
--- a/oracle-plugin/src/main/java/io/cdap/plugin/oracle/OracleConnector.java
+++ b/oracle-plugin/src/main/java/io/cdap/plugin/oracle/OracleConnector.java
@@ -112,7 +112,9 @@ protected DBConnectorPath getDBConnectorPath(String path) {
@Override
protected SchemaReader getSchemaReader(String sessionID) {
- return new OracleSourceSchemaReader(sessionID);
+ return new OracleSourceSchemaReader(sessionID, config.getTreatAsOldTimestamp(),
+ config.getTreatPrecisionlessNumAsDeci(),
+ config.getTreatTimestampLTZAsTimestamp());
}
@Override
@@ -125,15 +127,8 @@ protected String getConnectionString(@Nullable String database) {
if (database == null) {
return config.getConnectionString();
}
- if (OracleConstants.TNS_CONNECTION_TYPE.equals(config.getConnectionType())) {
- return String.format(OracleConstants.ORACLE_CONNECTION_STRING_TNS_FORMAT, database);
- } else if (OracleConstants.SERVICE_CONNECTION_TYPE.equals(config.getConnectionType())) {
- return String.format(OracleConstants.ORACLE_CONNECTION_STRING_SERVICE_NAME_FORMAT, config.getHost(),
- config.getPort(), database);
- } else {
- return String.format(OracleConstants.ORACLE_CONNECTION_STRING_SID_FORMAT,
- config.getHost(), config.getPort(), database);
- }
+ return OracleConstants.getConnectionString(config.getConnectionType(),
+ config.getHost(), config.getPort(), database, config.getSSlMode());
}
@Override
diff --git a/oracle-plugin/src/main/java/io/cdap/plugin/oracle/OracleConnectorConfig.java b/oracle-plugin/src/main/java/io/cdap/plugin/oracle/OracleConnectorConfig.java
index 73b005243..79d14215b 100644
--- a/oracle-plugin/src/main/java/io/cdap/plugin/oracle/OracleConnectorConfig.java
+++ b/oracle-plugin/src/main/java/io/cdap/plugin/oracle/OracleConnectorConfig.java
@@ -22,8 +22,6 @@
import io.cdap.plugin.db.TransactionIsolationLevel;
import io.cdap.plugin.db.connector.AbstractDBSpecificConnectorConfig;
-import java.util.HashMap;
-import java.util.Map;
import java.util.Properties;
import javax.annotation.Nullable;
@@ -43,12 +41,15 @@ public OracleConnectorConfig(String host, int port, String user, String password
public OracleConnectorConfig(String host, int port, String user, String password, String jdbcPluginName,
String connectionArguments, String connectionType, String database) {
- this(host, port, user, password, jdbcPluginName, connectionArguments, connectionType, database, null);
+ this(host, port, user, password, jdbcPluginName, connectionArguments, connectionType, database, null, null, null,
+ null, null);
}
public OracleConnectorConfig(String host, int port, String user, String password, String jdbcPluginName,
String connectionArguments, String connectionType, String database,
- String role) {
+ String role, Boolean useSSL, @Nullable Boolean treatAsOldTimestamp,
+ @Nullable Boolean treatPrecisionlessNumAsDeci,
+ @Nullable Boolean treatTimestampLTZAsTimestamp) {
this.host = host;
this.port = port;
@@ -59,17 +60,15 @@ public OracleConnectorConfig(String host, int port, String user, String password
this.connectionType = connectionType;
this.database = database;
this.role = role;
+ this.useSSL = useSSL;
+ this.treatAsOldTimestamp = treatAsOldTimestamp;
+ this.treatPrecisionlessNumAsDeci = treatPrecisionlessNumAsDeci;
+ this.treatTimestampLTZAsTimestamp = treatTimestampLTZAsTimestamp;
}
@Override
public String getConnectionString() {
- if (OracleConstants.TNS_CONNECTION_TYPE.equals(getConnectionType())) {
- return String.format(OracleConstants.ORACLE_CONNECTION_STRING_TNS_FORMAT, database);
- } else if (OracleConstants.SERVICE_CONNECTION_TYPE.equals(getConnectionType())) {
- return String.format(OracleConstants.ORACLE_CONNECTION_STRING_SERVICE_NAME_FORMAT, host, getPort(), database);
- } else {
- return String.format(OracleConstants.ORACLE_CONNECTION_STRING_SID_FORMAT, host, getPort(), database);
- }
+ return OracleConstants.getConnectionString(connectionType, host, getPort(), database, useSSL);
}
@Name(OracleConstants.CONNECTION_TYPE)
@@ -86,11 +85,25 @@ public String getConnectionString() {
@Macro
private String database;
- @Name(OracleConstants.TRANSACTION_ISOLATION_LEVEL)
- @Description("The transaction isolation level for the database session.")
- @Macro
+ @Name(OracleConstants.USE_SSL)
+ @Description("Turns on SSL encryption. Connection will fail if SSL is not available")
+ @Nullable
+ public Boolean useSSL;
+
+ @Name(OracleConstants.TREAT_AS_OLD_TIMESTAMP)
+ @Description("A hidden field to handle timestamp as CDAP's timestamp micros or string as per old behavior.")
+ @Nullable
+ public Boolean treatAsOldTimestamp;
+
+ @Name(OracleConstants.TREAT_PRECISIONLESSNUM_AS_DECI)
+ @Description("A hidden field to handle precision less number as CDAP's decimal per old behavior.")
@Nullable
- private String transactionIsolationLevel;
+ public Boolean treatPrecisionlessNumAsDeci;
+
+ @Name(OracleConstants.TREAT_TIMESTAMP_LTZ_AS_TIMESTAMP)
+ @Description("A hidden field to handle mapping of Oracle Timestamp_LTZ data type to BQ Timestamp.")
+ @Nullable
+ public Boolean treatTimestampLTZAsTimestamp;
@Override
protected int getDefaultPort() {
@@ -109,6 +122,23 @@ public String getDatabase() {
return database;
}
+ public Boolean getSSlMode() {
+ // return false if useSSL is null, otherwise return its value
+ return useSSL != null && useSSL;
+ }
+
+ public Boolean getTreatAsOldTimestamp() {
+ return Boolean.TRUE.equals(treatAsOldTimestamp);
+ }
+
+ public Boolean getTreatPrecisionlessNumAsDeci() {
+ return Boolean.TRUE.equals(treatPrecisionlessNumAsDeci);
+ }
+
+ public Boolean getTreatTimestampLTZAsTimestamp() {
+ return Boolean.TRUE.equals(treatTimestampLTZAsTimestamp);
+ }
+
@Override
public Properties getConnectionArgumentsProperties() {
Properties prop = super.getConnectionArgumentsProperties();
@@ -119,6 +149,7 @@ public Properties getConnectionArgumentsProperties() {
return prop;
}
+ @Override
public String getTransactionIsolationLevel() {
//if null default to the highest isolation level possible
if (transactionIsolationLevel == null) {
@@ -128,16 +159,7 @@ public String getTransactionIsolationLevel() {
//This ensures that the role is mapped to the right serialization level, even w/ incorrect user input
//if role is SYSDBA or SYSOP it will map to read_committed. else serialized
return (!getRole().equals(ROLE_NORMAL)) ? TransactionIsolationLevel.Level.TRANSACTION_READ_COMMITTED.name() :
- TransactionIsolationLevel.Level.valueOf(transactionIsolationLevel).name();
- }
-
- @Override
- public Map getAdditionalArguments() {
- Map additonalArguments = new HashMap<>();
- if (getTransactionIsolationLevel() != null) {
- additonalArguments.put(TransactionIsolationLevel.CONF_KEY, getTransactionIsolationLevel());
- }
- return additonalArguments;
+ TransactionIsolationLevel.Level.valueOf(transactionIsolationLevel).name();
}
@Override
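
A condensed sketch of the mapping that getTransactionIsolationLevel implements: string literals stand in for TransactionIsolationLevel.Level names and "normal" for ROLE_NORMAL; the null branch returning SERIALIZABLE is inferred from the comment above.

static String effectiveIsolationLevel(String role, String configuredLevel) {
  if (configuredLevel == null) {
    // Default to the highest isolation level possible.
    return "TRANSACTION_SERIALIZABLE";
  }
  // SYSDBA/SYSOPER sessions are pinned to READ_COMMITTED regardless of input.
  return "normal".equals(role)
      ? configuredLevel
      : "TRANSACTION_READ_COMMITTED";
}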
diff --git a/oracle-plugin/src/main/java/io/cdap/plugin/oracle/OracleConstants.java b/oracle-plugin/src/main/java/io/cdap/plugin/oracle/OracleConstants.java
index 040780a89..a3560e969 100644
--- a/oracle-plugin/src/main/java/io/cdap/plugin/oracle/OracleConstants.java
+++ b/oracle-plugin/src/main/java/io/cdap/plugin/oracle/OracleConstants.java
@@ -16,6 +16,8 @@
package io.cdap.plugin.oracle;
+import javax.annotation.Nullable;
+
/**
* Oracle Constants.
*/
@@ -27,6 +29,11 @@ private OracleConstants() {
public static final String PLUGIN_NAME = "Oracle";
public static final String ORACLE_CONNECTION_STRING_SID_FORMAT = "jdbc:oracle:thin:@%s:%s:%s";
public static final String ORACLE_CONNECTION_STRING_SERVICE_NAME_FORMAT = "jdbc:oracle:thin:@//%s:%s/%s";
+ // Connection formats using TNS DESCRIPTOR to accept protocol
+ public static final String ORACLE_SERVICE_NAME_FORMAT_TNS_DESCRIPTOR_WITH_PROTOCOL =
+ "jdbc:oracle:thin:@(DESCRIPTION=(ADDRESS=(PROTOCOL=%s)(HOST=%s)(PORT=%s))(CONNECT_DATA=(SERVICE_NAME=%s)))";
+ public static final String ORACLE_SID_FORMAT_TNS_DESCRIPTOR_WITH_PROTOCOL =
+ "jdbc:oracle:thin:@(DESCRIPTION=(ADDRESS=(PROTOCOL=%s)(HOST=%s)(PORT=%s))(CONNECT_DATA=(SID=%s)))";
public static final String ORACLE_CONNECTION_STRING_TNS_FORMAT = "jdbc:oracle:thin:@%s";
public static final String DEFAULT_BATCH_VALUE = "defaultBatchValue";
public static final String DEFAULT_ROW_PREFETCH = "defaultRowPrefetch";
@@ -34,6 +41,99 @@ private OracleConstants() {
public static final String CONNECTION_TYPE = "connectionType";
public static final String ROLE = "role";
public static final String NAME_DATABASE = "database";
- public static final String TNS_CONNECTION_TYPE = "TNS";
+ public static final String TNS_CONNECTION_TYPE = "tns";
public static final String TRANSACTION_ISOLATION_LEVEL = "transactionIsolationLevel";
+ public static final String USE_SSL = "useSSL";
+ public static final String TREAT_AS_OLD_TIMESTAMP = "treatAsOldTimestamp";
+ public static final String TREAT_PRECISIONLESSNUM_AS_DECI = "treatPrecisionlessNumAsDeci";
+ public static final String TREAT_TIMESTAMP_LTZ_AS_TIMESTAMP = "treatTimestampLTZAsTimestamp";
+
+ /**
+ * Constructs the Oracle connection string based on the provided connection type, host, port, and database.
+ * If SSL is enabled, the connection protocol will be "tcps" instead of "tcp".
+ *
+ * @param connectionType TNS/Service/SID
+ * @param host Host name of the Oracle server
+ * @param port Port of the Oracle server
+ * @param database Database to connect to
+ * @param useSSL Whether SSL/TLS is required (true/false)
+ * @return Connection String based on the given parameters and connection type.
+ */
+ public static String getConnectionString(String connectionType,
+ @Nullable String host,
+ int port,
+ String database,
+ @Nullable Boolean useSSL) {
+ // Use protocol as "tcps" when SSL is requested or else use "tcp".
+ String connectionProtocol;
+ boolean isSSLEnabled = false;
+ if (useSSL != null && useSSL) {
+ connectionProtocol = "tcps";
+ isSSLEnabled = true;
+ } else {
+ connectionProtocol = "tcp";
+ }
+
+ switch (connectionType.toLowerCase()) {
+ case OracleConstants.TNS_CONNECTION_TYPE:
+ // TNS connection doesn't require protocol
+ return String.format(OracleConstants.ORACLE_CONNECTION_STRING_TNS_FORMAT, database);
+ case OracleConstants.SERVICE_CONNECTION_TYPE:
+ // Create connection string for SERVICE type.
+ return getConnectionStringWithService(host, port, database, connectionProtocol, isSSLEnabled);
+ default:
+ // Default to SID format if no matching case is found.
+ return getConnectionStringWithSID(host, port, database, connectionProtocol, isSSLEnabled);
+ }
+ }
+
+ /**
+ * Constructs the connection string for a SERVICE connection type.
+ *
+ * @param host Host name of the Oracle server.
+ * @param port Port of the Oracle server.
+ * @param database Database name to connect to.
+ * @param connectionProtocol Protocol to use for the connection ("tcp" or "tcps").
+ * @param isSSLEnabled Indicates if SSL is enabled.
+ * @return Formatted connection string for a SERVICE connection.
+ */
+ private static String getConnectionStringWithService(@Nullable String host,
+ int port,
+ String database,
+ String connectionProtocol,
+ boolean isSSLEnabled) {
+ // Choose the appropriate format based on whether SSL is enabled.
+ if (isSSLEnabled) {
+ // Use the TNS descriptor format for TCPS to prevent automatic security injection.
+ return String.format(ORACLE_SERVICE_NAME_FORMAT_TNS_DESCRIPTOR_WITH_PROTOCOL,
+ connectionProtocol, host, port, database);
+ }
+ return String.format(OracleConstants.ORACLE_CONNECTION_STRING_SERVICE_NAME_FORMAT,
+ host, port, database);
+ }
+
+ /**
+ * Constructs the connection string for a SID connection type.
+ *
+ * @param host Host name of the Oracle server.
+ * @param port Port of the Oracle server.
+ * @param database Database name to connect to.
+ * @param connectionProtocol Protocol to use for the connection ("tcp" or "tcps").
+ * @param isSSLEnabled Indicates if SSL is enabled.
+ * @return Formatted connection string for a SID connection.
+ */
+ private static String getConnectionStringWithSID(@Nullable String host,
+ int port,
+ String database,
+ String connectionProtocol,
+ boolean isSSLEnabled) {
+ // Choose the appropriate format based on whether SSL is enabled.
+ if (isSSLEnabled) {
+ // Use the TNS descriptor format for TCPS to prevent automatic security injection.
+ return String.format(ORACLE_SID_FORMAT_TNS_DESCRIPTOR_WITH_PROTOCOL,
+ connectionProtocol, host, port, database);
+ }
+ return String.format(OracleConstants.ORACLE_CONNECTION_STRING_SID_FORMAT,
+ host, port, database);
+ }
}
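
For reference, the strings the new helper produces for a service-name connection, assuming SERVICE_CONNECTION_TYPE holds the literal "service" (its value is outside this hunk) and using a placeholder host:

// Without SSL: the plain service-name format.
OracleConstants.getConnectionString("service", "db.example.com", 1521, "ORCL", false);
// -> jdbc:oracle:thin:@//db.example.com:1521/ORCL

// With SSL: the TNS descriptor format with an explicit tcps protocol.
OracleConstants.getConnectionString("service", "db.example.com", 1521, "ORCL", true);
// -> jdbc:oracle:thin:@(DESCRIPTION=(ADDRESS=(PROTOCOL=tcps)(HOST=db.example.com)(PORT=1521))
//    (CONNECT_DATA=(SERVICE_NAME=ORCL)))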
diff --git a/oracle-plugin/src/main/java/io/cdap/plugin/oracle/OraclePostAction.java b/oracle-plugin/src/main/java/io/cdap/plugin/oracle/OraclePostAction.java
index f8ebd9ac2..e11e455c1 100644
--- a/oracle-plugin/src/main/java/io/cdap/plugin/oracle/OraclePostAction.java
+++ b/oracle-plugin/src/main/java/io/cdap/plugin/oracle/OraclePostAction.java
@@ -57,13 +57,7 @@ public static class OracleQueryActionConfig extends DBSpecificQueryActionConfig
@Override
public String getConnectionString() {
- if (OracleConstants.TNS_CONNECTION_TYPE.equals(this.connectionType)) {
- return String.format(OracleConstants.ORACLE_CONNECTION_STRING_TNS_FORMAT, database);
- } else if (OracleConstants.SERVICE_CONNECTION_TYPE.equals(this.connectionType)) {
- return String.format(OracleConstants.ORACLE_CONNECTION_STRING_SERVICE_NAME_FORMAT, host, port, database);
- } else {
- return String.format(OracleConstants.ORACLE_CONNECTION_STRING_SID_FORMAT, host, port, database);
- }
+ return OracleConstants.getConnectionString(this.connectionType, host, port, database, null);
}
@Override
diff --git a/oracle-plugin/src/main/java/io/cdap/plugin/oracle/OracleSinkDBRecord.java b/oracle-plugin/src/main/java/io/cdap/plugin/oracle/OracleSinkDBRecord.java
index 7bbd25f22..01b9a8247 100644
--- a/oracle-plugin/src/main/java/io/cdap/plugin/oracle/OracleSinkDBRecord.java
+++ b/oracle-plugin/src/main/java/io/cdap/plugin/oracle/OracleSinkDBRecord.java
@@ -17,9 +17,12 @@
package io.cdap.plugin.oracle;
import io.cdap.cdap.api.data.format.StructuredRecord;
+import io.cdap.cdap.api.data.schema.Schema;
import io.cdap.plugin.db.ColumnType;
import io.cdap.plugin.db.SchemaReader;
+import java.sql.PreparedStatement;
+import java.sql.SQLException;
import java.util.List;
/**
@@ -37,4 +40,14 @@ public OracleSinkDBRecord(StructuredRecord record, List columnTypes)
protected SchemaReader getSchemaReader() {
return new OracleSinkSchemaReader();
}
+
+ @Override
+ protected void insertOperation(PreparedStatement stmt) throws SQLException {
+ for (int fieldIndex = 0; fieldIndex < columnTypes.size(); fieldIndex++) {
+ ColumnType columnType = columnTypes.get(fieldIndex);
+ // Get the field from the schema using the column name with ignoring case.
+ Schema.Field field = record.getSchema().getField(columnType.getName(), true);
+ writeToDB(stmt, field, fieldIndex);
+ }
+ }
}
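
The ignore-case lookup matters because an upstream source such as BigQuery can emit lowercase field names while Oracle reports uppercase column names. A short illustration of the Schema.getField(name, ignoreCase) call used above, with a hypothetical record schema:

Schema rowSchema = Schema.recordOf("row",
    Schema.Field.of("id", Schema.of(Schema.Type.INT)));

rowSchema.getField("ID");        // null: the exact-match overload is case-sensitive
rowSchema.getField("ID", true);  // resolves to the "id" field despite the case difference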
diff --git a/oracle-plugin/src/main/java/io/cdap/plugin/oracle/OracleSource.java b/oracle-plugin/src/main/java/io/cdap/plugin/oracle/OracleSource.java
index 9a554a4a4..7cde9a8e9 100644
--- a/oracle-plugin/src/main/java/io/cdap/plugin/oracle/OracleSource.java
+++ b/oracle-plugin/src/main/java/io/cdap/plugin/oracle/OracleSource.java
@@ -63,7 +63,14 @@ protected String createConnectionString() {
@Override
protected SchemaReader getSchemaReader() {
- return new OracleSourceSchemaReader();
+ // PLUGIN-1893: pass flags derived from the Oracle source and connection properties to the schema reader
+ // so that schema handling stays backward compatible.
+ boolean treatAsOldTimestamp = oracleSourceConfig.getConnection().getTreatAsOldTimestamp();
+ boolean treatPrecisionlessNumAsDeci = oracleSourceConfig.getConnection().getTreatPrecisionlessNumAsDeci();
+ boolean treatTimestampLTZAsTimestamp = oracleSourceConfig.getConnection().getTreatTimestampLTZAsTimestamp();
+
+ return new OracleSourceSchemaReader(null, treatAsOldTimestamp, treatPrecisionlessNumAsDeci,
+ treatTimestampLTZAsTimestamp);
}
@Override
@@ -117,9 +124,11 @@ public OracleSourceConfig(String host, int port, String user, String password, S
String connectionArguments, String connectionType, String database, String role,
int defaultBatchValue, int defaultRowPrefetch,
String importQuery, Integer numSplits, int fetchSize,
- String boundingQuery, String splitBy) {
+ String boundingQuery, String splitBy, Boolean useSSL, Boolean treatAsOldTimestamp,
+ Boolean treatPrecisionlessNumAsDeci, Boolean treatTimestampLTZAsTimestamp) {
this.connection = new OracleConnectorConfig(host, port, user, password, jdbcPluginName, connectionArguments,
- connectionType, database, role);
+ connectionType, database, role, useSSL, treatAsOldTimestamp,
+ treatPrecisionlessNumAsDeci, treatTimestampLTZAsTimestamp);
this.defaultBatchValue = defaultBatchValue;
this.defaultRowPrefetch = defaultRowPrefetch;
this.fetchSize = fetchSize;
@@ -131,15 +140,8 @@ public OracleSourceConfig(String host, int port, String user, String password, S
@Override
public String getConnectionString() {
- if (OracleConstants.TNS_CONNECTION_TYPE.equals(connection.getConnectionType())) {
- return String.format(OracleConstants.ORACLE_CONNECTION_STRING_TNS_FORMAT, connection.getDatabase());
- } else if (OracleConstants.SERVICE_CONNECTION_TYPE.equals(connection.getConnectionType())) {
- return String.format(OracleConstants.ORACLE_CONNECTION_STRING_SERVICE_NAME_FORMAT, connection.getHost(),
- connection.getPort(), connection.getDatabase());
- } else {
- return String.format(OracleConstants.ORACLE_CONNECTION_STRING_SID_FORMAT,
- connection.getHost(), connection.getPort(), connection.getDatabase());
- }
+ return OracleConstants.getConnectionString(connection.getConnectionType(), connection.getHost(),
+ connection.getPort(), connection.getDatabase(), connection.getSSlMode());
}
@Override
diff --git a/oracle-plugin/src/main/java/io/cdap/plugin/oracle/OracleSourceDBRecord.java b/oracle-plugin/src/main/java/io/cdap/plugin/oracle/OracleSourceDBRecord.java
index 3f7c2a20a..7d7c69d2b 100644
--- a/oracle-plugin/src/main/java/io/cdap/plugin/oracle/OracleSourceDBRecord.java
+++ b/oracle-plugin/src/main/java/io/cdap/plugin/oracle/OracleSourceDBRecord.java
@@ -160,8 +160,8 @@ protected void writeNonNullToDB(PreparedStatement stmt, Schema fieldSchema,
String timestampString = Timestamp.valueOf(localDateTime).toString();
Object timestampWithTimeZone = createOracleTimestamp(stmt.getConnection(), timestampString);
stmt.setObject(sqlIndex, timestampWithTimeZone);
- } else if (Schema.LogicalType.TIMESTAMP_MICROS.equals(fieldSchema.getLogicalType())) {
- // Deprecated: Handle the case when the Timestamp is mapped to CDAP Timestamp type
+ } else {
+ // Handle the case when the Timestamp is mapped to CDAP Timestamp type or CDAP Date type.
super.writeNonNullToDB(stmt, fieldSchema, fieldName, fieldIndex);
}
} else {
diff --git a/oracle-plugin/src/main/java/io/cdap/plugin/oracle/OracleSourceSchemaReader.java b/oracle-plugin/src/main/java/io/cdap/plugin/oracle/OracleSourceSchemaReader.java
index 7d35f9bc7..208b70410 100644
--- a/oracle-plugin/src/main/java/io/cdap/plugin/oracle/OracleSourceSchemaReader.java
+++ b/oracle-plugin/src/main/java/io/cdap/plugin/oracle/OracleSourceSchemaReader.java
@@ -19,6 +19,7 @@
import com.google.common.collect.ImmutableSet;
import io.cdap.cdap.api.data.schema.Schema;
import io.cdap.plugin.db.CommonSchemaReader;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -26,6 +27,7 @@
import java.sql.SQLException;
import java.sql.Types;
import java.util.Set;
+import javax.annotation.Nullable;
/**
* Oracle Source schema reader.
@@ -65,14 +67,19 @@ public class OracleSourceSchemaReader extends CommonSchemaReader {
);
private final String sessionID;
+ private final Boolean isTimestampOldBehavior;
+ private final Boolean isPrecisionlessNumAsDecimal;
+ private final Boolean isTimestampLtzFieldTimestamp;
public OracleSourceSchemaReader() {
- this(null);
+ this(null, false, false, false);
}
-
- public OracleSourceSchemaReader(String sessionID) {
- super();
+ public OracleSourceSchemaReader(@Nullable String sessionID, boolean isTimestampOldBehavior,
+ boolean isPrecisionlessNumAsDecimal, boolean isTimestampLtzFieldTimestamp) {
this.sessionID = sessionID;
+ this.isTimestampOldBehavior = isTimestampOldBehavior;
+ this.isPrecisionlessNumAsDecimal = isPrecisionlessNumAsDecimal;
+ this.isTimestampLtzFieldTimestamp = isTimestampLtzFieldTimestamp;
}
@Override
@@ -81,10 +88,11 @@ public Schema getSchema(ResultSetMetaData metadata, int index) throws SQLExcepti
switch (sqlType) {
case TIMESTAMP_TZ:
- return Schema.of(Schema.LogicalType.TIMESTAMP_MICROS);
- case Types.TIMESTAMP:
+ return isTimestampOldBehavior ? Schema.of(Schema.Type.STRING) : Schema.of(Schema.LogicalType.TIMESTAMP_MICROS);
case TIMESTAMP_LTZ:
- return Schema.of(Schema.LogicalType.DATETIME);
+ return getTimestampLtzSchema();
+ case Types.TIMESTAMP:
+ return isTimestampOldBehavior ? super.getSchema(metadata, index) : Schema.of(Schema.LogicalType.DATETIME);
case BINARY_FLOAT:
return Schema.of(Schema.Type.FLOAT);
case BINARY_DOUBLE:
@@ -107,12 +115,24 @@ public Schema getSchema(ResultSetMetaData metadata, int index) throws SQLExcepti
// For a Number type without specified precision and scale, precision will be 0 and scale will be -127
if (precision == 0) {
// reference : https://docs.oracle.com/cd/B28359_01/server.111/b28318/datatype.htm#CNCPT1832
- LOG.warn(String.format("Field '%s' is a %s type without precision and scale, "
- + "converting into STRING type to avoid any precision loss.",
- metadata.getColumnName(index),
- metadata.getColumnTypeName(index),
- metadata.getColumnName(index)));
- return Schema.of(Schema.Type.STRING);
+ if (isPrecisionlessNumAsDecimal) {
+ precision = 38;
+ scale = 0;
+ LOG.warn(String.format("%s type with undefined precision and scale is detected, "
+ + "there may be a precision loss while running the pipeline. "
+ + "Please define an output precision and scale for field '%s' to avoid "
+ + "precision loss.",
+ metadata.getColumnTypeName(index),
+ metadata.getColumnName(index)));
+ return Schema.decimalOf(precision, scale);
+ } else {
+ LOG.warn(String.format("Field '%s' is a %s type without precision and scale, "
+ + "converting into STRING type to avoid any precision loss.",
+ metadata.getColumnName(index),
+ metadata.getColumnTypeName(index),
+ metadata.getColumnName(index)));
+ return Schema.of(Schema.Type.STRING);
+ }
}
return Schema.decimalOf(precision, scale);
}
@@ -121,6 +141,12 @@ public Schema getSchema(ResultSetMetaData metadata, int index) throws SQLExcepti
}
}
+ private Schema getTimestampLtzSchema() {
+ return isTimestampOldBehavior || isTimestampLtzFieldTimestamp
+ ? Schema.of(Schema.LogicalType.TIMESTAMP_MICROS)
+ : Schema.of(Schema.LogicalType.DATETIME);
+ }
+
@Override
public boolean shouldIgnoreColumn(ResultSetMetaData metadata, int index) throws SQLException {
if (sessionID == null) {
diff --git a/oracle-plugin/src/test/java/io/cdap/plugin/oracle/OracleFailedConnectionTest.java b/oracle-plugin/src/test/java/io/cdap/plugin/oracle/OracleFailedConnectionTest.java
index a2c9bcd5e..7ec6f3844 100644
--- a/oracle-plugin/src/test/java/io/cdap/plugin/oracle/OracleFailedConnectionTest.java
+++ b/oracle-plugin/src/test/java/io/cdap/plugin/oracle/OracleFailedConnectionTest.java
@@ -28,7 +28,8 @@ public class OracleFailedConnectionTest extends DBSpecificFailedConnectionTest {
public void test() throws ClassNotFoundException, IOException {
OracleConnector connector = new OracleConnector(
- new OracleConnectorConfig("localhost", 1521, "username", "password", "jdbc", "", "database"));
+ new OracleConnectorConfig("localhost", 1521, "username", "password", "jdbc", "",
+ "SID", "database"));
super.test(JDBC_DRIVER_CLASS_NAME, connector, "Failed to create connection to database via connection string:" +
" jdbc:oracle:thin:@localhost:1521:database and arguments: " +
diff --git a/oracle-plugin/src/test/java/io/cdap/plugin/oracle/OracleSchemaReaderTest.java b/oracle-plugin/src/test/java/io/cdap/plugin/oracle/OracleSchemaReaderTest.java
new file mode 100644
index 000000000..1ff77c533
--- /dev/null
+++ b/oracle-plugin/src/test/java/io/cdap/plugin/oracle/OracleSchemaReaderTest.java
@@ -0,0 +1,94 @@
+/*
+ * Copyright © 2025 Cask Data, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy of
+ * the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package io.cdap.plugin.oracle;
+
+import com.google.common.collect.Lists;
+import io.cdap.cdap.api.data.schema.Schema;
+import org.junit.Assert;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.mockito.Mockito;
+import org.mockito.junit.MockitoJUnitRunner;
+
+import java.sql.ResultSet;
+import java.sql.ResultSetMetaData;
+import java.sql.SQLException;
+import java.util.List;
+
+public class OracleSchemaReaderTest {
+
+ @Test
+ public void getSchema_timestampLTZFieldTrue_returnTimestamp() throws SQLException {
+ OracleSourceSchemaReader schemaReader = new OracleSourceSchemaReader(null, false, false, true);
+
+ ResultSet resultSet = Mockito.mock(ResultSet.class);
+ ResultSetMetaData metadata = Mockito.mock(ResultSetMetaData.class);
+
+ Mockito.when(resultSet.getMetaData()).thenReturn(metadata);
+
+ Mockito.when(metadata.getColumnCount()).thenReturn(2);
+ // -101 is for TIMESTAMP_TZ
+ Mockito.when(metadata.getColumnType(1)).thenReturn(-101);
+ Mockito.when(metadata.getColumnName(1)).thenReturn("column1");
+
+ // -102 is for TIMESTAMP_LTZ
+ Mockito.when(metadata.getColumnType(2)).thenReturn(-102);
+ Mockito.when(metadata.getColumnName(2)).thenReturn("column2");
+
+ List<Schema.Field> expectedSchemaFields = Lists.newArrayList();
+ expectedSchemaFields.add(Schema.Field.of("column1", Schema.of(Schema.LogicalType.TIMESTAMP_MICROS)));
+ expectedSchemaFields.add(Schema.Field.of("column2", Schema.of(Schema.LogicalType.TIMESTAMP_MICROS)));
+
+ List<Schema.Field> actualSchemaFields = schemaReader.getSchemaFields(resultSet);
+
+ Assert.assertEquals(expectedSchemaFields.get(0).getName(), actualSchemaFields.get(0).getName());
+ Assert.assertEquals(expectedSchemaFields.get(0).getSchema(), actualSchemaFields.get(0).getSchema());
+ Assert.assertEquals(expectedSchemaFields.get(1).getName(), actualSchemaFields.get(1).getName());
+ Assert.assertEquals(expectedSchemaFields.get(1).getSchema(), actualSchemaFields.get(1).getSchema());
+
+ }
+
+ @Test
+ public void getSchema_timestampLTZFieldFalse_returnDatetime() throws SQLException {
+ OracleSourceSchemaReader schemaReader = new OracleSourceSchemaReader(null, false, false, false);
+
+ ResultSet resultSet = Mockito.mock(ResultSet.class);
+ ResultSetMetaData metadata = Mockito.mock(ResultSetMetaData.class);
+
+ Mockito.when(resultSet.getMetaData()).thenReturn(metadata);
+
+ Mockito.when(metadata.getColumnCount()).thenReturn(2);
+ // -101 is for TIMESTAMP_TZ
+ Mockito.when(metadata.getColumnType(1)).thenReturn(-101);
+ Mockito.when(metadata.getColumnName(1)).thenReturn("column1");
+
+ // -102 is for TIMESTAMP_LTZ
+ Mockito.when(metadata.getColumnType(2)).thenReturn(-102);
+ Mockito.when(metadata.getColumnName(2)).thenReturn("column2");
+
+ List<Schema.Field> expectedSchemaFields = Lists.newArrayList();
+ expectedSchemaFields.add(Schema.Field.of("column1", Schema.of(Schema.LogicalType.TIMESTAMP_MICROS)));
+ expectedSchemaFields.add(Schema.Field.of("column2", Schema.of(Schema.LogicalType.DATETIME)));
+
+ List<Schema.Field> actualSchemaFields = schemaReader.getSchemaFields(resultSet);
+
+ Assert.assertEquals(expectedSchemaFields.get(0).getName(), actualSchemaFields.get(0).getName());
+ Assert.assertEquals(expectedSchemaFields.get(0).getSchema(), actualSchemaFields.get(0).getSchema());
+ Assert.assertEquals(expectedSchemaFields.get(1).getName(), actualSchemaFields.get(1).getName());
+ Assert.assertEquals(expectedSchemaFields.get(1).getSchema(), actualSchemaFields.get(1).getSchema());
+ }
+}
diff --git a/oracle-plugin/widgets/Oracle-batchsink.json b/oracle-plugin/widgets/Oracle-batchsink.json
index 30d5b345f..8d6168780 100644
--- a/oracle-plugin/widgets/Oracle-batchsink.json
+++ b/oracle-plugin/widgets/Oracle-batchsink.json
@@ -100,6 +100,26 @@
"default": "TRANSACTION_SERIALIZABLE"
}
},
+ {
+ "widget-type": "hidden",
+ "label": "TLS Encryption",
+ "name": "useSSL",
+ "description": "Enable TLS encryption (true/false)",
+ "widget-attributes": {
+ "layout": "inline",
+ "default": "false",
+ "options": [
+ {
+ "id": "true",
+ "label": "true"
+ },
+ {
+ "id": "false",
+ "label": "false"
+ }
+ ]
+ }
+ },
{
"name": "connectionType",
"label": "Connection Type",
diff --git a/oracle-plugin/widgets/Oracle-batchsource.json b/oracle-plugin/widgets/Oracle-batchsource.json
index 0fc0a5285..ab35f3e8c 100644
--- a/oracle-plugin/widgets/Oracle-batchsource.json
+++ b/oracle-plugin/widgets/Oracle-batchsource.json
@@ -100,6 +100,83 @@
"default": "TRANSACTION_SERIALIZABLE"
}
},
+ {
+ "widget-type": "hidden",
+ "label": "TLS Encryption",
+ "name": "useSSL",
+ "description": "Enable TLS encryption (true/false)",
+ "widget-attributes": {
+ "layout": "inline",
+ "default": "false",
+ "options": [
+ {
+ "id": "true",
+ "label": "true"
+ },
+ {
+ "id": "false",
+ "label": "false"
+ }
+ ]
+ }
+ },
+ {
+ "widget-type": "hidden",
+ "label": "Treat as old timestamp",
+ "name": "treatAsOldTimestamp",
+ "widget-attributes": {
+ "layout": "inline",
+ "default": "false",
+ "options": [
+ {
+ "id": "true",
+ "label": "true"
+ },
+ {
+ "id": "false",
+ "label": "false"
+ }
+ ]
+ }
+ },
+ {
+ "widget-type": "hidden",
+ "label": "Treat precision less number as Decimal(old behavior)",
+ "name": "treatPrecisionlessNumAsDeci",
+ "widget-attributes": {
+ "layout": "inline",
+ "default": "false",
+ "options": [
+ {
+ "id": "true",
+ "label": "true"
+ },
+ {
+ "id": "false",
+ "label": "false"
+ }
+ ]
+ }
+ },
+ {
+ "widget-type": "hidden",
+ "label": "Treat Timestamp_LTZ as Timestamp",
+ "name": "treatTimestampLTZAsTimestamp",
+ "widget-attributes": {
+ "layout": "inline",
+ "default": "false",
+ "options": [
+ {
+ "id": "true",
+ "label": "true"
+ },
+ {
+ "id": "false",
+ "label": "false"
+ }
+ ]
+ }
+ },
{
"name": "connectionType",
"label": "Connection Type",
@@ -306,6 +383,14 @@
{
"type": "property",
"name": "transactionIsolationLevel"
+ },
+ {
+ "type": "property",
+ "name": "getTreatAsOldTimestampConn"
+ },
+ {
+ "type": "property",
+ "name": "treatPrecisionlessNumAsDeci"
}
]
},
diff --git a/oracle-plugin/widgets/Oracle-connector.json b/oracle-plugin/widgets/Oracle-connector.json
index 46f006c9c..005c3ffbd 100644
--- a/oracle-plugin/widgets/Oracle-connector.json
+++ b/oracle-plugin/widgets/Oracle-connector.json
@@ -109,6 +109,83 @@
],
"default": "TRANSACTION_SERIALIZABLE"
}
+ },
+ {
+ "widget-type": "hidden",
+ "label": "TLS Encryption",
+ "name": "useSSL",
+ "description": "Enable TLS encryption (true/false)",
+ "widget-attributes": {
+ "layout": "inline",
+ "default": "false",
+ "options": [
+ {
+ "id": "true",
+ "label": "true"
+ },
+ {
+ "id": "false",
+ "label": "false"
+ }
+ ]
+ }
+ },
+ {
+ "widget-type": "hidden",
+ "label": "Treat as old timestamp",
+ "name": "treatAsOldTimestamp",
+ "widget-attributes": {
+ "layout": "inline",
+ "default": "false",
+ "options": [
+ {
+ "id": "true",
+ "label": "true"
+ },
+ {
+ "id": "false",
+ "label": "false"
+ }
+ ]
+ }
+ },
+ {
+ "widget-type": "hidden",
+ "label": "Treat precision less number as Decimal(old behavior)",
+ "name": "treatPrecisionlessNumAsDeci",
+ "widget-attributes": {
+ "layout": "inline",
+ "default": "false",
+ "options": [
+ {
+ "id": "true",
+ "label": "true"
+ },
+ {
+ "id": "false",
+ "label": "false"
+ }
+ ]
+ }
+ },
+ {
+ "widget-type": "hidden",
+ "label": "Treat Timestamp_LTZ as Timestamp",
+ "name": "treatTimestampLTZAsTimestamp",
+ "widget-attributes": {
+ "layout": "inline",
+ "default": "false",
+ "options": [
+ {
+ "id": "true",
+ "label": "true"
+ },
+ {
+ "id": "false",
+ "label": "false"
+ }
+ ]
+ }
}
]
},
diff --git a/pom.xml b/pom.xml
index a6b40960c..bb39c5a96 100644
--- a/pom.xml
+++ b/pom.xml
@@ -20,7 +20,7 @@
io.cdap.plugin
database-plugins-parent
- 1.11.0-SNAPSHOT
+ 1.11.10
pom
Database Plugins
Collection of database plugins
@@ -44,6 +44,7 @@
cloudsql-postgresql-plugin
teradata-plugin
generic-db-argument-setter
+ amazon-redshift-plugin
@@ -77,23 +78,12 @@
-
- sonatype
- https://oss.sonatype.org/content/groups/public
-
sonatype-snapshots
- https://oss.sonatype.org/content/repositories/snapshots
+ https://central.sonatype.com/repository/maven-snapshots
-
-
- sonatype
- https://oss.sonatype.org/content/groups/public/
-
-
-
@@ -348,16 +338,6 @@
-
-
- sonatype.release
- https://oss.sonatype.org/service/local/staging/deploy/maven2
-
-
- sonatype.snapshots
- https://oss.sonatype.org/content/repositories/snapshots
-
-
${testSourceLocation}
@@ -399,7 +379,9 @@
maven-surefire-plugin
2.22.0
- -Xmx3g -Djava.awt.headless=true -XX:MaxPermSize=256m -XX:+UseConcMarkSweepGC -Djava.net.preferIPv4Stack=true
+ -Xmx3g -Djava.awt.headless=true -XX:MaxPermSize=256m -XX:+UseConcMarkSweepGC
+ -Djava.net.preferIPv4Stack=true
+
${surefire.redirectTestOutputToFile}
false
plain
@@ -531,14 +513,14 @@
- org.sonatype.plugins
- nexus-staging-maven-plugin
- 1.6.2
+ org.sonatype.central
+ central-publishing-maven-plugin
+ 0.8.0
true
- https://oss.sonatype.org
- sonatype.release
- 655dc88dc770c3
+ sonatype.release
+ false
+ true
@@ -723,7 +705,7 @@
io.cdap.tests.e2e
cdap-e2e-framework
- 0.3.0-SNAPSHOT
+ 0.3.0
test
diff --git a/postgresql-plugin/docs/PostgreSQL-connector.md b/postgresql-plugin/docs/PostgreSQL-connector.md
index 739c678e3..fe442cbf1 100644
--- a/postgresql-plugin/docs/PostgreSQL-connector.md
+++ b/postgresql-plugin/docs/PostgreSQL-connector.md
@@ -22,6 +22,14 @@ authentication. Optional for databases that do not require authentication.
**Password:** Password to use to connect to the specified database.
+**Transaction Isolation Level:** The transaction isolation level of the database connection.
+- TRANSACTION_READ_COMMITTED: No dirty reads. Non-repeatable reads and phantom reads are possible.
+- TRANSACTION_SERIALIZABLE: No dirty reads. Non-repeatable and phantom reads are prevented.
+- TRANSACTION_REPEATABLE_READ: No dirty reads. Prevents non-repeatable reads, but phantom reads are still possible.
+- Note: PostgreSQL does not implement `TRANSACTION_READ_UNCOMMITTED` as a distinct isolation level. Instead, this mode behaves identically to `TRANSACTION_READ_COMMITTED`, which is why it is not exposed as a separate option.
+
+For more details on the transaction isolation levels supported in PostgreSQL, refer to the [PostgreSQL documentation](https://www.postgresql.org/docs/current/transaction-iso.html#TRANSACTION-ISO).
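
As a minimal sketch, the selected level corresponds to the following call on a raw JDBC connection (illustrative only; the plugin forwards the value through its connection configuration, and the URL and credentials below are placeholders):

import java.sql.Connection;
import java.sql.DriverManager;

public class IsolationLevelSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = DriverManager.getConnection(
        "jdbc:postgresql://localhost:5432/mydb", "user", "secret")) {
      // JDBC equivalent of selecting TRANSACTION_READ_COMMITTED in the widget.
      conn.setTransactionIsolation(Connection.TRANSACTION_READ_COMMITTED);
    }
  }
}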
+
**Database:** The name of the database to connect to.
**Connection Arguments:** A list of arbitrary string tag/value pairs as connection arguments. These arguments
diff --git a/postgresql-plugin/docs/Postgres-batchsink.md b/postgresql-plugin/docs/Postgres-batchsink.md
index b8a996463..82065e0fd 100644
--- a/postgresql-plugin/docs/Postgres-batchsink.md
+++ b/postgresql-plugin/docs/Postgres-batchsink.md
@@ -39,6 +39,14 @@ You also can use the macro function ${conn(connection-name)}.
**Password:** Password to use to connect to the specified database.
+**Transaction Isolation Level:** The transaction isolation level of the database connection.
+- TRANSACTION_READ_COMMITTED: No dirty reads. Non-repeatable reads and phantom reads are possible.
+- TRANSACTION_SERIALIZABLE: No dirty reads. Non-repeatable and phantom reads are prevented.
+- TRANSACTION_REPEATABLE_READ: No dirty reads. Prevents non-repeatable reads, but phantom reads are still possible.
+- Note: PostgreSQL does not implement `TRANSACTION_READ_UNCOMMITTED` as a distinct isolation level. Instead, this mode behaves identically to `TRANSACTION_READ_COMMITTED`, which is why it is not exposed as a separate option.
+
+For more details on the transaction isolation levels supported in PostgreSQL, refer to the [PostgreSQL documentation](https://www.postgresql.org/docs/current/transaction-iso.html#TRANSACTION-ISO).
+
**Connection Arguments:** A list of arbitrary string key/value pairs as connection arguments. These arguments
will be passed to the JDBC driver as connection arguments for JDBC drivers that may need additional configurations.
diff --git a/postgresql-plugin/docs/Postgres-batchsource.md b/postgresql-plugin/docs/Postgres-batchsource.md
index af359022d..559723526 100644
--- a/postgresql-plugin/docs/Postgres-batchsource.md
+++ b/postgresql-plugin/docs/Postgres-batchsource.md
@@ -49,6 +49,14 @@ For example, 'SELECT MIN(id),MAX(id) FROM table'. Not required if numSplits is s
**Password:** Password to use to connect to the specified database.
+**Transaction Isolation Level:** The transaction isolation level of the database connection. Defaults to `TRANSACTION_SERIALIZABLE`.
+- TRANSACTION_READ_COMMITTED: No dirty reads. Non-repeatable reads and phantom reads are possible.
+- TRANSACTION_REPEATABLE_READ: No dirty reads. Non-repeatable reads are prevented, but phantom reads are still possible.
+- TRANSACTION_SERIALIZABLE: No dirty reads. Non-repeatable reads and phantom reads are prevented.
+- Note: PostgreSQL does not implement `TRANSACTION_READ_UNCOMMITTED` as a distinct isolation level; it behaves identically to `TRANSACTION_READ_COMMITTED`, which is why it is not exposed as a separate option.
+
+For more details on the transaction isolation levels supported in PostgreSQL, refer to the [PostgreSQL documentation](https://www.postgresql.org/docs/current/transaction-iso.html#TRANSACTION-ISO).
+
**Connection Arguments:** A list of arbitrary string key/value pairs as connection arguments. These arguments
will be passed to the JDBC driver as connection arguments for JDBC drivers that may need additional configurations.
diff --git a/postgresql-plugin/pom.xml b/postgresql-plugin/pom.xml
index 7f3e6f14c..6a3a95880 100644
--- a/postgresql-plugin/pom.xml
+++ b/postgresql-plugin/pom.xml
@@ -20,7 +20,7 @@
         <artifactId>database-plugins-parent</artifactId>
         <groupId>io.cdap.plugin</groupId>
-        <version>1.11.0-SNAPSHOT</version>
+        <version>1.11.10</version>
     </parent>
     <name>PostgreSQL plugin</name>
@@ -100,9 +100,9 @@
<_exportcontents>
- io.cdap.plugin.postgres.*;
- io.cdap.plugin.db.source.*;
- io.cdap.plugin.db.sink.*;
+ io.cdap.plugin.postgres.*;
+ io.cdap.plugin.db.source.*;
+ io.cdap.plugin.db.sink.*;
*;inline=false;scope=compile
true
diff --git a/postgresql-plugin/src/main/java/io/cdap/plugin/postgres/PostgresSink.java b/postgresql-plugin/src/main/java/io/cdap/plugin/postgres/PostgresSink.java
index 8fd91cc63..6525a3dfa 100644
--- a/postgresql-plugin/src/main/java/io/cdap/plugin/postgres/PostgresSink.java
+++ b/postgresql-plugin/src/main/java/io/cdap/plugin/postgres/PostgresSink.java
@@ -165,6 +165,11 @@ public Map<String, String> getDBSpecificArguments() {
return ImmutableMap.of(PostgresConstants.CONNECTION_TIMEOUT, String.valueOf(connectionTimeout));
}
+ @Override
+ public String getTransactionIsolationLevel() {
+ return connection.getTransactionIsolationLevel();
+ }
+
@Override
protected PostgresConnectorConfig getConnection() {
return connection;
diff --git a/postgresql-plugin/src/main/java/io/cdap/plugin/postgres/PostgresSource.java b/postgresql-plugin/src/main/java/io/cdap/plugin/postgres/PostgresSource.java
index d6677884f..ccef4078e 100644
--- a/postgresql-plugin/src/main/java/io/cdap/plugin/postgres/PostgresSource.java
+++ b/postgresql-plugin/src/main/java/io/cdap/plugin/postgres/PostgresSource.java
@@ -133,6 +133,11 @@ protected PostgresConnectorConfig getConnection() {
return connection;
}
+ @Override
+ public String getTransactionIsolationLevel() {
+ return connection.getTransactionIsolationLevel();
+ }
+
@Override
public void validate(FailureCollector collector) {
ConfigUtil.validateConnection(this, useConnection, connection, collector);
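
Both hunks follow the same pattern: the source and sink configs do not store the isolation level themselves, but delegate to the shared connection config so the level is defined once per connection. A simplified sketch of that delegation (the class names below are illustrative stand-ins, not the plugin's actual type hierarchy):

```java
// Illustrative sketch of the delegation pattern in the two hunks above.
class ConnectorConfig {
  private final String transactionIsolationLevel;

  ConnectorConfig(String transactionIsolationLevel) {
    this.transactionIsolationLevel = transactionIsolationLevel;
  }

  String getTransactionIsolationLevel() {
    return transactionIsolationLevel;
  }
}

abstract class BasePluginConfig {
  // Base default: no isolation level configured.
  public String getTransactionIsolationLevel() {
    return null;
  }
}

class SourcePluginConfig extends BasePluginConfig {
  private final ConnectorConfig connection;

  SourcePluginConfig(ConnectorConfig connection) {
    this.connection = connection;
  }

  @Override
  public String getTransactionIsolationLevel() {
    // Forward to the shared connection config, mirroring the
    // PostgresSource and PostgresSink overrides above.
    return connection.getTransactionIsolationLevel();
  }
}
```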
diff --git a/postgresql-plugin/widgets/PostgreSQL-connector.json b/postgresql-plugin/widgets/PostgreSQL-connector.json
index 091afc972..9a7a02e14 100644
--- a/postgresql-plugin/widgets/PostgreSQL-connector.json
+++ b/postgresql-plugin/widgets/PostgreSQL-connector.json
@@ -31,6 +31,19 @@
"default": "5432"
}
},
+ {
+ "widget-type": "select",
+ "label": "Transaction Isolation Level",
+ "name": "transactionIsolationLevel",
+ "widget-attributes": {
+ "values": [
+ "TRANSACTION_READ_COMMITTED",
+ "TRANSACTION_REPEATABLE_READ",
+ "TRANSACTION_SERIALIZABLE"
+ ],
+ "default": "TRANSACTION_SERIALIZABLE"
+ }
+ },
{
"widget-type": "textbox",
"label": "Database",
diff --git a/postgresql-plugin/widgets/Postgres-batchsink.json b/postgresql-plugin/widgets/Postgres-batchsink.json
index 6aa2dad8a..14e6f8154 100644
--- a/postgresql-plugin/widgets/Postgres-batchsink.json
+++ b/postgresql-plugin/widgets/Postgres-batchsink.json
@@ -65,6 +65,19 @@
"label": "Password",
"name": "password"
},
+ {
+ "widget-type": "select",
+ "label": "Transaction Isolation Level",
+ "name": "transactionIsolationLevel",
+ "widget-attributes": {
+ "values": [
+ "TRANSACTION_READ_COMMITTED",
+ "TRANSACTION_REPEATABLE_READ",
+ "TRANSACTION_SERIALIZABLE"
+ ],
+ "default": "TRANSACTION_SERIALIZABLE"
+ }
+ },
{
"widget-type": "keyvalue",
"label": "Connection Arguments",
@@ -186,6 +199,10 @@
"type": "property",
"name": "port"
},
+ {
+ "type": "property",
+ "name": "transactionIsolationLevel"
+ },
{
"type": "property",
"name": "database"
diff --git a/postgresql-plugin/widgets/Postgres-batchsource.json b/postgresql-plugin/widgets/Postgres-batchsource.json
index 0e4ba28c1..60de4725f 100644
--- a/postgresql-plugin/widgets/Postgres-batchsource.json
+++ b/postgresql-plugin/widgets/Postgres-batchsource.json
@@ -65,6 +65,19 @@
"label": "Password",
"name": "password"
},
+ {
+ "widget-type": "select",
+ "label": "Transaction Isolation Level",
+ "name": "transactionIsolationLevel",
+ "widget-attributes": {
+ "values": [
+ "TRANSACTION_READ_COMMITTED",
+ "TRANSACTION_REPEATABLE_READ",
+ "TRANSACTION_SERIALIZABLE"
+ ],
+ "default": "TRANSACTION_SERIALIZABLE"
+ }
+ },
{
"widget-type": "keyvalue",
"label": "Connection Arguments",
@@ -206,6 +219,10 @@
"type": "property",
"name": "port"
},
+ {
+ "type": "property",
+ "name": "transactionIsolationLevel"
+ },
{
"type": "property",
"name": "database"
diff --git a/saphana-plugin/pom.xml b/saphana-plugin/pom.xml
index 86b40a38e..0cd76ee65 100644
--- a/saphana-plugin/pom.xml
+++ b/saphana-plugin/pom.xml
@@ -20,7 +20,7 @@
         <artifactId>database-plugins-parent</artifactId>
         <groupId>io.cdap.plugin</groupId>
-        <version>1.11.0-SNAPSHOT</version>
+        <version>1.11.10</version>
     </parent>
     <name>SAP HANA plugin</name>
@@ -85,13 +85,13 @@
<_exportcontents>
- io.cdap.plugin.saphana.*;
- io.cdap.plugin.db.source.*;
- io.cdap.plugin.db.sink.*;
- io.cdap.plugin.saphana.*;
- org.apache.commons.lang;
- org.apache.commons.logging.*;
- org.codehaus.jackson.*
+ io.cdap.plugin.saphana.*;
+ io.cdap.plugin.db.source.*;
+ io.cdap.plugin.db.sink.*;
+ io.cdap.plugin.saphana.*;
+ org.apache.commons.lang;
+ org.apache.commons.logging.*;
+ org.codehaus.jackson.*
*;inline=false;scope=compile
true
diff --git a/teradata-plugin/pom.xml b/teradata-plugin/pom.xml
index fa770a19a..ef8bb98f6 100644
--- a/teradata-plugin/pom.xml
+++ b/teradata-plugin/pom.xml
@@ -21,7 +21,7 @@
         <artifactId>database-plugins-parent</artifactId>
         <groupId>io.cdap.plugin</groupId>
-        <version>1.11.0-SNAPSHOT</version>
+        <version>1.11.10</version>
     </parent>
     <name>teradata-plugin</name>
@@ -90,14 +90,14 @@
<_exportcontents>
- io.cdap.plugin.teradata.*;
- io.cdap.plugin.util.*;
- io.cdap.plugin.db.source.*;
- io.cdap.plugin.db.sink.*;
- io.cdap.plugin.saphana.*;
- org.apache.commons.lang;
- org.apache.commons.logging.*;
- org.codehaus.jackson.*
+ io.cdap.plugin.teradata.*;
+ io.cdap.plugin.util.*;
+ io.cdap.plugin.db.source.*;
+ io.cdap.plugin.db.sink.*;
+ io.cdap.plugin.saphana.*;
+ org.apache.commons.lang;
+ org.apache.commons.logging.*;
+ org.codehaus.jackson.*
*;inline=false;scope=compile
true