Unverified Commit 406657e2 authored by aiwenmo, committed by GitHub

[Feature] [connector] Add flink connector phoenix 1.14

[Feature] [connector] Add flink connector phoenix 1.14
parents 3f6d9ae9 2ee59f1c
...@@ -143,14 +143,15 @@ public class PhoenixInputFormat extends RichInputFormat<Row, InputSplit>
             dbConn.setAutoCommit(autoCommit);
         }
-        LOG.info("openInputFormat query :" +queryTemplate);
-        LOG.info("openInputFormat resultSetType :" +resultSetType);
-        LOG.info("openInputFormat resultSetConcurrency :" +resultSetConcurrency);
+        LOG.debug("openInputFormat query :" +queryTemplate);
+        // strip backticks: Phoenix does not support them
         String initQuery = StringUtils.remove(queryTemplate, "\\`");
-        LOG.info("openInputFormat initQuery :" +initQuery);
-        statement = dbConn.prepareStatement(initQuery, resultSetType, resultSetConcurrency);
+        LOG.debug("openInputFormat initQuery :" +initQuery);
+        // replace double quotes with single quotes
+        String replaceQuery = StringUtils.replace(initQuery, "\"", "'");
+        LOG.info("openInputFormat replaceQuery :" +replaceQuery);
+        statement = dbConn.prepareStatement(replaceQuery, resultSetType, resultSetConcurrency);
         if (fetchSize == Integer.MIN_VALUE || fetchSize > 0) {
             statement.setFetchSize(fetchSize);
         }
......
...@@ -25,6 +25,12 @@ public class PhoenixDialect extends AbstractDialect {
     private static final int MAX_DECIMAL_PRECISION = 65;
     private static final int MIN_DECIMAL_PRECISION = 1;
+    @Override
+    public String getSelectFromStatement(String tableName, String[] selectFields, String[] conditionFields) {
+        return null;
+    }
     @Override
     public boolean canHandle(String url) {
         return url.startsWith("jdbc:phoenix:");
......
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<parent>
<artifactId>dlink-connectors</artifactId>
<groupId>com.dlink</groupId>
<version>0.6.5-SNAPSHOT</version>
</parent>
<modelVersion>4.0.0</modelVersion>
<artifactId>dlink-connector-phoenix-1.14</artifactId>
<properties>
<java.version>1.8</java.version>
<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
<flink.version>1.14.3</flink.version>
<maven.compiler.source>1.8</maven.compiler.source>
<maven.compiler.target>1.8</maven.compiler.target>
<junit.version>4.12</junit.version>
<!-- compile/provided-->
<phoenix.scope.runtime>provided</phoenix.scope.runtime>
</properties>
<dependencies>
<dependency>
<groupId>org.apache.flink</groupId>
<artifactId>flink-table-api-java-bridge_${scala.binary.version}</artifactId>
<version>${flink.version}</version>
<scope>${phoenix.scope.runtime}</scope>
<optional>true</optional>
</dependency>
<dependency>
<groupId>org.apache.flink</groupId>
<artifactId>flink-streaming-java_${scala.binary.version}</artifactId>
<version>${flink.version}</version>
<!--<scope>provided</scope>-->
<scope>${phoenix.scope.runtime}</scope>
</dependency>
<dependency>
<groupId>org.apache.flink</groupId>
<artifactId>flink-table-common</artifactId>
<version>${flink.version}</version>
<type>test-jar</type>
<scope>test</scope>
</dependency>
<!-- A planner dependency won't be necessary once FLIP-32 has been completed. -->
<dependency>
<groupId>org.apache.flink</groupId>
<artifactId>flink-table-planner_${scala.binary.version}</artifactId>
<version>${flink.version}</version>
<scope>test</scope>
</dependency>
<!-- <dependency>
<groupId>org.apache.phoenix</groupId>
<artifactId>phoenix-core</artifactId>
<version>4.14.2-HBase-1.4</version>
<scope>${scope.runtime}</scope>
</dependency>-->
<dependency>
<groupId>org.apache.phoenix</groupId>
<artifactId>phoenix-core</artifactId>
<version>5.0.0-HBase-2.0</version>
<scope>${scope.runtime}</scope>
</dependency>
</dependencies>
</project>
\ No newline at end of file
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connector.phoenix;
import org.apache.flink.annotation.PublicEvolving;
import org.apache.flink.util.Preconditions;
import javax.annotation.Nullable;
import java.io.Serializable;
import java.util.Optional;
/** JDBC connection options. */
@PublicEvolving
public class JdbcConnectionOptions implements Serializable {
private static final long serialVersionUID = 1L;
protected final String url;
@Nullable protected final String driverName;
protected final int connectionCheckTimeoutSeconds;
@Nullable protected final String username;
@Nullable protected final String password;
protected JdbcConnectionOptions(
String url,
String driverName,
String username,
String password,
int connectionCheckTimeoutSeconds) {
Preconditions.checkArgument(connectionCheckTimeoutSeconds > 0);
this.url = Preconditions.checkNotNull(url, "jdbc url is empty");
this.driverName = driverName;
this.username = username;
this.password = password;
this.connectionCheckTimeoutSeconds = connectionCheckTimeoutSeconds;
}
public String getDbURL() {
return url;
}
@Nullable
public String getDriverName() {
return driverName;
}
public Optional<String> getUsername() {
return Optional.ofNullable(username);
}
public Optional<String> getPassword() {
return Optional.ofNullable(password);
}
public int getConnectionCheckTimeoutSeconds() {
return connectionCheckTimeoutSeconds;
}
/** Builder for {@link JdbcConnectionOptions}. */
public static class JdbcConnectionOptionsBuilder {
private String url;
private String driverName;
private String username;
private String password;
private int connectionCheckTimeoutSeconds = 60;
public JdbcConnectionOptionsBuilder withUrl(String url) {
this.url = url;
return this;
}
public JdbcConnectionOptionsBuilder withDriverName(String driverName) {
this.driverName = driverName;
return this;
}
public JdbcConnectionOptionsBuilder withUsername(String username) {
this.username = username;
return this;
}
public JdbcConnectionOptionsBuilder withPassword(String password) {
this.password = password;
return this;
}
/**
* Set the maximum timeout between retries, default is 60 seconds.
*
* @param connectionCheckTimeoutSeconds the timeout in seconds; must not be smaller than 1
*     second.
*/
public JdbcConnectionOptionsBuilder withConnectionCheckTimeoutSeconds(
int connectionCheckTimeoutSeconds) {
this.connectionCheckTimeoutSeconds = connectionCheckTimeoutSeconds;
return this;
}
public JdbcConnectionOptions build() {
return new JdbcConnectionOptions(
url, driverName, username, password, connectionCheckTimeoutSeconds);
}
}
}
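
For reference, a minimal sketch (not part of the commit) of assembling these options with the builder above; the URL below is a placeholder, and the driver class is the Phoenix default named later in PhoenixDialect:

JdbcConnectionOptions connectionOptions =
        new JdbcConnectionOptions.JdbcConnectionOptionsBuilder()
                .withUrl("jdbc:phoenix:zk-host:2181")                    // placeholder Phoenix JDBC url
                .withDriverName("org.apache.phoenix.jdbc.PhoenixDriver") // Phoenix default driver
                .withConnectionCheckTimeoutSeconds(60)                   // default shown above
                .build();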
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connector.phoenix;
import org.apache.flink.annotation.PublicEvolving;
import org.apache.flink.util.Preconditions;
import java.io.Serializable;
import java.util.Optional;
/**
* JDBC exactly once sink options.
*
* <p><b>maxCommitAttempts</b> - maximum number of commit attempts to make per transaction; must be
* > 0; state size is proportional to the product of max number of in-flight snapshots and this
* number.
*
* <p><b>allowOutOfOrderCommits</b> - If true, all prepared transactions will be attempted to commit
* regardless of any transient failures during this operation. This may lead to inconsistency.
* Default: false.
*
* <p><b>recoveredAndRollback</b> - whether to rollback prepared transactions known to XA RM on
* startup (after committing <b>known</b> transactions, i.e. restored from state).
*
* <p>NOTE that setting this parameter to true may:
*
* <ol>
* <li>interfere with other subtasks or applications (one subtask rolling back transactions
* prepared by the other one (and known to it))
* <li>block when using with some non-MVCC databases, if there are ended-not-prepared transactions
* </ol>
*
* <p>See also {@link org.apache.flink.connector.jdbc.xa.XaFacade#recover()}
*/
@PublicEvolving
public class JdbcExactlyOnceOptions implements Serializable {
private static final boolean DEFAULT_RECOVERED_AND_ROLLBACK = true;
private static final int DEFAULT_MAX_COMMIT_ATTEMPTS = 3;
private static final boolean DEFAULT_ALLOW_OUT_OF_ORDER_COMMITS = false;
public static final boolean DEFAULT_TRANSACTION_PER_CONNECTION = false;
private final boolean discoverAndRollbackOnRecovery;
private final int maxCommitAttempts;
private final boolean allowOutOfOrderCommits;
private final Integer timeoutSec;
private final boolean transactionPerConnection;
private JdbcExactlyOnceOptions(
boolean discoverAndRollbackOnRecovery,
int maxCommitAttempts,
boolean allowOutOfOrderCommits,
Optional<Integer> timeoutSec,
boolean transactionPerConnection) {
this.discoverAndRollbackOnRecovery = discoverAndRollbackOnRecovery;
this.maxCommitAttempts = maxCommitAttempts;
this.allowOutOfOrderCommits = allowOutOfOrderCommits;
this.timeoutSec = timeoutSec.orElse(null);
this.transactionPerConnection = transactionPerConnection;
Preconditions.checkArgument(this.maxCommitAttempts > 0, "maxCommitAttempts should be > 0");
}
public static JdbcExactlyOnceOptions defaults() {
return builder().build();
}
public boolean isDiscoverAndRollbackOnRecovery() {
return discoverAndRollbackOnRecovery;
}
public boolean isAllowOutOfOrderCommits() {
return allowOutOfOrderCommits;
}
public int getMaxCommitAttempts() {
return maxCommitAttempts;
}
public Integer getTimeoutSec() {
return timeoutSec;
}
public boolean isTransactionPerConnection() {
return transactionPerConnection;
}
public static JDBCExactlyOnceOptionsBuilder builder() {
return new JDBCExactlyOnceOptionsBuilder();
}
/** JDBCExactlyOnceOptionsBuilder. */
public static class JDBCExactlyOnceOptionsBuilder {
private boolean recoveredAndRollback = DEFAULT_RECOVERED_AND_ROLLBACK;
private int maxCommitAttempts = DEFAULT_MAX_COMMIT_ATTEMPTS;
private boolean allowOutOfOrderCommits = DEFAULT_ALLOW_OUT_OF_ORDER_COMMITS;
private Optional<Integer> timeoutSec = Optional.empty();
private boolean transactionPerConnection = DEFAULT_TRANSACTION_PER_CONNECTION;
/**
* Toggle discovery and rollback of prepared transactions upon recovery to prevent new
* transactions from being blocked by the older ones. Each subtask rolls back its own
* transaction. This flag must be disabled when rescaling to prevent data loss.
*/
public JDBCExactlyOnceOptionsBuilder withRecoveredAndRollback(
boolean recoveredAndRollback) {
this.recoveredAndRollback = recoveredAndRollback;
return this;
}
/**
* Set the number of attempts to commit a transaction (takes effect only if a transient
* failure happens).
*/
public JDBCExactlyOnceOptionsBuilder withMaxCommitAttempts(int maxCommitAttempts) {
this.maxCommitAttempts = maxCommitAttempts;
return this;
}
/**
* Set whether transactions may be committed out of order in case of retries. This may lead
* to inconsistency; disabled by default.
*/
public JDBCExactlyOnceOptionsBuilder withAllowOutOfOrderCommits(
boolean allowOutOfOrderCommits) {
this.allowOutOfOrderCommits = allowOutOfOrderCommits;
return this;
}
/** Set transaction timeout in seconds (vendor-specific). */
public JDBCExactlyOnceOptionsBuilder withTimeoutSec(Optional<Integer> timeoutSec) {
this.timeoutSec = timeoutSec;
return this;
}
/**
* Set whether the same connection can be used for multiple XA transactions. A transaction
* is prepared each time a checkpoint is performed; it is committed once the checkpoint is
* confirmed. There can be multiple un-confirmed checkpoints and therefore multiple prepared
* transactions.
*
* <p>Some databases support this natively (e.g. Oracle); while others only allow a single
* XA transaction per connection (e.g. MySQL, PostgreSQL).
*
* <p>If enabled, each transaction uses a separate connection from a pool. The database
* limit of open connections might need to be adjusted.
*
* <p>Disabled by default.
*/
public JDBCExactlyOnceOptionsBuilder withTransactionPerConnection(
boolean transactionPerConnection) {
this.transactionPerConnection = transactionPerConnection;
return this;
}
public JdbcExactlyOnceOptions build() {
return new JdbcExactlyOnceOptions(
recoveredAndRollback,
maxCommitAttempts,
allowOutOfOrderCommits,
timeoutSec,
transactionPerConnection);
}
}
}
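
A short sketch of the exactly-once options builder above; the values are illustrative, not recommendations:

JdbcExactlyOnceOptions exactlyOnceOptions =
        JdbcExactlyOnceOptions.builder()
                .withMaxCommitAttempts(3)            // must be > 0
                .withTimeoutSec(Optional.of(600))    // vendor-specific transaction timeout
                .withTransactionPerConnection(false) // single XA transaction per connection
                .build();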
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connector.phoenix;
import org.apache.flink.annotation.PublicEvolving;
import org.apache.flink.util.Preconditions;
import java.io.Serializable;
import java.util.Objects;
/** JDBC sink batch options. */
@PublicEvolving
public class JdbcExecutionOptions implements Serializable {
public static final int DEFAULT_MAX_RETRY_TIMES = 3;
private static final int DEFAULT_INTERVAL_MILLIS = 0;
public static final int DEFAULT_SIZE = 5000;
private final long batchIntervalMs;
private final int batchSize;
private final int maxRetries;
private JdbcExecutionOptions(long batchIntervalMs, int batchSize, int maxRetries) {
Preconditions.checkArgument(maxRetries >= 0);
this.batchIntervalMs = batchIntervalMs;
this.batchSize = batchSize;
this.maxRetries = maxRetries;
}
public long getBatchIntervalMs() {
return batchIntervalMs;
}
public int getBatchSize() {
return batchSize;
}
public int getMaxRetries() {
return maxRetries;
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
JdbcExecutionOptions that = (JdbcExecutionOptions) o;
return batchIntervalMs == that.batchIntervalMs
&& batchSize == that.batchSize
&& maxRetries == that.maxRetries;
}
@Override
public int hashCode() {
return Objects.hash(batchIntervalMs, batchSize, maxRetries);
}
public static Builder builder() {
return new Builder();
}
public static JdbcExecutionOptions defaults() {
return builder().build();
}
/** Builder for {@link JdbcExecutionOptions}. */
public static final class Builder {
private long intervalMs = DEFAULT_INTERVAL_MILLIS;
private int size = DEFAULT_SIZE;
private int maxRetries = DEFAULT_MAX_RETRY_TIMES;
public Builder withBatchSize(int size) {
this.size = size;
return this;
}
public Builder withBatchIntervalMs(long intervalMs) {
this.intervalMs = intervalMs;
return this;
}
public Builder withMaxRetries(int maxRetries) {
this.maxRetries = maxRetries;
return this;
}
public JdbcExecutionOptions build() {
return new JdbcExecutionOptions(intervalMs, size, maxRetries);
}
}
}
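
A minimal sketch of the batch options builder above; the numbers are illustrative:

JdbcExecutionOptions executionOptions =
        JdbcExecutionOptions.builder()
                .withBatchSize(1000)      // flush once 1000 records are buffered
                .withBatchIntervalMs(200) // or after 200 ms, whichever comes first
                .withMaxRetries(3)
                .build();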
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connector.phoenix;
import org.apache.flink.annotation.PublicEvolving;
import org.apache.flink.connector.phoenix.internal.executor.JdbcBatchStatementExecutor;
import org.apache.flink.util.function.BiConsumerWithException;
import java.io.Serializable;
import java.sql.PreparedStatement;
import java.sql.SQLException;
/**
* Sets {@link PreparedStatement} parameters to use in JDBC Sink based on a specific type of
* StreamRecord.
*
* @param <T> type of payload in {@link org.apache.flink.streaming.runtime.streamrecord.StreamRecord
* StreamRecord}
* @see JdbcBatchStatementExecutor
*/
@PublicEvolving
public interface JdbcStatementBuilder<T>
extends BiConsumerWithException<PreparedStatement, T, SQLException>, Serializable {}
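
Because the interface is just a serializable BiConsumer, it is usually written as a lambda. A minimal sketch for a hypothetical two-column Row payload:

JdbcStatementBuilder<Row> statementBuilder = (ps, row) -> {
    ps.setLong(1, (Long) row.getField(0));     // hypothetical BIGINT column
    ps.setString(2, (String) row.getField(1)); // hypothetical VARCHAR column
};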
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connector.phoenix;
import org.apache.commons.lang.StringUtils;
import org.apache.flink.annotation.Experimental;
import org.apache.flink.annotation.VisibleForTesting;
import org.apache.flink.api.common.io.DefaultInputSplitAssigner;
import org.apache.flink.api.common.io.InputFormat;
import org.apache.flink.api.common.io.RichInputFormat;
import org.apache.flink.api.common.io.statistics.BaseStatistics;
import org.apache.flink.api.java.typeutils.ResultTypeQueryable;
import org.apache.flink.api.java.typeutils.RowTypeInfo;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.connector.phoenix.internal.connection.JdbcConnectionProvider;
import org.apache.flink.connector.phoenix.internal.connection.PhoneixJdbcConnectionProvider;
import org.apache.flink.connector.phoenix.split.JdbcParameterValuesProvider;
import org.apache.flink.core.io.GenericInputSplit;
import org.apache.flink.core.io.InputSplit;
import org.apache.flink.core.io.InputSplitAssigner;
import org.apache.flink.types.Row;
import org.apache.flink.util.Preconditions;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.math.BigDecimal;
import java.sql.*;
import java.util.Arrays;
/**
* InputFormat to read data from a database and generate Rows. The InputFormat has to be configured
* using the supplied InputFormatBuilder. A valid RowTypeInfo must be properly configured in the
* builder, e.g.:
*
* <pre><code>
* TypeInformation<?>[] fieldTypes = new TypeInformation<?>[] {
* BasicTypeInfo.INT_TYPE_INFO,
* BasicTypeInfo.STRING_TYPE_INFO,
* BasicTypeInfo.STRING_TYPE_INFO,
* BasicTypeInfo.DOUBLE_TYPE_INFO,
* BasicTypeInfo.INT_TYPE_INFO
* };
*
* RowTypeInfo rowTypeInfo = new RowTypeInfo(fieldTypes);
*
* JdbcInputFormat jdbcInputFormat = JdbcInputFormat.buildJdbcInputFormat()
* .setDrivername("org.apache.derby.jdbc.EmbeddedDriver")
* .setDBUrl("jdbc:derby:memory:ebookshop")
* .setQuery("select * from books")
* .setRowTypeInfo(rowTypeInfo)
* .finish();
* </code></pre>
*
* <p>In order to query the JDBC source in parallel, you need to provide a parameterized query
* template (i.e. a valid {@link PreparedStatement}) and a {@link JdbcParameterValuesProvider} which
* provides binding values for the query parameters. E.g.:
*
* <pre><code>
*
* Serializable[][] queryParameters = new String[2][1];
* queryParameters[0] = new String[]{"Kumar"};
* queryParameters[1] = new String[]{"Tan Ah Teck"};
*
* JdbcInputFormat jdbcInputFormat = JdbcInputFormat.buildJdbcInputFormat()
* .setDrivername("org.apache.derby.jdbc.EmbeddedDriver")
* .setDBUrl("jdbc:derby:memory:ebookshop")
* .setQuery("select * from books WHERE author = ?")
* .setRowTypeInfo(rowTypeInfo)
* .setParametersProvider(new JdbcGenericParameterValuesProvider(queryParameters))
* .finish();
* </code></pre>
*
* @see Row
* @see JdbcParameterValuesProvider
* @see PreparedStatement
* @see DriverManager
*/
@Experimental
public class PhoenixInputFormat extends RichInputFormat<Row, InputSplit>
implements ResultTypeQueryable<Row> {
protected static final long serialVersionUID = 2L;
protected static final Logger LOG = LoggerFactory.getLogger(PhoenixInputFormat.class);
protected JdbcConnectionProvider connectionProvider;
protected String queryTemplate;
protected int resultSetType;
protected int resultSetConcurrency;
protected RowTypeInfo rowTypeInfo;
protected boolean namespaceMappingEnabled;
protected boolean mapSystemTablesEnabled;
protected transient PreparedStatement statement;
protected transient ResultSet resultSet;
protected int fetchSize;
// Boolean to distinguish between default value and explicitly set autoCommit mode.
protected Boolean autoCommit;
protected boolean hasNext;
protected Object[][] parameterValues;
public PhoenixInputFormat() {
}
@Override
public RowTypeInfo getProducedType() {
return rowTypeInfo;
}
@Override
public void configure(Configuration parameters) {
// do nothing here
}
@Override
public void openInputFormat() {
// called once per inputFormat (on open)
try {
Connection dbConn = connectionProvider.getOrEstablishConnection();
// set autoCommit mode only if it was explicitly configured.
// keep connection default otherwise.
if (autoCommit != null) {
dbConn.setAutoCommit(autoCommit);
}
LOG.debug("openInputFormat query :" +queryTemplate);
// strip backticks: Phoenix does not support them
String initQuery = StringUtils.remove(queryTemplate, "\\`");
LOG.debug("openInputFormat initQuery :" +initQuery);
// replace double quotes with single quotes
String replaceQuery = StringUtils.replace(initQuery, "\"", "'");
LOG.info("openInputFormat replaceQuery :" +replaceQuery);
statement = dbConn.prepareStatement(replaceQuery, resultSetType, resultSetConcurrency);
if (fetchSize == Integer.MIN_VALUE || fetchSize > 0) {
statement.setFetchSize(fetchSize);
}
} catch (SQLException se) {
throw new IllegalArgumentException("open() failed." + se.getMessage(), se);
} catch (ClassNotFoundException cnfe) {
throw new IllegalArgumentException(
"JDBC-Class not found. - " + cnfe.getMessage(), cnfe);
}
}
@Override
public void closeInputFormat() {
// called once per inputFormat (on close)
try {
if (statement != null) {
statement.close();
}
} catch (SQLException se) {
LOG.info("Inputformat Statement couldn't be closed - " + se.getMessage());
} finally {
statement = null;
}
connectionProvider.closeConnection();
parameterValues = null;
}
/**
* Connects to the source database and executes the query in a <b>parallel fashion</b> if this
* {@link InputFormat} is built using a parameterized query (i.e. using a {@link
* PreparedStatement}) and a proper {@link JdbcParameterValuesProvider}, in a <b>non-parallel
* fashion</b> otherwise.
*
* @param inputSplit which is ignored if this InputFormat is executed as a non-parallel source,
* a "hook" to the query parameters otherwise (using its <i>splitNumber</i>)
* @throws IOException if there's an error during the execution of the query
*/
@Override
public void open(InputSplit inputSplit) throws IOException {
try {
if (inputSplit != null && parameterValues != null) {
for (int i = 0; i < parameterValues[inputSplit.getSplitNumber()].length; i++) {
Object param = parameterValues[inputSplit.getSplitNumber()][i];
if (param instanceof String) {
statement.setString(i + 1, (String) param);
} else if (param instanceof Long) {
statement.setLong(i + 1, (Long) param);
} else if (param instanceof Integer) {
statement.setInt(i + 1, (Integer) param);
} else if (param instanceof Double) {
statement.setDouble(i + 1, (Double) param);
} else if (param instanceof Boolean) {
statement.setBoolean(i + 1, (Boolean) param);
} else if (param instanceof Float) {
statement.setFloat(i + 1, (Float) param);
} else if (param instanceof BigDecimal) {
statement.setBigDecimal(i + 1, (BigDecimal) param);
} else if (param instanceof Byte) {
statement.setByte(i + 1, (Byte) param);
} else if (param instanceof Short) {
statement.setShort(i + 1, (Short) param);
} else if (param instanceof Date) {
statement.setDate(i + 1, (Date) param);
} else if (param instanceof Time) {
statement.setTime(i + 1, (Time) param);
} else if (param instanceof Timestamp) {
statement.setTimestamp(i + 1, (Timestamp) param);
} else if (param instanceof Array) {
statement.setArray(i + 1, (Array) param);
} else {
// extends with other types if needed
throw new IllegalArgumentException(
"open() failed. Parameter "
+ i
+ " of type "
+ param.getClass()
+ " is not handled (yet).");
}
}
if (LOG.isDebugEnabled()) {
LOG.debug(
String.format(
"Executing '%s' with parameters %s",
queryTemplate,
Arrays.deepToString(
parameterValues[inputSplit.getSplitNumber()])));
}
}
resultSet = statement.executeQuery();
hasNext = resultSet.next();
} catch (SQLException se) {
throw new IllegalArgumentException("open() failed." + se.getMessage(), se);
}
}
/**
* Closes all resources used.
*
* @throws IOException Indicates that a resource could not be closed.
*/
@Override
public void close() throws IOException {
if (resultSet == null) {
return;
}
try {
resultSet.close();
} catch (SQLException se) {
LOG.info("Inputformat ResultSet couldn't be closed - " + se.getMessage());
}
}
/**
* Checks whether all data has been read.
*
* @return boolean value indicating whether all data has been read.
* @throws IOException
*/
@Override
public boolean reachedEnd() throws IOException {
return !hasNext;
}
/**
* Stores the next resultSet row in a {@link Row}.
*
* @param reuse row to be reused.
* @return the reused {@link Row} containing the next record.
* @throws IOException
*/
@Override
public Row nextRecord(Row reuse) throws IOException {
try {
if (!hasNext) {
return null;
}
for (int pos = 0; pos < reuse.getArity(); pos++) {
reuse.setField(pos, resultSet.getObject(pos + 1));
}
// update hasNext after we've read the record
hasNext = resultSet.next();
return reuse;
} catch (SQLException se) {
throw new IOException("Couldn't read data - " + se.getMessage(), se);
} catch (NullPointerException npe) {
throw new IOException("Couldn't access resultSet", npe);
}
}
@Override
public BaseStatistics getStatistics(BaseStatistics cachedStatistics) throws IOException {
return cachedStatistics;
}
@Override
public InputSplit[] createInputSplits(int minNumSplits) throws IOException {
if (parameterValues == null) {
return new GenericInputSplit[]{new GenericInputSplit(0, 1)};
}
GenericInputSplit[] ret = new GenericInputSplit[parameterValues.length];
for (int i = 0; i < ret.length; i++) {
ret[i] = new GenericInputSplit(i, ret.length);
}
return ret;
}
@Override
public InputSplitAssigner getInputSplitAssigner(InputSplit[] inputSplits) {
return new DefaultInputSplitAssigner(inputSplits);
}
@VisibleForTesting
protected PreparedStatement getStatement() {
return statement;
}
@VisibleForTesting
protected Connection getDbConn() {
return connectionProvider.getConnection();
}
/**
* A builder used to set parameters to the input format's configuration in a fluent way.
*
* @return builder
*/
public static PhoenixInputFormatBuilder buildJdbcInputFormat() {
return new PhoenixInputFormatBuilder();
}
/**
* Builder for {@link PhoenixInputFormat}.
*/
public static class PhoenixInputFormatBuilder {
private final JdbcConnectionOptions.JdbcConnectionOptionsBuilder connOptionsBuilder;
private final PhoenixInputFormat format;
public PhoenixInputFormatBuilder() {
//this.connOptionsBuilder = new JdbcConnectionOptions.JdbcConnectionOptionsBuilder();
this.connOptionsBuilder = new JdbcConnectionOptions.JdbcConnectionOptionsBuilder();
this.format = new PhoenixInputFormat();
// using TYPE_FORWARD_ONLY for high performance reads
this.format.resultSetType = ResultSet.TYPE_FORWARD_ONLY;
this.format.resultSetConcurrency = ResultSet.CONCUR_READ_ONLY;
}
public PhoenixInputFormatBuilder setUsername(String username) {
connOptionsBuilder.withUsername(username);
return this;
}
public PhoenixInputFormatBuilder setPassword(String password) {
connOptionsBuilder.withPassword(password);
return this;
}
public PhoenixInputFormatBuilder setDrivername(String drivername) {
connOptionsBuilder.withDriverName(drivername);
return this;
}
public PhoenixInputFormatBuilder setDBUrl(String dbURL) {
connOptionsBuilder.withUrl(dbURL);
return this;
}
public PhoenixInputFormatBuilder setQuery(String query) {
format.queryTemplate = query;
return this;
}
public PhoenixInputFormatBuilder setResultSetType(int resultSetType) {
format.resultSetType = resultSetType;
return this;
}
public PhoenixInputFormatBuilder setResultSetConcurrency(int resultSetConcurrency) {
format.resultSetConcurrency = resultSetConcurrency;
return this;
}
public PhoenixInputFormatBuilder setParametersProvider(
JdbcParameterValuesProvider parameterValuesProvider) {
format.parameterValues = parameterValuesProvider.getParameterValues();
return this;
}
public PhoenixInputFormatBuilder setRowTypeInfo(RowTypeInfo rowTypeInfo) {
format.rowTypeInfo = rowTypeInfo;
return this;
}
public PhoenixInputFormatBuilder setFetchSize(int fetchSize) {
Preconditions.checkArgument(
fetchSize == Integer.MIN_VALUE || fetchSize > 0,
"Illegal value %s for fetchSize, has to be positive or Integer.MIN_VALUE.",
fetchSize);
format.fetchSize = fetchSize;
return this;
}
public PhoenixInputFormatBuilder setAutoCommit(Boolean autoCommit) {
format.autoCommit = autoCommit;
return this;
}
public PhoenixInputFormatBuilder setNamespaceMappingEnabled(Boolean namespaceMappingEnabled) {
format.namespaceMappingEnabled = namespaceMappingEnabled;
return this;
}
public PhoenixInputFormatBuilder setMapSystemTablesEnabled(Boolean mapSystemTablesEnabled) {
format.mapSystemTablesEnabled = mapSystemTablesEnabled;
return this;
}
public PhoenixInputFormat finish() {
format.connectionProvider =
//new SimpleJdbcConnectionProvider(connOptionsBuilder.build());
new PhoneixJdbcConnectionProvider(connOptionsBuilder.build(), format.namespaceMappingEnabled, format.mapSystemTablesEnabled);
if (format.queryTemplate == null) {
throw new NullPointerException("No query supplied");
}
if (format.rowTypeInfo == null) {
throw new NullPointerException(
"No " + RowTypeInfo.class.getSimpleName() + " supplied");
}
if (format.parameterValues == null) {
LOG.debug("No input splitting configured (data will be read with parallelism 1).");
}
return format;
}
}
}
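
For orientation, a minimal sketch of wiring the builder above against a Phoenix table; the URL, query, and row schema are placeholders, not values required by the connector:

RowTypeInfo rowTypeInfo = new RowTypeInfo(
        BasicTypeInfo.INT_TYPE_INFO, BasicTypeInfo.STRING_TYPE_INFO);

PhoenixInputFormat inputFormat = PhoenixInputFormat.buildJdbcInputFormat()
        .setDrivername("org.apache.phoenix.jdbc.PhoenixDriver")
        .setDBUrl("jdbc:phoenix:zk-host:2181")
        .setQuery("SELECT ID, NAME FROM TEST.USERS")
        .setRowTypeInfo(rowTypeInfo)
        .setNamespaceMappingEnabled(true)
        .setMapSystemTablesEnabled(true)
        .finish();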
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connector.phoenix.dialect;
import org.apache.flink.table.api.TableSchema;
import org.apache.flink.table.api.ValidationException;
import org.apache.flink.table.types.DataType;
import org.apache.flink.table.types.logical.DecimalType;
import org.apache.flink.table.types.logical.LogicalTypeRoot;
import org.apache.flink.table.types.logical.TimestampType;
import org.apache.flink.table.types.logical.VarBinaryType;
import java.util.List;
abstract class AbstractDialect implements JdbcDialect {
@Override
public void validate(TableSchema schema) throws ValidationException {
for (int i = 0; i < schema.getFieldCount(); i++) {
DataType dt = schema.getFieldDataType(i).get();
String fieldName = schema.getFieldName(i).get();
// TODO: We can't convert VARBINARY(n) data type to
// PrimitiveArrayTypeInfo.BYTE_PRIMITIVE_ARRAY_TYPE_INFO in
// LegacyTypeInfoDataTypeConverter
// when n is smaller than Integer.MAX_VALUE
if (unsupportedTypes().contains(dt.getLogicalType().getTypeRoot())
|| (dt.getLogicalType() instanceof VarBinaryType
&& Integer.MAX_VALUE
!= ((VarBinaryType) dt.getLogicalType()).getLength())) {
throw new ValidationException(
String.format(
"The %s dialect doesn't support type: %s.",
dialectName(), dt.toString()));
}
// only validate precision of DECIMAL type for blink planner
if (dt.getLogicalType() instanceof DecimalType) {
int precision = ((DecimalType) dt.getLogicalType()).getPrecision();
if (precision > maxDecimalPrecision() || precision < minDecimalPrecision()) {
throw new ValidationException(
String.format(
"The precision of field '%s' is out of the DECIMAL "
+ "precision range [%d, %d] supported by %s dialect.",
fieldName,
minDecimalPrecision(),
maxDecimalPrecision(),
dialectName()));
}
}
// only validate precision of TIMESTAMP type for blink planner
if (dt.getLogicalType() instanceof TimestampType) {
int precision = ((TimestampType) dt.getLogicalType()).getPrecision();
if (precision > maxTimestampPrecision() || precision < minTimestampPrecision()) {
throw new ValidationException(
String.format(
"The precision of field '%s' is out of the TIMESTAMP "
+ "precision range [%d, %d] supported by %s dialect.",
fieldName,
minTimestampPrecision(),
maxTimestampPrecision(),
dialectName()));
}
}
}
}
public abstract int maxDecimalPrecision();
public abstract int minDecimalPrecision();
public abstract int maxTimestampPrecision();
public abstract int minTimestampPrecision();
/**
* Defines the unsupported types for the dialect.
*
* @return a list of logical type roots.
*/
public abstract List<LogicalTypeRoot> unsupportedTypes();
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connector.phoenix.dialect;
import org.apache.flink.annotation.Internal;
import org.apache.flink.connector.phoenix.internal.converter.JdbcRowConverter;
import org.apache.flink.table.api.TableSchema;
import org.apache.flink.table.api.ValidationException;
import org.apache.flink.table.types.logical.RowType;
import java.io.Serializable;
import java.util.Arrays;
import java.util.Optional;
import java.util.stream.Collectors;
import static java.lang.String.format;
/** Handle the SQL dialect of jdbc driver. */
@Internal
public interface JdbcDialect extends Serializable {
/**
* Get the name of jdbc dialect.
*
* @return the dialect name.
*/
String dialectName();
/**
* Check if this dialect instance can handle a certain jdbc url.
*
* @param url the jdbc url.
* @return True if the dialect can be applied on the given jdbc url.
*/
boolean canHandle(String url);
/**
* Get the converter that converts between JDBC objects and Flink internal objects.
*
* @param rowType the given row type
* @return a row converter for the database
*/
JdbcRowConverter getRowConverter(RowType rowType);
/**
* Get limit clause to limit the number of emitted row from the jdbc source.
*
* @param limit number of rows to emit. The value of the parameter should be non-negative.
* @return the limit clause.
*/
String getLimitClause(long limit);
/**
* Check if this dialect instance supports a specific data type in the table schema.
*
* @param schema the table schema.
* @exception ValidationException in case the table schema contains an unsupported type.
*/
default void validate(TableSchema schema) throws ValidationException {}
/**
* @return the default driver class name; if the user does not configure a driver class name,
*     this one will be used.
*/
default Optional<String> defaultDriverName() {
return Optional.empty();
}
/**
* Quotes the identifier. This is used to put quotes around the identifier in case the column
* name is a reserved keyword, or in case it contains characters that require quotes (e.g.
* space). By default, double quotes {@code "} are used for quoting.
*/
default String quoteIdentifier(String identifier) {
return "\"" + identifier + "\"";
}
/**
* Get the dialect's upsert statement. Each database has its own upsert syntax, e.g. MySQL uses
* ON DUPLICATE KEY UPDATE, and PostgreSQL uses ON CONFLICT ... DO UPDATE SET.
*
* @return empty if the dialect does not support an upsert statement; the writer then degrades
*     to select + update/insert, which performs poorly.
*/
default Optional<String> getUpsertStatement(
String tableName, String[] fieldNames, String[] uniqueKeyFields) {
return Optional.empty();
}
/** Get row exists statement by condition fields. Default use SELECT. */
default String getRowExistsStatement(String tableName, String[] conditionFields) {
String fieldExpressions =
Arrays.stream(conditionFields)
.map(f -> format("%s = :%s", quoteIdentifier(f), f))
.collect(Collectors.joining(" AND "));
return "SELECT 1 FROM " + quoteIdentifier(tableName) + " WHERE " + fieldExpressions;
}
/** Get insert into statement. */
default String getInsertIntoStatement(String tableName, String[] fieldNames) {
String columns =
Arrays.stream(fieldNames)
.map(this::quoteIdentifier)
.collect(Collectors.joining(", "));
String placeholders =
Arrays.stream(fieldNames).map(f -> ":" + f).collect(Collectors.joining(", "));
return "INSERT INTO "
+ quoteIdentifier(tableName)
+ "("
+ columns
+ ")"
+ " VALUES ("
+ placeholders
+ ")";
}
/**
* Get the statement to update one row by condition fields. LIMIT 1 is not used by default,
* because it is dialect-specific.
*/
default String getUpdateStatement(
String tableName, String[] fieldNames, String[] conditionFields) {
String setClause =
Arrays.stream(fieldNames)
.map(f -> format("%s = :%s", quoteIdentifier(f), f))
.collect(Collectors.joining(", "));
String conditionClause =
Arrays.stream(conditionFields)
.map(f -> format("%s = :%s", quoteIdentifier(f), f))
.collect(Collectors.joining(" AND "));
return "UPDATE "
+ quoteIdentifier(tableName)
+ " SET "
+ setClause
+ " WHERE "
+ conditionClause;
}
/**
* Get the statement to delete one row by condition fields. LIMIT 1 is not used by default,
* because it is dialect-specific.
*/
default String getDeleteStatement(String tableName, String[] conditionFields) {
String conditionClause =
Arrays.stream(conditionFields)
.map(f -> format("%s = :%s", quoteIdentifier(f), f))
.collect(Collectors.joining(" AND "));
return "DELETE FROM " + quoteIdentifier(tableName) + " WHERE " + conditionClause;
}
/** Get select fields statement by condition fields. Default use SELECT. */
default String getSelectFromStatement(
String tableName, String[] selectFields, String[] conditionFields) {
String selectExpressions =
Arrays.stream(selectFields)
.map(this::quoteIdentifier)
.collect(Collectors.joining(", "));
String fieldExpressions =
Arrays.stream(conditionFields)
.map(f -> format("%s = :%s", quoteIdentifier(f), f))
.collect(Collectors.joining(" AND "));
return "SELECT "
+ selectExpressions
+ " FROM "
+ quoteIdentifier(tableName)
+ (conditionFields.length > 0 ? " WHERE " + fieldExpressions : "");
}
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connector.phoenix.dialect;
import java.util.Arrays;
import java.util.List;
import java.util.Optional;
/** Default JDBC dialects. */
public final class JdbcDialects {
private static final List<JdbcDialect> DIALECTS =
Arrays.asList(new PhoenixDialect());
/** Fetch the JdbcDialect class corresponding to a given database url. */
public static Optional<JdbcDialect> get(String url) {
for (JdbcDialect dialect : DIALECTS) {
if (dialect.canHandle(url)) {
return Optional.of(dialect);
}
}
return Optional.empty();
}
}
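
Usage sketch for the lookup above; the URL is a placeholder:

JdbcDialect dialect = JdbcDialects.get("jdbc:phoenix:zk-host:2181")
        .orElseThrow(() -> new IllegalStateException("No JDBC dialect found for the given url"));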
package org.apache.flink.connector.phoenix.dialect;
import org.apache.flink.connector.phoenix.internal.converter.JdbcRowConverter;
import org.apache.flink.connector.phoenix.internal.converter.PhoenixRowConverter;
import org.apache.flink.table.types.logical.LogicalTypeRoot;
import org.apache.flink.table.types.logical.RowType;
import java.util.Arrays;
import java.util.List;
import java.util.Optional;
import java.util.stream.Collectors;
/**
* PhoenixDialect
*
* @author gy
* @since 2022/3/16 11:19
**/
public class PhoenixDialect extends AbstractDialect {
private static final long serialVersionUID = 1L;
private static final int MAX_TIMESTAMP_PRECISION = 6;
private static final int MIN_TIMESTAMP_PRECISION = 1;
private static final int MAX_DECIMAL_PRECISION = 65;
private static final int MIN_DECIMAL_PRECISION = 1;
@Override
public boolean canHandle(String url) {
return url.startsWith("jdbc:phoenix:");
}
@Override
public JdbcRowConverter getRowConverter(RowType rowType) {
return new PhoenixRowConverter(rowType);
}
@Override
public String getLimitClause(long limit) {
return "LIMIT " + limit;
}
@Override
public Optional<String> defaultDriverName() {
return Optional.of("org.apache.phoenix.jdbc.PhoenixDriver");
}
/**
* Phoenix does not support backticks.
* Do not put any " or ` quotes around column or table names, otherwise Phoenix fails to parse the statement.
* @param identifier the identifier to quote
* @return the identifier unchanged
*/
@Override
public String quoteIdentifier(String identifier) {
//return "`" + identifier + "`";
//return super.quoteIdentifier(identifier);
return identifier;
}
@Override
public Optional<String> getUpsertStatement(String tableName, String[] fieldNames, String[] uniqueKeyFields) {
String columns = Arrays.stream(fieldNames).map(this::quoteIdentifier).collect(Collectors.joining(", "));
String placeholders = Arrays.stream(fieldNames).map(f -> ":" + f).collect(Collectors.joining(", "));
String sql = "UPSERT INTO " + quoteIdentifier(tableName) + "(" + columns + ") VALUES (" + placeholders + ")";
return Optional.of(sql);
}
@Override
public String getInsertIntoStatement(String tableName, String[] fieldNames) {
return this.getUpsertStatement(tableName,fieldNames,null).get();
}
@Override
public String dialectName() {
return "Phoenix";
}
@Override
public int maxDecimalPrecision() {
return MAX_DECIMAL_PRECISION;
}
@Override
public int minDecimalPrecision() {
return MIN_DECIMAL_PRECISION;
}
@Override
public int maxTimestampPrecision() {
return MAX_TIMESTAMP_PRECISION;
}
@Override
public int minTimestampPrecision() {
return MIN_TIMESTAMP_PRECISION;
}
@Override
public List<LogicalTypeRoot> unsupportedTypes() {
return Arrays.asList(
LogicalTypeRoot.BINARY,
LogicalTypeRoot.TIMESTAMP_WITH_LOCAL_TIME_ZONE,
LogicalTypeRoot.TIMESTAMP_WITH_TIME_ZONE,
LogicalTypeRoot.INTERVAL_YEAR_MONTH,
LogicalTypeRoot.INTERVAL_DAY_TIME,
LogicalTypeRoot.ARRAY,
LogicalTypeRoot.MULTISET,
LogicalTypeRoot.MAP,
LogicalTypeRoot.ROW,
LogicalTypeRoot.DISTINCT_TYPE,
LogicalTypeRoot.STRUCTURED_TYPE,
LogicalTypeRoot.NULL,
LogicalTypeRoot.RAW,
LogicalTypeRoot.SYMBOL,
LogicalTypeRoot.UNRESOLVED);
}
}
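
For illustration, a sketch (hypothetical table and columns) of the UPSERT statement the dialect above produces; note that identifiers are intentionally left unquoted:

PhoenixDialect dialect = new PhoenixDialect();
String upsert = dialect.getUpsertStatement("TEST.USERS", new String[] {"ID", "NAME"}, null).get();
// upsert -> "UPSERT INTO TEST.USERS(ID, NAME) VALUES (:ID, :NAME)"

The :FIELD placeholders follow the named-parameter convention used elsewhere in the connector (see FieldNamedPreparedStatementImpl).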
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connector.phoenix.internal;
import org.apache.flink.annotation.VisibleForTesting;
import org.apache.flink.api.common.io.RichOutputFormat;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.connector.phoenix.internal.connection.JdbcConnectionProvider;
import org.apache.flink.util.Preconditions;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.Flushable;
import java.io.IOException;
import java.sql.Connection;
/** Base jdbc outputFormat. */
public abstract class AbstractJdbcOutputFormat<T> extends RichOutputFormat<T> implements Flushable {
private static final long serialVersionUID = 1L;
public static final int DEFAULT_FLUSH_MAX_SIZE = 5000;
public static final long DEFAULT_FLUSH_INTERVAL_MILLS = 0L;
private static final Logger LOG = LoggerFactory.getLogger(AbstractJdbcOutputFormat.class);
protected final JdbcConnectionProvider connectionProvider;
public AbstractJdbcOutputFormat(JdbcConnectionProvider connectionProvider) {
this.connectionProvider = Preconditions.checkNotNull(connectionProvider);
}
@Override
public void configure(Configuration parameters) {}
@Override
public void open(int taskNumber, int numTasks) throws IOException {
try {
connectionProvider.getOrEstablishConnection();
} catch (Exception e) {
throw new IOException("unable to open JDBC writer", e);
}
}
@Override
public void close() {
connectionProvider.closeConnection();
}
@Override
public void flush() throws IOException {}
//@VisibleForTesting
public Connection getConnection() {
return connectionProvider.getConnection();
}
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connector.phoenix.internal;
import org.apache.flink.annotation.Internal;
import org.apache.flink.api.common.functions.RuntimeContext;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.runtime.state.FunctionInitializationContext;
import org.apache.flink.runtime.state.FunctionSnapshotContext;
import org.apache.flink.streaming.api.checkpoint.CheckpointedFunction;
import org.apache.flink.streaming.api.functions.sink.RichSinkFunction;
import org.apache.flink.util.Preconditions;
import javax.annotation.Nonnull;
import java.io.IOException;
/** A generic SinkFunction for JDBC. */
@Internal
public class GenericJdbcSinkFunction<T> extends RichSinkFunction<T>
implements CheckpointedFunction {
private final AbstractJdbcOutputFormat<T> outputFormat;
public GenericJdbcSinkFunction(@Nonnull AbstractJdbcOutputFormat<T> outputFormat) {
this.outputFormat = Preconditions.checkNotNull(outputFormat);
}
@Override
public void open(Configuration parameters) throws Exception {
super.open(parameters);
RuntimeContext ctx = getRuntimeContext();
outputFormat.setRuntimeContext(ctx);
outputFormat.open(ctx.getIndexOfThisSubtask(), ctx.getNumberOfParallelSubtasks());
}
@Override
public void invoke(T value, Context context) throws IOException {
outputFormat.writeRecord(value);
}
@Override
public void initializeState(FunctionInitializationContext context) {}
@Override
public void snapshotState(FunctionSnapshotContext context) throws Exception {
outputFormat.flush();
}
@Override
public void close() {
outputFormat.close();
}
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connector.phoenix.internal;
import org.apache.flink.annotation.Internal;
import org.apache.flink.api.common.functions.RuntimeContext;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.connector.phoenix.JdbcExecutionOptions;
import org.apache.flink.connector.phoenix.JdbcStatementBuilder;
import org.apache.flink.connector.phoenix.internal.connection.JdbcConnectionProvider;
import org.apache.flink.connector.phoenix.internal.connection.PhoneixJdbcConnectionProvider;
import org.apache.flink.connector.phoenix.internal.executor.JdbcBatchStatementExecutor;
import org.apache.flink.connector.phoenix.internal.options.JdbcDmlOptions;
import org.apache.flink.connector.phoenix.internal.options.JdbcOptions;
import org.apache.flink.connector.phoenix.statement.FieldNamedPreparedStatementImpl;
import org.apache.flink.connector.phoenix.utils.JdbcUtils;
import org.apache.flink.types.Row;
import org.apache.flink.util.Preconditions;
import org.apache.flink.util.concurrent.ExecutorThreadFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.annotation.Nonnull;
import java.io.IOException;
import java.io.Serializable;
import java.sql.Connection;
import java.sql.SQLException;
import java.util.HashMap;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;
import java.util.function.Function;
import static org.apache.flink.connector.phoenix.utils.JdbcUtils.setRecordToStatement;
import static org.apache.flink.util.Preconditions.checkNotNull;
/** A JDBC outputFormat that supports batching records before writing records to database. */
@Internal
public class JdbcBatchingOutputFormat<
In, JdbcIn, JdbcExec extends JdbcBatchStatementExecutor<JdbcIn>>
extends AbstractJdbcOutputFormat<In> {
/**
* An interface to extract a value from given argument.
*
* @param <F> The type of given argument
* @param <T> The type of the return value
*/
public interface RecordExtractor<F, T> extends Function<F, T>, Serializable {
static <T> RecordExtractor<T, T> identity() {
return x -> x;
}
}
/**
* A factory for creating {@link JdbcBatchStatementExecutor} instance.
*
* @param <T> The type of instance.
*/
public interface StatementExecutorFactory<T extends JdbcBatchStatementExecutor<?>>
extends Function<RuntimeContext, T>, Serializable {}
private static final long serialVersionUID = 1L;
private static final Logger LOG = LoggerFactory.getLogger(JdbcBatchingOutputFormat.class);
private final JdbcExecutionOptions executionOptions;
private final StatementExecutorFactory<JdbcExec> statementExecutorFactory;
private final RecordExtractor<In, JdbcIn> jdbcRecordExtractor;
private transient JdbcExec jdbcStatementExecutor;
private transient int batchCount = 0;
private transient volatile boolean closed = false;
private transient ScheduledExecutorService scheduler;
private transient ScheduledFuture<?> scheduledFuture;
private transient volatile Exception flushException;
private Connection conn = null;
public JdbcBatchingOutputFormat(
@Nonnull JdbcConnectionProvider connectionProvider,
@Nonnull JdbcExecutionOptions executionOptions,
@Nonnull StatementExecutorFactory<JdbcExec> statementExecutorFactory,
@Nonnull RecordExtractor<In, JdbcIn> recordExtractor) {
super(connectionProvider);
this.executionOptions = checkNotNull(executionOptions);
this.statementExecutorFactory = checkNotNull(statementExecutorFactory);
this.jdbcRecordExtractor = checkNotNull(recordExtractor);
}
/**
* Connects to the target database and initializes the prepared statement.
*
* @param taskNumber The number of the parallel instance.
*/
@Override
public void open(int taskNumber, int numTasks) throws IOException {
//super.open(taskNumber, numTasks);
try {
conn = connectionProvider.getOrEstablishConnection();
} catch (Exception e) {
throw new IOException("unable to open JDBC writer", e);
}
jdbcStatementExecutor = createAndOpenStatementExecutor(statementExecutorFactory);
if (executionOptions.getBatchIntervalMs() != 0 && executionOptions.getBatchSize() != 1) {
this.scheduler =
Executors.newScheduledThreadPool(
1, new ExecutorThreadFactory("jdbc-upsert-output-format"));
this.scheduledFuture =
this.scheduler.scheduleWithFixedDelay(
() -> {
synchronized (JdbcBatchingOutputFormat.this) {
if (!closed) {
try {
flush();
} catch (Exception e) {
flushException = e;
}
}
}
},
executionOptions.getBatchIntervalMs(),
executionOptions.getBatchIntervalMs(),
TimeUnit.MILLISECONDS);
}
}
private JdbcExec createAndOpenStatementExecutor(
StatementExecutorFactory<JdbcExec> statementExecutorFactory) throws IOException {
JdbcExec exec = statementExecutorFactory.apply(getRuntimeContext());
try {
exec.prepareStatements(connectionProvider.getConnection());
} catch (SQLException e) {
throw new IOException("unable to open JDBC writer", e);
}
return exec;
}
private void checkFlushException() {
if (flushException != null) {
throw new RuntimeException("Writing records to JDBC failed.", flushException);
}
}
@Override
public final synchronized void writeRecord(In record) throws IOException {
checkFlushException();
try {
addToBatch(record, jdbcRecordExtractor.apply(record));
batchCount++;
if (executionOptions.getBatchSize() > 0
&& batchCount >= executionOptions.getBatchSize()) {
flush();
}
} catch (Exception e) {
throw new IOException("Writing records to JDBC failed.", e);
}
}
protected void addToBatch(In original, JdbcIn extracted) throws SQLException {
jdbcStatementExecutor.addToBatch(extracted);
}
@Override
public synchronized void flush() throws IOException {
checkFlushException();
for (int i = 0; i <= executionOptions.getMaxRetries(); i++) {
try {
attemptFlush();
//conn.commit();
batchCount = 0;
break;
} catch (SQLException e) {
LOG.error("JDBC executeBatch error, retry times = {}", i, e);
if (i >= executionOptions.getMaxRetries()) {
throw new IOException(e);
}
try {
if (!connectionProvider.isConnectionValid()) {
updateExecutor(true);
}
} catch (Exception exception) {
LOG.error(
"JDBC connection is not valid, and reestablish connection failed.",
exception);
throw new IOException("Reestablish JDBC connection failed", exception);
}
try {
Thread.sleep(1000 * i);
} catch (InterruptedException ex) {
Thread.currentThread().interrupt();
throw new IOException(
"unable to flush; interrupted while doing another attempt", e);
}
}
}
}
protected void attemptFlush() throws SQLException {
jdbcStatementExecutor.executeBatch(conn);
}
/** Executes prepared statement and closes all resources of this instance. */
@Override
public synchronized void close() {
if (!closed) {
closed = true;
if (this.scheduledFuture != null) {
scheduledFuture.cancel(false);
this.scheduler.shutdown();
}
if (batchCount > 0) {
try {
LOG.info("关闭连接前 刷写数据 !!! batchCount: "+batchCount);
flush();
} catch (Exception e) {
LOG.warn("Writing records to JDBC failed.", e);
throw new RuntimeException("Writing records to JDBC failed.", e);
}
}
try {
if (jdbcStatementExecutor != null) {
jdbcStatementExecutor.closeStatements();
}
} catch (SQLException e) {
LOG.warn("Close JDBC writer failed.", e);
}
}
super.close();
checkFlushException();
}
public static Builder builder() {
return new Builder();
}
/** Builder for a {@link JdbcBatchingOutputFormat}. */
public static class Builder {
private JdbcOptions options;
private String[] fieldNames;
private String[] keyFields;
private int[] fieldTypes;
private JdbcExecutionOptions.Builder executionOptionsBuilder =
JdbcExecutionOptions.builder();
/** required, jdbc options. */
public Builder setOptions(JdbcOptions options) {
this.options = options;
return this;
}
/** required, field names of this jdbc sink. */
public Builder setFieldNames(String[] fieldNames) {
this.fieldNames = fieldNames;
return this;
}
/** required, upsert unique keys. */
public Builder setKeyFields(String[] keyFields) {
this.keyFields = keyFields;
return this;
}
/** required, field types of this jdbc sink. */
public Builder setFieldTypes(int[] fieldTypes) {
this.fieldTypes = fieldTypes;
return this;
}
/**
         * optional, maximum flush size (counts all append, upsert and delete records); once this
         * many records have been buffered, the data is flushed.
*/
public Builder setFlushMaxSize(int flushMaxSize) {
executionOptionsBuilder.withBatchSize(flushMaxSize);
return this;
}
        /** optional, flush interval in milliseconds; after this interval, an asynchronous thread flushes the buffered data. */
public Builder setFlushIntervalMills(long flushIntervalMills) {
executionOptionsBuilder.withBatchIntervalMs(flushIntervalMills);
return this;
}
        /** optional, maximum number of retries for the JDBC connector. */
public Builder setMaxRetryTimes(int maxRetryTimes) {
executionOptionsBuilder.withMaxRetries(maxRetryTimes);
return this;
}
/**
* Finalizes the configuration and checks validity.
*
         * @return Configured {@link JdbcBatchingOutputFormat}
*/
public JdbcBatchingOutputFormat<Tuple2<Boolean, Row>, Row, JdbcBatchStatementExecutor<Row>>
build() {
checkNotNull(options, "No options supplied.");
checkNotNull(fieldNames, "No fieldNames supplied.");
JdbcDmlOptions dml =
JdbcDmlOptions.builder()
.withTableName(options.getTableName())
.withDialect(options.getDialect())
.withFieldNames(fieldNames)
.withKeyFields(keyFields)
.withFieldTypes(fieldTypes)
.build();
if (dml.getKeyFields().isPresent() && dml.getKeyFields().get().length > 0) {
return new TableJdbcUpsertOutputFormat(
new PhoneixJdbcConnectionProvider(options,this.options.isNamespaceMappingEnabled(),this.options.isMapSystemTablesEnabled()),
dml,
executionOptionsBuilder.build());
} else {
// warn: don't close over builder fields
String sql =
FieldNamedPreparedStatementImpl.parseNamedStatement(
options.getDialect()
.getInsertIntoStatement(
dml.getTableName(), dml.getFieldNames()),
new HashMap<>());
return new JdbcBatchingOutputFormat<>(
new PhoneixJdbcConnectionProvider(options,this.options.isNamespaceMappingEnabled(),this.options.isMapSystemTablesEnabled()),
executionOptionsBuilder.build(),
ctx ->
createSimpleRowExecutor(
sql,
dml.getFieldTypes(),
ctx.getExecutionConfig().isObjectReuseEnabled()),
tuple2 -> {
Preconditions.checkArgument(tuple2.f0);
return tuple2.f1;
});
}
}
}
static JdbcBatchStatementExecutor<Row> createSimpleRowExecutor(
String sql, int[] fieldTypes, boolean objectReuse) {
return JdbcBatchStatementExecutor.simple(
sql,
createRowJdbcStatementBuilder(fieldTypes),
objectReuse ? Row::copy : Function.identity());
}
/**
* Creates a {@link JdbcStatementBuilder} for {@link Row} using the provided SQL types array.
* Uses {@link JdbcUtils#setRecordToStatement}
*/
static JdbcStatementBuilder<Row> createRowJdbcStatementBuilder(int[] types) {
return (st, record) -> setRecordToStatement(st, types, record);
}
public void updateExecutor(boolean reconnect) throws SQLException, ClassNotFoundException {
jdbcStatementExecutor.closeStatements();
jdbcStatementExecutor.prepareStatements(
reconnect
? connectionProvider.reestablishConnection()
: connectionProvider.getConnection());
}
}
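// Usage sketch (illustrative only, not part of the connector): wiring this output format through its
// Builder. The jdbcOptions variable, column names and SQL types below are hypothetical placeholders;
// a non-empty key field array makes build() return the upsert/delete variant.
//
//     JdbcBatchingOutputFormat<Tuple2<Boolean, Row>, Row, JdbcBatchStatementExecutor<Row>> format =
//             JdbcBatchingOutputFormat.builder()
//                     .setOptions(jdbcOptions)
//                     .setFieldNames(new String[] {"ID", "NAME"})
//                     .setKeyFields(new String[] {"ID"})
//                     .setFieldTypes(new int[] {java.sql.Types.INTEGER, java.sql.Types.VARCHAR})
//                     .setFlushMaxSize(100)
//                     .setFlushIntervalMills(1000L)
//                     .setMaxRetryTimes(3)
//                     .build();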
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connector.phoenix.internal;
import org.apache.flink.annotation.VisibleForTesting;
import org.apache.flink.api.common.functions.RuntimeContext;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.connector.phoenix.JdbcExecutionOptions;
import org.apache.flink.connector.phoenix.internal.connection.JdbcConnectionProvider;
import org.apache.flink.connector.phoenix.internal.executor.InsertOrUpdateJdbcExecutor;
import org.apache.flink.connector.phoenix.internal.executor.JdbcBatchStatementExecutor;
import org.apache.flink.connector.phoenix.internal.options.JdbcDmlOptions;
import org.apache.flink.connector.phoenix.statement.FieldNamedPreparedStatementImpl;
import org.apache.flink.types.Row;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.sql.Connection;
import java.sql.SQLException;
import java.util.Arrays;
import java.util.HashMap;
import java.util.function.Function;
import static org.apache.flink.connector.phoenix.utils.JdbcUtils.getPrimaryKey;
import static org.apache.flink.connector.phoenix.utils.JdbcUtils.setRecordToStatement;
import static org.apache.flink.util.Preconditions.checkArgument;
class TableJdbcUpsertOutputFormat
extends JdbcBatchingOutputFormat<
Tuple2<Boolean, Row>, Row, JdbcBatchStatementExecutor<Row>> {
private static final Logger LOG = LoggerFactory.getLogger(TableJdbcUpsertOutputFormat.class);
private JdbcBatchStatementExecutor<Row> deleteExecutor;
private final StatementExecutorFactory<JdbcBatchStatementExecutor<Row>>
deleteStatementExecutorFactory;
private Connection conn = null;
TableJdbcUpsertOutputFormat(
JdbcConnectionProvider connectionProvider,
JdbcDmlOptions dmlOptions,
JdbcExecutionOptions batchOptions) {
this(
connectionProvider,
batchOptions,
ctx -> createUpsertRowExecutor(dmlOptions, ctx),
ctx -> createDeleteExecutor(dmlOptions, ctx));
}
@VisibleForTesting
TableJdbcUpsertOutputFormat(
JdbcConnectionProvider connectionProvider,
JdbcExecutionOptions batchOptions,
StatementExecutorFactory<JdbcBatchStatementExecutor<Row>> statementExecutorFactory,
StatementExecutorFactory<JdbcBatchStatementExecutor<Row>>
deleteStatementExecutorFactory) {
super(connectionProvider, batchOptions, statementExecutorFactory, tuple2 -> tuple2.f1);
this.deleteStatementExecutorFactory = deleteStatementExecutorFactory;
}
@Override
public void open(int taskNumber, int numTasks) throws IOException {
super.open(taskNumber, numTasks);
try {
conn = connectionProvider.getOrEstablishConnection();
} catch (Exception e) {
throw new IOException("unable to open JDBC writer", e);
}
deleteExecutor = deleteStatementExecutorFactory.apply(getRuntimeContext());
try {
deleteExecutor.prepareStatements(connectionProvider.getConnection());
} catch (SQLException e) {
throw new IOException(e);
}
}
private static JdbcBatchStatementExecutor<Row> createDeleteExecutor(
JdbcDmlOptions dmlOptions, RuntimeContext ctx) {
int[] pkFields =
Arrays.stream(dmlOptions.getFieldNames())
.mapToInt(Arrays.asList(dmlOptions.getFieldNames())::indexOf)
.toArray();
int[] pkTypes =
dmlOptions.getFieldTypes() == null
? null
: Arrays.stream(pkFields).map(f -> dmlOptions.getFieldTypes()[f]).toArray();
String deleteSql =
FieldNamedPreparedStatementImpl.parseNamedStatement(
dmlOptions
.getDialect()
.getDeleteStatement(
dmlOptions.getTableName(), dmlOptions.getFieldNames()),
new HashMap<>());
return createKeyedRowExecutor(pkFields, pkTypes, deleteSql);
}
@Override
protected void addToBatch(Tuple2<Boolean, Row> original, Row extracted) throws SQLException {
if (original.f0) {
super.addToBatch(original, extracted);
} else {
deleteExecutor.addToBatch(extracted);
}
}
@Override
public synchronized void close() {
try {
super.close();
} finally {
try {
if (deleteExecutor != null) {
deleteExecutor.closeStatements();
}
} catch (SQLException e) {
LOG.warn("unable to close delete statement runner", e);
}
}
}
@Override
protected void attemptFlush() throws SQLException {
super.attemptFlush();
deleteExecutor.executeBatch(conn);
}
@Override
public void updateExecutor(boolean reconnect) throws SQLException, ClassNotFoundException {
super.updateExecutor(reconnect);
deleteExecutor.closeStatements();
deleteExecutor.prepareStatements(connectionProvider.getConnection());
}
private static JdbcBatchStatementExecutor<Row> createKeyedRowExecutor(
int[] pkFields, int[] pkTypes, String sql) {
return JdbcBatchStatementExecutor.keyed(
sql,
createRowKeyExtractor(pkFields),
(st, record) ->
setRecordToStatement(
st, pkTypes, createRowKeyExtractor(pkFields).apply(record)));
}
private static JdbcBatchStatementExecutor<Row> createUpsertRowExecutor(
JdbcDmlOptions opt, RuntimeContext ctx) {
checkArgument(opt.getKeyFields().isPresent());
int[] pkFields =
Arrays.stream(opt.getKeyFields().get())
.mapToInt(Arrays.asList(opt.getFieldNames())::indexOf)
.toArray();
int[] pkTypes =
opt.getFieldTypes() == null
? null
: Arrays.stream(pkFields).map(f -> opt.getFieldTypes()[f]).toArray();
return opt.getDialect()
.getUpsertStatement(
opt.getTableName(), opt.getFieldNames(), opt.getKeyFields().get())
.map(
sql ->
createSimpleRowExecutor(
parseNamedStatement(sql),
opt.getFieldTypes(),
ctx.getExecutionConfig().isObjectReuseEnabled()))
.orElseGet(
() ->
new InsertOrUpdateJdbcExecutor<>(
parseNamedStatement(
opt.getDialect()
.getRowExistsStatement(
opt.getTableName(),
opt.getKeyFields().get())),
parseNamedStatement(
opt.getDialect()
.getInsertIntoStatement(
opt.getTableName(),
opt.getFieldNames())),
parseNamedStatement(
opt.getDialect()
.getUpdateStatement(
opt.getTableName(),
opt.getFieldNames(),
opt.getKeyFields().get())),
createRowJdbcStatementBuilder(pkTypes),
createRowJdbcStatementBuilder(opt.getFieldTypes()),
createRowJdbcStatementBuilder(opt.getFieldTypes()),
createRowKeyExtractor(pkFields),
ctx.getExecutionConfig().isObjectReuseEnabled()
? Row::copy
: Function.identity()));
}
private static String parseNamedStatement(String statement) {
return FieldNamedPreparedStatementImpl.parseNamedStatement(statement, new HashMap<>());
}
private static Function<Row, Row> createRowKeyExtractor(int[] pkFields) {
return row -> getPrimaryKey(row, pkFields);
}
}
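// Note (illustrative only): the Tuple2<Boolean, Row> input encodes the change kind. For example, a
// record Tuple2.of(true, row) is routed to the upsert executor by addToBatch, while
// Tuple2.of(false, row) is routed to the delete executor generated from the dialect's delete statement.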
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connector.phoenix.internal.connection;
import org.apache.flink.annotation.Internal;
import javax.annotation.Nullable;
import java.sql.Connection;
import java.sql.SQLException;
/** JDBC connection provider. */
@Internal
public interface JdbcConnectionProvider {
/**
* Get existing connection.
*
* @return existing connection
*/
@Nullable
Connection getConnection();
/**
     * Check whether the existing connection, if any, is valid via {@link
     * Connection#isValid(int)}.
     *
     * @return true if an existing connection is valid
     * @throws SQLException sql exception thrown from {@link Connection#isValid(int)}
*/
boolean isConnectionValid() throws SQLException;
/**
     * Get the existing connection or establish a new one if there is none.
*
* @return existing connection or newly established connection
* @throws SQLException sql exception
* @throws ClassNotFoundException driver class not found
*/
Connection getOrEstablishConnection() throws SQLException, ClassNotFoundException;
/** Close possible existing connection. */
void closeConnection();
/**
     * Close the possible existing connection and establish a new one.
*
* @return newly established connection
* @throws SQLException sql exception
* @throws ClassNotFoundException driver class not found
*/
Connection reestablishConnection() throws SQLException, ClassNotFoundException;
}
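// Usage sketch (illustrative only): the lifecycle a caller such as the batching output format follows
// against this interface; "provider" stands for any JdbcConnectionProvider implementation.
//
//     Connection conn = provider.getOrEstablishConnection();   // lazily opens the connection
//     // ... execute statements on conn ...
//     if (!provider.isConnectionValid()) {
//         conn = provider.reestablishConnection();             // close and reopen after a failure
//     }
//     provider.closeConnection();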
package org.apache.flink.connector.phoenix.internal.connection;
import org.apache.flink.connector.phoenix.JdbcConnectionOptions;
import org.apache.flink.util.Preconditions;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.Serializable;
import java.sql.Connection;
import java.sql.Driver;
import java.sql.DriverManager;
import java.sql.SQLException;
import java.util.Enumeration;
import java.util.Properties;
/**
 * PhoneixJdbcConnectionProvider: a Phoenix {@link JdbcConnectionProvider} that can optionally enable
 * schema namespace mapping and system-table mapping on the created connection.
*
* @author gy
* @since 2022/3/17 9:04
**/
public class PhoneixJdbcConnectionProvider implements JdbcConnectionProvider, Serializable {
private static final Logger LOG = LoggerFactory.getLogger(PhoneixJdbcConnectionProvider.class);
private static final long serialVersionUID = 1L;
private final JdbcConnectionOptions jdbcOptions;
private transient Driver loadedDriver;
private transient Connection connection;
private Boolean namespaceMappingEnabled;
private Boolean mapSystemTablesEnabled;
public PhoneixJdbcConnectionProvider(JdbcConnectionOptions jdbcOptions) {
this.jdbcOptions = jdbcOptions;
}
public PhoneixJdbcConnectionProvider(JdbcConnectionOptions jdbcOptions, boolean namespaceMappingEnabled, boolean mapSystemTablesEnabled) {
this.jdbcOptions = jdbcOptions;
this.namespaceMappingEnabled = namespaceMappingEnabled;
this.mapSystemTablesEnabled = mapSystemTablesEnabled;
}
public Connection getConnection() {
return this.connection;
}
public boolean isConnectionValid() throws SQLException {
return this.connection != null && this.connection.isValid(this.jdbcOptions.getConnectionCheckTimeoutSeconds());
}
private static Driver loadDriver(String driverName) throws SQLException, ClassNotFoundException {
Preconditions.checkNotNull(driverName);
        Enumeration<Driver> drivers = DriverManager.getDrivers();
        Driver driver;
        do {
            if (!drivers.hasMoreElements()) {
                Class<?> clazz = Class.forName(driverName, true, Thread.currentThread().getContextClassLoader());
                try {
                    return (Driver) clazz.newInstance();
                } catch (Exception e) {
                    throw new SQLException("Failed to create driver of class " + driverName, e);
                }
            }
            driver = drivers.nextElement();
        } while (!driver.getClass().getName().equals(driverName));
}
private Driver getLoadedDriver() throws SQLException, ClassNotFoundException {
if (this.loadedDriver == null) {
this.loadedDriver = loadDriver(this.jdbcOptions.getDriverName());
}
return this.loadedDriver;
}
    public Connection getOrEstablishConnection() throws SQLException, ClassNotFoundException {
        if (this.connection != null) {
            return this.connection;
        }
        if (this.jdbcOptions.getDriverName() == null) {
            this.connection = DriverManager.getConnection(
                    this.jdbcOptions.getDbURL(),
                    this.jdbcOptions.getUsername().orElse(null),
                    this.jdbcOptions.getPassword().orElse(null));
        } else {
            Driver driver = this.getLoadedDriver();
            Properties info = new Properties();
            this.jdbcOptions.getUsername().ifPresent(user -> info.setProperty("user", user));
            this.jdbcOptions.getPassword().ifPresent(password -> info.setProperty("password", password));
            // Only enable Phoenix schema namespace mapping when both flags were explicitly set to true.
            if (Boolean.TRUE.equals(this.namespaceMappingEnabled) && Boolean.TRUE.equals(this.mapSystemTablesEnabled)) {
                info.setProperty("phoenix.schema.isNamespaceMappingEnabled", "true");
                info.setProperty("phoenix.schema.mapSystemTablesToNamespace", "true");
            }
            this.connection = driver.connect(this.jdbcOptions.getDbURL(), info);
            if (this.connection == null) {
                // Driver.connect returns null for an unsuitable URL; fail before touching the connection.
                throw new SQLException("No suitable driver found for " + this.jdbcOptions.getDbURL(), "08001");
            }
            this.connection.setAutoCommit(false);
        }
        return this.connection;
    }
public void closeConnection() {
if (this.connection != null) {
try {
this.connection.close();
            } catch (SQLException e) {
                LOG.warn("JDBC connection close failed.", e);
} finally {
this.connection = null;
}
}
}
public Connection reestablishConnection() throws SQLException, ClassNotFoundException {
this.closeConnection();
return this.getOrEstablishConnection();
}
static {
DriverManager.getDrivers();
}
}
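// Usage sketch (illustrative only): enabling Phoenix schema namespace mapping via the three-argument
// constructor. The connectionOptions variable is a JdbcConnectionOptions assumed to be built elsewhere.
//
//     JdbcConnectionProvider provider =
//             new PhoneixJdbcConnectionProvider(connectionOptions, true, true);
//     Connection conn = provider.getOrEstablishConnection();
//     // the provider sets phoenix.schema.isNamespaceMappingEnabled and
//     // phoenix.schema.mapSystemTablesToNamespace to "true" and disables auto-commit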
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connector.phoenix.internal.converter;
import org.apache.flink.connector.phoenix.internal.converter.JdbcRowConverter;
import org.apache.flink.connector.phoenix.statement.FieldNamedPreparedStatement;
import org.apache.flink.connector.phoenix.utils.JdbcTypeUtil;
import org.apache.flink.table.data.DecimalData;
import org.apache.flink.table.data.GenericRowData;
import org.apache.flink.table.data.RowData;
import org.apache.flink.table.data.StringData;
import org.apache.flink.table.data.TimestampData;
import org.apache.flink.table.types.logical.DecimalType;
import org.apache.flink.table.types.logical.LogicalType;
import org.apache.flink.table.types.logical.LogicalTypeRoot;
import org.apache.flink.table.types.logical.RowType;
import org.apache.flink.table.types.logical.TimestampType;
import org.apache.flink.table.types.utils.TypeConversions;
import java.io.Serializable;
import java.math.BigDecimal;
import java.math.BigInteger;
import java.sql.Date;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Time;
import java.sql.Timestamp;
import java.time.LocalDate;
import java.time.LocalDateTime;
import java.time.LocalTime;
import static org.apache.flink.util.Preconditions.checkNotNull;
/** Base class for all converters that convert between JDBC object and Flink internal object. */
public abstract class AbstractJdbcRowConverter implements JdbcRowConverter {
protected final RowType rowType;
protected final JdbcDeserializationConverter[] toInternalConverters;
protected final JdbcSerializationConverter[] toExternalConverters;
protected final LogicalType[] fieldTypes;
public abstract String converterName();
public AbstractJdbcRowConverter(RowType rowType) {
this.rowType = checkNotNull(rowType);
this.fieldTypes =
rowType.getFields().stream()
.map(RowType.RowField::getType)
.toArray(LogicalType[]::new);
this.toInternalConverters = new JdbcDeserializationConverter[rowType.getFieldCount()];
this.toExternalConverters = new JdbcSerializationConverter[rowType.getFieldCount()];
for (int i = 0; i < rowType.getFieldCount(); i++) {
toInternalConverters[i] = createNullableInternalConverter(rowType.getTypeAt(i));
toExternalConverters[i] = createNullableExternalConverter(fieldTypes[i]);
}
}
@Override
public RowData toInternal(ResultSet resultSet) throws SQLException {
GenericRowData genericRowData = new GenericRowData(rowType.getFieldCount());
for (int pos = 0; pos < rowType.getFieldCount(); pos++) {
Object field = resultSet.getObject(pos + 1);
genericRowData.setField(pos, toInternalConverters[pos].deserialize(field));
}
return genericRowData;
}
@Override
public FieldNamedPreparedStatement toExternal(
RowData rowData, FieldNamedPreparedStatement statement) throws SQLException {
for (int index = 0; index < rowData.getArity(); index++) {
toExternalConverters[index].serialize(rowData, index, statement);
}
return statement;
}
/** Runtime converter to convert JDBC field to {@link RowData} type object. */
@FunctionalInterface
interface JdbcDeserializationConverter extends Serializable {
/**
         * Converts a JDBC field object read from a {@link ResultSet} into the internal data structure object.
         *
         * @param jdbcField the JDBC field value to convert; may be null
*/
Object deserialize(Object jdbcField) throws SQLException;
}
/**
* Runtime converter to convert {@link RowData} field to java object and fill into the {@link
* PreparedStatement}.
*/
@FunctionalInterface
interface JdbcSerializationConverter extends Serializable {
void serialize(RowData rowData, int index, FieldNamedPreparedStatement statement)
throws SQLException;
}
/**
* Create a nullable runtime {@link JdbcDeserializationConverter} from given {@link
* LogicalType}.
*/
protected JdbcDeserializationConverter createNullableInternalConverter(LogicalType type) {
return wrapIntoNullableInternalConverter(createInternalConverter(type));
}
protected JdbcDeserializationConverter wrapIntoNullableInternalConverter(
JdbcDeserializationConverter jdbcDeserializationConverter) {
return val -> {
if (val == null) {
return null;
} else {
return jdbcDeserializationConverter.deserialize(val);
}
};
}
protected JdbcDeserializationConverter createInternalConverter(LogicalType type) {
switch (type.getTypeRoot()) {
case NULL:
return val -> null;
case BOOLEAN:
case FLOAT:
case DOUBLE:
case INTERVAL_YEAR_MONTH:
case INTERVAL_DAY_TIME:
return val -> val;
case TINYINT:
return val -> ((Integer) val).byteValue();
case SMALLINT:
                // Converter for SMALLINT that casts the value to int and then returns the short
                // value, since JDBC 1.0 uses the int type for small values.
return val -> val instanceof Integer ? ((Integer) val).shortValue() : val;
case INTEGER:
return val -> val;
case BIGINT:
return val -> val;
case DECIMAL:
final int precision = ((DecimalType) type).getPrecision();
final int scale = ((DecimalType) type).getScale();
// using decimal(20, 0) to support db type bigint unsigned, user should define
// decimal(20, 0) in SQL,
// but other precision like decimal(30, 0) can work too from lenient consideration.
return val ->
val instanceof BigInteger
? DecimalData.fromBigDecimal(
new BigDecimal((BigInteger) val, 0), precision, scale)
: DecimalData.fromBigDecimal((BigDecimal) val, precision, scale);
case DATE:
return val -> (int) (((Date) val).toLocalDate().toEpochDay());
case TIME_WITHOUT_TIME_ZONE:
return val -> (int) (((Time) val).toLocalTime().toNanoOfDay() / 1_000_000L);
case TIMESTAMP_WITH_TIME_ZONE:
case TIMESTAMP_WITHOUT_TIME_ZONE:
return val ->
val instanceof LocalDateTime
? TimestampData.fromLocalDateTime((LocalDateTime) val)
: TimestampData.fromTimestamp((Timestamp) val);
case CHAR:
case VARCHAR:
return val -> StringData.fromString((String) val);
case BINARY:
case VARBINARY:
return val -> (byte[]) val;
case ARRAY:
case ROW:
case MAP:
case MULTISET:
case RAW:
default:
throw new UnsupportedOperationException("Unsupported type:" + type);
}
}
    /** Create a nullable {@link JdbcSerializationConverter} from the given {@link LogicalType}. */
protected JdbcSerializationConverter createNullableExternalConverter(LogicalType type) {
return wrapIntoNullableExternalConverter(createExternalConverter(type), type);
}
protected JdbcSerializationConverter wrapIntoNullableExternalConverter(
JdbcSerializationConverter jdbcSerializationConverter, LogicalType type) {
final int sqlType =
JdbcTypeUtil.typeInformationToSqlType(
TypeConversions.fromDataTypeToLegacyInfo(
TypeConversions.fromLogicalToDataType(type)));
return (val, index, statement) -> {
if (val == null
|| val.isNullAt(index)
|| LogicalTypeRoot.NULL.equals(type.getTypeRoot())) {
statement.setNull(index, sqlType);
} else {
jdbcSerializationConverter.serialize(val, index, statement);
}
};
}
protected JdbcSerializationConverter createExternalConverter(LogicalType type) {
switch (type.getTypeRoot()) {
case BOOLEAN:
return (val, index, statement) ->
statement.setBoolean(index, val.getBoolean(index));
case TINYINT:
return (val, index, statement) -> statement.setByte(index, val.getByte(index));
case SMALLINT:
return (val, index, statement) -> statement.setShort(index, val.getShort(index));
case INTEGER:
case INTERVAL_YEAR_MONTH:
return (val, index, statement) -> statement.setInt(index, val.getInt(index));
case BIGINT:
case INTERVAL_DAY_TIME:
return (val, index, statement) -> statement.setLong(index, val.getLong(index));
case FLOAT:
return (val, index, statement) -> statement.setFloat(index, val.getFloat(index));
case DOUBLE:
return (val, index, statement) -> statement.setDouble(index, val.getDouble(index));
case CHAR:
case VARCHAR:
// value is BinaryString
return (val, index, statement) ->
statement.setString(index, val.getString(index).toString());
case BINARY:
case VARBINARY:
return (val, index, statement) -> statement.setBytes(index, val.getBinary(index));
case DATE:
return (val, index, statement) ->
statement.setDate(
index, Date.valueOf(LocalDate.ofEpochDay(val.getInt(index))));
case TIME_WITHOUT_TIME_ZONE:
return (val, index, statement) ->
statement.setTime(
index,
Time.valueOf(
LocalTime.ofNanoOfDay(val.getInt(index) * 1_000_000L)));
case TIMESTAMP_WITH_TIME_ZONE:
case TIMESTAMP_WITHOUT_TIME_ZONE:
final int timestampPrecision = ((TimestampType) type).getPrecision();
return (val, index, statement) ->
statement.setTimestamp(
index, val.getTimestamp(index, timestampPrecision).toTimestamp());
case DECIMAL:
final int decimalPrecision = ((DecimalType) type).getPrecision();
final int decimalScale = ((DecimalType) type).getScale();
return (val, index, statement) ->
statement.setBigDecimal(
index,
val.getDecimal(index, decimalPrecision, decimalScale)
.toBigDecimal());
case ARRAY:
case MAP:
case MULTISET:
case ROW:
case RAW:
default:
throw new UnsupportedOperationException("Unsupported type:" + type);
}
}
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connector.phoenix.internal.converter;
import org.apache.flink.connector.phoenix.statement.FieldNamedPreparedStatement;
import org.apache.flink.table.data.RowData;
import java.io.Serializable;
import java.sql.ResultSet;
import java.sql.SQLException;
/**
* Converter that is responsible to convert between JDBC object and Flink SQL internal data
* structure {@link RowData}.
*/
public interface JdbcRowConverter extends Serializable {
/**
* Convert data retrieved from {@link ResultSet} to internal {@link RowData}.
*
* @param resultSet ResultSet from JDBC
*/
RowData toInternal(ResultSet resultSet) throws SQLException;
/**
* Convert data retrieved from Flink internal RowData to JDBC Object.
*
* @param rowData The given internal {@link RowData}.
* @param statement The statement to be filled.
* @return The filled statement.
*/
FieldNamedPreparedStatement toExternal(RowData rowData, FieldNamedPreparedStatement statement)
throws SQLException;
}
package org.apache.flink.connector.phoenix.internal.converter;
import org.apache.flink.table.types.logical.RowType;
/**
 * PhoenixRowConverter: row converter for the Phoenix dialect, reusing the default JDBC conversions
 * of {@link AbstractJdbcRowConverter}.
*
* @author gy
* @since 2022/3/16 11:21
**/
public class PhoenixRowConverter extends AbstractJdbcRowConverter {
private static final long serialVersionUID = 1L;
@Override
public String converterName() {
return "Phoenix";
}
public PhoenixRowConverter(RowType rowType) {
super(rowType);
}
}
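// Usage sketch (illustrative only): building a converter for a hypothetical two-column row; rowType
// would normally come from the resolved table schema.
//
//     RowType rowType = RowType.of(new IntType(), new VarCharType(VarCharType.MAX_LENGTH));
//     JdbcRowConverter converter = new PhoenixRowConverter(rowType);
//     RowData internal = converter.toInternal(resultSet);            // JDBC ResultSet -> RowData
//     converter.toExternal(rowData, fieldNamedPreparedStatement);    // RowData -> statement parameters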
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connector.phoenix.internal.executor;
import org.apache.flink.annotation.Internal;
import org.apache.flink.connector.phoenix.JdbcStatementBuilder;
import org.apache.flink.connector.phoenix.table.PhoenixUpsertTableSink;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.annotation.Nonnull;
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;
import java.util.function.Function;
import static org.apache.flink.util.Preconditions.checkNotNull;
/**
* {@link JdbcBatchStatementExecutor} that provides upsert semantics by updating row if it exists
* and inserting otherwise. Used in Table API.
*
 * @deprecated This has been replaced with {@link TableInsertOrUpdateStatementExecutor} and will be
 *     removed once {@link PhoenixUpsertTableSink} is removed.
*/
@Internal
public final class InsertOrUpdateJdbcExecutor<R, K, V> implements JdbcBatchStatementExecutor<R> {
private static final Logger LOG = LoggerFactory.getLogger(InsertOrUpdateJdbcExecutor.class);
private final String existSQL;
private final String insertSQL;
private final String updateSQL;
private final JdbcStatementBuilder<K> existSetter;
private final JdbcStatementBuilder<V> insertSetter;
private final JdbcStatementBuilder<V> updateSetter;
private final Function<R, K> keyExtractor;
private final Function<R, V> valueMapper;
private final Map<K, V> batch;
private transient PreparedStatement existStatement;
private transient PreparedStatement insertStatement;
private transient PreparedStatement updateStatement;
public InsertOrUpdateJdbcExecutor(
@Nonnull String existSQL,
@Nonnull String insertSQL,
@Nonnull String updateSQL,
@Nonnull JdbcStatementBuilder<K> existSetter,
@Nonnull JdbcStatementBuilder<V> insertSetter,
@Nonnull JdbcStatementBuilder<V> updateSetter,
@Nonnull Function<R, K> keyExtractor,
@Nonnull Function<R, V> valueExtractor) {
this.existSQL = checkNotNull(existSQL);
this.updateSQL = checkNotNull(updateSQL);
this.existSetter = checkNotNull(existSetter);
this.insertSQL = checkNotNull(insertSQL);
this.insertSetter = checkNotNull(insertSetter);
this.updateSetter = checkNotNull(updateSetter);
this.keyExtractor = checkNotNull(keyExtractor);
this.valueMapper = checkNotNull(valueExtractor);
this.batch = new HashMap<>();
}
@Override
public void prepareStatements(Connection connection) throws SQLException {
existStatement = connection.prepareStatement(existSQL);
insertStatement = connection.prepareStatement(insertSQL);
updateStatement = connection.prepareStatement(updateSQL);
}
@Override
public void addToBatch(R record) {
batch.put(keyExtractor.apply(record), valueMapper.apply(record));
}
@Override
public void executeBatch(Connection conn) throws SQLException {
if (!batch.isEmpty()) {
for (Map.Entry<K, V> entry : batch.entrySet()) {
processOneRowInBatch(entry.getKey(), entry.getValue());
}
conn.commit();
batch.clear();
}
}
private void processOneRowInBatch(K pk, V row) throws SQLException {
if (exist(pk)) {
updateSetter.accept(updateStatement, row);
updateStatement.executeUpdate();
} else {
insertSetter.accept(insertStatement, row);
insertStatement.executeUpdate();
}
}
private boolean exist(K pk) throws SQLException {
existSetter.accept(existStatement, pk);
try (ResultSet resultSet = existStatement.executeQuery()) {
return resultSet.next();
}
}
@Override
public void closeStatements() throws SQLException {
for (PreparedStatement s :
Arrays.asList(existStatement, insertStatement, updateStatement)) {
if (s != null) {
s.close();
}
}
}
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connector.phoenix.internal.executor;
import org.apache.flink.annotation.Internal;
import org.apache.flink.connector.phoenix.JdbcStatementBuilder;
import java.sql.Connection;
import java.sql.SQLException;
import java.util.function.Function;
/** Executes the given JDBC statement in batch for the accumulated records. */
@Internal
public interface JdbcBatchStatementExecutor<T> {
/** Create statements from connection. */
void prepareStatements(Connection connection) throws SQLException;
void addToBatch(T record) throws SQLException;
    /**
     * Submits a batch of commands to the database for execution.
     *
     * @param conn the JDBC connection used to execute and commit the batch
     */
void executeBatch(Connection conn) throws SQLException;
/** Close JDBC related statements. */
void closeStatements() throws SQLException;
static <T, K> JdbcBatchStatementExecutor<T> keyed(
String sql, Function<T, K> keyExtractor, JdbcStatementBuilder<K> statementBuilder) {
return new KeyedBatchStatementExecutor<>(sql, keyExtractor, statementBuilder);
}
static <T, V> JdbcBatchStatementExecutor<T> simple(
String sql, JdbcStatementBuilder<V> paramSetter, Function<T, V> valueTransformer) {
return new SimpleBatchStatementExecutor<>(sql, paramSetter, valueTransformer);
}
}
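// Usage sketch (illustrative only): the simple(...) factory with a hypothetical Phoenix UPSERT and a
// hand-written JdbcStatementBuilder; executeBatch runs the buffered rows and commits the connection.
//
//     JdbcBatchStatementExecutor<Row> executor = JdbcBatchStatementExecutor.simple(
//             "UPSERT INTO T (ID, NAME) VALUES (?, ?)",
//             (st, row) -> {
//                 st.setInt(1, (Integer) row.getField(0));
//                 st.setString(2, (String) row.getField(1));
//             },
//             Function.identity());
//     executor.prepareStatements(connection);
//     executor.addToBatch(row);
//     executor.executeBatch(connection);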
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connector.phoenix.internal.executor;
import org.apache.flink.connector.phoenix.JdbcStatementBuilder;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.util.HashSet;
import java.util.Set;
import java.util.function.Function;
/**
* A {@link JdbcBatchStatementExecutor} that extracts SQL keys from the supplied stream elements and
* executes a SQL query for them.
*/
class KeyedBatchStatementExecutor<T, K> implements JdbcBatchStatementExecutor<T> {
private static final Logger LOG = LoggerFactory.getLogger(KeyedBatchStatementExecutor.class);
private final String sql;
private final JdbcStatementBuilder<K> parameterSetter;
private final Function<T, K> keyExtractor;
private final Set<K> batch;
private transient PreparedStatement st;
/**
     * Keep object reuse in mind: if it is enabled, the key extractor may need to return a new
     * object.
*/
KeyedBatchStatementExecutor(
String sql, Function<T, K> keyExtractor, JdbcStatementBuilder<K> statementBuilder) {
this.parameterSetter = statementBuilder;
this.keyExtractor = keyExtractor;
this.sql = sql;
this.batch = new HashSet<>();
}
@Override
public void prepareStatements(Connection connection) throws SQLException {
st = connection.prepareStatement(sql);
}
@Override
public void addToBatch(T record) {
batch.add(keyExtractor.apply(record));
}
@Override
public void executeBatch(Connection conn) throws SQLException {
if (!batch.isEmpty()) {
for (K entry : batch) {
parameterSetter.accept(st, entry);
st.executeUpdate();
}
LOG.info("connection commit datasize:" + batch.size());
conn.commit();
batch.clear();
}
}
@Override
public void closeStatements() throws SQLException {
if (st != null) {
st.close();
st = null;
}
}
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connector.phoenix.internal.executor;
import org.apache.flink.connector.phoenix.JdbcStatementBuilder;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.List;
import java.util.function.Function;
/**
* A {@link JdbcBatchStatementExecutor} that executes supplied statement for given the records
* (without any pre-processing).
*/
class SimpleBatchStatementExecutor<T, V> implements JdbcBatchStatementExecutor<T> {
private static final Logger LOG = LoggerFactory.getLogger(SimpleBatchStatementExecutor.class);
private final String sql;
private final JdbcStatementBuilder<V> parameterSetter;
private final Function<T, V> valueTransformer;
private final List<V> batch;
private transient PreparedStatement st;
SimpleBatchStatementExecutor(
String sql, JdbcStatementBuilder<V> statementBuilder, Function<T, V> valueTransformer) {
this.sql = sql;
this.parameterSetter = statementBuilder;
this.valueTransformer = valueTransformer;
this.batch = new ArrayList<>();
}
@Override
public void prepareStatements(Connection connection) throws SQLException {
this.st = connection.prepareStatement(sql);
}
@Override
public void addToBatch(T record) {
batch.add(valueTransformer.apply(record));
}
@Override
public void executeBatch(Connection connection) throws SQLException {
if (!batch.isEmpty()) {
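            // Execute each buffered row individually; the explicit commit below makes the whole set visible.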
for (V r : batch) {
parameterSetter.accept(st, r);
st.executeUpdate();
}
LOG.info("connection commit dataSize:" + batch.size());
connection.commit();
batch.clear();
}
}
@Override
public void closeStatements() throws SQLException {
if (st != null) {
st.close();
st = null;
}
}
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connector.phoenix.internal.executor;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.table.data.RowData;
import org.apache.flink.types.RowKind;
import java.sql.Connection;
import java.sql.SQLException;
import java.util.HashMap;
import java.util.Map;
import java.util.function.Function;
/**
 * Currently, this statement executor is only used by the Table/SQL API to buffer insert/update/delete
 * events and reduce them by key in the buffer before submitting them to the external database.
*/
public final class TableBufferReducedStatementExecutor
implements JdbcBatchStatementExecutor<RowData> {
private final JdbcBatchStatementExecutor<RowData> upsertExecutor;
private final JdbcBatchStatementExecutor<RowData> deleteExecutor;
private final Function<RowData, RowData> keyExtractor;
private final Function<RowData, RowData> valueTransform;
// the mapping is [KEY, <+/-, VALUE>]
private final Map<RowData, Tuple2<Boolean, RowData>> reduceBuffer = new HashMap<>();
public TableBufferReducedStatementExecutor(
JdbcBatchStatementExecutor<RowData> upsertExecutor,
JdbcBatchStatementExecutor<RowData> deleteExecutor,
Function<RowData, RowData> keyExtractor,
Function<RowData, RowData> valueTransform) {
this.upsertExecutor = upsertExecutor;
this.deleteExecutor = deleteExecutor;
this.keyExtractor = keyExtractor;
this.valueTransform = valueTransform;
}
@Override
public void prepareStatements(Connection connection) throws SQLException {
upsertExecutor.prepareStatements(connection);
deleteExecutor.prepareStatements(connection);
}
@Override
public void addToBatch(RowData record) throws SQLException {
RowData key = keyExtractor.apply(record);
boolean flag = changeFlag(record.getRowKind());
RowData value = valueTransform.apply(record); // copy or not
reduceBuffer.put(key, Tuple2.of(flag, value));
}
/**
* Returns true if the row kind is INSERT or UPDATE_AFTER, returns false if the row kind is
* DELETE or UPDATE_BEFORE.
*/
private boolean changeFlag(RowKind rowKind) {
switch (rowKind) {
case INSERT:
case UPDATE_AFTER:
return true;
case DELETE:
case UPDATE_BEFORE:
return false;
default:
throw new UnsupportedOperationException(
String.format(
"Unknown row kind, the supported row kinds is: INSERT, UPDATE_BEFORE, UPDATE_AFTER,"
+ " DELETE, but get: %s.",
rowKind));
}
}
@Override
public void executeBatch(Connection conn) throws SQLException {
for (Map.Entry<RowData, Tuple2<Boolean, RowData>> entry : reduceBuffer.entrySet()) {
if (entry.getValue().f0) {
upsertExecutor.addToBatch(entry.getValue().f1);
} else {
// delete by key
deleteExecutor.addToBatch(entry.getKey());
}
}
upsertExecutor.executeBatch(conn);
deleteExecutor.executeBatch(conn);
reduceBuffer.clear();
}
@Override
public void closeStatements() throws SQLException {
upsertExecutor.closeStatements();
deleteExecutor.closeStatements();
}
}
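// Note (illustrative only): reduceBuffer keeps only the last change per key. For example, buffering
// +I(k, v1), -U(k, v1), +U(k, v2) for the same key results in a single upsert of v2 at executeBatch
// time, whereas a trailing -D(k, v2) would instead result in a single delete of key k.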
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connector.phoenix.internal.executor;
import org.apache.flink.table.data.RowData;
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.List;
import java.util.function.Function;
/**
 * Currently, this statement executor is only used by the Table/SQL API to buffer records, because
 * {@link PreparedStatement#executeBatch()} may fail and clear the buffered records, so we have to
 * buffer the records ourselves and replay them when retrying {@link JdbcBatchStatementExecutor#executeBatch(Connection)}.
*/
public final class TableBufferedStatementExecutor implements JdbcBatchStatementExecutor<RowData> {
private final JdbcBatchStatementExecutor<RowData> statementExecutor;
private final Function<RowData, RowData> valueTransform;
private final List<RowData> buffer = new ArrayList<>();
public TableBufferedStatementExecutor(
JdbcBatchStatementExecutor<RowData> statementExecutor,
Function<RowData, RowData> valueTransform) {
this.statementExecutor = statementExecutor;
this.valueTransform = valueTransform;
}
@Override
public void prepareStatements(Connection connection) throws SQLException {
statementExecutor.prepareStatements(connection);
}
@Override
public void addToBatch(RowData record) throws SQLException {
RowData value = valueTransform.apply(record); // copy or not
buffer.add(value);
}
@Override
public void executeBatch(Connection conn) throws SQLException {
for (RowData value : buffer) {
statementExecutor.addToBatch(value);
}
statementExecutor.executeBatch(conn);
buffer.clear();
}
@Override
public void closeStatements() throws SQLException {
statementExecutor.closeStatements();
}
}
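// Note (illustrative only): buffer.clear() only runs after executeBatch succeeds, so a failed flush
// leaves the buffered RowData intact and the retry loop in the batching output format replays the
// same records against the (possibly re-prepared) wrapped executor.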
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connector.phoenix.internal.executor;
import org.apache.flink.annotation.Internal;
import org.apache.flink.connector.phoenix.internal.converter.JdbcRowConverter;
import org.apache.flink.connector.phoenix.statement.FieldNamedPreparedStatement;
import org.apache.flink.connector.phoenix.statement.StatementFactory;
import org.apache.flink.table.data.RowData;
import java.sql.Connection;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.Arrays;
import java.util.function.Function;
import static org.apache.flink.util.Preconditions.checkNotNull;
/**
* {@link JdbcBatchStatementExecutor} that provides upsert semantics by updating row if it exists
* and inserting otherwise. Only used in Table/SQL API.
*/
@Internal
public final class TableInsertOrUpdateStatementExecutor
implements JdbcBatchStatementExecutor<RowData> {
private final StatementFactory existStmtFactory;
private final StatementFactory insertStmtFactory;
private final StatementFactory updateStmtFactory;
private final JdbcRowConverter existSetter;
private final JdbcRowConverter insertSetter;
private final JdbcRowConverter updateSetter;
private final Function<RowData, RowData> keyExtractor;
private transient FieldNamedPreparedStatement existStatement;
private transient FieldNamedPreparedStatement insertStatement;
private transient FieldNamedPreparedStatement updateStatement;
public TableInsertOrUpdateStatementExecutor(
StatementFactory existStmtFactory,
StatementFactory insertStmtFactory,
StatementFactory updateStmtFactory,
JdbcRowConverter existSetter,
JdbcRowConverter insertSetter,
JdbcRowConverter updateSetter,
Function<RowData, RowData> keyExtractor) {
this.existStmtFactory = checkNotNull(existStmtFactory);
this.insertStmtFactory = checkNotNull(insertStmtFactory);
this.updateStmtFactory = checkNotNull(updateStmtFactory);
this.existSetter = checkNotNull(existSetter);
this.insertSetter = checkNotNull(insertSetter);
this.updateSetter = checkNotNull(updateSetter);
this.keyExtractor = keyExtractor;
}
@Override
public void prepareStatements(Connection connection) throws SQLException {
existStatement = existStmtFactory.createStatement(connection);
insertStatement = insertStmtFactory.createStatement(connection);
updateStatement = updateStmtFactory.createStatement(connection);
}
@Override
public void addToBatch(RowData record) throws SQLException {
processOneRowInBatch(keyExtractor.apply(record), record);
}
private void processOneRowInBatch(RowData pk, RowData row) throws SQLException {
if (exist(pk)) {
updateSetter.toExternal(row, updateStatement);
updateStatement.addBatch();
} else {
insertSetter.toExternal(row, insertStatement);
insertStatement.addBatch();
}
}
private boolean exist(RowData pk) throws SQLException {
existSetter.toExternal(pk, existStatement);
try (ResultSet resultSet = existStatement.executeQuery()) {
return resultSet.next();
}
}
@Override
public void executeBatch(Connection conn) throws SQLException {
conn.commit();
}
@Override
public void closeStatements() throws SQLException {
for (FieldNamedPreparedStatement s :
Arrays.asList(existStatement, insertStatement, updateStatement)) {
if (s != null) {
s.close();
}
}
}
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connector.phoenix.internal.executor;
import org.apache.flink.connector.phoenix.internal.converter.JdbcRowConverter;
import org.apache.flink.connector.phoenix.statement.FieldNamedPreparedStatement;
import org.apache.flink.connector.phoenix.statement.StatementFactory;
import org.apache.flink.table.data.RowData;
import java.sql.Connection;
import java.sql.SQLException;
import static org.apache.flink.util.Preconditions.checkNotNull;
/**
* A {@link JdbcBatchStatementExecutor} that simply adds the records into batches of {@link
* java.sql.PreparedStatement} and doesn't buffer records in memory. Only used in Table/SQL API.
*/
public final class TableSimpleStatementExecutor implements JdbcBatchStatementExecutor<RowData> {
private final StatementFactory stmtFactory;
private final JdbcRowConverter converter;
private transient FieldNamedPreparedStatement st;
/**
     * Keep object reuse in mind: if it is enabled, the key extractor may need to return a new
     * object.
*/
public TableSimpleStatementExecutor(StatementFactory stmtFactory, JdbcRowConverter converter) {
this.stmtFactory = checkNotNull(stmtFactory);
this.converter = checkNotNull(converter);
}
@Override
public void prepareStatements(Connection connection) throws SQLException {
st = stmtFactory.createStatement(connection);
}
@Override
public void addToBatch(RowData record) throws SQLException {
converter.toExternal(record, st);
st.addBatch();
}
@Override
public void executeBatch(Connection conn) throws SQLException {
st.executeBatch();
}
@Override
public void closeStatements() throws SQLException {
if (st != null) {
st.close();
st = null;
}
}
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connector.phoenix.internal.options;
import org.apache.flink.connector.phoenix.dialect.JdbcDialect;
import org.apache.flink.util.Preconditions;
import javax.annotation.Nullable;
import java.util.Arrays;
import java.util.Objects;
import java.util.Optional;
import java.util.stream.Stream;
/** JDBC sink DML options. */
public class JdbcDmlOptions extends JdbcTypedQueryOptions {
private static final long serialVersionUID = 1L;
private final String[] fieldNames;
@Nullable private final String[] keyFields;
private final String tableName;
private final JdbcDialect dialect;
public static JdbcDmlOptionsBuilder builder() {
return new JdbcDmlOptionsBuilder();
}
private JdbcDmlOptions(
String tableName,
JdbcDialect dialect,
String[] fieldNames,
int[] fieldTypes,
String[] keyFields) {
super(fieldTypes);
this.tableName = Preconditions.checkNotNull(tableName, "table is empty");
this.dialect = Preconditions.checkNotNull(dialect, "dialect name is empty");
this.fieldNames = Preconditions.checkNotNull(fieldNames, "field names is empty");
this.keyFields = keyFields;
}
public String getTableName() {
return tableName;
}
public JdbcDialect getDialect() {
return dialect;
}
public String[] getFieldNames() {
return fieldNames;
}
public Optional<String[]> getKeyFields() {
return Optional.ofNullable(keyFields);
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
JdbcDmlOptions that = (JdbcDmlOptions) o;
return Arrays.equals(fieldNames, that.fieldNames)
&& Arrays.equals(keyFields, that.keyFields)
&& Objects.equals(tableName, that.tableName)
&& Objects.equals(dialect, that.dialect);
}
@Override
public int hashCode() {
int result = Objects.hash(tableName, dialect);
result = 31 * result + Arrays.hashCode(fieldNames);
result = 31 * result + Arrays.hashCode(keyFields);
return result;
}
/** Builder for {@link JdbcDmlOptions}. */
public static class JdbcDmlOptionsBuilder
extends JdbcUpdateQueryOptionsBuilder<JdbcDmlOptionsBuilder> {
private String tableName;
private String[] fieldNames;
private String[] keyFields;
private JdbcDialect dialect;
@Override
protected JdbcDmlOptionsBuilder self() {
return this;
}
public JdbcDmlOptionsBuilder withFieldNames(String field, String... fieldNames) {
this.fieldNames = concat(field, fieldNames);
return this;
}
public JdbcDmlOptionsBuilder withFieldNames(String[] fieldNames) {
this.fieldNames = fieldNames;
return this;
}
public JdbcDmlOptionsBuilder withKeyFields(String keyField, String... keyFields) {
this.keyFields = concat(keyField, keyFields);
return this;
}
public JdbcDmlOptionsBuilder withKeyFields(String[] keyFields) {
this.keyFields = keyFields;
return this;
}
public JdbcDmlOptionsBuilder withTableName(String tableName) {
this.tableName = tableName;
return self();
}
public JdbcDmlOptionsBuilder withDialect(JdbcDialect dialect) {
this.dialect = dialect;
return self();
}
public JdbcDmlOptions build() {
return new JdbcDmlOptions(tableName, dialect, fieldNames, fieldTypes, keyFields);
}
static String[] concat(String first, String... next) {
if (next == null || next.length == 0) {
return new String[] {first};
} else {
return Stream.concat(Stream.of(new String[] {first}), Stream.of(next))
.toArray(String[]::new);
}
}
}
}
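/*
 * Usage sketch (illustrative only; the URL, table and field names are assumptions, and it presumes
 * the Phoenix dialect is registered for "jdbc:phoenix:" URLs):
 *
 * JdbcDialect dialect =
 *         JdbcDialects.get("jdbc:phoenix:zk1,zk2,zk3:2181").orElseThrow(IllegalStateException::new);
 * JdbcDmlOptions dmlOptions = JdbcDmlOptions.builder()
 *         .withTableName("TEST.ORDERS")
 *         .withDialect(dialect)
 *         .withFieldNames(new String[] {"ID", "AMOUNT"})
 *         .withKeyFields(new String[] {"ID"})
 *         .build();
 */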
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connector.phoenix.internal.options;
import org.apache.flink.util.Preconditions;
import java.util.stream.IntStream;
/** JDBC sink insert options. */
public class JdbcInsertOptions extends JdbcTypedQueryOptions {
private static final long serialVersionUID = 1L;
private final String query;
public JdbcInsertOptions(String query, int[] typesArray) {
super(typesArray);
this.query = Preconditions.checkNotNull(query, "query is empty");
}
public String getQuery() {
return query;
}
public static JdbcInsertOptions from(String query, int firstFieldType, int... nextFieldTypes) {
return new JdbcInsertOptions(query, concat(firstFieldType, nextFieldTypes));
}
private static int[] concat(int first, int... next) {
if (next == null || next.length == 0) {
return new int[] {first};
} else {
return IntStream.concat(IntStream.of(new int[] {first}), IntStream.of(next)).toArray();
}
}
}
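/*
 * Usage sketch (illustrative only; the table and column types are assumptions):
 *
 * JdbcInsertOptions insertOptions = JdbcInsertOptions.from(
 *         "UPSERT INTO TEST.ORDERS (ID, AMOUNT) VALUES (?, ?)",
 *         java.sql.Types.BIGINT, java.sql.Types.DECIMAL);
 */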
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connector.phoenix.internal.options;
import org.apache.flink.connector.phoenix.JdbcExecutionOptions;
import java.io.Serializable;
import java.util.Objects;
/** Options for the JDBC lookup. */
public class JdbcLookupOptions implements Serializable {
private final long cacheMaxSize;
private final long cacheExpireMs;
private final int maxRetryTimes;
public JdbcLookupOptions(long cacheMaxSize, long cacheExpireMs, int maxRetryTimes) {
this.cacheMaxSize = cacheMaxSize;
this.cacheExpireMs = cacheExpireMs;
this.maxRetryTimes = maxRetryTimes;
}
public long getCacheMaxSize() {
return cacheMaxSize;
}
public long getCacheExpireMs() {
return cacheExpireMs;
}
public int getMaxRetryTimes() {
return maxRetryTimes;
}
public static Builder builder() {
return new Builder();
}
@Override
public boolean equals(Object o) {
if (o instanceof JdbcLookupOptions) {
JdbcLookupOptions options = (JdbcLookupOptions) o;
return Objects.equals(cacheMaxSize, options.cacheMaxSize)
&& Objects.equals(cacheExpireMs, options.cacheExpireMs)
&& Objects.equals(maxRetryTimes, options.maxRetryTimes);
} else {
return false;
}
}
/** Builder of {@link JdbcLookupOptions}. */
public static class Builder {
private long cacheMaxSize = -1L;
private long cacheExpireMs = -1L;
private int maxRetryTimes = JdbcExecutionOptions.DEFAULT_MAX_RETRY_TIMES;
/** optional, lookup cache max size; once this value is exceeded, the oldest entries are evicted. */
public Builder setCacheMaxSize(long cacheMaxSize) {
this.cacheMaxSize = cacheMaxSize;
return this;
}
/** optional, lookup cache expiration in milliseconds; entries older than this expire. */
public Builder setCacheExpireMs(long cacheExpireMs) {
this.cacheExpireMs = cacheExpireMs;
return this;
}
/** optional, max retry times for jdbc connector. */
public Builder setMaxRetryTimes(int maxRetryTimes) {
this.maxRetryTimes = maxRetryTimes;
return this;
}
public JdbcLookupOptions build() {
return new JdbcLookupOptions(cacheMaxSize, cacheExpireMs, maxRetryTimes);
}
}
}
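/*
 * Usage sketch (illustrative only; the cache sizing values are assumptions):
 *
 * JdbcLookupOptions lookupOptions = JdbcLookupOptions.builder()
 *         .setCacheMaxSize(10000L)
 *         .setCacheExpireMs(60000L)   // entries expire after one minute
 *         .setMaxRetryTimes(3)
 *         .build();
 */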
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connector.phoenix.internal.options;
import org.apache.flink.connector.phoenix.JdbcConnectionOptions;
import org.apache.flink.connector.phoenix.dialect.JdbcDialect;
import org.apache.flink.connector.phoenix.dialect.JdbcDialects;
import javax.annotation.Nullable;
import java.util.Objects;
import java.util.Optional;
import static org.apache.flink.util.Preconditions.checkNotNull;
/** Options for the JDBC connector. */
public class JdbcOptions extends JdbcConnectionOptions {
private static final long serialVersionUID = 1L;
private String tableName;
private JdbcDialect dialect;
private final @Nullable Integer parallelism;
protected boolean namespaceMappingEnabled;
protected boolean mapSystemTablesEnabled;
protected JdbcOptions(
String dbURL,
String tableName,
String driverName,
String username,
String password,
JdbcDialect dialect,
Integer parallelism,
int connectionCheckTimeoutSeconds,
boolean namespaceMappingEnabled,
boolean mapSystemTablesEnabled
) {
super(dbURL, driverName, username, password, connectionCheckTimeoutSeconds);
this.tableName = tableName;
this.dialect = dialect;
this.parallelism = parallelism;
this.namespaceMappingEnabled = namespaceMappingEnabled;
this.mapSystemTablesEnabled = mapSystemTablesEnabled;
}
protected JdbcOptions(
String dbURL,
String tableName,
String driverName,
String username,
String password,
JdbcDialect dialect,
Integer parallelism,
int connectionCheckTimeoutSeconds) {
super(dbURL, driverName, username, password, connectionCheckTimeoutSeconds);
this.tableName = tableName;
this.dialect = dialect;
this.parallelism = parallelism;
}
public String getTableName() {
return tableName;
}
public JdbcDialect getDialect() {
return dialect;
}
public Integer getParallelism() {
return parallelism;
}
public boolean isNamespaceMappingEnabled() {
return namespaceMappingEnabled;
}
public boolean isMapSystemTablesEnabled() {
return mapSystemTablesEnabled;
}
public static Builder builder() {
return new Builder();
}
@Override
public boolean equals(Object o) {
if (o instanceof JdbcOptions) {
JdbcOptions options = (JdbcOptions) o;
return Objects.equals(url, options.url)
&& Objects.equals(tableName, options.tableName)
&& Objects.equals(driverName, options.driverName)
&& Objects.equals(username, options.username)
&& Objects.equals(password, options.password)
&& Objects.equals(
dialect.getClass().getName(), options.dialect.getClass().getName())
&& Objects.equals(parallelism, options.parallelism)
&& Objects.equals(
connectionCheckTimeoutSeconds, options.connectionCheckTimeoutSeconds)
&& Objects.equals(
namespaceMappingEnabled, options.namespaceMappingEnabled)
&& Objects.equals(
mapSystemTablesEnabled, options.mapSystemTablesEnabled);
} else {
return false;
}
}
@Override
public int hashCode() {
return Objects.hash(
url,
tableName,
driverName,
username,
password,
dialect.getClass().getName(),
parallelism,
connectionCheckTimeoutSeconds,
namespaceMappingEnabled,
mapSystemTablesEnabled
);
}
/** Builder of {@link JdbcOptions}. */
public static class Builder {
private String dbURL;
private String tableName;
private String driverName;
private String username;
private String password;
private JdbcDialect dialect;
private Integer parallelism;
private int connectionCheckTimeoutSeconds = 60;
protected boolean namespaceMappingEnabled;
protected boolean mapSystemTablesEnabled;
/** required, table name. */
public Builder setTableName(String tableName) {
this.tableName = tableName;
return this;
}
/** optional, user name. */
public Builder setUsername(String username) {
this.username = username;
return this;
}
/** optional, password. */
public Builder setPassword(String password) {
this.password = password;
return this;
}
/** optional, connectionCheckTimeoutSeconds. */
public Builder setConnectionCheckTimeoutSeconds(int connectionCheckTimeoutSeconds) {
this.connectionCheckTimeoutSeconds = connectionCheckTimeoutSeconds;
return this;
}
/**
* optional, driver name; if not set, the dialect's default driver name is used, see {@link
* JdbcDialect#defaultDriverName}.
*/
public Builder setDriverName(String driverName) {
this.driverName = driverName;
return this;
}
/** required, JDBC DB url. */
public Builder setDBUrl(String dbURL) {
this.dbURL = dbURL;
return this;
}
/**
* optional, the SQL dialect of the jdbc driver. If not set, it will be inferred by {@link
* JdbcDialects#get} from the DB url.
*/
public Builder setDialect(JdbcDialect dialect) {
this.dialect = dialect;
return this;
}
public Builder setParallelism(Integer parallelism) {
this.parallelism = parallelism;
return this;
}
public Builder setNamespaceMappingEnabled(boolean namespaceMappingEnabled) {
this.namespaceMappingEnabled = namespaceMappingEnabled;
return this;
}
public Builder setMapSystemTablesEnabled(boolean mapSystemTablesEnabled) {
this.mapSystemTablesEnabled = mapSystemTablesEnabled;
return this;
}
public JdbcOptions build() {
checkNotNull(dbURL, "No dbURL supplied.");
checkNotNull(tableName, "No tableName supplied.");
if (this.dialect == null) {
Optional<JdbcDialect> optional = JdbcDialects.get(dbURL);
this.dialect =
optional.orElseGet(
() -> {
throw new NullPointerException(
"Unknown dbURL,can not find proper dialect.");
});
}
if (this.driverName == null) {
Optional<String> optional = dialect.defaultDriverName();
this.driverName =
optional.orElseGet(
() -> {
throw new NullPointerException("No driverName supplied.");
});
}
return new JdbcOptions(
dbURL,
tableName,
driverName,
username,
password,
dialect,
parallelism,
connectionCheckTimeoutSeconds,
namespaceMappingEnabled,
mapSystemTablesEnabled);
}
}
}
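/*
 * Usage sketch (illustrative only; the ZooKeeper quorum and table name are assumptions, and it
 * presumes the Phoenix dialect is registered for "jdbc:phoenix:" URLs so that the dialect and
 * driver name can be inferred):
 *
 * JdbcOptions jdbcOptions = JdbcOptions.builder()
 *         .setDBUrl("jdbc:phoenix:zk1,zk2,zk3:2181")
 *         .setTableName("TEST.ORDERS")
 *         .setNamespaceMappingEnabled(true)
 *         .setMapSystemTablesEnabled(true)
 *         .build();
 */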
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connector.phoenix.internal.options;
import java.io.Serializable;
import java.util.Objects;
import java.util.Optional;
/** Options for the JDBC scan. */
public class JdbcReadOptions implements Serializable {
private final String query;
private final String partitionColumnName;
private final Long partitionLowerBound;
private final Long partitionUpperBound;
private final Integer numPartitions;
private final int fetchSize;
private final boolean autoCommit;
private JdbcReadOptions(
String query,
String partitionColumnName,
Long partitionLowerBound,
Long partitionUpperBound,
Integer numPartitions,
int fetchSize,
boolean autoCommit) {
this.query = query;
this.partitionColumnName = partitionColumnName;
this.partitionLowerBound = partitionLowerBound;
this.partitionUpperBound = partitionUpperBound;
this.numPartitions = numPartitions;
this.fetchSize = fetchSize;
this.autoCommit = autoCommit;
}
public Optional<String> getQuery() {
return Optional.ofNullable(query);
}
public Optional<String> getPartitionColumnName() {
return Optional.ofNullable(partitionColumnName);
}
public Optional<Long> getPartitionLowerBound() {
return Optional.ofNullable(partitionLowerBound);
}
public Optional<Long> getPartitionUpperBound() {
return Optional.ofNullable(partitionUpperBound);
}
public Optional<Integer> getNumPartitions() {
return Optional.ofNullable(numPartitions);
}
public int getFetchSize() {
return fetchSize;
}
public boolean getAutoCommit() {
return autoCommit;
}
public static Builder builder() {
return new Builder();
}
@Override
public boolean equals(Object o) {
if (o instanceof JdbcReadOptions) {
JdbcReadOptions options = (JdbcReadOptions) o;
return Objects.equals(query, options.query)
&& Objects.equals(partitionColumnName, options.partitionColumnName)
&& Objects.equals(partitionLowerBound, options.partitionLowerBound)
&& Objects.equals(partitionUpperBound, options.partitionUpperBound)
&& Objects.equals(numPartitions, options.numPartitions)
&& Objects.equals(fetchSize, options.fetchSize)
&& Objects.equals(autoCommit, options.autoCommit);
} else {
return false;
}
}
/** Builder of {@link JdbcReadOptions}. */
public static class Builder {
protected String query;
protected String partitionColumnName;
protected Long partitionLowerBound;
protected Long partitionUpperBound;
protected Integer numPartitions;
protected int fetchSize = 0;
protected boolean autoCommit = true;
/** optional, SQL query statement for this JDBC source. */
public Builder setQuery(String query) {
this.query = query;
return this;
}
/** optional, name of the column used for partitioning the input. */
public Builder setPartitionColumnName(String partitionColumnName) {
this.partitionColumnName = partitionColumnName;
return this;
}
/** optional, the smallest value of the first partition. */
public Builder setPartitionLowerBound(long partitionLowerBound) {
this.partitionLowerBound = partitionLowerBound;
return this;
}
/** optional, the largest value of the last partition. */
public Builder setPartitionUpperBound(long partitionUpperBound) {
this.partitionUpperBound = partitionUpperBound;
return this;
}
/**
* optional, the maximum number of partitions that can be used for parallelism in table
* reading.
*/
public Builder setNumPartitions(int numPartitions) {
this.numPartitions = numPartitions;
return this;
}
/**
* optional, the number of rows to fetch per round trip. The default value is 0; according to
* the jdbc api, 0 means that the fetchSize hint will be ignored.
*/
public Builder setFetchSize(int fetchSize) {
this.fetchSize = fetchSize;
return this;
}
/** optional, whether to set auto commit on the JDBC driver. */
public Builder setAutoCommit(boolean autoCommit) {
this.autoCommit = autoCommit;
return this;
}
public JdbcReadOptions build() {
return new JdbcReadOptions(
query,
partitionColumnName,
partitionLowerBound,
partitionUpperBound,
numPartitions,
fetchSize,
autoCommit);
}
}
}
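/*
 * Usage sketch (illustrative only; the query, bounds and partition count are assumptions): a
 * partitioned scan that splits the ID range into four parallel reads.
 *
 * JdbcReadOptions readOptions = JdbcReadOptions.builder()
 *         .setQuery("SELECT ID, AMOUNT FROM TEST.ORDERS")
 *         .setPartitionColumnName("ID")
 *         .setPartitionLowerBound(0L)
 *         .setPartitionUpperBound(9999L)
 *         .setNumPartitions(4)
 *         .setFetchSize(1000)
 *         .build();
 */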
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connector.phoenix.internal.options;
import javax.annotation.Nullable;
import java.io.Serializable;
/** Jdbc query type options. */
abstract class JdbcTypedQueryOptions implements Serializable {
@Nullable private final int[] fieldTypes;
JdbcTypedQueryOptions(int[] fieldTypes) {
this.fieldTypes = fieldTypes;
}
public int[] getFieldTypes() {
return fieldTypes;
}
public abstract static class JdbcUpdateQueryOptionsBuilder<
T extends JdbcUpdateQueryOptionsBuilder<T>> {
int[] fieldTypes;
protected abstract T self();
public T withFieldTypes(int[] fieldTypes) {
this.fieldTypes = fieldTypes;
return self();
}
}
}
package org.apache.flink.connector.phoenix.internal.options;
import java.io.Serializable;
import java.util.Objects;
import org.apache.flink.annotation.PublicEvolving;
import org.apache.flink.util.Preconditions;
public class PhoenixJdbcExecutionOptions implements Serializable {
public static final int DEFAULT_MAX_RETRY_TIMES = 3;
private static final int DEFAULT_INTERVAL_MILLIS = 0;
public static final int DEFAULT_SIZE = 5000;
private final long batchIntervalMs;
private final int batchSize;
private final int maxRetries;
private PhoenixJdbcExecutionOptions(long batchIntervalMs, int batchSize, int maxRetries) {
Preconditions.checkArgument(maxRetries >= 0);
this.batchIntervalMs = batchIntervalMs;
this.batchSize = batchSize;
this.maxRetries = maxRetries;
}
public long getBatchIntervalMs() {
return this.batchIntervalMs;
}
public int getBatchSize() {
return this.batchSize;
}
public int getMaxRetries() {
return this.maxRetries;
}
public boolean equals(Object o) {
if (this == o) {
return true;
} else if (o != null && this.getClass() == o.getClass()) {
PhoenixJdbcExecutionOptions that = (PhoenixJdbcExecutionOptions)o;
return this.batchIntervalMs == that.batchIntervalMs && this.batchSize == that.batchSize && this.maxRetries == that.maxRetries;
} else {
return false;
}
}
public int hashCode() {
return Objects.hash(new Object[]{this.batchIntervalMs, this.batchSize, this.maxRetries});
}
public static Builder builder() {
return new Builder();
}
public static PhoenixJdbcExecutionOptions defaults() {
return builder().build();
}
public static final class Builder {
private long intervalMs = DEFAULT_INTERVAL_MILLIS;
private int size = DEFAULT_SIZE;
private int maxRetries = DEFAULT_MAX_RETRY_TIMES;
public Builder() {
}
public Builder withBatchSize(int size) {
this.size = size;
return this;
}
public Builder withBatchIntervalMs(long intervalMs) {
this.intervalMs = intervalMs;
return this;
}
public Builder withMaxRetries(int maxRetries) {
this.maxRetries = maxRetries;
return this;
}
public PhoenixJdbcExecutionOptions build() {
return new PhoenixJdbcExecutionOptions(this.intervalMs, this.size, this.maxRetries);
}
}
}
\ No newline at end of file
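/*
 * Usage sketch (illustrative only; the batch settings are assumptions): flush every 1000 rows or
 * every 200 ms, with up to 3 retries.
 *
 * PhoenixJdbcExecutionOptions executionOptions = PhoenixJdbcExecutionOptions.builder()
 *         .withBatchSize(1000)
 *         .withBatchIntervalMs(200L)
 *         .withMaxRetries(3)
 *         .build();
 */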
package org.apache.flink.connector.phoenix.internal.options;
import java.io.Serializable;
import java.util.Objects;
public class PhoenixJdbcLookupOptions implements Serializable {
private final long cacheMaxSize;
private final long cacheExpireMs;
private final int maxRetryTimes;
public PhoenixJdbcLookupOptions(long cacheMaxSize, long cacheExpireMs, int maxRetryTimes) {
this.cacheMaxSize = cacheMaxSize;
this.cacheExpireMs = cacheExpireMs;
this.maxRetryTimes = maxRetryTimes;
}
public long getCacheMaxSize() {
return this.cacheMaxSize;
}
public long getCacheExpireMs() {
return this.cacheExpireMs;
}
public int getMaxRetryTimes() {
return this.maxRetryTimes;
}
public static Builder builder() {
return new Builder();
}
public boolean equals(Object o) {
if (!(o instanceof PhoenixJdbcLookupOptions)) {
return false;
} else {
PhoenixJdbcLookupOptions options = (PhoenixJdbcLookupOptions)o;
return Objects.equals(this.cacheMaxSize, options.cacheMaxSize) && Objects.equals(this.cacheExpireMs, options.cacheExpireMs) && Objects.equals(this.maxRetryTimes, options.maxRetryTimes);
}
}
public static class Builder {
private long cacheMaxSize = -1L;
private long cacheExpireMs = -1L;
private int maxRetryTimes = 3;
public Builder() {
}
public Builder setCacheMaxSize(long cacheMaxSize) {
this.cacheMaxSize = cacheMaxSize;
return this;
}
public Builder setCacheExpireMs(long cacheExpireMs) {
this.cacheExpireMs = cacheExpireMs;
return this;
}
public Builder setMaxRetryTimes(int maxRetryTimes) {
this.maxRetryTimes = maxRetryTimes;
return this;
}
public PhoenixJdbcLookupOptions build() {
return new PhoenixJdbcLookupOptions(this.cacheMaxSize, this.cacheExpireMs, this.maxRetryTimes);
}
}
}
\ No newline at end of file
package org.apache.flink.connector.phoenix.internal.options;
import org.apache.flink.connector.phoenix.JdbcConnectionOptions;
import org.apache.flink.connector.phoenix.dialect.JdbcDialect;
import org.apache.flink.connector.phoenix.dialect.JdbcDialects;
import org.apache.flink.util.Preconditions;
import javax.annotation.Nullable;
import java.util.Objects;
import java.util.Optional;
/**
* PhoenixJdbcOptions
*
* @author gy
* @since 2022/3/17 9:57
**/
public class PhoenixJdbcOptions extends JdbcConnectionOptions {
private static final long serialVersionUID = 1L;
private String tableName;
private JdbcDialect dialect;
@Nullable
private final Integer parallelism;
// whether Phoenix schema (namespace) mapping is enabled
private Boolean isNamespaceMappingEnabled;
private Boolean mapSystemTablesToNamespace;
private PhoenixJdbcOptions(
        String dbURL,
        String tableName,
        String driverName,
        String username,
        String password,
        JdbcDialect dialect,
        Integer parallelism,
        int connectionCheckTimeoutSeconds,
        boolean isNamespaceMappingEnabled,
        boolean mapSystemTablesToNamespace) {
super(dbURL, driverName, username, password, connectionCheckTimeoutSeconds);
this.tableName = tableName;
this.dialect = dialect;
this.parallelism = parallelism;
this.isNamespaceMappingEnabled = isNamespaceMappingEnabled;
this.mapSystemTablesToNamespace = mapSystemTablesToNamespace;
}
public String getTableName() {
return this.tableName;
}
public JdbcDialect getDialect() {
return this.dialect;
}
public Integer getParallelism() {
return this.parallelism;
}
public Boolean getNamespaceMappingEnabled() {
return isNamespaceMappingEnabled;
}
public Boolean getMapSystemTablesToNamespace() {
return mapSystemTablesToNamespace;
}
public static Builder builder() {
return new Builder();
}
public boolean equals(Object o) {
if (!(o instanceof PhoenixJdbcOptions)) {
return false;
} else {
PhoenixJdbcOptions options = (PhoenixJdbcOptions)o;
return Objects.equals(this.url, options.url)
        && Objects.equals(this.tableName, options.tableName)
        && Objects.equals(this.driverName, options.driverName)
        && Objects.equals(this.username, options.username)
        && Objects.equals(this.password, options.password)
        && Objects.equals(this.dialect.getClass().getName(), options.dialect.getClass().getName())
        && Objects.equals(this.parallelism, options.parallelism)
        && Objects.equals(this.connectionCheckTimeoutSeconds, options.connectionCheckTimeoutSeconds)
        && Objects.equals(this.isNamespaceMappingEnabled, options.isNamespaceMappingEnabled)
        && Objects.equals(this.mapSystemTablesToNamespace, options.mapSystemTablesToNamespace);
}
}
public int hashCode() {
return Objects.hash(
        this.url,
        this.tableName,
        this.driverName,
        this.username,
        this.password,
        this.dialect.getClass().getName(),
        this.parallelism,
        this.connectionCheckTimeoutSeconds,
        this.isNamespaceMappingEnabled,
        this.mapSystemTablesToNamespace);
}
public static class Builder {
private String dbURL;
private String tableName;
private String driverName;
private String username;
private String password;
private JdbcDialect dialect;
private Integer parallelism;
private int connectionCheckTimeoutSeconds = 60;
private Boolean isNamespaceMappingEnabled;
private Boolean mapSystemTablesToNamespace;
public Builder() {
}
public Builder setTableName(String tableName) {
this.tableName = tableName;
return this;
}
public Builder setUsername(String username) {
this.username = username;
return this;
}
public Builder setPassword(String password) {
this.password = password;
return this;
}
public Builder setConnectionCheckTimeoutSeconds(int connectionCheckTimeoutSeconds) {
this.connectionCheckTimeoutSeconds = connectionCheckTimeoutSeconds;
return this;
}
public Builder setDriverName(String driverName) {
this.driverName = driverName;
return this;
}
public Builder setDBUrl(String dbURL) {
this.dbURL = dbURL;
return this;
}
public Builder setDialect(JdbcDialect dialect) {
this.dialect = dialect;
return this;
}
public Builder setParallelism(Integer parallelism) {
this.parallelism = parallelism;
return this;
}
public Builder setNamespaceMappingEnabled(Boolean namespaceMappingEnabled) {
this.isNamespaceMappingEnabled = namespaceMappingEnabled;
return this;
}
public Builder setMapSystemTablesToNamespace(Boolean mapSystemTablesToNamespace) {
this.mapSystemTablesToNamespace = mapSystemTablesToNamespace;
return this;
}
public PhoenixJdbcOptions build() {
Preconditions.checkNotNull(this.dbURL, "No dbURL supplied.");
Preconditions.checkNotNull(this.tableName, "No tableName supplied.");
if (this.dialect == null) {
    Optional<JdbcDialect> optional = JdbcDialects.get(this.dbURL);
    this.dialect = optional.orElseGet(() -> {
        throw new NullPointerException("Unknown dbURL, cannot find a proper dialect.");
    });
}
if (this.driverName == null) {
    Optional<String> optional = this.dialect.defaultDriverName();
    this.driverName = optional.orElseGet(() -> {
        throw new NullPointerException("No driverName supplied.");
    });
}
return new PhoenixJdbcOptions(
        this.dbURL,
        this.tableName,
        this.driverName,
        this.username,
        this.password,
        this.dialect,
        this.parallelism,
        this.connectionCheckTimeoutSeconds,
        this.isNamespaceMappingEnabled,
        this.mapSystemTablesToNamespace);
}
}
}
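/*
 * Usage sketch (illustrative only; the ZooKeeper quorum and table name are assumptions): the two
 * namespace flags are intended for Phoenix's schema/namespace mapping settings.
 *
 * PhoenixJdbcOptions phoenixOptions = PhoenixJdbcOptions.builder()
 *         .setDBUrl("jdbc:phoenix:zk1,zk2,zk3:2181")
 *         .setTableName("TEST.ORDERS")
 *         .setNamespaceMappingEnabled(true)
 *         .setMapSystemTablesToNamespace(true)
 *         .build();
 */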
package org.apache.flink.connector.phoenix.internal.options;
import java.io.Serializable;
import java.util.Objects;
import java.util.Optional;
public class PhoenixJdbcReadOptions implements Serializable {
private final String query;
private final String partitionColumnName;
private final Long partitionLowerBound;
private final Long partitionUpperBound;
private final Integer numPartitions;
private final int fetchSize;
private final boolean autoCommit;
private PhoenixJdbcReadOptions(String query, String partitionColumnName, Long partitionLowerBound, Long partitionUpperBound, Integer numPartitions, int fetchSize, boolean autoCommit) {
this.query = query;
this.partitionColumnName = partitionColumnName;
this.partitionLowerBound = partitionLowerBound;
this.partitionUpperBound = partitionUpperBound;
this.numPartitions = numPartitions;
this.fetchSize = fetchSize;
this.autoCommit = autoCommit;
}
public Optional<String> getQuery() {
return Optional.ofNullable(this.query);
}
public Optional<String> getPartitionColumnName() {
return Optional.ofNullable(this.partitionColumnName);
}
public Optional<Long> getPartitionLowerBound() {
return Optional.ofNullable(this.partitionLowerBound);
}
public Optional<Long> getPartitionUpperBound() {
return Optional.ofNullable(this.partitionUpperBound);
}
public Optional<Integer> getNumPartitions() {
return Optional.ofNullable(this.numPartitions);
}
public int getFetchSize() {
return this.fetchSize;
}
public boolean getAutoCommit() {
return this.autoCommit;
}
public static Builder builder() {
return new Builder();
}
public boolean equals(Object o) {
if (!(o instanceof PhoenixJdbcReadOptions)) {
return false;
} else {
PhoenixJdbcReadOptions options = (PhoenixJdbcReadOptions)o;
return Objects.equals(this.query, options.query)
        && Objects.equals(this.partitionColumnName, options.partitionColumnName)
        && Objects.equals(this.partitionLowerBound, options.partitionLowerBound)
        && Objects.equals(this.partitionUpperBound, options.partitionUpperBound)
        && Objects.equals(this.numPartitions, options.numPartitions)
        && Objects.equals(this.fetchSize, options.fetchSize)
        && Objects.equals(this.autoCommit, options.autoCommit);
}
}
public static class Builder {
protected String query;
protected String partitionColumnName;
protected Long partitionLowerBound;
protected Long partitionUpperBound;
protected Integer numPartitions;
protected int fetchSize = 0;
protected boolean autoCommit = true;
public Builder() {
}
public Builder setQuery(String query) {
this.query = query;
return this;
}
public Builder setPartitionColumnName(String partitionColumnName) {
this.partitionColumnName = partitionColumnName;
return this;
}
public Builder setPartitionLowerBound(long partitionLowerBound) {
this.partitionLowerBound = partitionLowerBound;
return this;
}
public Builder setPartitionUpperBound(long partitionUpperBound) {
this.partitionUpperBound = partitionUpperBound;
return this;
}
public Builder setNumPartitions(int numPartitions) {
this.numPartitions = numPartitions;
return this;
}
public Builder setFetchSize(int fetchSize) {
this.fetchSize = fetchSize;
return this;
}
public Builder setAutoCommit(boolean autoCommit) {
this.autoCommit = autoCommit;
return this;
}
public PhoenixJdbcReadOptions build() {
return new PhoenixJdbcReadOptions(this.query, this.partitionColumnName, this.partitionLowerBound, this.partitionUpperBound, this.numPartitions, this.fetchSize, this.autoCommit);
}
}
}
\ No newline at end of file
package org.apache.flink.connector.phoenix.internal.options;/*
package org.apache.flink.connector.phoenix.internal.options;
import org.apache.flink.api.common.functions.RuntimeContext;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.connector.phoenix.internal.AbstractJdbcOutputFormat;
import org.apache.flink.connector.phoenix.internal.connection.JdbcConnectionProvider;
import org.apache.flink.runtime.state.FunctionInitializationContext;
import org.apache.flink.runtime.state.FunctionSnapshotContext;
import org.apache.flink.streaming.api.checkpoint.CheckpointedFunction;
import org.apache.flink.streaming.api.functions.sink.RichSinkFunction;
import org.apache.flink.util.Preconditions;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.annotation.Nonnull;
import java.io.IOException;
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.util.Date;
import java.util.Properties;
*/
/**
* PhoenixSinkFunction
*
* @author gy
* @since 2022/3/22 16:27
**//*
public class PhoenixSinkFunction <T> extends RichSinkFunction<T>
implements CheckpointedFunction {
private static final Logger LOG = LoggerFactory.getLogger(AbstractJdbcOutputFormat.class);
private final JdbcConnectionProvider jdbcConnectionProvider;
private final JdbcOptions options;
private static Connection connection = null;
private static String tableName = "test.ecgbeats12";
private static PreparedStatement psUp = null;
private static int batchcount = 0;
private static int totalcount = 0;
private static Date startTime;
public PhoenixSinkFunction(JdbcOptions jdbcOptions,JdbcConnectionProvider jdbcConnectionProvider) {
this.options = jdbcOptions;
this.jdbcConnectionProvider = jdbcConnectionProvider;
}
@Override
public void open(Configuration parameters) throws Exception {
Connection connection = jdbcConnectionProvider.getOrEstablishConnection();
//super.open(parameters);
*/
/*RuntimeContext ctx = getRuntimeContext();
outputFormat.setRuntimeContext(ctx);
outputFormat.open(ctx.getIndexOfThisSubtask(), ctx.getNumberOfParallelSubtasks());*//*
*/
/* Class.forName("org.apache.phoenix.jdbc.PhoenixDriver");
Properties properties = new Properties();
properties.put("phoenix.schema.isNamespaceMappingEnabled", "true");
properties.put("phoenix.schema.mapSystemTablesToNamespac", "true");
connection = DriverManager.getConnection("jdbc:phoenix:hd01,hd02,hd03:2181",properties);*//*
connection.setAutoCommit(false);
// Use a PreparedStatement to insert the data; the corresponding primary key must be specified
StringBuilder sqlBuilder = new StringBuilder();
sqlBuilder.append("upsert into " + tableName + "(ecg_id , bindex , btype , bt_flag , af_flag , bmatch , rr, nrr , detpeak , dettresh ) values(?,?,?,?,?,?,?,?,?,?)");
String sqlUp = sqlBuilder.toString();
psUp = connection.prepareStatement(sqlUp);
this.options.getDialect().
}
@Override
public void invoke(T value, Context context) throws IOException {
psUp.executeUpdate();
//psUp.addBatch();
batchcount++;
totalcount++;
if (batchcount == 1000) {
System.out.println("add batch : "+batchcount);
// Phoenix uses commit() instead of executeBatch() to control batch updates.
//psUp.executeBatch();
connection.commit();
//psUp.clearBatch();
batchcount = 0;
System.out.println("totalcount : "+totalcount);
}
}
@Override
public void initializeState(FunctionInitializationContext context) {}
@Override
public void snapshotState(FunctionSnapshotContext context) throws Exception {
outputFormat.flush();
}
@Override
public void close() {
//psUp.executeBatch();
connection.commit();
//psUp.clearBatch();
Date endTime = new Date();
long l = endTime.getTime() - startTime.getTime();
long day = l / (24 * 60 * 60 * 1000);
long hour = (l / (60 * 60 * 1000) - day * 24);
long min = ((l / (60 * 1000)) - day * 24 * 60 - hour * 60);
long s = (l / 1000 - day * 24 * 60 * 60 - hour * 60 * 60 - min * 60);
System.out.println("========结束写入时间: "+ endTime);
System.out.println("========运行时间: " + day + "天" + hour + "小时" + min + "分" + s + "秒");
if (psUp != null ) {
try {
psUp.close();
} catch (SQLException throwables) {
throwables.printStackTrace();
}
}
if (connection != null) {
try {
connection.close();
} catch (SQLException throwables) {
throwables.printStackTrace();
}
}
}
}*/
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connector.phoenix.split;
import org.apache.flink.annotation.Experimental;
import org.apache.flink.connector.phoenix.PhoenixInputFormat;
import java.io.Serializable;
/**
* This splits generator actually does nothing but wrap the query parameters computed by the
* user before creating the {@link PhoenixInputFormat} instance.
*/
@Experimental
public class JdbcGenericParameterValuesProvider implements JdbcParameterValuesProvider {
private final Serializable[][] parameters;
public JdbcGenericParameterValuesProvider(Serializable[][] parameters) {
this.parameters = parameters;
}
@Override
public Serializable[][] getParameterValues() {
// do nothing...precomputed externally
return parameters;
}
}
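/*
 * Usage sketch (illustrative only; the bounds are assumptions): two precomputed splits, each row
 * supplying the two placeholders of a "BETWEEN ? AND ?" query.
 *
 * Serializable[][] parameters = new Serializable[][] {
 *         {0L, 999L},
 *         {1000L, 1999L}
 * };
 * JdbcParameterValuesProvider provider = new JdbcGenericParameterValuesProvider(parameters);
 */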
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connector.phoenix.split;
import org.apache.flink.annotation.Experimental;
import org.apache.flink.util.Preconditions;
import java.io.Serializable;
/**
* This query parameters generator is a helper class to parameterize from/to queries on a numeric
* column. The generated array of from/to values will be equally sized to fetchSize (apart from the
* last one), ranging from minVal up to maxVal.
*
* <p>For example, if there's a table <CODE>BOOKS</CODE> with a numeric PK <CODE>id</CODE>, using a
* query like:
*
* <PRE>
* SELECT * FROM BOOKS WHERE id BETWEEN ? AND ?
* </PRE>
*
* <p>You can take advantage of this class to automatically generate the parameters of the BETWEEN
* clause, based on the passed constructor parameters.
*/
@Experimental
public class JdbcNumericBetweenParametersProvider implements JdbcParameterValuesProvider {
private final long minVal;
private final long maxVal;
private long batchSize;
private int batchNum;
/**
* JdbcNumericBetweenParametersProvider constructor.
*
* @param minVal the lower bound of the produced "from" values
* @param maxVal the upper bound of the produced "to" values
*/
public JdbcNumericBetweenParametersProvider(long minVal, long maxVal) {
Preconditions.checkArgument(minVal <= maxVal, "minVal must not be larger than maxVal");
this.minVal = minVal;
this.maxVal = maxVal;
}
/**
* JdbcNumericBetweenParametersProvider constructor.
*
* @param fetchSize the max distance between the produced from/to pairs
* @param minVal the lower bound of the produced "from" values
* @param maxVal the upper bound of the produced "to" values
*/
public JdbcNumericBetweenParametersProvider(long fetchSize, long minVal, long maxVal) {
Preconditions.checkArgument(minVal <= maxVal, "minVal must not be larger than maxVal");
this.minVal = minVal;
this.maxVal = maxVal;
ofBatchSize(fetchSize);
}
public JdbcNumericBetweenParametersProvider ofBatchSize(long batchSize) {
Preconditions.checkArgument(batchSize > 0, "Batch size must be positive");
long maxElemCount = (maxVal - minVal) + 1;
if (batchSize > maxElemCount) {
batchSize = maxElemCount;
}
this.batchSize = batchSize;
this.batchNum = (int) Math.ceil((double) maxElemCount / batchSize);
return this;
}
public JdbcNumericBetweenParametersProvider ofBatchNum(int batchNum) {
Preconditions.checkArgument(batchNum > 0, "Batch number must be positive");
long maxElemCount = (maxVal - minVal) + 1;
if (batchNum > maxElemCount) {
batchNum = (int) maxElemCount;
}
this.batchNum = batchNum;
this.batchSize = (long) Math.ceil((double) maxElemCount / batchNum);
return this;
}
@Override
public Serializable[][] getParameterValues() {
Preconditions.checkState(
batchSize > 0,
"Batch size and batch number must be positive. Have you called `ofBatchSize` or `ofBatchNum`?");
long maxElemCount = (maxVal - minVal) + 1;
long bigBatchNum = maxElemCount - (batchSize - 1) * batchNum;
Serializable[][] parameters = new Serializable[batchNum][2];
long start = minVal;
for (int i = 0; i < batchNum; i++) {
long end = start + batchSize - 1 - (i >= bigBatchNum ? 1 : 0);
parameters[i] = new Long[] {start, end};
start = end + 1;
}
return parameters;
}
}
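/*
 * Worked example (values chosen for illustration): splitting ids 1..10 with a batch size of 4
 * produces three from/to pairs.
 *
 * Serializable[][] params =
 *         new JdbcNumericBetweenParametersProvider(1, 10).ofBatchSize(4).getParameterValues();
 * // params == {{1, 4}, {5, 7}, {8, 10}}; each pair binds the BETWEEN ? AND ? placeholders.
 */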
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connector.phoenix.split;
import org.apache.flink.annotation.Experimental;
import org.apache.flink.connector.phoenix.PhoenixInputFormat;
import java.io.Serializable;
/**
* This interface is used by the {@link PhoenixInputFormat} to compute the list of parallel queries to
* run (i.e. splits). Each query will be parameterized using a row of the matrix provided by each
* {@link JdbcParameterValuesProvider} implementation.
*/
@Experimental
public interface JdbcParameterValuesProvider {
/** Returns the parameters array needed to query a table in parallel. */
Serializable[][] getParameterValues();
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connector.phoenix.statement;
import java.math.BigDecimal;
import java.sql.*;
/**
* This is a wrapper around {@link PreparedStatement} and allows the users to set parameters by name
* instead of by index. This allows users to use the same variable parameter multiple times in a
* statement.
*
* <p>Code such as this:
*
* <pre>
* Connection con = getConnection();
* String query = "select * from my_table where first_name=? or last_name=?";
* PreparedStatement st = con.prepareStatement(query);
* st.setString(1, "bob");
* st.setString(2, "bob");
* ResultSet rs = st.executeQuery();
* </pre>
*
* <p>Can be replaced with:
*
* <pre>
* Connection con = getConnection();
* String query = "select * from my_table where first_name=:name or last_name=:name";
* FieldNamedPreparedStatement st = FieldNamedPreparedStatement.prepareStatement(con, query, new String[]{"name"});
* st.setString(0, "bob");
* ResultSet rs = st.executeQuery();
* </pre>
*/
public interface FieldNamedPreparedStatement extends AutoCloseable {
/**
* Creates a <code>NamedPreparedStatement</code> object for sending parameterized SQL statements
* to the database.
*
* @param connection the connection used to connect to database.
* @param sql an SQL statement that may contain one or more ':fieldName' as parameter
* placeholders
* @param fieldNames the field names in schema order used as the parameter names
*/
static FieldNamedPreparedStatement prepareStatement(
Connection connection, String sql, String[] fieldNames) throws SQLException {
return FieldNamedPreparedStatementImpl.prepareStatement(connection, sql, fieldNames);
}
/**
* Clears the current parameter values immediately.
*
* <p>In general, parameter values remain in force for repeated use of a statement. Setting a
* parameter value automatically clears its previous value. However, in some cases it is useful
* to immediately release the resources used by the current parameter values; this can be done
* by calling the method <code>clearParameters</code>.
*
* @see PreparedStatement#clearParameters()
*/
void clearParameters() throws SQLException;
/**
* Executes the SQL query in this <code>NamedPreparedStatement</code> object and returns the
* <code>ResultSet</code> object generated by the query.
*
* @see PreparedStatement#executeQuery()
*/
ResultSet executeQuery() throws SQLException;
/**
* Adds a set of parameters to this <code>NamedPreparedStatement</code> object's batch of
* commands.
*
* @see PreparedStatement#addBatch()
*/
void addBatch() throws SQLException;
/**
* Submits a batch of commands to the database for execution and if all commands execute
* successfully, returns an array of update counts. The <code>int</code> elements of the array
* that is returned are ordered to correspond to the commands in the batch, which are ordered
* according to the order in which they were added to the batch.
*
* @see PreparedStatement#executeBatch()
*/
int[] executeBatch() throws SQLException;
/**
* Executes this statement immediately. Phoenix controls batching through
* {@code Connection#commit()} rather than {@code executeBatch()}, so this method is provided
* for Phoenix upserts.
*
* @see PreparedStatement#executeUpdate()
*/
void executeUpdate() throws SQLException;
/**
* Sets the designated parameter to SQL <code>NULL</code>.
*
* <p><B>Note:</B> You must specify the parameter's SQL type.
*
* @see PreparedStatement#setNull(int, int)
*/
void setNull(int fieldIndex, int sqlType) throws SQLException;
/**
* Sets the designated parameter to the given Java <code>boolean</code> value. The driver
* converts this to an SQL <code>BIT</code> or <code>BOOLEAN</code> value when it sends it to
* the database.
*
* @see PreparedStatement#setBoolean(int, boolean)
*/
void setBoolean(int fieldIndex, boolean x) throws SQLException;
/**
* Sets the designated parameter to the given Java <code>byte</code> value. The driver converts
* this to an SQL <code>TINYINT</code> value when it sends it to the database.
*
* @see PreparedStatement#setByte(int, byte)
*/
void setByte(int fieldIndex, byte x) throws SQLException;
/**
* Sets the designated parameter to the given Java <code>short</code> value. The driver converts
* this to an SQL <code>SMALLINT</code> value when it sends it to the database.
*
* @see PreparedStatement#setShort(int, short)
*/
void setShort(int fieldIndex, short x) throws SQLException;
/**
* Sets the designated parameter to the given Java <code>int</code> value. The driver converts
* this to an SQL <code>INTEGER</code> value when it sends it to the database.
*
* @see PreparedStatement#setInt(int, int)
*/
void setInt(int fieldIndex, int x) throws SQLException;
/**
* Sets the designated parameter to the given Java <code>long</code> value. The driver converts
* this to an SQL <code>BIGINT</code> value when it sends it to the database.
*
* @see PreparedStatement#setLong(int, long)
*/
void setLong(int fieldIndex, long x) throws SQLException;
/**
* Sets the designated parameter to the given Java <code>float</code> value. The driver converts
* this to an SQL <code>REAL</code> value when it sends it to the database.
*
* @see PreparedStatement#setFloat(int, float)
*/
void setFloat(int fieldIndex, float x) throws SQLException;
/**
* Sets the designated parameter to the given Java <code>double</code> value. The driver
* converts this to an SQL <code>DOUBLE</code> value when it sends it to the database.
*
* @see PreparedStatement#setDouble(int, double)
*/
void setDouble(int fieldIndex, double x) throws SQLException;
/**
* Sets the designated parameter to the given <code>java.math.BigDecimal</code> value. The
* driver converts this to an SQL <code>NUMERIC</code> value when it sends it to the database.
*
* @see PreparedStatement#setBigDecimal(int, BigDecimal)
*/
void setBigDecimal(int fieldIndex, BigDecimal x) throws SQLException;
/**
* Sets the designated parameter to the given Java <code>String</code> value. The driver
* converts this to an SQL <code>VARCHAR</code> or <code>LONGVARCHAR</code> value (depending on
* the argument's size relative to the driver's limits on <code>VARCHAR</code> values) when it
* sends it to the database.
*
* @see PreparedStatement#setString(int, String)
*/
void setString(int fieldIndex, String x) throws SQLException;
/**
* Sets the designated parameter to the given Java array of bytes. The driver converts this to
* an SQL <code>VARBINARY</code> or <code>LONGVARBINARY</code> (depending on the argument's size
* relative to the driver's limits on <code>VARBINARY</code> values) when it sends it to the
* database.
*
* @see PreparedStatement#setBytes(int, byte[])
*/
void setBytes(int fieldIndex, byte[] x) throws SQLException;
/**
* Sets the designated parameter to the given <code>java.sql.Date</code> value using the default
* time zone of the virtual machine that is running the application. The driver converts this to
* an SQL <code>DATE</code> value when it sends it to the database.
*
* @see PreparedStatement#setDate(int, Date)
*/
void setDate(int fieldIndex, Date x) throws SQLException;
/**
* Sets the designated parameter to the given <code>java.sql.Time</code> value. The driver
* converts this to an SQL <code>TIME</code> value when it sends it to the database.
*
* @see PreparedStatement#setTime(int, Time)
*/
void setTime(int fieldIndex, Time x) throws SQLException;
/**
* Sets the designated parameter to the given <code>java.sql.Timestamp</code> value. The driver
* converts this to an SQL <code>TIMESTAMP</code> value when it sends it to the database.
*
* @see PreparedStatement#setTimestamp(int, Timestamp)
*/
void setTimestamp(int fieldIndex, Timestamp x) throws SQLException;
/**
* Sets the value of the designated parameter using the given object.
*
* @see PreparedStatement#setObject(int, Object)
*/
void setObject(int fieldIndex, Object x) throws SQLException;
/**
* Releases this <code>Statement</code> object's database and JDBC resources immediately instead
* of waiting for this to happen when it is automatically closed. It is generally good practice
* to release resources as soon as you are finished with them to avoid tying up database
* resources.
*
* @see PreparedStatement#close()
*/
void close() throws SQLException;
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connector.phoenix.statement;
import java.math.BigDecimal;
import java.sql.*;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import static org.apache.flink.util.Preconditions.checkArgument;
import static org.apache.flink.util.Preconditions.checkNotNull;
/** Simple implementation of {@link FieldNamedPreparedStatement}. */
public class FieldNamedPreparedStatementImpl implements FieldNamedPreparedStatement {
private final PreparedStatement statement;
private final int[][] indexMapping;
private FieldNamedPreparedStatementImpl(PreparedStatement statement, int[][] indexMapping) {
this.statement = statement;
this.indexMapping = indexMapping;
}
@Override
public void clearParameters() throws SQLException {
statement.clearParameters();
}
@Override
public ResultSet executeQuery() throws SQLException {
return statement.executeQuery();
}
@Override
public void addBatch() throws SQLException {
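// Phoenix note: the upsert is executed immediately and buffered by the Phoenix connection until
// commit(), so this delegates to executeUpdate() rather than JDBC addBatch().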
statement.executeUpdate();
}
@Override
public void executeUpdate() throws SQLException {
statement.executeUpdate();
}
@Override
public int[] executeBatch() throws SQLException {
return statement.executeBatch();
}
@Override
public void setNull(int fieldIndex, int sqlType) throws SQLException {
for (int index : indexMapping[fieldIndex]) {
statement.setNull(index, sqlType);
}
}
@Override
public void setBoolean(int fieldIndex, boolean x) throws SQLException {
for (int index : indexMapping[fieldIndex]) {
statement.setBoolean(index, x);
}
}
@Override
public void setByte(int fieldIndex, byte x) throws SQLException {
for (int index : indexMapping[fieldIndex]) {
statement.setByte(index, x);
}
}
@Override
public void setShort(int fieldIndex, short x) throws SQLException {
for (int index : indexMapping[fieldIndex]) {
statement.setShort(index, x);
}
}
@Override
public void setInt(int fieldIndex, int x) throws SQLException {
for (int index : indexMapping[fieldIndex]) {
statement.setInt(index, x);
}
}
@Override
public void setLong(int fieldIndex, long x) throws SQLException {
for (int index : indexMapping[fieldIndex]) {
statement.setLong(index, x);
}
}
@Override
public void setFloat(int fieldIndex, float x) throws SQLException {
for (int index : indexMapping[fieldIndex]) {
statement.setFloat(index, x);
}
}
@Override
public void setDouble(int fieldIndex, double x) throws SQLException {
for (int index : indexMapping[fieldIndex]) {
statement.setDouble(index, x);
}
}
@Override
public void setBigDecimal(int fieldIndex, BigDecimal x) throws SQLException {
for (int index : indexMapping[fieldIndex]) {
statement.setBigDecimal(index, x);
}
}
@Override
public void setString(int fieldIndex, String x) throws SQLException {
for (int index : indexMapping[fieldIndex]) {
statement.setString(index, x);
}
}
@Override
public void setBytes(int fieldIndex, byte[] x) throws SQLException {
for (int index : indexMapping[fieldIndex]) {
statement.setBytes(index, x);
}
}
@Override
public void setDate(int fieldIndex, Date x) throws SQLException {
for (int index : indexMapping[fieldIndex]) {
statement.setDate(index, x);
}
}
@Override
public void setTime(int fieldIndex, Time x) throws SQLException {
for (int index : indexMapping[fieldIndex]) {
statement.setTime(index, x);
}
}
@Override
public void setTimestamp(int fieldIndex, Timestamp x) throws SQLException {
for (int index : indexMapping[fieldIndex]) {
statement.setTimestamp(index, x);
}
}
@Override
public void setObject(int fieldIndex, Object x) throws SQLException {
for (int index : indexMapping[fieldIndex]) {
statement.setObject(index, x);
}
}
@Override
public void close() throws SQLException {
statement.close();
}
// ----------------------------------------------------------------------------------------
public static FieldNamedPreparedStatement prepareStatement(
Connection connection, String sql, String[] fieldNames) throws SQLException {
checkNotNull(connection, "connection must not be null.");
checkNotNull(sql, "sql must not be null.");
checkNotNull(fieldNames, "fieldNames must not be null.");
if (sql.contains("?")) {
throw new IllegalArgumentException("SQL statement must not contain ? character.");
}
HashMap<String, List<Integer>> parameterMap = new HashMap<>();
String parsedSQL = parseNamedStatement(sql, parameterMap);
// currently, the statements must contain all the field parameters
checkArgument(parameterMap.size() == fieldNames.length);
int[][] indexMapping = new int[fieldNames.length][];
for (int i = 0; i < fieldNames.length; i++) {
String fieldName = fieldNames[i];
checkArgument(
parameterMap.containsKey(fieldName),
fieldName + " doesn't exist in the parameters of SQL statement: " + sql);
indexMapping[i] = parameterMap.get(fieldName).stream().mapToInt(v -> v).toArray();
}
return new FieldNamedPreparedStatementImpl(
connection.prepareStatement(parsedSQL), indexMapping);
}
/**
* Parses a sql with named parameters. The parameter-index mappings are put into the map, and
* the parsed sql is returned.
*
* @param sql sql to parse
* @param paramMap map to hold parameter-index mappings
* @return the parsed sql
*/
public static String parseNamedStatement(String sql, Map<String, List<Integer>> paramMap) {
StringBuilder parsedSql = new StringBuilder();
int fieldIndex = 1; // SQL statement parameter index starts from 1
int length = sql.length();
for (int i = 0; i < length; i++) {
char c = sql.charAt(i);
if (':' == c) {
int j = i + 1;
while (j < length && Character.isJavaIdentifierPart(sql.charAt(j))) {
j++;
}
String parameterName = sql.substring(i + 1, j);
checkArgument(
!parameterName.isEmpty(),
"Named parameters in SQL statement must not be empty.");
paramMap.computeIfAbsent(parameterName, n -> new ArrayList<>()).add(fieldIndex);
fieldIndex++;
i = j - 1;
parsedSql.append('?');
} else {
parsedSql.append(c);
}
}
return parsedSql.toString();
}
}
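// Illustrative sketch (not part of the connector): shows how parseNamedStatement above rewrites
// named parameters into positional '?' placeholders and records their 1-based positions. The
// class name and SQL text below are hypothetical examples only.
class FieldNamedPreparedStatementImplExample {
static void demo() {
java.util.Map<String, java.util.List<Integer>> params = new java.util.HashMap<>();
String parsed = FieldNamedPreparedStatementImpl.parseNamedStatement(
"UPSERT INTO T (id, name) VALUES (:id, :name)", params);
// parsed -> "UPSERT INTO T (id, name) VALUES (?, ?)"
// params -> {id=[1], name=[2]}; a field referenced twice would map to both of its positions,
// which is why the setXxx methods above loop over indexMapping[fieldIndex].
}
}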
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connector.phoenix.statement;
import java.sql.Connection;
import java.sql.SQLException;
/** A factory to create {@link FieldNamedPreparedStatement} with the given {@link Connection}. */
public interface StatementFactory {
/** Creates {@link FieldNamedPreparedStatement} with the given {@link Connection}. */
FieldNamedPreparedStatement createStatement(Connection connection) throws SQLException;
}
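// Usage sketch (illustrative, not part of the connector): StatementFactory has a single method,
// so callers typically supply it as a lambda that prepares a FieldNamedPreparedStatement for a
// given SQL template; the sql and fieldNames values are assumed to come from the dialect.
class StatementFactoryExample {
static StatementFactory forTemplate(String sql, String[] fieldNames) {
return connection ->
FieldNamedPreparedStatement.prepareStatement(connection, sql, fieldNames);
}
}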
package org.apache.flink.connector.phoenix.table;
import org.apache.flink.configuration.ConfigOption;
import org.apache.flink.configuration.ConfigOptions;
import org.apache.flink.configuration.ReadableConfig;
import org.apache.flink.connector.phoenix.JdbcExecutionOptions;
import org.apache.flink.connector.phoenix.dialect.JdbcDialect;
import org.apache.flink.connector.phoenix.dialect.JdbcDialects;
import org.apache.flink.connector.phoenix.internal.options.JdbcDmlOptions;
import org.apache.flink.connector.phoenix.internal.options.JdbcLookupOptions;
import org.apache.flink.connector.phoenix.internal.options.JdbcReadOptions;
import org.apache.flink.connector.phoenix.internal.options.PhoenixJdbcOptions;
import org.apache.flink.table.api.TableSchema;
import org.apache.flink.table.connector.sink.DynamicTableSink;
import org.apache.flink.table.connector.source.DynamicTableSource;
import org.apache.flink.table.factories.DynamicTableSinkFactory;
import org.apache.flink.table.factories.DynamicTableSourceFactory;
import org.apache.flink.table.factories.FactoryUtil;
import org.apache.flink.table.utils.TableSchemaUtils;
import org.apache.flink.util.Preconditions;
import java.time.Duration;
import java.util.Arrays;
import java.util.HashSet;
import java.util.Optional;
import java.util.Set;
/**
* PhoenixDynamicTableFactory
*
* @author gy
* @since 2022/3/17 9:44
**/
public class PhoenixDynamicTableFactory implements DynamicTableSourceFactory, DynamicTableSinkFactory {
public static final String IDENTIFIER = "phoenix-jdbc";
public static final ConfigOption<String> URL = ConfigOptions.key("url").stringType().noDefaultValue().withDescription("The JDBC database URL.");
public static final ConfigOption<String> TABLE_NAME = ConfigOptions.key("table-name").stringType().noDefaultValue().withDescription("The JDBC table name.");
public static final ConfigOption<String> USERNAME = ConfigOptions.key("username").stringType().noDefaultValue().withDescription("The JDBC user name.");
public static final ConfigOption<String> PASSWORD = ConfigOptions.key("password").stringType().noDefaultValue().withDescription("The JDBC password.");
private static final ConfigOption<String> DRIVER = ConfigOptions.key("driver").stringType().noDefaultValue().withDescription("The class name of the JDBC driver to use to connect to this URL. If not set, it will automatically be derived from the URL.");
public static final ConfigOption<Duration> MAX_RETRY_TIMEOUT = ConfigOptions.key("connection.max-retry-timeout").durationType().defaultValue(Duration.ofSeconds(60L)).withDescription("Maximum timeout between retries.");
private static final ConfigOption<String> SCAN_PARTITION_COLUMN = ConfigOptions.key("scan.partition.column").stringType().noDefaultValue().withDescription("The column name used for partitioning the input.");
private static final ConfigOption<Integer> SCAN_PARTITION_NUM = ConfigOptions.key("scan.partition.num").intType().noDefaultValue().withDescription("The number of partitions.");
private static final ConfigOption<Long> SCAN_PARTITION_LOWER_BOUND = ConfigOptions.key("scan.partition.lower-bound").longType().noDefaultValue().withDescription("The smallest value of the first partition.");
private static final ConfigOption<Long> SCAN_PARTITION_UPPER_BOUND = ConfigOptions.key("scan.partition.upper-bound").longType().noDefaultValue().withDescription("The largest value of the last partition.");
private static final ConfigOption<Integer> SCAN_FETCH_SIZE = ConfigOptions.key("scan.fetch-size").intType().defaultValue(0).withDescription("Gives the reader a hint as to the number of rows that should be fetched from the database per round-trip when reading. If the value is zero, this hint is ignored.");
private static final ConfigOption<Boolean> SCAN_AUTO_COMMIT = ConfigOptions.key("scan.auto-commit").booleanType().defaultValue(true).withDescription("Sets whether the driver is in auto-commit mode.");
private static final ConfigOption<Long> LOOKUP_CACHE_MAX_ROWS = ConfigOptions.key("lookup.cache.max-rows").longType().defaultValue(-1L).withDescription("The max number of rows of lookup cache, over this value, the oldest rows will be eliminated. \"cache.max-rows\" and \"cache.ttl\" options must all be specified if any of them is specified.");
private static final ConfigOption<Duration> LOOKUP_CACHE_TTL = ConfigOptions.key("lookup.cache.ttl").durationType().defaultValue(Duration.ofSeconds(10L)).withDescription("The cache time to live.");
private static final ConfigOption<Integer> LOOKUP_MAX_RETRIES = ConfigOptions.key("lookup.max-retries").intType().defaultValue(3).withDescription("The max retry times if lookup database failed.");
private static final ConfigOption<Integer> SINK_BUFFER_FLUSH_MAX_ROWS = ConfigOptions.key("sink.buffer-flush.max-rows").intType().defaultValue(100).withDescription("The flush max size (includes all append, upsert and delete records), over this number of records, will flush data.");
private static final ConfigOption<Duration> SINK_BUFFER_FLUSH_INTERVAL = ConfigOptions.key("sink.buffer-flush.interval").durationType().defaultValue(Duration.ofSeconds(1L)).withDescription("The flush interval mills, over this time, asynchronous threads will flush data.");
private static final ConfigOption<Integer> SINK_MAX_RETRIES = ConfigOptions.key("sink.max-retries").intType().defaultValue(3).withDescription("The max retry times if writing records to database failed.");
public static final ConfigOption<Boolean> SCHEMA_NAMESPACE_MAPPING_ENABLE = ConfigOptions.key("phoenix.schema.isNamespaceMappingEnabled").booleanType().defaultValue(false).withDescription("The JDBC phoenix Schema isNamespaceMappingEnabled.");
public static final ConfigOption<Boolean> SCHEMA_MAP_SYSTEMTABLE_ENABLE = ConfigOptions.key("phoenix.schema.mapSystemTablesToNamespace").booleanType().defaultValue(false).withDescription("The JDBC phoenix mapSystemTablesToNamespace.");
@Override
public DynamicTableSink createDynamicTableSink(Context context) {
FactoryUtil.TableFactoryHelper helper = FactoryUtil.createTableFactoryHelper(this, context);
ReadableConfig config = helper.getOptions();
helper.validate();
this.validateConfigOptions(config);
PhoenixJdbcOptions jdbcOptions = this.getJdbcOptions(config);
TableSchema physicalSchema = TableSchemaUtils.getPhysicalSchema(context.getCatalogTable().getSchema());
return new PhoenixDynamicTableSink(jdbcOptions, this.getJdbcExecutionOptions(config), this.getJdbcDmlOptions(jdbcOptions, physicalSchema), physicalSchema);
}
@Override
public DynamicTableSource createDynamicTableSource(Context context) {
FactoryUtil.TableFactoryHelper helper = FactoryUtil.createTableFactoryHelper(this, context);
ReadableConfig config = helper.getOptions();
helper.validate();
this.validateConfigOptions(config);
TableSchema physicalSchema = TableSchemaUtils.getPhysicalSchema(context.getCatalogTable().getSchema());
//return new JdbcDynamicTableSource(this.getJdbcOptions(helper.getOptions()), this.getJdbcReadOptions(helper.getOptions()), this.getJdbcLookupOptions(helper.getOptions()), physicalSchema);
return new PhoenixDynamicTableSource(this.getJdbcOptions(helper.getOptions()), this.getJdbcReadOptions(helper.getOptions()), this.getJdbcLookupOptions(helper.getOptions()),physicalSchema);
}
private PhoenixJdbcOptions getJdbcOptions(ReadableConfig readableConfig) {
String url = (String)readableConfig.get(URL);
PhoenixJdbcOptions.Builder builder = PhoenixJdbcOptions.builder()
.setDBUrl(url)
.setTableName(readableConfig.get(TABLE_NAME))
.setDialect(JdbcDialects.get(url).get())
.setParallelism(readableConfig.getOptional(FactoryUtil.SINK_PARALLELISM).orElse(null))
.setConnectionCheckTimeoutSeconds((int) readableConfig.get(MAX_RETRY_TIMEOUT).getSeconds())
.setNamespaceMappingEnabled(readableConfig.get(SCHEMA_NAMESPACE_MAPPING_ENABLE))
.setMapSystemTablesToNamespace(readableConfig.get(SCHEMA_MAP_SYSTEMTABLE_ENABLE));
readableConfig.getOptional(DRIVER).ifPresent(builder::setDriverName);
readableConfig.getOptional(USERNAME).ifPresent(builder::setUsername);
readableConfig.getOptional(PASSWORD).ifPresent(builder::setPassword);
return builder.build();
}
private JdbcReadOptions getJdbcReadOptions(ReadableConfig readableConfig) {
Optional<String> partitionColumnName = readableConfig.getOptional(SCAN_PARTITION_COLUMN);
JdbcReadOptions.Builder builder = JdbcReadOptions.builder();
if (partitionColumnName.isPresent()) {
builder.setPartitionColumnName((String)partitionColumnName.get());
builder.setPartitionLowerBound((Long)readableConfig.get(SCAN_PARTITION_LOWER_BOUND));
builder.setPartitionUpperBound((Long)readableConfig.get(SCAN_PARTITION_UPPER_BOUND));
builder.setNumPartitions((Integer)readableConfig.get(SCAN_PARTITION_NUM));
}
readableConfig.getOptional(SCAN_FETCH_SIZE).ifPresent(builder::setFetchSize);
builder.setAutoCommit((Boolean)readableConfig.get(SCAN_AUTO_COMMIT));
return builder.build();
}
private JdbcLookupOptions getJdbcLookupOptions(ReadableConfig readableConfig) {
return new JdbcLookupOptions((Long)readableConfig.get(LOOKUP_CACHE_MAX_ROWS), ((Duration)readableConfig.get(LOOKUP_CACHE_TTL)).toMillis(), (Integer)readableConfig.get(LOOKUP_MAX_RETRIES));
}
private JdbcExecutionOptions getJdbcExecutionOptions(ReadableConfig config) {
JdbcExecutionOptions.Builder builder = new JdbcExecutionOptions.Builder();
builder.withBatchSize((Integer)config.get(SINK_BUFFER_FLUSH_MAX_ROWS));
builder.withBatchIntervalMs(((Duration)config.get(SINK_BUFFER_FLUSH_INTERVAL)).toMillis());
builder.withMaxRetries((Integer)config.get(SINK_MAX_RETRIES));
return builder.build();
}
private JdbcDmlOptions getJdbcDmlOptions(PhoenixJdbcOptions jdbcOptions, TableSchema schema) {
String[] keyFields = schema.getPrimaryKey()
.map(pk -> pk.getColumns().toArray(new String[0]))
.orElse(null);
return JdbcDmlOptions.builder()
.withTableName(jdbcOptions.getTableName())
.withDialect(jdbcOptions.getDialect())
.withFieldNames(schema.getFieldNames())
.withKeyFields(keyFields)
.build();
}
@Override
public String factoryIdentifier() {
return IDENTIFIER;
}
@Override
public Set<ConfigOption<?>> requiredOptions() {
Set<ConfigOption<?>> requiredOptions = new HashSet<>();
requiredOptions.add(URL);
requiredOptions.add(TABLE_NAME);
requiredOptions.add(SCHEMA_NAMESPACE_MAPPING_ENABLE);
requiredOptions.add(SCHEMA_MAP_SYSTEMTABLE_ENABLE);
return requiredOptions;
}
@Override
public Set<ConfigOption<?>> optionalOptions() {
Set<ConfigOption<?>> optionalOptions = new HashSet<>();
optionalOptions.add(DRIVER);
optionalOptions.add(USERNAME);
optionalOptions.add(PASSWORD);
optionalOptions.add(SCAN_PARTITION_COLUMN);
optionalOptions.add(SCAN_PARTITION_LOWER_BOUND);
optionalOptions.add(SCAN_PARTITION_UPPER_BOUND);
optionalOptions.add(SCAN_PARTITION_NUM);
optionalOptions.add(SCAN_FETCH_SIZE);
optionalOptions.add(SCAN_AUTO_COMMIT);
optionalOptions.add(LOOKUP_CACHE_MAX_ROWS);
optionalOptions.add(LOOKUP_CACHE_TTL);
optionalOptions.add(LOOKUP_MAX_RETRIES);
optionalOptions.add(SINK_BUFFER_FLUSH_MAX_ROWS);
optionalOptions.add(SINK_BUFFER_FLUSH_INTERVAL);
optionalOptions.add(SINK_MAX_RETRIES);
optionalOptions.add(FactoryUtil.SINK_PARALLELISM);
optionalOptions.add(MAX_RETRY_TIMEOUT);
//optionalOptions.add(SCHEMA_NAMESPACE_MAPPING_ENABLE);
//optionalOptions.add(SCHEMA_MAP_SYSTEMTABLE_ENABLE);
return optionalOptions;
}
private void validateConfigOptions(ReadableConfig config) {
String jdbcUrl = (String)config.get(URL);
Optional<JdbcDialect> dialect = JdbcDialects.get(jdbcUrl);
Preconditions.checkState(dialect.isPresent(), "Cannot handle such jdbc url: " + jdbcUrl);
this.checkAllOrNone(config, new ConfigOption[]{SCHEMA_NAMESPACE_MAPPING_ENABLE, SCHEMA_MAP_SYSTEMTABLE_ENABLE});
this.checkAllOrNone(config, new ConfigOption[]{USERNAME, PASSWORD});
this.checkAllOrNone(config, new ConfigOption[]{SCAN_PARTITION_COLUMN, SCAN_PARTITION_NUM, SCAN_PARTITION_LOWER_BOUND, SCAN_PARTITION_UPPER_BOUND});
if (config.getOptional(SCAN_PARTITION_LOWER_BOUND).isPresent() && config.getOptional(SCAN_PARTITION_UPPER_BOUND).isPresent()) {
long lowerBound = (Long)config.get(SCAN_PARTITION_LOWER_BOUND);
long upperBound = (Long)config.get(SCAN_PARTITION_UPPER_BOUND);
if (lowerBound > upperBound) {
throw new IllegalArgumentException(String.format("'%s'='%s' must not be larger than '%s'='%s'.", SCAN_PARTITION_LOWER_BOUND.key(), lowerBound, SCAN_PARTITION_UPPER_BOUND.key(), upperBound));
}
}
this.checkAllOrNone(config, new ConfigOption[]{LOOKUP_CACHE_MAX_ROWS, LOOKUP_CACHE_TTL});
if ((Integer)config.get(LOOKUP_MAX_RETRIES) < 0) {
throw new IllegalArgumentException(String.format("The value of '%s' option shouldn't be negative, but is %s.", LOOKUP_MAX_RETRIES.key(), config.get(LOOKUP_MAX_RETRIES)));
} else if ((Integer)config.get(SINK_MAX_RETRIES) < 0) {
throw new IllegalArgumentException(String.format("The value of '%s' option shouldn't be negative, but is %s.", SINK_MAX_RETRIES.key(), config.get(SINK_MAX_RETRIES)));
} else if (((Duration)config.get(MAX_RETRY_TIMEOUT)).getSeconds() <= 0L) {
throw new IllegalArgumentException(String.format("The value of '%s' option must be in second granularity and shouldn't be smaller than 1 second, but is %s.", MAX_RETRY_TIMEOUT.key(), config.get(ConfigOptions.key(MAX_RETRY_TIMEOUT.key()).stringType().noDefaultValue())));
}
}
private void checkAllOrNone(ReadableConfig config, ConfigOption<?>[] configOptions) {
int presentCount = 0;
for (ConfigOption<?> configOption : configOptions) {
if (config.getOptional(configOption).isPresent()) {
++presentCount;
}
}
String[] propertyNames = Arrays.stream(configOptions).map(ConfigOption::key).toArray(String[]::new);
Preconditions.checkArgument(configOptions.length == presentCount || presentCount == 0, "Either all or none of the following options should be provided:\n" + String.join("\n", propertyNames));
}
}
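// Usage sketch (illustrative; the table, columns, and Phoenix URL are assumptions): a Flink SQL
// table registered with the 'phoenix-jdbc' identifier exercises the options declared above.
class PhoenixDynamicTableFactoryExample {
static void register(org.apache.flink.table.api.TableEnvironment tEnv) {
tEnv.executeSql(
"CREATE TABLE orders (" +
" id BIGINT," +
" name STRING," +
" PRIMARY KEY (id) NOT ENFORCED" +
") WITH (" +
" 'connector' = 'phoenix-jdbc'," +
" 'url' = 'jdbc:phoenix:localhost:2181'," +
" 'table-name' = 'ORDERS'," +
" 'phoenix.schema.isNamespaceMappingEnabled' = 'false'," +
" 'phoenix.schema.mapSystemTablesToNamespace' = 'false'" +
")");
}
}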
package org.apache.flink.connector.phoenix.table;
import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.connector.phoenix.JdbcExecutionOptions;
import org.apache.flink.connector.phoenix.internal.GenericJdbcSinkFunction;
import org.apache.flink.connector.phoenix.internal.options.JdbcDmlOptions;
import org.apache.flink.connector.phoenix.internal.options.PhoenixJdbcOptions;
import org.apache.flink.table.api.TableSchema;
import org.apache.flink.table.connector.ChangelogMode;
import org.apache.flink.table.connector.sink.DynamicTableSink;
import org.apache.flink.table.connector.sink.SinkFunctionProvider;
import org.apache.flink.table.data.RowData;
import org.apache.flink.types.RowKind;
import org.apache.flink.util.Preconditions;
import java.util.Objects;
/**
* PhoenixDynamicTableSink
*
* @author gy
* @since 2022/3/17 11:39
**/
public class PhoenixDynamicTableSink implements DynamicTableSink {
private final PhoenixJdbcOptions jdbcOptions;
private final JdbcExecutionOptions executionOptions;
private final JdbcDmlOptions dmlOptions;
private final TableSchema tableSchema;
private final String dialectName;
public PhoenixDynamicTableSink(PhoenixJdbcOptions jdbcOptions, JdbcExecutionOptions executionOptions, JdbcDmlOptions dmlOptions, TableSchema tableSchema) {
this.jdbcOptions = jdbcOptions;
this.executionOptions = executionOptions;
this.dmlOptions = dmlOptions;
this.tableSchema = tableSchema;
this.dialectName = dmlOptions.getDialect().dialectName();
}
@Override
public ChangelogMode getChangelogMode(ChangelogMode requestedMode) {
this.validatePrimaryKey(requestedMode);
return ChangelogMode.newBuilder().addContainedKind(RowKind.INSERT).addContainedKind(RowKind.DELETE).addContainedKind(RowKind.UPDATE_AFTER).build();
}
private void validatePrimaryKey(ChangelogMode requestedMode) {
Preconditions.checkState(ChangelogMode.insertOnly().equals(requestedMode) || this.dmlOptions.getKeyFields().isPresent(), "Please declare a primary key for the sink table when the query contains update/delete records.");
}
@Override
public SinkRuntimeProvider getSinkRuntimeProvider(Context context) {
TypeInformation<RowData> rowDataTypeInformation = context.createTypeInformation(this.tableSchema.toRowDataType());
PhoenixJdbcDynamicOutputFormatBuilder builder = new PhoenixJdbcDynamicOutputFormatBuilder();
builder.setJdbcOptions(this.jdbcOptions);
builder.setJdbcDmlOptions(this.dmlOptions);
builder.setJdbcExecutionOptions(this.executionOptions);
builder.setRowDataTypeInfo(rowDataTypeInformation);
builder.setFieldDataTypes(this.tableSchema.getFieldDataTypes());
return SinkFunctionProvider.of(new GenericJdbcSinkFunction(builder.build()), this.jdbcOptions.getParallelism());
}
@Override
public DynamicTableSink copy() {
return new PhoenixDynamicTableSink(this.jdbcOptions, this.executionOptions, this.dmlOptions, this.tableSchema);
}
@Override
public String asSummaryString() {
return "Phoenix Table Sink " ;
}
public boolean equals(Object o) {
if (this == o) {
return true;
} else if (!(o instanceof PhoenixDynamicTableSink)) {
return false;
} else {
PhoenixDynamicTableSink that = (PhoenixDynamicTableSink)o;
return Objects.equals(this.jdbcOptions, that.jdbcOptions) && Objects.equals(this.executionOptions, that.executionOptions) && Objects.equals(this.dmlOptions, that.dmlOptions) && Objects.equals(this.tableSchema, that.tableSchema) && Objects.equals(this.dialectName, that.dialectName);
}
}
public int hashCode() {
return Objects.hash(new Object[]{this.jdbcOptions, this.executionOptions, this.dmlOptions, this.tableSchema, this.dialectName});
}
}
package org.apache.flink.connector.phoenix.table;
import org.apache.flink.connector.phoenix.dialect.JdbcDialect;
import org.apache.flink.connector.phoenix.internal.options.JdbcLookupOptions;
import org.apache.flink.connector.phoenix.internal.options.JdbcReadOptions;
import org.apache.flink.connector.phoenix.internal.options.PhoenixJdbcOptions;
import org.apache.flink.connector.phoenix.split.JdbcNumericBetweenParametersProvider;
import org.apache.flink.table.api.TableSchema;
import org.apache.flink.table.connector.ChangelogMode;
import org.apache.flink.table.connector.source.*;
import org.apache.flink.table.connector.source.abilities.SupportsLimitPushDown;
import org.apache.flink.table.connector.source.abilities.SupportsProjectionPushDown;
import org.apache.flink.table.types.logical.RowType;
import org.apache.flink.table.utils.TableSchemaUtils;
import org.apache.flink.util.Preconditions;
import java.util.Objects;
/**
* PhoenixDynamicTableSource
*
* @author gy
* @since 2022/3/17 10:40
**/
public class PhoenixDynamicTableSource implements ScanTableSource, LookupTableSource, SupportsProjectionPushDown,
SupportsLimitPushDown {
private final PhoenixJdbcOptions options;
private final JdbcReadOptions readOptions;
private final JdbcLookupOptions lookupOptions;
private TableSchema physicalSchema;
private final String dialectName;
private long limit = -1L;
public PhoenixDynamicTableSource(PhoenixJdbcOptions options, JdbcReadOptions readOptions, JdbcLookupOptions lookupOptions, TableSchema physicalSchema) {
this.options = options;
this.readOptions = readOptions;
this.lookupOptions = lookupOptions;
this.physicalSchema = physicalSchema;
this.dialectName = options.getDialect().dialectName();
}
@Override
public LookupRuntimeProvider getLookupRuntimeProvider(LookupContext context) {
// JDBC only supports non-nested lookup keys
String[] keyNames = new String[context.getKeys().length];
for (int i = 0; i < keyNames.length; i++) {
int[] innerKeyArr = context.getKeys()[i];
Preconditions.checkArgument(
innerKeyArr.length == 1, "JDBC only supports non-nested lookup keys");
keyNames[i] = physicalSchema.getFieldNames()[innerKeyArr[0]];
}
final RowType rowType = (RowType) physicalSchema.toRowDataType().getLogicalType();
return TableFunctionProvider.of(
new PhoenixRowDataLookupFunction(
options,
lookupOptions,
physicalSchema.getFieldNames(),
physicalSchema.getFieldDataTypes(),
keyNames,
rowType));
}
@Override
public ScanRuntimeProvider getScanRuntimeProvider(ScanContext runtimeProviderContext) {
PhoenixJdbcRowDataInputFormat.Builder builder = PhoenixJdbcRowDataInputFormat.builder()
.setDrivername(this.options.getDriverName())
.setDBUrl(this.options.getDbURL())
.setUsername((String)this.options.getUsername().orElse((String) null))
.setPassword((String)this.options.getPassword().orElse((String) null))
.setAutoCommit(this.readOptions.getAutoCommit())
//setting phoenix schema
.setNamespaceMappingEnabled(this.options.getNamespaceMappingEnabled())
.setMapSystemTablesToNamespace(this.options.getMapSystemTablesToNamespace())
;
if (this.readOptions.getFetchSize() != 0) {
builder.setFetchSize(this.readOptions.getFetchSize());
}
JdbcDialect dialect = this.options.getDialect();
String query = dialect.getSelectFromStatement(this.options.getTableName(), this.physicalSchema.getFieldNames(), new String[0]);
if (this.readOptions.getPartitionColumnName().isPresent()) {
long lowerBound = (Long)this.readOptions.getPartitionLowerBound().get();
long upperBound = (Long)this.readOptions.getPartitionUpperBound().get();
int numPartitions = (Integer)this.readOptions.getNumPartitions().get();
builder.setParametersProvider((new JdbcNumericBetweenParametersProvider(lowerBound, upperBound)).ofBatchNum(numPartitions));
query = query + " WHERE " + dialect.quoteIdentifier((String)this.readOptions.getPartitionColumnName().get()) + " BETWEEN ? AND ?";
}
if (this.limit >= 0L) {
query = String.format("%s %s", query, dialect.getLimitClause(this.limit));
}
builder.setQuery(query);
RowType rowType = (RowType)this.physicalSchema.toRowDataType().getLogicalType();
builder.setRowConverter(dialect.getRowConverter(rowType));
builder.setRowDataTypeInfo(runtimeProviderContext.createTypeInformation(this.physicalSchema.toRowDataType()));
return InputFormatProvider.of(builder.build());
}
@Override
public ChangelogMode getChangelogMode() {
return ChangelogMode.insertOnly();
}
@Override
public boolean supportsNestedProjection() {
return false;
}
@Override
public void applyProjection(int[][] projectedFields) {
this.physicalSchema = TableSchemaUtils.projectSchema(this.physicalSchema, projectedFields);
}
public DynamicTableSource copy() {
return new PhoenixDynamicTableSource(this.options, this.readOptions, this.lookupOptions, this.physicalSchema);
}
public String asSummaryString() {
return "JDBC:" + this.dialectName;
}
public boolean equals(Object o) {
if (this == o) {
return true;
} else if (!(o instanceof PhoenixDynamicTableSource)) {
return false;
} else {
PhoenixDynamicTableSource that = (PhoenixDynamicTableSource)o;
return Objects.equals(this.options, that.options) && Objects.equals(this.physicalSchema, that.physicalSchema) && Objects.equals(this.dialectName, that.dialectName) && Objects.equals(this.limit, that.limit);
}
}
public int hashCode() {
return Objects.hash(new Object[]{this.options, this.readOptions, this.lookupOptions, this.physicalSchema, this.dialectName, this.limit});
}
public void applyLimit(long limit) {
this.limit = limit;
}
}
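// Sketch of the scan query assembled in getScanRuntimeProvider above (table and column names are
// illustrative; identifier quoting and the LIMIT text come from the dialect): with
// scan.partition.column = ID plus a pushed-down limit, the template has the rough shape
//   SELECT ID, NAME FROM ORDERS WHERE ID BETWEEN ? AND ? LIMIT 10
// JdbcNumericBetweenParametersProvider#ofBatchNum(numPartitions) then supplies one (lower, upper)
// pair per input split, and PhoenixJdbcRowDataInputFormat#open binds that pair to the two
// placeholders before executing the query.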
package org.apache.flink.connector.phoenix.table;
import org.apache.flink.api.common.functions.RuntimeContext;
import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.api.common.typeutils.TypeSerializer;
import org.apache.flink.connector.phoenix.dialect.JdbcDialect;
import org.apache.flink.connector.phoenix.internal.JdbcBatchingOutputFormat;
import org.apache.flink.connector.phoenix.internal.converter.JdbcRowConverter;
import org.apache.flink.connector.phoenix.internal.executor.TableBufferReducedStatementExecutor;
import org.apache.flink.connector.phoenix.internal.executor.TableInsertOrUpdateStatementExecutor;
import org.apache.flink.connector.phoenix.internal.executor.TableSimpleStatementExecutor;
import org.apache.flink.connector.phoenix.statement.FieldNamedPreparedStatement;
import org.apache.flink.connector.phoenix.JdbcExecutionOptions;
import org.apache.flink.connector.phoenix.internal.connection.PhoneixJdbcConnectionProvider;
import org.apache.flink.connector.phoenix.internal.executor.JdbcBatchStatementExecutor;
import org.apache.flink.connector.phoenix.internal.executor.TableBufferedStatementExecutor;
import org.apache.flink.connector.phoenix.internal.options.JdbcDmlOptions;
import org.apache.flink.connector.phoenix.internal.options.PhoenixJdbcOptions;
import org.apache.flink.table.data.GenericRowData;
import org.apache.flink.table.data.RowData;
import org.apache.flink.table.types.DataType;
import org.apache.flink.table.types.logical.LogicalType;
import org.apache.flink.table.types.logical.RowType;
import java.io.Serializable;
import java.util.Arrays;
import java.util.function.Function;
import static org.apache.flink.table.data.RowData.createFieldGetter;
import static org.apache.flink.util.Preconditions.checkArgument;
import static org.apache.flink.util.Preconditions.checkNotNull;
/**
* PhoenixJdbcDynamicOutputFormatBuilder
*
* @author gy
* @since 2022/3/17 11:43
**/
public class PhoenixJdbcDynamicOutputFormatBuilder implements Serializable {
private static final long serialVersionUID = 1L;
private PhoenixJdbcOptions jdbcOptions;
private JdbcExecutionOptions executionOptions;
private JdbcDmlOptions dmlOptions;
private TypeInformation<RowData> rowDataTypeInformation;
private DataType[] fieldDataTypes;
public PhoenixJdbcDynamicOutputFormatBuilder() {}
public PhoenixJdbcDynamicOutputFormatBuilder setJdbcOptions(PhoenixJdbcOptions jdbcOptions) {
this.jdbcOptions = jdbcOptions;
return this;
}
public PhoenixJdbcDynamicOutputFormatBuilder setJdbcExecutionOptions(
JdbcExecutionOptions executionOptions) {
this.executionOptions = executionOptions;
return this;
}
public PhoenixJdbcDynamicOutputFormatBuilder setJdbcDmlOptions(JdbcDmlOptions dmlOptions) {
this.dmlOptions = dmlOptions;
return this;
}
public PhoenixJdbcDynamicOutputFormatBuilder setRowDataTypeInfo(
TypeInformation<RowData> rowDataTypeInfo) {
this.rowDataTypeInformation = rowDataTypeInfo;
return this;
}
public PhoenixJdbcDynamicOutputFormatBuilder setFieldDataTypes(DataType[] fieldDataTypes) {
this.fieldDataTypes = fieldDataTypes;
return this;
}
public JdbcBatchingOutputFormat<RowData, ?, ?> build() {
checkNotNull(jdbcOptions, "jdbc options can not be null");
checkNotNull(dmlOptions, "jdbc dml options can not be null");
checkNotNull(executionOptions, "jdbc execution options can not be null");
final LogicalType[] logicalTypes =
Arrays.stream(fieldDataTypes)
.map(DataType::getLogicalType)
.toArray(LogicalType[]::new);
if (dmlOptions.getKeyFields().isPresent() && dmlOptions.getKeyFields().get().length > 0) {
// upsert query
return new JdbcBatchingOutputFormat<>(
new PhoneixJdbcConnectionProvider(jdbcOptions),
executionOptions,
ctx ->
createBufferReduceExecutor(
dmlOptions, ctx, rowDataTypeInformation, logicalTypes),
JdbcBatchingOutputFormat.RecordExtractor.identity());
} else {
// append only query
final String sql =
dmlOptions
.getDialect()
.getInsertIntoStatement(
dmlOptions.getTableName(), dmlOptions.getFieldNames());
return new JdbcBatchingOutputFormat<>(
new PhoneixJdbcConnectionProvider(jdbcOptions),
executionOptions,
ctx ->
createSimpleBufferedExecutor(
ctx,
dmlOptions.getDialect(),
dmlOptions.getFieldNames(),
logicalTypes,
sql,
rowDataTypeInformation),
JdbcBatchingOutputFormat.RecordExtractor.identity());
}
}
private static JdbcBatchStatementExecutor<RowData> createBufferReduceExecutor(
JdbcDmlOptions opt,
RuntimeContext ctx,
TypeInformation<RowData> rowDataTypeInfo,
LogicalType[] fieldTypes) {
checkArgument(opt.getKeyFields().isPresent());
JdbcDialect dialect = opt.getDialect();
String tableName = opt.getTableName();
String[] pkNames = opt.getKeyFields().get();
int[] pkFields =
Arrays.stream(pkNames)
.mapToInt(Arrays.asList(opt.getFieldNames())::indexOf)
.toArray();
LogicalType[] pkTypes =
Arrays.stream(pkFields).mapToObj(f -> fieldTypes[f]).toArray(LogicalType[]::new);
final TypeSerializer<RowData> typeSerializer =
rowDataTypeInfo.createSerializer(ctx.getExecutionConfig());
final Function<RowData, RowData> valueTransform =
ctx.getExecutionConfig().isObjectReuseEnabled()
? typeSerializer::copy
: Function.identity();
return new TableBufferReducedStatementExecutor(
createUpsertRowExecutor(
dialect,
tableName,
opt.getFieldNames(),
fieldTypes,
pkFields,
pkNames,
pkTypes),
createDeleteExecutor(dialect, tableName, pkNames, pkTypes),
createRowKeyExtractor(fieldTypes, pkFields),
valueTransform);
}
private static JdbcBatchStatementExecutor<RowData> createSimpleBufferedExecutor(
RuntimeContext ctx,
JdbcDialect dialect,
String[] fieldNames,
LogicalType[] fieldTypes,
String sql,
TypeInformation<RowData> rowDataTypeInfo) {
final TypeSerializer<RowData> typeSerializer =
rowDataTypeInfo.createSerializer(ctx.getExecutionConfig());
return new TableBufferedStatementExecutor(
createSimpleRowExecutor(dialect, fieldNames, fieldTypes, sql),
ctx.getExecutionConfig().isObjectReuseEnabled()
? typeSerializer::copy
: Function.identity());
}
private static JdbcBatchStatementExecutor<RowData> createUpsertRowExecutor(
JdbcDialect dialect,
String tableName,
String[] fieldNames,
LogicalType[] fieldTypes,
int[] pkFields,
String[] pkNames,
LogicalType[] pkTypes) {
return dialect.getUpsertStatement(tableName, fieldNames, pkNames)
.map(sql -> createSimpleRowExecutor(dialect, fieldNames, fieldTypes, sql))
.orElseGet(
() ->
createInsertOrUpdateExecutor(
dialect,
tableName,
fieldNames,
fieldTypes,
pkFields,
pkNames,
pkTypes));
}
private static JdbcBatchStatementExecutor<RowData> createDeleteExecutor(
JdbcDialect dialect, String tableName, String[] pkNames, LogicalType[] pkTypes) {
String deleteSql = dialect.getDeleteStatement(tableName, pkNames);
return createSimpleRowExecutor(dialect, pkNames, pkTypes, deleteSql);
}
private static JdbcBatchStatementExecutor<RowData> createSimpleRowExecutor(
JdbcDialect dialect, String[] fieldNames, LogicalType[] fieldTypes, final String sql) {
final JdbcRowConverter rowConverter = dialect.getRowConverter(RowType.of(fieldTypes));
return new TableSimpleStatementExecutor(
connection ->
FieldNamedPreparedStatement.prepareStatement(connection, sql, fieldNames),
rowConverter);
}
private static JdbcBatchStatementExecutor<RowData> createInsertOrUpdateExecutor(
JdbcDialect dialect,
String tableName,
String[] fieldNames,
LogicalType[] fieldTypes,
int[] pkFields,
String[] pkNames,
LogicalType[] pkTypes) {
final String existStmt = dialect.getRowExistsStatement(tableName, pkNames);
final String insertStmt = dialect.getInsertIntoStatement(tableName, fieldNames);
final String updateStmt = dialect.getUpdateStatement(tableName, fieldNames, pkNames);
return new TableInsertOrUpdateStatementExecutor(
connection ->
FieldNamedPreparedStatement.prepareStatement(
connection, existStmt, pkNames),
connection ->
FieldNamedPreparedStatement.prepareStatement(
connection, insertStmt, fieldNames),
connection ->
FieldNamedPreparedStatement.prepareStatement(
connection, updateStmt, fieldNames),
dialect.getRowConverter(RowType.of(pkTypes)),
dialect.getRowConverter(RowType.of(fieldTypes)),
dialect.getRowConverter(RowType.of(fieldTypes)),
createRowKeyExtractor(fieldTypes, pkFields));
}
private static Function<RowData, RowData> createRowKeyExtractor(
LogicalType[] logicalTypes, int[] pkFields) {
final RowData.FieldGetter[] fieldGetters = new RowData.FieldGetter[pkFields.length];
for (int i = 0; i < pkFields.length; i++) {
fieldGetters[i] = createFieldGetter(logicalTypes[pkFields[i]], pkFields[i]);
}
return row -> getPrimaryKey(row, fieldGetters);
}
private static RowData getPrimaryKey(RowData row, RowData.FieldGetter[] fieldGetters) {
GenericRowData pkRow = new GenericRowData(fieldGetters.length);
for (int i = 0; i < fieldGetters.length; i++) {
pkRow.setField(i, fieldGetters[i].getFieldOrNull(row));
}
return pkRow;
}
}
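// Wiring sketch (illustrative, not part of the connector): mirrors how
// PhoenixDynamicTableSink#getSinkRuntimeProvider feeds this builder; the option objects are
// assumed to be configured elsewhere.
class PhoenixJdbcDynamicOutputFormatBuilderExample {
static JdbcBatchingOutputFormat<RowData, ?, ?> build(
PhoenixJdbcOptions jdbcOptions,
JdbcDmlOptions dmlOptions,
JdbcExecutionOptions executionOptions,
TypeInformation<RowData> rowDataTypeInfo,
DataType[] fieldDataTypes) {
// With key fields present, build() takes the upsert/delete path; otherwise it emits plain inserts.
return new PhoenixJdbcDynamicOutputFormatBuilder()
.setJdbcOptions(jdbcOptions)
.setJdbcDmlOptions(dmlOptions)
.setJdbcExecutionOptions(executionOptions)
.setRowDataTypeInfo(rowDataTypeInfo)
.setFieldDataTypes(fieldDataTypes)
.build();
}
}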
package org.apache.flink.connector.phoenix.table;
import org.apache.flink.api.common.io.DefaultInputSplitAssigner;
import org.apache.flink.api.common.io.RichInputFormat;
import org.apache.flink.api.common.io.statistics.BaseStatistics;
import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.api.java.typeutils.ResultTypeQueryable;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.connector.phoenix.JdbcConnectionOptions;
import org.apache.flink.connector.phoenix.internal.connection.JdbcConnectionProvider;
import org.apache.flink.connector.phoenix.internal.connection.PhoneixJdbcConnectionProvider;
import org.apache.flink.connector.phoenix.internal.converter.JdbcRowConverter;
import org.apache.flink.connector.phoenix.split.JdbcParameterValuesProvider;
import org.apache.flink.core.io.GenericInputSplit;
import org.apache.flink.core.io.InputSplit;
import org.apache.flink.core.io.InputSplitAssigner;
import org.apache.flink.table.data.RowData;
import org.apache.flink.util.Preconditions;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.math.BigDecimal;
import java.sql.*;
import java.util.Arrays;
/**
* PhoenixJdbcRowDataInputFormat
*
* @author gy
* @since 2022/3/17 10:53
**/
public class PhoenixJdbcRowDataInputFormat extends RichInputFormat<RowData, InputSplit> implements ResultTypeQueryable<RowData> {
private static final long serialVersionUID = 2L;
private static final Logger LOG = LoggerFactory.getLogger(PhoenixJdbcRowDataInputFormat.class);
private JdbcConnectionProvider connectionProvider;
private int fetchSize;
private Boolean autoCommit;
private Object[][] parameterValues;
private String queryTemplate;
private int resultSetType;
private int resultSetConcurrency;
private JdbcRowConverter rowConverter;
private TypeInformation<RowData> rowDataTypeInfo;
private transient PreparedStatement statement;
private transient ResultSet resultSet;
private transient boolean hasNext;
private boolean namespaceMappingEnabled;
private boolean mapSystemTablesEnabled;
private PhoenixJdbcRowDataInputFormat(
JdbcConnectionProvider connectionProvider,
int fetchSize, Boolean autoCommit, Object[][] parameterValues,
String queryTemplate, int resultSetType, int resultSetConcurrency,
JdbcRowConverter rowConverter, TypeInformation<RowData> rowDataTypeInfo,
boolean namespaceMappingEnabled,boolean mapSystemTablesEnabled) {
this.connectionProvider = connectionProvider;
this.fetchSize = fetchSize;
this.autoCommit = autoCommit;
this.parameterValues = parameterValues;
this.queryTemplate = queryTemplate;
this.resultSetType = resultSetType;
this.resultSetConcurrency = resultSetConcurrency;
this.rowConverter = rowConverter;
this.rowDataTypeInfo = rowDataTypeInfo;
this.namespaceMappingEnabled = namespaceMappingEnabled;
this.mapSystemTablesEnabled = mapSystemTablesEnabled;
}
public void configure(Configuration parameters) {
}
public void openInputFormat() {
try {
Connection dbConn = this.connectionProvider.getOrEstablishConnection();
if (this.autoCommit != null) {
dbConn.setAutoCommit(this.autoCommit);
}
this.statement = dbConn.prepareStatement(this.queryTemplate, this.resultSetType, this.resultSetConcurrency);
if (this.fetchSize == Integer.MIN_VALUE || this.fetchSize > 0) {
this.statement.setFetchSize(this.fetchSize);
}
} catch (SQLException var2) {
throw new IllegalArgumentException("open() failed." + var2.getMessage(), var2);
} catch (ClassNotFoundException var3) {
throw new IllegalArgumentException("JDBC-Class not found. - " + var3.getMessage(), var3);
}
}
public void closeInputFormat() {
try {
if (this.statement != null) {
this.statement.close();
}
} catch (SQLException var5) {
LOG.info("Inputformat Statement couldn't be closed - " + var5.getMessage());
} finally {
this.statement = null;
}
this.connectionProvider.closeConnection();
this.parameterValues = null;
}
public void open(InputSplit inputSplit) throws IOException {
try {
if (inputSplit != null && this.parameterValues != null) {
for(int i = 0; i < this.parameterValues[inputSplit.getSplitNumber()].length; ++i) {
Object param = this.parameterValues[inputSplit.getSplitNumber()][i];
if (param instanceof String) {
this.statement.setString(i + 1, (String)param);
} else if (param instanceof Long) {
this.statement.setLong(i + 1, (Long)param);
} else if (param instanceof Integer) {
this.statement.setInt(i + 1, (Integer)param);
} else if (param instanceof Double) {
this.statement.setDouble(i + 1, (Double)param);
} else if (param instanceof Boolean) {
this.statement.setBoolean(i + 1, (Boolean)param);
} else if (param instanceof Float) {
this.statement.setFloat(i + 1, (Float)param);
} else if (param instanceof BigDecimal) {
this.statement.setBigDecimal(i + 1, (BigDecimal)param);
} else if (param instanceof Byte) {
this.statement.setByte(i + 1, (Byte)param);
} else if (param instanceof Short) {
this.statement.setShort(i + 1, (Short)param);
} else if (param instanceof Date) {
this.statement.setDate(i + 1, (Date)param);
} else if (param instanceof Time) {
this.statement.setTime(i + 1, (Time)param);
} else if (param instanceof Timestamp) {
this.statement.setTimestamp(i + 1, (Timestamp)param);
} else {
if (!(param instanceof Array)) {
throw new IllegalArgumentException("open() failed. Parameter " + i + " of type " + param.getClass() + " is not handled (yet).");
}
this.statement.setArray(i + 1, (Array)param);
}
}
if (LOG.isDebugEnabled()) {
LOG.debug(String.format("Executing '%s' with parameters %s", this.queryTemplate, Arrays.deepToString(this.parameterValues[inputSplit.getSplitNumber()])));
}
}
this.resultSet = this.statement.executeQuery();
this.hasNext = this.resultSet.next();
} catch (SQLException var4) {
throw new IllegalArgumentException("open() failed." + var4.getMessage(), var4);
}
}
public void close() throws IOException {
if (this.resultSet != null) {
try {
this.resultSet.close();
} catch (SQLException var2) {
LOG.info("Inputformat ResultSet couldn't be closed - " + var2.getMessage());
}
}
}
public TypeInformation<RowData> getProducedType() {
return this.rowDataTypeInfo;
}
public boolean reachedEnd() throws IOException {
return !this.hasNext;
}
public RowData nextRecord(RowData reuse) throws IOException {
try {
if (!this.hasNext) {
return null;
} else {
RowData row = this.rowConverter.toInternal(this.resultSet);
this.hasNext = this.resultSet.next();
return row;
}
} catch (SQLException var3) {
throw new IOException("Couldn't read data - " + var3.getMessage(), var3);
} catch (NullPointerException var4) {
throw new IOException("Couldn't access resultSet", var4);
}
}
public BaseStatistics getStatistics(BaseStatistics cachedStatistics) throws IOException {
return cachedStatistics;
}
public InputSplit[] createInputSplits(int minNumSplits) throws IOException {
if (this.parameterValues == null) {
return new GenericInputSplit[]{new GenericInputSplit(0, 1)};
} else {
GenericInputSplit[] ret = new GenericInputSplit[this.parameterValues.length];
for(int i = 0; i < ret.length; ++i) {
ret[i] = new GenericInputSplit(i, ret.length);
}
return ret;
}
}
public InputSplitAssigner getInputSplitAssigner(InputSplit[] inputSplits) {
return new DefaultInputSplitAssigner(inputSplits);
}
public static Builder builder() {
return new Builder();
}
public static class Builder {
private JdbcConnectionOptions.JdbcConnectionOptionsBuilder connOptionsBuilder = new JdbcConnectionOptions.JdbcConnectionOptionsBuilder();
private int fetchSize;
private Boolean autoCommit;
private Object[][] parameterValues;
private String queryTemplate;
private JdbcRowConverter rowConverter;
private TypeInformation<RowData> rowDataTypeInfo;
private int resultSetType = ResultSet.TYPE_FORWARD_ONLY;
private int resultSetConcurrency = ResultSet.CONCUR_READ_ONLY;
private boolean namespaceMappingEnabled;
private boolean mapSystemTablesEnabled;
public Builder() {
}
public Builder setDrivername(String drivername) {
this.connOptionsBuilder.withDriverName(drivername);
return this;
}
public Builder setDBUrl(String dbURL) {
this.connOptionsBuilder.withUrl(dbURL);
return this;
}
public Builder setUsername(String username) {
this.connOptionsBuilder.withUsername(username);
return this;
}
public Builder setPassword(String password) {
this.connOptionsBuilder.withPassword(password);
return this;
}
public Builder setQuery(String query) {
this.queryTemplate = query;
return this;
}
public Builder setParametersProvider(JdbcParameterValuesProvider parameterValuesProvider) {
this.parameterValues = parameterValuesProvider.getParameterValues();
return this;
}
public Builder setRowDataTypeInfo(TypeInformation<RowData> rowDataTypeInfo) {
this.rowDataTypeInfo = rowDataTypeInfo;
return this;
}
public Builder setRowConverter(JdbcRowConverter rowConverter) {
this.rowConverter = rowConverter;
return this;
}
public Builder setFetchSize(int fetchSize) {
Preconditions.checkArgument(fetchSize == Integer.MIN_VALUE || fetchSize > 0, "Illegal value %s for fetchSize, has to be positive or Integer.MIN_VALUE.", fetchSize);
this.fetchSize = fetchSize;
return this;
}
public Builder setAutoCommit(boolean autoCommit) {
this.autoCommit = autoCommit;
return this;
}
public Builder setResultSetType(int resultSetType) {
this.resultSetType = resultSetType;
return this;
}
public Builder setResultSetConcurrency(int resultSetConcurrency) {
this.resultSetConcurrency = resultSetConcurrency;
return this;
}
public Builder setNamespaceMappingEnabled(Boolean namespaceMappingEnabled) {
this.namespaceMappingEnabled = namespaceMappingEnabled;
return this;
}
public Builder setMapSystemTablesToNamespace(Boolean mapSystemTablesEnabled) {
this.mapSystemTablesEnabled = mapSystemTablesEnabled;
return this;
}
public PhoenixJdbcRowDataInputFormat build() {
if (this.queryTemplate == null) {
throw new NullPointerException("No query supplied");
} else if (this.rowConverter == null) {
throw new NullPointerException("No row converter supplied");
} else {
if (this.parameterValues == null) {
PhoenixJdbcRowDataInputFormat.LOG.debug("No input splitting configured (data will be read with parallelism 1).");
}
return new PhoenixJdbcRowDataInputFormat(new PhoneixJdbcConnectionProvider(this.connOptionsBuilder.build(),this.namespaceMappingEnabled,this.mapSystemTablesEnabled), this.fetchSize, this.autoCommit, this.parameterValues, this.queryTemplate, this.resultSetType, this.resultSetConcurrency, this.rowConverter, this.rowDataTypeInfo,this.namespaceMappingEnabled,this.mapSystemTablesEnabled);
}
}
}
}
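// Minimal builder sketch (illustrative; the driver class, URL, and query are placeholders, and the
// converter/type information are assumed to come from the Phoenix dialect and table schema):
class PhoenixJdbcRowDataInputFormatExample {
static PhoenixJdbcRowDataInputFormat build(
JdbcRowConverter rowConverter, TypeInformation<RowData> producedTypeInfo) {
return PhoenixJdbcRowDataInputFormat.builder()
.setDrivername("org.apache.phoenix.jdbc.PhoenixDriver") // assumed Phoenix driver class
.setDBUrl("jdbc:phoenix:localhost:2181") // placeholder URL
.setQuery("SELECT ID, NAME FROM ORDERS") // placeholder query
.setRowConverter(rowConverter)
.setRowDataTypeInfo(producedTypeInfo)
.setNamespaceMappingEnabled(false)
.setMapSystemTablesToNamespace(false)
.build();
}
}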
package org.apache.flink.connector.phoenix.table;
import org.apache.flink.runtime.state.FunctionInitializationContext;
import org.apache.flink.runtime.state.FunctionSnapshotContext;
import org.apache.flink.streaming.api.checkpoint.CheckpointedFunction;
import org.apache.flink.streaming.api.functions.sink.RichSinkFunction;
/**
* PhoenixJdbcSinkFunction
*
* @author gy
* @since 2022/3/17 17:41
**/
public class PhoenixJdbcSinkFunction<T> extends RichSinkFunction<T> implements CheckpointedFunction {
@Override
public void snapshotState(FunctionSnapshotContext functionSnapshotContext) throws Exception {
}
@Override
public void initializeState(FunctionInitializationContext functionInitializationContext) throws Exception {
}
@Override
public void invoke(T value) throws Exception {
}
@Override
public void invoke(T value, Context context) throws Exception {
}
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connector.phoenix.table;
import org.apache.flink.annotation.VisibleForTesting;
import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.api.java.typeutils.RowTypeInfo;
import org.apache.flink.connector.phoenix.internal.connection.JdbcConnectionProvider;
import org.apache.flink.connector.phoenix.internal.connection.PhoneixJdbcConnectionProvider;
import org.apache.flink.connector.phoenix.internal.options.JdbcLookupOptions;
import org.apache.flink.connector.phoenix.internal.options.JdbcOptions;
import org.apache.flink.connector.phoenix.statement.FieldNamedPreparedStatementImpl;
import org.apache.flink.connector.phoenix.utils.JdbcTypeUtil;
import org.apache.flink.connector.phoenix.utils.JdbcUtils;
import org.apache.flink.shaded.guava30.com.google.common.cache.Cache;
import org.apache.flink.shaded.guava30.com.google.common.cache.CacheBuilder;
import org.apache.flink.table.functions.FunctionContext;
import org.apache.flink.table.functions.TableFunction;
import org.apache.flink.types.Row;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.concurrent.TimeUnit;
import static org.apache.flink.connector.phoenix.utils.JdbcUtils.getFieldFromResultSet;
import static org.apache.flink.util.Preconditions.checkArgument;
import static org.apache.flink.util.Preconditions.checkNotNull;
/**
* A {@link TableFunction} to query fields from JDBC by keys. The query template looks like:
*
* <PRE>
* SELECT c, d, e, f from T where a = ? and b = ?
* </PRE>
*
* <p>Supports caching results to avoid frequently accessing the remote database. 1. A cacheMaxSize
* of -1 disables the cache. 2. For real-time data, set a TTL on the cache.
*/
public class PhoenixLookupFunction extends TableFunction<Row> {
private static final Logger LOG = LoggerFactory.getLogger(PhoenixLookupFunction.class);
private static final long serialVersionUID = 2L;
private final String query;
private final JdbcConnectionProvider connectionProvider;
private final TypeInformation[] keyTypes;
private final int[] keySqlTypes;
private final String[] fieldNames;
private final String[] keyNames;
private final TypeInformation[] fieldTypes;
private final int[] outputSqlTypes;
private final long cacheMaxSize;
private final long cacheExpireMs;
private final int maxRetryTimes;
private transient PreparedStatement statement;
private transient Cache<Row, List<Row>> cache;
public PhoenixLookupFunction(
JdbcOptions options,
JdbcLookupOptions lookupOptions,
String[] fieldNames,
TypeInformation[] fieldTypes,
String[] keyNames) {
this.connectionProvider = new PhoneixJdbcConnectionProvider(options);
this.fieldNames = fieldNames;
this.fieldTypes = fieldTypes;
this.keyNames = keyNames;
List<String> nameList = Arrays.asList(fieldNames);
this.keyTypes =
Arrays.stream(keyNames)
.map(
s -> {
checkArgument(
nameList.contains(s),
"keyName %s can't find in fieldNames %s.",
s,
nameList);
return fieldTypes[nameList.indexOf(s)];
})
.toArray(TypeInformation[]::new);
this.cacheMaxSize = lookupOptions.getCacheMaxSize();
this.cacheExpireMs = lookupOptions.getCacheExpireMs();
this.maxRetryTimes = lookupOptions.getMaxRetryTimes();
this.keySqlTypes =
Arrays.stream(keyTypes).mapToInt(JdbcTypeUtil::typeInformationToSqlType).toArray();
this.outputSqlTypes =
Arrays.stream(fieldTypes)
.mapToInt(JdbcTypeUtil::typeInformationToSqlType)
.toArray();
this.query =
FieldNamedPreparedStatementImpl.parseNamedStatement(
options.getDialect()
.getSelectFromStatement(
options.getTableName(), fieldNames, keyNames),
new HashMap<>());
}
public static Builder builder() {
return new Builder();
}
@Override
public void open(FunctionContext context) throws Exception {
try {
establishConnectionAndStatement();
this.cache =
cacheMaxSize == -1 || cacheExpireMs == -1
? null
: CacheBuilder.newBuilder()
.expireAfterWrite(cacheExpireMs, TimeUnit.MILLISECONDS)
.maximumSize(cacheMaxSize)
.build();
} catch (SQLException sqe) {
throw new IllegalArgumentException("open() failed.", sqe);
} catch (ClassNotFoundException cnfe) {
throw new IllegalArgumentException("JDBC driver class not found.", cnfe);
}
}
public void eval(Object... keys) {
Row keyRow = Row.of(keys);
if (cache != null) {
List<Row> cachedRows = cache.getIfPresent(keyRow);
if (cachedRows != null) {
for (Row cachedRow : cachedRows) {
collect(cachedRow);
}
return;
}
}
for (int retry = 0; retry <= maxRetryTimes; retry++) {
try {
statement.clearParameters();
for (int i = 0; i < keys.length; i++) {
JdbcUtils.setField(statement, keySqlTypes[i], keys[i], i);
}
try (ResultSet resultSet = statement.executeQuery()) {
if (cache == null) {
while (resultSet.next()) {
collect(convertToRowFromResultSet(resultSet));
}
} else {
ArrayList<Row> rows = new ArrayList<>();
while (resultSet.next()) {
Row row = convertToRowFromResultSet(resultSet);
rows.add(row);
collect(row);
}
rows.trimToSize();
cache.put(keyRow, rows);
}
}
break;
} catch (SQLException e) {
LOG.error(String.format("JDBC executeBatch error, retry times = %d", retry), e);
if (retry >= maxRetryTimes) {
throw new RuntimeException("Execution of JDBC statement failed.", e);
}
try {
if (!connectionProvider.isConnectionValid()) {
statement.close();
connectionProvider.closeConnection();
establishConnectionAndStatement();
}
} catch (SQLException | ClassNotFoundException exception) {
LOG.error(
"JDBC connection is not valid, and re-establishing the connection failed",
exception);
throw new RuntimeException("Re-establishing the JDBC connection failed", exception);
}
try {
Thread.sleep(1000 * retry);
} catch (InterruptedException e1) {
throw new RuntimeException(e1);
}
}
}
}
private Row convertToRowFromResultSet(ResultSet resultSet) throws SQLException {
Row row = new Row(outputSqlTypes.length);
for (int i = 0; i < outputSqlTypes.length; i++) {
row.setField(i, getFieldFromResultSet(i, outputSqlTypes[i], resultSet));
}
return row;
}
private void establishConnectionAndStatement() throws SQLException, ClassNotFoundException {
Connection dbConn = connectionProvider.getOrEstablishConnection();
statement = dbConn.prepareStatement(query);
}
@Override
public void close() throws IOException {
if (cache != null) {
cache.cleanUp();
cache = null;
}
if (statement != null) {
try {
statement.close();
} catch (SQLException e) {
LOG.info("JDBC statement could not be closed: " + e.getMessage());
} finally {
statement = null;
}
}
connectionProvider.closeConnection();
}
@VisibleForTesting
public Connection getDbConnection() {
return connectionProvider.getConnection();
}
@Override
public TypeInformation<Row> getResultType() {
return new RowTypeInfo(fieldTypes, fieldNames);
}
@Override
public TypeInformation<?>[] getParameterTypes(Class<?>[] signature) {
return keyTypes;
}
/** Builder for a {@link PhoenixLookupFunction}. */
public static class Builder {
private JdbcOptions options;
private JdbcLookupOptions lookupOptions;
protected String[] fieldNames;
protected TypeInformation[] fieldTypes;
protected String[] keyNames;
/** required, jdbc options. */
public Builder setOptions(JdbcOptions options) {
this.options = options;
return this;
}
/** optional, lookup related options. */
public Builder setLookupOptions(JdbcLookupOptions lookupOptions) {
this.lookupOptions = lookupOptions;
return this;
}
/** required, field names of this jdbc table. */
public Builder setFieldNames(String[] fieldNames) {
this.fieldNames = fieldNames;
return this;
}
/** required, field types of this jdbc table. */
public Builder setFieldTypes(TypeInformation[] fieldTypes) {
this.fieldTypes = fieldTypes;
return this;
}
/** required, key names to query this jdbc table. */
public Builder setKeyNames(String[] keyNames) {
this.keyNames = keyNames;
return this;
}
/**
* Finalizes the configuration and checks validity.
*
* @return Configured PhoenixLookupFunction
*/
public PhoenixLookupFunction build() {
checkNotNull(options, "No JdbcOptions supplied.");
if (lookupOptions == null) {
lookupOptions = JdbcLookupOptions.builder().build();
}
checkNotNull(fieldNames, "No fieldNames supplied.");
checkNotNull(fieldTypes, "No fieldTypes supplied.");
checkNotNull(keyNames, "No keyNames supplied.");
return new PhoenixLookupFunction(options, lookupOptions, fieldNames, fieldTypes, keyNames);
}
}
}
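// Builder sketch (illustrative field names and types; the JdbcOptions instance is assumed to be
// configured elsewhere). Lookup options fall back to their defaults when not supplied.
class PhoenixLookupFunctionExample {
static PhoenixLookupFunction build(JdbcOptions options) {
return PhoenixLookupFunction.builder()
.setOptions(options)
.setFieldNames(new String[] {"ID", "NAME"})
.setFieldTypes(new TypeInformation[] {
org.apache.flink.api.common.typeinfo.Types.LONG,
org.apache.flink.api.common.typeinfo.Types.STRING})
.setKeyNames(new String[] {"ID"})
.build();
}
}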
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connector.phoenix.table;
import org.apache.flink.annotation.Internal;
import org.apache.flink.annotation.VisibleForTesting;
import org.apache.flink.connector.phoenix.dialect.JdbcDialect;
import org.apache.flink.connector.phoenix.dialect.JdbcDialects;
import org.apache.flink.connector.phoenix.internal.connection.JdbcConnectionProvider;
import org.apache.flink.connector.phoenix.internal.connection.PhoneixJdbcConnectionProvider;
import org.apache.flink.connector.phoenix.internal.converter.JdbcRowConverter;
import org.apache.flink.connector.phoenix.internal.options.JdbcLookupOptions;
import org.apache.flink.connector.phoenix.internal.options.PhoenixJdbcOptions;
import org.apache.flink.connector.phoenix.statement.FieldNamedPreparedStatement;
import org.apache.flink.shaded.guava30.com.google.common.cache.Cache;
import org.apache.flink.shaded.guava30.com.google.common.cache.CacheBuilder;
import org.apache.flink.table.data.GenericRowData;
import org.apache.flink.table.data.RowData;
import org.apache.flink.table.functions.FunctionContext;
import org.apache.flink.table.functions.TableFunction;
import org.apache.flink.table.types.DataType;
import org.apache.flink.table.types.logical.LogicalType;
import org.apache.flink.table.types.logical.RowType;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.sql.Connection;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.concurrent.TimeUnit;
import static org.apache.flink.util.Preconditions.checkArgument;
import static org.apache.flink.util.Preconditions.checkNotNull;
/** A lookup function for {@link PhoenixDynamicTableSource}. */
@Internal
public class PhoenixRowDataLookupFunction extends TableFunction<RowData> {
private static final Logger LOG = LoggerFactory.getLogger(PhoenixRowDataLookupFunction.class);
private static final long serialVersionUID = 2L;
private final String query;
private final JdbcConnectionProvider connectionProvider;
private final DataType[] keyTypes;
private final String[] keyNames;
private final long cacheMaxSize;
private final long cacheExpireMs;
private final int maxRetryTimes;
private final JdbcDialect jdbcDialect;
private final JdbcRowConverter jdbcRowConverter;
private final JdbcRowConverter lookupKeyRowConverter;
private transient FieldNamedPreparedStatement statement;
private transient Cache<RowData, List<RowData>> cache;
public PhoenixRowDataLookupFunction(
PhoenixJdbcOptions options,
JdbcLookupOptions lookupOptions,
String[] fieldNames,
DataType[] fieldTypes,
String[] keyNames,
RowType rowType) {
checkNotNull(options, "No JdbcOptions supplied.");
checkNotNull(fieldNames, "No fieldNames supplied.");
checkNotNull(fieldTypes, "No fieldTypes supplied.");
checkNotNull(keyNames, "No keyNames supplied.");
this.connectionProvider = new PhoneixJdbcConnectionProvider(options,options.getNamespaceMappingEnabled(),options.getMapSystemTablesToNamespace());
this.keyNames = keyNames;
List<String> nameList = Arrays.asList(fieldNames);
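// Resolve each lookup key's DataType by its position in the full field list; unknown key names fail fast.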
this.keyTypes =
Arrays.stream(keyNames)
.map(
s -> {
checkArgument(
nameList.contains(s),
"keyName %s can't find in fieldNames %s.",
s,
nameList);
return fieldTypes[nameList.indexOf(s)];
})
.toArray(DataType[]::new);
this.cacheMaxSize = lookupOptions.getCacheMaxSize();
this.cacheExpireMs = lookupOptions.getCacheExpireMs();
this.maxRetryTimes = lookupOptions.getMaxRetryTimes();
this.query =
options.getDialect()
.getSelectFromStatement(options.getTableName(), fieldNames, keyNames);
String dbURL = options.getDbURL();
this.jdbcDialect =
JdbcDialects.get(dbURL)
.orElseThrow(
() ->
new UnsupportedOperationException(
String.format("Unknown dbUrl:%s", dbURL)));
this.jdbcRowConverter = jdbcDialect.getRowConverter(rowType);
this.lookupKeyRowConverter =
jdbcDialect.getRowConverter(
RowType.of(
Arrays.stream(keyTypes)
.map(DataType::getLogicalType)
.toArray(LogicalType[]::new)));
}
@Override
public void open(FunctionContext context) throws Exception {
try {
establishConnectionAndStatement();
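// Lookup caching stays disabled (cache remains null) unless both a max size and a TTL are configured (-1 disables it).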
this.cache =
cacheMaxSize == -1 || cacheExpireMs == -1
? null
: CacheBuilder.newBuilder()
.expireAfterWrite(cacheExpireMs, TimeUnit.MILLISECONDS)
.maximumSize(cacheMaxSize)
.build();
} catch (SQLException sqe) {
throw new IllegalArgumentException("open() failed.", sqe);
} catch (ClassNotFoundException cnfe) {
throw new IllegalArgumentException("JDBC driver class not found.", cnfe);
}
}
/**
* This is a lookup method which is called by the Flink framework at runtime.
*
* @param keys lookup keys
*/
public void eval(Object... keys) {
RowData keyRow = GenericRowData.of(keys);
if (cache != null) {
List<RowData> cachedRows = cache.getIfPresent(keyRow);
if (cachedRows != null) {
for (RowData cachedRow : cachedRows) {
collect(cachedRow);
}
return;
}
}
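// Cache miss: query Phoenix with up to maxRetryTimes attempts, re-establishing the connection if it became invalid and backing off linearly between attempts.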
for (int retry = 0; retry <= maxRetryTimes; retry++) {
try {
statement.clearParameters();
statement = lookupKeyRowConverter.toExternal(keyRow, statement);
try (ResultSet resultSet = statement.executeQuery()) {
if (cache == null) {
while (resultSet.next()) {
collect(jdbcRowConverter.toInternal(resultSet));
}
} else {
ArrayList<RowData> rows = new ArrayList<>();
while (resultSet.next()) {
RowData row = jdbcRowConverter.toInternal(resultSet);
rows.add(row);
collect(row);
}
rows.trimToSize();
cache.put(keyRow, rows);
}
}
break;
} catch (SQLException e) {
LOG.error(String.format("JDBC executeBatch error, retry times = %d", retry), e);
if (retry >= maxRetryTimes) {
throw new RuntimeException("Execution of JDBC statement failed.", e);
}
try {
if (!connectionProvider.isConnectionValid()) {
statement.close();
connectionProvider.closeConnection();
establishConnectionAndStatement();
}
} catch (SQLException | ClassNotFoundException exception) {
LOG.error(
"JDBC connection is not valid, and re-establishing the connection failed",
exception);
throw new RuntimeException("Reestablish JDBC connection failed", exception);
}
try {
Thread.sleep(1000 * retry);
} catch (InterruptedException e1) {
throw new RuntimeException(e1);
}
}
}
}
private void establishConnectionAndStatement() throws SQLException, ClassNotFoundException {
Connection dbConn = connectionProvider.getOrEstablishConnection();
statement = FieldNamedPreparedStatement.prepareStatement(dbConn, query, keyNames);
}
@Override
public void close() throws IOException {
if (cache != null) {
cache.cleanUp();
cache = null;
}
if (statement != null) {
try {
statement.close();
} catch (SQLException e) {
LOG.info("JDBC statement could not be closed: " + e.getMessage());
} finally {
statement = null;
}
}
connectionProvider.closeConnection();
}
@VisibleForTesting
public Connection getDbConnection() {
return connectionProvider.getConnection();
}
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connector.phoenix.table;
import org.apache.flink.api.java.typeutils.RowTypeInfo;
import org.apache.flink.connector.phoenix.PhoenixInputFormat;
import org.apache.flink.connector.phoenix.dialect.JdbcDialect;
import org.apache.flink.connector.phoenix.internal.options.JdbcLookupOptions;
import org.apache.flink.connector.phoenix.internal.options.JdbcOptions;
import org.apache.flink.connector.phoenix.internal.options.JdbcReadOptions;
import org.apache.flink.connector.phoenix.split.JdbcNumericBetweenParametersProvider;
import org.apache.flink.connector.phoenix.statement.FieldNamedPreparedStatementImpl;
import org.apache.flink.connector.phoenix.utils.JdbcTypeUtil;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.TableSchema;
import org.apache.flink.table.functions.AsyncTableFunction;
import org.apache.flink.table.functions.TableFunction;
import org.apache.flink.table.sources.LookupableTableSource;
import org.apache.flink.table.sources.ProjectableTableSource;
import org.apache.flink.table.sources.StreamTableSource;
import org.apache.flink.table.sources.TableSource;
import org.apache.flink.table.types.DataType;
import org.apache.flink.table.utils.TableConnectorUtils;
import org.apache.flink.types.Row;
import java.util.Arrays;
import java.util.HashMap;
import java.util.Objects;
import static org.apache.flink.table.types.utils.TypeConversions.fromDataTypeToLegacyInfo;
import static org.apache.flink.util.Preconditions.checkNotNull;
/** {@link TableSource} for Phoenix JDBC. */
public class PhoenixTableSource
implements StreamTableSource<Row>, ProjectableTableSource<Row>, LookupableTableSource<Row> {
private final JdbcOptions options;
private final JdbcReadOptions readOptions;
private final JdbcLookupOptions lookupOptions;
private final TableSchema schema;
// index of fields selected, null means that all fields are selected
private final int[] selectFields;
private final DataType producedDataType;
private PhoenixTableSource(
JdbcOptions options,
JdbcReadOptions readOptions,
JdbcLookupOptions lookupOptions,
TableSchema schema) {
this(options, readOptions, lookupOptions, schema, null);
}
private PhoenixTableSource(
JdbcOptions options,
JdbcReadOptions readOptions,
JdbcLookupOptions lookupOptions,
TableSchema schema,
int[] selectFields) {
this.options = options;
this.readOptions = readOptions;
this.lookupOptions = lookupOptions;
this.schema = schema;
this.selectFields = selectFields;
final DataType[] schemaDataTypes = schema.getFieldDataTypes();
final String[] schemaFieldNames = schema.getFieldNames();
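// When a projection has been pushed down, narrow the produced row type to the selected fields only.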
if (selectFields != null) {
DataType[] dataTypes = new DataType[selectFields.length];
String[] fieldNames = new String[selectFields.length];
for (int i = 0; i < selectFields.length; i++) {
dataTypes[i] = schemaDataTypes[selectFields[i]];
fieldNames[i] = schemaFieldNames[selectFields[i]];
}
this.producedDataType =
TableSchema.builder().fields(fieldNames, dataTypes).build().toRowDataType();
} else {
this.producedDataType = schema.toRowDataType();
}
}
@Override
public boolean isBounded() {
return true;
}
@Override
public DataStream<Row> getDataStream(StreamExecutionEnvironment execEnv) {
return execEnv.createInput(
getInputFormat(), (RowTypeInfo) fromDataTypeToLegacyInfo(producedDataType))
.name(explainSource());
}
@Override
public TableFunction<Row> getLookupFunction(String[] lookupKeys) {
final RowTypeInfo rowTypeInfo = (RowTypeInfo) fromDataTypeToLegacyInfo(producedDataType);
return PhoenixLookupFunction.builder()
.setOptions(options)
.setLookupOptions(lookupOptions)
.setFieldTypes(rowTypeInfo.getFieldTypes())
.setFieldNames(rowTypeInfo.getFieldNames())
.setKeyNames(lookupKeys)
.build();
}
@Override
public DataType getProducedDataType() {
return producedDataType;
}
@Override
public TableSource<Row> projectFields(int[] fields) {
return new PhoenixTableSource(options, readOptions, lookupOptions, schema, fields);
}
@Override
public AsyncTableFunction<Row> getAsyncLookupFunction(String[] lookupKeys) {
throw new UnsupportedOperationException();
}
@Override
public boolean isAsyncEnabled() {
return false;
}
@Override
public TableSchema getTableSchema() {
return schema;
}
@Override
public String explainSource() {
final RowTypeInfo rowTypeInfo = (RowTypeInfo) fromDataTypeToLegacyInfo(producedDataType);
return TableConnectorUtils.generateRuntimeName(getClass(), rowTypeInfo.getFieldNames());
}
public static Builder builder() {
return new Builder();
}
private PhoenixInputFormat getInputFormat() {
final RowTypeInfo rowTypeInfo = (RowTypeInfo) fromDataTypeToLegacyInfo(producedDataType);
PhoenixInputFormat.PhoenixInputFormatBuilder builder =
PhoenixInputFormat.buildJdbcInputFormat()
.setDrivername(options.getDriverName())
.setDBUrl(options.getDbURL())
.setRowTypeInfo(
new RowTypeInfo(
rowTypeInfo.getFieldTypes(), rowTypeInfo.getFieldNames()))
// add Phoenix-specific configuration (namespace mapping / system table mapping) support
.setMapSystemTablesEnabled(options.isMapSystemTablesEnabled())
.setNamespaceMappingEnabled(options.isNamespaceMappingEnabled());
options.getUsername().ifPresent(builder::setUsername);
options.getPassword().ifPresent(builder::setPassword);
if (readOptions.getFetchSize() != 0) {
builder.setFetchSize(readOptions.getFetchSize());
}
final JdbcDialect dialect = options.getDialect();
String query = getBaseQueryStatement(rowTypeInfo);
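// When a partition column is configured, split the scan into numPartitions ranges and append a parameterized BETWEEN predicate; each split binds its own bounds.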
if (readOptions.getPartitionColumnName().isPresent()) {
long lowerBound = readOptions.getPartitionLowerBound().get();
long upperBound = readOptions.getPartitionUpperBound().get();
int numPartitions = readOptions.getNumPartitions().get();
builder.setParametersProvider(
new JdbcNumericBetweenParametersProvider(lowerBound, upperBound)
.ofBatchNum(numPartitions));
query +=
" WHERE "
+ dialect.quoteIdentifier(readOptions.getPartitionColumnName().get())
+ " BETWEEN ? AND ?";
}
builder.setQuery(query);
return builder.finish();
}
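// Base scan statement: the user-provided read query if present, otherwise a generated SELECT over all fields with no condition columns.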
private String getBaseQueryStatement(RowTypeInfo rowTypeInfo) {
return readOptions
.getQuery()
.orElseGet(
() ->
FieldNamedPreparedStatementImpl.parseNamedStatement(
options.getDialect()
.getSelectFromStatement(
options.getTableName(),
rowTypeInfo.getFieldNames(),
new String[0]),
new HashMap<>()));
}
@Override
public boolean equals(Object o) {
if (o instanceof PhoenixTableSource) {
PhoenixTableSource source = (PhoenixTableSource) o;
return Objects.equals(options, source.options)
&& Objects.equals(readOptions, source.readOptions)
&& Objects.equals(lookupOptions, source.lookupOptions)
&& Objects.equals(schema, source.schema)
&& Arrays.equals(selectFields, source.selectFields);
} else {
return false;
}
}
/** Builder for a {@link PhoenixTableSource}. */
public static class Builder {
private JdbcOptions options;
private JdbcReadOptions readOptions;
private JdbcLookupOptions lookupOptions;
protected TableSchema schema;
/** required, jdbc options. */
public Builder setOptions(JdbcOptions options) {
this.options = options;
return this;
}
/**
* optional, scan related options. {@link JdbcReadOptions} will only be used for {@link
* StreamTableSource}.
*/
public Builder setReadOptions(JdbcReadOptions readOptions) {
this.readOptions = readOptions;
return this;
}
/**
* optional, lookup related options. {@link JdbcLookupOptions} will only be used for {@link
* LookupableTableSource}.
*/
public Builder setLookupOptions(JdbcLookupOptions lookupOptions) {
this.lookupOptions = lookupOptions;
return this;
}
/** required, table schema of this table source. */
public Builder setSchema(TableSchema schema) {
this.schema = JdbcTypeUtil.normalizeTableSchema(schema);
return this;
}
/**
* Finalizes the configuration and checks validity.
*
* @return Configured PhoenixTableSource
*/
public PhoenixTableSource build() {
checkNotNull(options, "No options supplied.");
checkNotNull(schema, "No schema supplied.");
if (readOptions == null) {
readOptions = JdbcReadOptions.builder().build();
}
if (lookupOptions == null) {
lookupOptions = JdbcLookupOptions.builder().build();
}
return new PhoenixTableSource(options, readOptions, lookupOptions, schema);
}
}
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connector.phoenix.table;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.connector.phoenix.dialect.JdbcDialects;
import org.apache.flink.connector.phoenix.internal.options.JdbcLookupOptions;
import org.apache.flink.connector.phoenix.internal.options.JdbcOptions;
import org.apache.flink.connector.phoenix.internal.options.JdbcReadOptions;
import org.apache.flink.connector.phoenix.utils.PhoenixJdbcValidator;
import org.apache.flink.table.api.TableSchema;
import org.apache.flink.table.descriptors.DescriptorProperties;
import org.apache.flink.table.descriptors.SchemaValidator;
import org.apache.flink.table.factories.StreamTableSinkFactory;
import org.apache.flink.table.factories.StreamTableSourceFactory;
import org.apache.flink.table.sinks.StreamTableSink;
import org.apache.flink.table.sources.StreamTableSource;
import org.apache.flink.table.utils.TableSchemaUtils;
import org.apache.flink.types.Row;
import java.util.*;
import static org.apache.flink.connector.phoenix.utils.PhoenixJdbcValidator.*;
import static org.apache.flink.table.descriptors.ConnectorDescriptorValidator.CONNECTOR_PROPERTY_VERSION;
import static org.apache.flink.table.descriptors.ConnectorDescriptorValidator.CONNECTOR_TYPE;
import static org.apache.flink.table.descriptors.DescriptorProperties.*;
import static org.apache.flink.table.descriptors.Schema.*;
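/** Factory for creating the legacy Phoenix {@link StreamTableSource} and upsert {@link StreamTableSink}. */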
public class PhoenixTableSourceSinkFactory
implements StreamTableSourceFactory<Row>, StreamTableSinkFactory<Tuple2<Boolean, Row>> {
@Override
public Map<String, String> requiredContext() {
Map<String, String> context = new HashMap<>();
//context.put(CONNECTOR_TYPE, CONNECTOR_TYPE_VALUE_JDBC); // jdbc
context.put(CONNECTOR_TYPE,CONNECTOR_TYPE_VALUE_JDBC); // phoenix
context.put(CONNECTOR_PROPERTY_VERSION, "1"); // backwards compatibility
return context;
}
@Override
public List<String> supportedProperties() {
List<String> properties = new ArrayList<>();
//phoenix
properties.add(PHOENIX_SCHEMA_NAMESPACE_MAPPING_ENABLE);
properties.add(PHOENIX_SCHEMA_MAP_SYSTEMTABLE_ENABLE);
// common options
properties.add(CONNECTOR_DRIVER);
properties.add(CONNECTOR_URL);
properties.add(CONNECTOR_TABLE);
properties.add(CONNECTOR_USERNAME);
properties.add(CONNECTOR_PASSWORD);
properties.add(CONNECTOR_CONNECTION_MAX_RETRY_TIMEOUT);
// scan options
properties.add(CONNECTOR_READ_QUERY);
properties.add(CONNECTOR_READ_PARTITION_COLUMN);
properties.add(CONNECTOR_READ_PARTITION_NUM);
properties.add(CONNECTOR_READ_PARTITION_LOWER_BOUND);
properties.add(CONNECTOR_READ_PARTITION_UPPER_BOUND);
properties.add(CONNECTOR_READ_FETCH_SIZE);
// lookup options
properties.add(CONNECTOR_LOOKUP_CACHE_MAX_ROWS);
properties.add(CONNECTOR_LOOKUP_CACHE_TTL);
properties.add(CONNECTOR_LOOKUP_MAX_RETRIES);
// sink options
properties.add(CONNECTOR_WRITE_FLUSH_MAX_ROWS);
properties.add(CONNECTOR_WRITE_FLUSH_INTERVAL);
properties.add(CONNECTOR_WRITE_MAX_RETRIES);
// schema
properties.add(SCHEMA + ".#." + SCHEMA_DATA_TYPE);
properties.add(SCHEMA + ".#." + SCHEMA_TYPE);
properties.add(SCHEMA + ".#." + SCHEMA_NAME);
// computed column
properties.add(SCHEMA + ".#." + EXPR);
// watermark
properties.add(SCHEMA + "." + WATERMARK + ".#." + WATERMARK_ROWTIME);
properties.add(SCHEMA + "." + WATERMARK + ".#." + WATERMARK_STRATEGY_EXPR);
properties.add(SCHEMA + "." + WATERMARK + ".#." + WATERMARK_STRATEGY_DATA_TYPE);
// table constraint
properties.add(SCHEMA + "." + DescriptorProperties.PRIMARY_KEY_NAME);
properties.add(SCHEMA + "." + DescriptorProperties.PRIMARY_KEY_COLUMNS);
// comment
properties.add(COMMENT);
return properties;
}
@Override
public StreamTableSource<Row> createStreamTableSource(Map<String, String> properties) {
DescriptorProperties descriptorProperties = getValidatedProperties(properties);
TableSchema schema =
TableSchemaUtils.getPhysicalSchema(descriptorProperties.getTableSchema(SCHEMA));
return PhoenixTableSource.builder()
.setOptions(getJdbcOptions(descriptorProperties))
.setReadOptions(getJdbcReadOptions(descriptorProperties))
.setLookupOptions(getJdbcLookupOptions(descriptorProperties))
.setSchema(schema)
.build();
}
@Override
public StreamTableSink<Tuple2<Boolean, Row>> createStreamTableSink(
Map<String, String> properties) {
DescriptorProperties descriptorProperties = getValidatedProperties(properties);
TableSchema schema =
TableSchemaUtils.getPhysicalSchema(descriptorProperties.getTableSchema(SCHEMA));
final PhoenixUpsertTableSink.Builder builder =
PhoenixUpsertTableSink.builder()
.setOptions(getJdbcOptions(descriptorProperties))
.setTableSchema(schema);
descriptorProperties
.getOptionalInt(CONNECTOR_WRITE_FLUSH_MAX_ROWS)
.ifPresent(builder::setFlushMaxSize);
descriptorProperties
.getOptionalDuration(CONNECTOR_WRITE_FLUSH_INTERVAL)
.ifPresent(s -> builder.setFlushIntervalMills(s.toMillis()));
descriptorProperties
.getOptionalInt(CONNECTOR_WRITE_MAX_RETRIES)
.ifPresent(builder::setMaxRetryTimes);
return builder.build();
}
private DescriptorProperties getValidatedProperties(Map<String, String> properties) {
final DescriptorProperties descriptorProperties = new DescriptorProperties(true);
descriptorProperties.putProperties(properties);
new SchemaValidator(true, false, false).validate(descriptorProperties);
new PhoenixJdbcValidator().validate(descriptorProperties);
return descriptorProperties;
}
private JdbcOptions getJdbcOptions(DescriptorProperties descriptorProperties) {
final String url = descriptorProperties.getString(CONNECTOR_URL);
final JdbcOptions.Builder builder =
JdbcOptions.builder()
.setDBUrl(url)
.setTableName(descriptorProperties.getString(CONNECTOR_TABLE))
.setDialect(JdbcDialects.get(url).get())
// these Phoenix switches are validated as optional, so read them defensively and default to false when absent
.setNamespaceMappingEnabled(
descriptorProperties
.getOptionalString(PHOENIX_SCHEMA_NAMESPACE_MAPPING_ENABLE)
.map(Boolean::parseBoolean)
.orElse(false))
.setMapSystemTablesEnabled(
descriptorProperties
.getOptionalString(PHOENIX_SCHEMA_MAP_SYSTEMTABLE_ENABLE)
.map(Boolean::parseBoolean)
.orElse(false));
descriptorProperties
.getOptionalDuration(CONNECTOR_CONNECTION_MAX_RETRY_TIMEOUT)
.ifPresent(s -> builder.setConnectionCheckTimeoutSeconds((int) s.getSeconds()));
descriptorProperties.getOptionalString(CONNECTOR_DRIVER).ifPresent(builder::setDriverName);
descriptorProperties.getOptionalString(CONNECTOR_USERNAME).ifPresent(builder::setUsername);
descriptorProperties.getOptionalString(CONNECTOR_PASSWORD).ifPresent(builder::setPassword);
return builder.build();
}
private JdbcReadOptions getJdbcReadOptions(DescriptorProperties descriptorProperties) {
final Optional<String> query = descriptorProperties.getOptionalString(CONNECTOR_READ_QUERY);
final Optional<String> partitionColumnName =
descriptorProperties.getOptionalString(CONNECTOR_READ_PARTITION_COLUMN);
final Optional<Long> partitionLower =
descriptorProperties.getOptionalLong(CONNECTOR_READ_PARTITION_LOWER_BOUND);
final Optional<Long> partitionUpper =
descriptorProperties.getOptionalLong(CONNECTOR_READ_PARTITION_UPPER_BOUND);
final Optional<Integer> numPartitions =
descriptorProperties.getOptionalInt(CONNECTOR_READ_PARTITION_NUM);
final JdbcReadOptions.Builder builder = JdbcReadOptions.builder();
if (query.isPresent()) {
builder.setQuery(query.get());
}
if (partitionColumnName.isPresent()) {
builder.setPartitionColumnName(partitionColumnName.get());
builder.setPartitionLowerBound(partitionLower.get());
builder.setPartitionUpperBound(partitionUpper.get());
builder.setNumPartitions(numPartitions.get());
}
descriptorProperties
.getOptionalInt(CONNECTOR_READ_FETCH_SIZE)
.ifPresent(builder::setFetchSize);
return builder.build();
}
private JdbcLookupOptions getJdbcLookupOptions(DescriptorProperties descriptorProperties) {
final JdbcLookupOptions.Builder builder = JdbcLookupOptions.builder();
descriptorProperties
.getOptionalLong(CONNECTOR_LOOKUP_CACHE_MAX_ROWS)
.ifPresent(builder::setCacheMaxSize);
descriptorProperties
.getOptionalDuration(CONNECTOR_LOOKUP_CACHE_TTL)
.ifPresent(s -> builder.setCacheExpireMs(s.toMillis()));
descriptorProperties
.getOptionalInt(CONNECTOR_LOOKUP_MAX_RETRIES)
.ifPresent(builder::setMaxRetryTimes);
return builder.build();
}
}
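// ---------------------------------------------------------------------------------------------
// Illustrative sketch only (not part of this connector): how the legacy descriptor properties
// handled by PhoenixTableSourceSinkFactory might be assembled programmatically. The JDBC URL,
// table name and schema entries below are placeholder assumptions, not values from the source.
// ---------------------------------------------------------------------------------------------
// Map<String, String> props = new HashMap<>();
// props.put("connector.type", "phoenix");
// props.put("connector.url", "jdbc:phoenix:localhost:2181");            // placeholder
// props.put("connector.table", "TEST.PHOENIX_TABLE");                   // placeholder
// props.put("phoenix.schema.isnamespacemappingenabled", "true");
// props.put("phoenix.schema.mapsystemtablestonamespace", "true");
// props.put("schema.0.name", "id");                                     // assumed legacy schema keys
// props.put("schema.0.data-type", "BIGINT");
// StreamTableSource<Row> source = new PhoenixTableSourceSinkFactory().createStreamTableSource(props);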
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connector.phoenix.table;
import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.api.common.typeinfo.Types;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.api.java.typeutils.RowTypeInfo;
import org.apache.flink.api.java.typeutils.TupleTypeInfo;
import org.apache.flink.connector.phoenix.JdbcExecutionOptions;
import org.apache.flink.connector.phoenix.internal.AbstractJdbcOutputFormat;
import org.apache.flink.connector.phoenix.internal.GenericJdbcSinkFunction;
import org.apache.flink.connector.phoenix.internal.JdbcBatchingOutputFormat;
import org.apache.flink.connector.phoenix.internal.executor.JdbcBatchStatementExecutor;
import org.apache.flink.connector.phoenix.internal.options.JdbcOptions;
import org.apache.flink.connector.phoenix.utils.JdbcTypeUtil;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.datastream.DataStreamSink;
import org.apache.flink.table.api.TableSchema;
import org.apache.flink.table.api.ValidationException;
import org.apache.flink.table.sinks.TableSink;
import org.apache.flink.table.sinks.UpsertStreamTableSink;
import org.apache.flink.table.utils.TableConnectorUtils;
import org.apache.flink.table.utils.TableSchemaUtils;
import org.apache.flink.types.Row;
import java.util.Arrays;
import java.util.Objects;
import static org.apache.flink.util.Preconditions.checkNotNull;
/** An upsert {@link UpsertStreamTableSink} for Phoenix JDBC. */
public class PhoenixUpsertTableSink implements UpsertStreamTableSink<Row> {
private final TableSchema schema;
private final JdbcOptions options;
private final int flushMaxSize;
private final long flushIntervalMills;
private final int maxRetryTime;
private String[] keyFields;
private boolean isAppendOnly;
private PhoenixUpsertTableSink(
TableSchema schema,
JdbcOptions options,
int flushMaxSize,
long flushIntervalMills,
int maxRetryTime) {
this.schema = TableSchemaUtils.checkOnlyPhysicalColumns(schema);
this.options = options;
this.flushMaxSize = flushMaxSize;
this.flushIntervalMills = flushIntervalMills;
this.maxRetryTime = maxRetryTime;
}
private JdbcBatchingOutputFormat<Tuple2<Boolean, Row>, Row, JdbcBatchStatementExecutor<Row>>
newFormat() {
if (!isAppendOnly && (keyFields == null || keyFields.length == 0)) {
throw new UnsupportedOperationException(
"PhoenixUpsertTableSink requires key fields when the input stream is not append-only.");
}
// sql types
int[] jdbcSqlTypes =
Arrays.stream(schema.getFieldTypes())
.mapToInt(JdbcTypeUtil::typeInformationToSqlType)
.toArray();
return JdbcBatchingOutputFormat.builder()
.setOptions(options)
.setFieldNames(schema.getFieldNames())
.setFlushMaxSize(flushMaxSize)
.setFlushIntervalMills(flushIntervalMills)
.setMaxRetryTimes(maxRetryTime)
.setFieldTypes(jdbcSqlTypes)
.setKeyFields(keyFields)
.build();
}
@Override
public DataStreamSink<?> consumeDataStream(DataStream<Tuple2<Boolean, Row>> dataStream) {
return dataStream
.addSink(new GenericJdbcSinkFunction<>(newFormat()))
//.addSink(new PhoenixSinkFunction(
// options,
// new PhoneixJdbcConnectionProvider(options,options.isNamespaceMappingEnabled(), options.isMapSystemTablesEnabled()),
// getFieldNames(),
// keyFields,
// jdbcSqlTypes
// ))
.setParallelism(dataStream.getParallelism())
.name(
TableConnectorUtils.generateRuntimeName(
this.getClass(), schema.getFieldNames()));
}
@Override
public void setKeyFields(String[] keys) {
this.keyFields = keys;
}
@Override
public void setIsAppendOnly(Boolean isAppendOnly) {
this.isAppendOnly = isAppendOnly;
}
@Override
public TypeInformation<Tuple2<Boolean, Row>> getOutputType() {
return new TupleTypeInfo<>(Types.BOOLEAN, getRecordType());
}
@Override
public TypeInformation<Row> getRecordType() {
return new RowTypeInfo(schema.getFieldTypes(), schema.getFieldNames());
}
@Override
public String[] getFieldNames() {
return schema.getFieldNames();
}
@Override
public TypeInformation<?>[] getFieldTypes() {
return schema.getFieldTypes();
}
@Override
public TableSink<Tuple2<Boolean, Row>> configure(
String[] fieldNames, TypeInformation<?>[] fieldTypes) {
if (!Arrays.equals(getFieldNames(), fieldNames)
|| !Arrays.equals(getFieldTypes(), fieldTypes)) {
throw new ValidationException(
"Reconfiguration with different fields is not allowed. "
+ "Expected: "
+ Arrays.toString(getFieldNames())
+ " / "
+ Arrays.toString(getFieldTypes())
+ ". "
+ "But was: "
+ Arrays.toString(fieldNames)
+ " / "
+ Arrays.toString(fieldTypes));
}
PhoenixUpsertTableSink copy =
new PhoenixUpsertTableSink(
schema, options, flushMaxSize, flushIntervalMills, maxRetryTime);
copy.keyFields = keyFields;
return copy;
}
public static Builder builder() {
return new Builder();
}
@Override
public boolean equals(Object o) {
if (o instanceof PhoenixUpsertTableSink) {
PhoenixUpsertTableSink sink = (PhoenixUpsertTableSink) o;
return Objects.equals(schema, sink.schema)
&& Objects.equals(options, sink.options)
&& Objects.equals(flushMaxSize, sink.flushMaxSize)
&& Objects.equals(flushIntervalMills, sink.flushIntervalMills)
&& Objects.equals(maxRetryTime, sink.maxRetryTime)
&& Arrays.equals(keyFields, sink.keyFields)
&& Objects.equals(isAppendOnly, sink.isAppendOnly);
} else {
return false;
}
}
/** Builder for a {@link PhoenixUpsertTableSink}. */
public static class Builder {
protected TableSchema schema;
private JdbcOptions options;
protected int flushMaxSize = AbstractJdbcOutputFormat.DEFAULT_FLUSH_MAX_SIZE;
protected long flushIntervalMills = AbstractJdbcOutputFormat.DEFAULT_FLUSH_INTERVAL_MILLS;
protected int maxRetryTimes = JdbcExecutionOptions.DEFAULT_MAX_RETRY_TIMES;
/** required, table schema of this table source. */
public Builder setTableSchema(TableSchema schema) {
this.schema = JdbcTypeUtil.normalizeTableSchema(schema);
return this;
}
/** required, jdbc options. */
public Builder setOptions(JdbcOptions options) {
this.options = options;
return this;
}
/**
* optional, max buffered rows (counting all append, upsert and delete records); once this
* number of records is exceeded, buffered data is flushed.
*/
public Builder setFlushMaxSize(int flushMaxSize) {
this.flushMaxSize = flushMaxSize;
return this;
}
/** optional, flush interval in milliseconds; once it elapses, the asynchronous flush thread writes buffered data. */
public Builder setFlushIntervalMills(long flushIntervalMills) {
this.flushIntervalMills = flushIntervalMills;
return this;
}
/** optional, max retry times for jdbc connector. */
public Builder setMaxRetryTimes(int maxRetryTimes) {
this.maxRetryTimes = maxRetryTimes;
return this;
}
public PhoenixUpsertTableSink build() {
checkNotNull(schema, "No schema supplied.");
checkNotNull(options, "No options supplied.");
return new PhoenixUpsertTableSink(
schema, options, flushMaxSize, flushIntervalMills, maxRetryTimes);
}
}
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connector.phoenix.utils;
import org.apache.flink.annotation.Internal;
import org.apache.flink.api.common.typeinfo.LocalTimeTypeInfo;
import org.apache.flink.api.common.typeinfo.PrimitiveArrayTypeInfo;
import org.apache.flink.api.common.typeinfo.SqlTimeTypeInfo;
import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.api.java.typeutils.ObjectArrayTypeInfo;
import org.apache.flink.table.api.TableSchema;
import org.apache.flink.table.types.DataType;
import org.apache.flink.table.types.inference.TypeTransformations;
import org.apache.flink.table.types.utils.DataTypeUtils;
import java.sql.Types;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import static org.apache.flink.api.common.typeinfo.BasicTypeInfo.*;
import static org.apache.flink.api.common.typeinfo.PrimitiveArrayTypeInfo.BYTE_PRIMITIVE_ARRAY_TYPE_INFO;
/** Utils for jdbc type. */
@Internal
public class JdbcTypeUtil {
private static final Map<TypeInformation<?>, Integer> TYPE_MAPPING;
private static final Map<Integer, String> SQL_TYPE_NAMES;
static {
HashMap<TypeInformation<?>, Integer> m = new HashMap<>();
m.put(STRING_TYPE_INFO, Types.VARCHAR);
m.put(BOOLEAN_TYPE_INFO, Types.BOOLEAN);
m.put(BYTE_TYPE_INFO, Types.TINYINT);
m.put(SHORT_TYPE_INFO, Types.SMALLINT);
m.put(INT_TYPE_INFO, Types.INTEGER);
m.put(LONG_TYPE_INFO, Types.BIGINT);
m.put(FLOAT_TYPE_INFO, Types.REAL);
m.put(DOUBLE_TYPE_INFO, Types.DOUBLE);
m.put(SqlTimeTypeInfo.DATE, Types.DATE);
m.put(SqlTimeTypeInfo.TIME, Types.TIME);
m.put(SqlTimeTypeInfo.TIMESTAMP, Types.TIMESTAMP);
m.put(LocalTimeTypeInfo.LOCAL_DATE, Types.DATE);
m.put(LocalTimeTypeInfo.LOCAL_TIME, Types.TIME);
m.put(LocalTimeTypeInfo.LOCAL_DATE_TIME, Types.TIMESTAMP);
m.put(BIG_DEC_TYPE_INFO, Types.DECIMAL);
m.put(BYTE_PRIMITIVE_ARRAY_TYPE_INFO, Types.BINARY);
TYPE_MAPPING = Collections.unmodifiableMap(m);
HashMap<Integer, String> names = new HashMap<>();
names.put(Types.VARCHAR, "VARCHAR");
names.put(Types.BOOLEAN, "BOOLEAN");
names.put(Types.TINYINT, "TINYINT");
names.put(Types.SMALLINT, "SMALLINT");
names.put(Types.INTEGER, "INTEGER");
names.put(Types.BIGINT, "BIGINT");
names.put(Types.FLOAT, "FLOAT");
names.put(Types.DOUBLE, "DOUBLE");
names.put(Types.CHAR, "CHAR");
names.put(Types.DATE, "DATE");
names.put(Types.TIME, "TIME");
names.put(Types.TIMESTAMP, "TIMESTAMP");
names.put(Types.DECIMAL, "DECIMAL");
names.put(Types.BINARY, "BINARY");
SQL_TYPE_NAMES = Collections.unmodifiableMap(names);
}
private JdbcTypeUtil() {}
public static int typeInformationToSqlType(TypeInformation<?> type) {
if (TYPE_MAPPING.containsKey(type)) {
return TYPE_MAPPING.get(type);
} else if (type instanceof ObjectArrayTypeInfo || type instanceof PrimitiveArrayTypeInfo) {
return Types.ARRAY;
} else {
throw new IllegalArgumentException("Unsupported type: " + type);
}
}
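// For example: BasicTypeInfo.INT_TYPE_INFO maps to java.sql.Types.INTEGER, while object and primitive array type infos fall back to java.sql.Types.ARRAY.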
public static String getTypeName(int type) {
return SQL_TYPE_NAMES.get(type);
}
public static String getTypeName(TypeInformation<?> type) {
return SQL_TYPE_NAMES.get(typeInformationToSqlType(type));
}
/**
* The original table schema may contain generated columns which shouldn't be produced/consumed
* by TableSource/TableSink. Also, the original TIMESTAMP/DATE/TIME types use
* LocalDateTime/LocalDate/LocalTime as conversion classes, whereas the JDBC connector uses
* Timestamp/Date/Time, so we bridge them to the expected conversion classes.
*/
public static TableSchema normalizeTableSchema(TableSchema schema) {
TableSchema.Builder physicalSchemaBuilder = TableSchema.builder();
schema.getTableColumns()
.forEach(
c -> {
if (c.isPhysical()) {
final DataType type =
DataTypeUtils.transform(
c.getType(), TypeTransformations.timeToSqlTypes());
physicalSchemaBuilder.field(c.getName(), type);
}
});
return physicalSchemaBuilder.build();
}
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connector.phoenix.utils;
import org.apache.flink.types.Row;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
/** Utils for jdbc connectors. */
public class JdbcUtils {
private static final Logger LOG = LoggerFactory.getLogger(JdbcUtils.class);
/**
* Adds a record to the prepared statement.
*
* <p>When this method is called, the output format is guaranteed to be opened.
*
* <p>WARNING: this may fail when no column types specified (because a best effort approach is
* attempted in order to insert a null value but it's not guaranteed that the JDBC driver
* handles PreparedStatement.setObject(pos, null))
*
* @param upload The prepared statement.
* @param typesArray The jdbc types of the row.
* @param row The records to add to the output.
* @see PreparedStatement
*/
public static void setRecordToStatement(PreparedStatement upload, int[] typesArray, Row row)
throws SQLException {
if (typesArray != null && typesArray.length > 0 && typesArray.length != row.getArity()) {
LOG.warn(
"Column SQL types array doesn't match arity of passed Row! Check the passed array...");
}
if (typesArray == null) {
// no types provided
for (int index = 0; index < row.getArity(); index++) {
LOG.warn(
"Unknown column type for column {}. Best effort approach to set its value: {}.",
index + 1,
row.getField(index));
upload.setObject(index + 1, row.getField(index));
}
} else {
// types provided
for (int i = 0; i < row.getArity(); i++) {
setField(upload, typesArray[i], row.getField(i), i);
}
}
}
public static void setField(PreparedStatement upload, int type, Object field, int index)
throws SQLException {
if (field == null) {
upload.setNull(index + 1, type);
} else {
try {
// casting values as suggested by
// http://docs.oracle.com/javase/1.5.0/docs/guide/jdbc/getstart/mapping.html
switch (type) {
case java.sql.Types.NULL:
upload.setNull(index + 1, type);
break;
case java.sql.Types.BOOLEAN:
case java.sql.Types.BIT:
upload.setBoolean(index + 1, (boolean) field);
break;
case java.sql.Types.CHAR:
case java.sql.Types.NCHAR:
case java.sql.Types.VARCHAR:
case java.sql.Types.LONGVARCHAR:
case java.sql.Types.LONGNVARCHAR:
upload.setString(index + 1, (String) field);
break;
case java.sql.Types.TINYINT:
upload.setByte(index + 1, (byte) field);
break;
case java.sql.Types.SMALLINT:
upload.setShort(index + 1, (short) field);
break;
case java.sql.Types.INTEGER:
upload.setInt(index + 1, (int) field);
break;
case java.sql.Types.BIGINT:
upload.setLong(index + 1, (long) field);
break;
case java.sql.Types.REAL:
upload.setFloat(index + 1, (float) field);
break;
case java.sql.Types.FLOAT:
case java.sql.Types.DOUBLE:
upload.setDouble(index + 1, (double) field);
break;
case java.sql.Types.DECIMAL:
case java.sql.Types.NUMERIC:
upload.setBigDecimal(index + 1, (java.math.BigDecimal) field);
break;
case java.sql.Types.DATE:
upload.setDate(index + 1, (java.sql.Date) field);
break;
case java.sql.Types.TIME:
upload.setTime(index + 1, (java.sql.Time) field);
break;
case java.sql.Types.TIMESTAMP:
upload.setTimestamp(index + 1, (java.sql.Timestamp) field);
break;
case java.sql.Types.BINARY:
case java.sql.Types.VARBINARY:
case java.sql.Types.LONGVARBINARY:
upload.setBytes(index + 1, (byte[]) field);
break;
default:
upload.setObject(index + 1, field);
LOG.warn(
"Unmanaged sql type ({}) for column {}. Best effort approach to set its value: {}.",
type,
index + 1,
field);
// case java.sql.Types.SQLXML
// case java.sql.Types.ARRAY:
// case java.sql.Types.JAVA_OBJECT:
// case java.sql.Types.BLOB:
// case java.sql.Types.CLOB:
// case java.sql.Types.NCLOB:
// case java.sql.Types.DATALINK:
// case java.sql.Types.DISTINCT:
// case java.sql.Types.OTHER:
// case java.sql.Types.REF:
// case java.sql.Types.ROWID:
// case java.sql.Types.STRUC
}
} catch (ClassCastException e) {
// enrich the exception with detailed information.
String errorMessage =
String.format(
"%s, field index: %s, field value: %s.",
e.getMessage(), index, field);
ClassCastException enrichedException = new ClassCastException(errorMessage);
enrichedException.setStackTrace(e.getStackTrace());
throw enrichedException;
}
}
}
public static Object getFieldFromResultSet(int index, int type, ResultSet set)
throws SQLException {
Object ret;
switch (type) {
case java.sql.Types.NULL:
ret = null;
break;
case java.sql.Types.BOOLEAN:
case java.sql.Types.BIT:
ret = set.getBoolean(index + 1);
break;
case java.sql.Types.CHAR:
case java.sql.Types.NCHAR:
case java.sql.Types.VARCHAR:
case java.sql.Types.LONGVARCHAR:
case java.sql.Types.LONGNVARCHAR:
ret = set.getString(index + 1);
break;
case java.sql.Types.TINYINT:
ret = set.getByte(index + 1);
break;
case java.sql.Types.SMALLINT:
ret = set.getShort(index + 1);
break;
case java.sql.Types.INTEGER:
ret = set.getInt(index + 1);
break;
case java.sql.Types.BIGINT:
ret = set.getLong(index + 1);
break;
case java.sql.Types.REAL:
ret = set.getFloat(index + 1);
break;
case java.sql.Types.FLOAT:
case java.sql.Types.DOUBLE:
ret = set.getDouble(index + 1);
break;
case java.sql.Types.DECIMAL:
case java.sql.Types.NUMERIC:
ret = set.getBigDecimal(index + 1);
break;
case java.sql.Types.DATE:
ret = set.getDate(index + 1);
break;
case java.sql.Types.TIME:
ret = set.getTime(index + 1);
break;
case java.sql.Types.TIMESTAMP:
ret = set.getTimestamp(index + 1);
break;
case java.sql.Types.BINARY:
case java.sql.Types.VARBINARY:
case java.sql.Types.LONGVARBINARY:
ret = set.getBytes(index + 1);
break;
default:
ret = set.getObject(index + 1);
LOG.warn(
"Unmanaged sql type ({}) for column {}. Best effort approach to get its value: {}.",
type,
index + 1,
ret);
break;
// case java.sql.Types.SQLXML
// case java.sql.Types.ARRAY:
// case java.sql.Types.JAVA_OBJECT:
// case java.sql.Types.BLOB:
// case java.sql.Types.CLOB:
// case java.sql.Types.NCLOB:
// case java.sql.Types.DATALINK:
// case java.sql.Types.DISTINCT:
// case java.sql.Types.OTHER:
// case java.sql.Types.REF:
// case java.sql.Types.ROWID:
// case java.sql.Types.STRUC
}
if (set.wasNull()) {
return null;
} else {
return ret;
}
}
public static Row getPrimaryKey(Row row, int[] pkFields) {
Row pkRow = new Row(pkFields.length);
for (int i = 0; i < pkFields.length; i++) {
pkRow.setField(i, row.getField(pkFields[i]));
}
return pkRow;
}
}
package org.apache.flink.connector.phoenix.utils;
import org.apache.flink.annotation.Internal;
import org.apache.flink.connector.phoenix.dialect.JdbcDialect;
import org.apache.flink.connector.phoenix.dialect.JdbcDialects;
import org.apache.flink.table.api.TableSchema;
import org.apache.flink.table.descriptors.ConnectorDescriptorValidator;
import org.apache.flink.table.descriptors.DescriptorProperties;
import org.apache.flink.table.utils.TableSchemaUtils;
import org.apache.flink.util.Preconditions;
import java.util.Optional;
import static org.apache.flink.table.descriptors.Schema.SCHEMA;
/** The validator for JDBC. */
@Internal
public class PhoenixJdbcValidator extends ConnectorDescriptorValidator {
public static final String CONNECTOR_TYPE_VALUE_JDBC = "phoenix";
public static final String PHOENIX_SCHEMA_NAMESPACE_MAPPING_ENABLE = "phoenix.schema.isnamespacemappingenabled";
public static final String PHOENIX_SCHEMA_MAP_SYSTEMTABLE_ENABLE = "phoenix.schema.mapsystemtablestonamespace";
public static final String CONNECTOR_URL = "connector.url";
public static final String CONNECTOR_TABLE = "connector.table";
public static final String CONNECTOR_DRIVER = "connector.driver";
public static final String CONNECTOR_USERNAME = "connector.username";
public static final String CONNECTOR_PASSWORD = "connector.password";
public static final String CONNECTOR_CONNECTION_MAX_RETRY_TIMEOUT =
"connector.connection.max-retry-timeout";
public static final String CONNECTOR_READ_QUERY = "connector.read.query";
public static final String CONNECTOR_READ_PARTITION_COLUMN = "connector.read.partition.column";
public static final String CONNECTOR_READ_PARTITION_LOWER_BOUND =
"connector.read.partition.lower-bound";
public static final String CONNECTOR_READ_PARTITION_UPPER_BOUND =
"connector.read.partition.upper-bound";
public static final String CONNECTOR_READ_PARTITION_NUM = "connector.read.partition.num";
public static final String CONNECTOR_READ_FETCH_SIZE = "connector.read.fetch-size";
public static final String CONNECTOR_LOOKUP_CACHE_MAX_ROWS = "connector.lookup.cache.max-rows";
public static final String CONNECTOR_LOOKUP_CACHE_TTL = "connector.lookup.cache.ttl";
public static final String CONNECTOR_LOOKUP_MAX_RETRIES = "connector.lookup.max-retries";
public static final String CONNECTOR_WRITE_FLUSH_MAX_ROWS = "connector.write.flush.max-rows";
public static final String CONNECTOR_WRITE_FLUSH_INTERVAL = "connector.write.flush.interval";
public static final String CONNECTOR_WRITE_MAX_RETRIES = "connector.write.max-retries";
@Override
public void validate(DescriptorProperties properties) {
super.validate(properties);
validateCommonProperties(properties);
validateReadProperties(properties);
validateLookupProperties(properties);
validateSinkProperties(properties);
}
private void validateCommonProperties(DescriptorProperties properties) {
properties.validateString(CONNECTOR_URL, false, 1);
properties.validateString(CONNECTOR_TABLE, false, 1);
properties.validateString(CONNECTOR_DRIVER, true);
properties.validateString(CONNECTOR_USERNAME, true);
properties.validateString(CONNECTOR_PASSWORD, true);
properties.validateDuration(CONNECTOR_CONNECTION_MAX_RETRY_TIMEOUT, true, 1000);
properties.validateString(PHOENIX_SCHEMA_NAMESPACE_MAPPING_ENABLE, true);
properties.validateString(PHOENIX_SCHEMA_MAP_SYSTEMTABLE_ENABLE, true);
final String url = properties.getString(CONNECTOR_URL);
final Optional<JdbcDialect> dialect = JdbcDialects.get(url);
Preconditions.checkState(dialect.isPresent(), "Cannot handle such jdbc url: " + url);
TableSchema schema = TableSchemaUtils.getPhysicalSchema(properties.getTableSchema(SCHEMA));
dialect.get().validate(schema);
Optional<String> password = properties.getOptionalString(CONNECTOR_PASSWORD);
if (password.isPresent()) {
Preconditions.checkArgument(
properties.getOptionalString(CONNECTOR_USERNAME).isPresent(),
"Database username must be provided when database password is provided");
}
}
private void validateReadProperties(DescriptorProperties properties) {
properties.validateString(CONNECTOR_READ_QUERY, true);
properties.validateString(CONNECTOR_READ_PARTITION_COLUMN, true);
properties.validateLong(CONNECTOR_READ_PARTITION_LOWER_BOUND, true);
properties.validateLong(CONNECTOR_READ_PARTITION_UPPER_BOUND, true);
properties.validateInt(CONNECTOR_READ_PARTITION_NUM, true);
properties.validateInt(CONNECTOR_READ_FETCH_SIZE, true);
Optional<Long> lowerBound =
properties.getOptionalLong(CONNECTOR_READ_PARTITION_LOWER_BOUND);
Optional<Long> upperBound =
properties.getOptionalLong(CONNECTOR_READ_PARTITION_UPPER_BOUND);
if (lowerBound.isPresent() && upperBound.isPresent()) {
Preconditions.checkArgument(
lowerBound.get() <= upperBound.get(),
CONNECTOR_READ_PARTITION_LOWER_BOUND
+ " must not be larger than "
+ CONNECTOR_READ_PARTITION_UPPER_BOUND);
}
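// The partition scan options only make sense together, so require all of them or none.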
checkAllOrNone(
properties,
new String[] {
CONNECTOR_READ_PARTITION_COLUMN,
CONNECTOR_READ_PARTITION_LOWER_BOUND,
CONNECTOR_READ_PARTITION_UPPER_BOUND,
CONNECTOR_READ_PARTITION_NUM
});
}
private void validateLookupProperties(DescriptorProperties properties) {
properties.validateLong(CONNECTOR_LOOKUP_CACHE_MAX_ROWS, true);
properties.validateDuration(CONNECTOR_LOOKUP_CACHE_TTL, true, 1);
properties.validateInt(CONNECTOR_LOOKUP_MAX_RETRIES, true, 0);
checkAllOrNone(
properties,
new String[] {CONNECTOR_LOOKUP_CACHE_MAX_ROWS, CONNECTOR_LOOKUP_CACHE_TTL});
}
private void validateSinkProperties(DescriptorProperties properties) {
properties.validateInt(CONNECTOR_WRITE_FLUSH_MAX_ROWS, true);
properties.validateDuration(CONNECTOR_WRITE_FLUSH_INTERVAL, true, 1);
properties.validateInt(CONNECTOR_WRITE_MAX_RETRIES, true);
}
private void checkAllOrNone(DescriptorProperties properties, String[] propertyNames) {
int presentCount = 0;
for (String name : propertyNames) {
if (properties.getOptionalString(name).isPresent()) {
presentCount++;
}
}
Preconditions.checkArgument(
presentCount == 0 || presentCount == propertyNames.length,
"Either all or none of the following properties should be provided:\n"
+ String.join("\n", propertyNames));
}
}
\ No newline at end of file
...@@ -15,6 +15,7 @@
<module>dlink-connector-jdbc-1.11</module>
<module>dlink-connector-jdbc-1.14</module>
<module>dlink-connector-phoenix-1.13</module>
<module>dlink-connector-phoenix-1.14</module>
</modules>
<artifactId>dlink-connectors</artifactId>
</project>
\ No newline at end of file