Commit 6d32afa0 authored by godkaikai

Extend support to Flink 1.13.3 and 1.14.0

parent baa157da
......@@ -116,35 +116,6 @@ public class CustomTableEnvironmentImpl extends TableEnvironmentImpl {
}
}
public String getStreamGraphString(String statement) {
if(useSqlFragment) {
statement = sqlManager.parseVariable(statement);
if (statement.length() == 0) {
return "This is a sql fragment.";
}
}
if (checkShowFragments(statement)) {
return "'SHOW FRAGMENTS' can't be explained.";
}
List<Operation> operations = super.parser.parse(statement);
if (operations.size() != 1) {
throw new TableException("Unsupported SQL query! explainSql() only accepts a single SQL query.");
} else {
List<ModifyOperation> modifyOperations = new ArrayList<>();
for (int i = 0; i < operations.size(); i++) {
if(operations.get(i) instanceof ModifyOperation){
modifyOperations.add((ModifyOperation)operations.get(i));
}
}
List<Transformation<?>> trans = super.planner.translate(modifyOperations);
if(execEnv instanceof ExecutorBase){
return ExecutorUtils.generateStreamGraph(((ExecutorBase) execEnv).getExecutionEnvironment(), trans).getStreamingPlanAsJSON();
}else{
return "Unsupported SQL query! explainSql() need a single SQL to query.";
}
}
}
public ObjectNode getStreamGraph(String statement) {
if(useSqlFragment) {
statement = sqlManager.parseVariable(statement);
......
......@@ -117,35 +117,6 @@ public class CustomTableEnvironmentImpl extends TableEnvironmentImpl {
}
}
public String getStreamGraphString(String statement) {
if(useSqlFragment) {
statement = sqlManager.parseVariable(statement);
if (statement.length() == 0) {
return "This is a sql fragment.";
}
}
if (checkShowFragments(statement)) {
return "'SHOW FRAGMENTS' can't be explained.";
}
List<Operation> operations = super.parser.parse(statement);
if (operations.size() != 1) {
throw new TableException("Unsupported SQL query! explainSql() only accepts a single SQL query.");
} else {
List<ModifyOperation> modifyOperations = new ArrayList<>();
for (int i = 0; i < operations.size(); i++) {
if(operations.get(i) instanceof ModifyOperation){
modifyOperations.add((ModifyOperation)operations.get(i));
}
}
List<Transformation<?>> trans = super.planner.translate(modifyOperations);
if(execEnv instanceof ExecutorBase){
return ExecutorUtils.generateStreamGraph(((ExecutorBase) execEnv).getExecutionEnvironment(), trans).getStreamingPlanAsJSON();
}else{
return "Unsupported SQL query! explainSql() need a single SQL to query.";
}
}
}
public ObjectNode getStreamGraph(String statement) {
if(useSqlFragment) {
statement = sqlManager.parseVariable(statement);
......
......@@ -14,7 +14,7 @@
<properties>
<java.version>1.8</java.version>
<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
<flink.version>1.13.2</flink.version>
<flink.version>1.13.3</flink.version>
<scala.binary.version>2.11</scala.binary.version>
<maven.compiler.source>1.8</maven.compiler.source>
<maven.compiler.target>1.8</maven.compiler.target>
......
......@@ -111,35 +111,6 @@ public class CustomTableEnvironmentImpl extends TableEnvironmentImpl {
}
}
public String getStreamGraphString(String statement) {
if(useSqlFragment) {
statement = sqlManager.parseVariable(statement);
if (statement.length() == 0) {
return "This is a sql fragment.";
}
}
if (checkShowFragments(statement)) {
return "'SHOW FRAGMENTS' can't be explained.";
}
List<Operation> operations = super.getParser().parse(statement);
if (operations.size() != 1) {
throw new TableException("Unsupported SQL query! explainSql() only accepts a single SQL query.");
} else {
List<ModifyOperation> modifyOperations = new ArrayList<>();
for (int i = 0; i < operations.size(); i++) {
if(operations.get(i) instanceof ModifyOperation){
modifyOperations.add((ModifyOperation)operations.get(i));
}
}
List<Transformation<?>> trans = super.planner.translate(modifyOperations);
if(execEnv instanceof ExecutorBase){
return ExecutorUtils.generateStreamGraph(((ExecutorBase) execEnv).getExecutionEnvironment(), trans).getStreamingPlanAsJSON();
}else{
return "Unsupported SQL query! explainSql() need a single SQL to query.";
}
}
}
public ObjectNode getStreamGraph(String statement) {
if(useSqlFragment) {
statement = sqlManager.parseVariable(statement);
......
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<parent>
<artifactId>dlink-client</artifactId>
<groupId>com.dlink</groupId>
<version>0.3.2-SNAPSHOT</version>
</parent>
<modelVersion>4.0.0</modelVersion>
<artifactId>dlink-client-1.14</artifactId>
<properties>
<java.version>1.8</java.version>
<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
<flink.version>1.14.0</flink.version>
<scala.binary.version>2.11</scala.binary.version>
<maven.compiler.source>1.8</maven.compiler.source>
<maven.compiler.target>1.8</maven.compiler.target>
<junit.version>4.12</junit.version>
</properties>
<dependencies>
<dependency>
<groupId>org.apache.flink</groupId>
<artifactId>flink-table-planner_${scala.binary.version}</artifactId>
<exclusions>
<exclusion>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-api</artifactId>
</exclusion>
</exclusions>
<version>${flink.version}</version>
</dependency>
<dependency>
<groupId>org.apache.flink</groupId>
<artifactId>flink-clients_${scala.binary.version}</artifactId>
<exclusions>
<exclusion>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-api</artifactId>
</exclusion>
</exclusions>
<version>${flink.version}</version>
</dependency>
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-api</artifactId>
</dependency>
<dependency>
<groupId>com.dlink</groupId>
<artifactId>dlink-common</artifactId>
</dependency>
</dependencies>
<build>
<plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-compiler-plugin</artifactId>
<version>3.8.0</version>
<configuration>
<source>1.8</source>
<target>1.8</target>
</configuration>
</plugin>
<!-- Build the fat jar -->
<plugin>
<artifactId>maven-assembly-plugin</artifactId>
<configuration>
<descriptorRefs>
<descriptorRef>jar-with-dependencies</descriptorRef>
</descriptorRefs>
</configuration>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-shade-plugin</artifactId>
<version>3.2.1</version>
<configuration>
<filters>
<filter>
<artifact>*:*</artifact>
<excludes>
<exclude>META-INF/*.SF</exclude>
<exclude>META-INF/*.DSA</exclude>
<exclude>META-INF/*.RSA</exclude>
</excludes>
</filter>
</filters>
</configuration>
<executions>
<execution>
<phase>package</phase>
<goals>
<goal>shade</goal>
</goals>
<configuration>
<transformers>
<transformer implementation="org.apache.maven.plugins.shade.resource.AppendingTransformer">
<resource>reference.conf</resource>
</transformer>
</transformers>
</configuration>
</execution>
</executions>
</plugin>
</plugins>
</build>
</project>
\ No newline at end of file
package com.dlink.executor.custom;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.TableConfig;
import org.apache.flink.table.api.internal.DlinkTableEnvironmentImpl;
import org.apache.flink.table.api.internal.TableEnvironmentImpl;
import org.apache.flink.table.catalog.CatalogManager;
import org.apache.flink.table.catalog.FunctionCatalog;
import org.apache.flink.table.catalog.GenericInMemoryCatalog;
import org.apache.flink.table.delegation.Executor;
import org.apache.flink.table.delegation.ExecutorFactory;
import org.apache.flink.table.delegation.Planner;
import org.apache.flink.table.factories.FactoryUtil;
import org.apache.flink.table.factories.PlannerFactoryUtil;
import org.apache.flink.table.module.ModuleManager;
/**
* Customized TableEnvironmentImpl
*
* @author wenmo
* @since 2021/10/22 10:02
**/
public class CustomTableEnvironmentImpl extends DlinkTableEnvironmentImpl {
protected CustomTableEnvironmentImpl(CatalogManager catalogManager, SqlManager sqlManager, ModuleManager moduleManager, TableConfig tableConfig, Executor executor, FunctionCatalog functionCatalog, Planner planner, boolean isStreamingMode, ClassLoader userClassLoader) {
super(catalogManager, sqlManager, moduleManager, tableConfig, executor, functionCatalog, planner, isStreamingMode, userClassLoader);
}
public static CustomTableEnvironmentImpl create(StreamExecutionEnvironment executionEnvironment) {
return create(executionEnvironment, EnvironmentSettings.newInstance().build());
}
public static CustomTableEnvironmentImpl create(StreamExecutionEnvironment executionEnvironment, EnvironmentSettings settings) {
return create(settings, settings.toConfiguration());
}
public static CustomTableEnvironmentImpl create(Configuration configuration) {
return create(EnvironmentSettings.fromConfiguration(configuration), configuration);
}
public static CustomTableEnvironmentImpl create(EnvironmentSettings settings) {
return create(settings, settings.toConfiguration());
}
private static CustomTableEnvironmentImpl create(
EnvironmentSettings settings, Configuration configuration) {
// temporary solution until FLINK-15635 is fixed
final ClassLoader classLoader = Thread.currentThread().getContextClassLoader();
// use configuration to init table config
final TableConfig tableConfig = new TableConfig();
tableConfig.addConfiguration(configuration);
final ModuleManager moduleManager = new ModuleManager();
final SqlManager sqlManager = new SqlManager();
final CatalogManager catalogManager =
CatalogManager.newBuilder()
.classLoader(classLoader)
.config(tableConfig.getConfiguration())
.defaultCatalog(
settings.getBuiltInCatalogName(),
new GenericInMemoryCatalog(
settings.getBuiltInCatalogName(),
settings.getBuiltInDatabaseName()))
.build();
final FunctionCatalog functionCatalog =
new FunctionCatalog(tableConfig, catalogManager, moduleManager);
final ExecutorFactory executorFactory =
FactoryUtil.discoverFactory(
classLoader, ExecutorFactory.class, settings.getExecutor());
final Executor executor = executorFactory.create(configuration);
final Planner planner =
PlannerFactoryUtil.createPlanner(
settings.getPlanner(),
executor,
tableConfig,
catalogManager,
functionCatalog);
return new CustomTableEnvironmentImpl(
catalogManager,
sqlManager,
moduleManager,
tableConfig,
executor,
functionCatalog,
planner,
settings.isStreamingMode(),
classLoader);
}
}
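For orientation, a minimal usage sketch of the factory above; it relies only on the class shown here and the standard Flink 1.14 EnvironmentSettings builder, and the SHOW FRAGMENTS call is a safe no-data query:
package com.dlink.executor.custom;

import org.apache.flink.table.api.EnvironmentSettings;

public class CustomTableEnvironmentDemo {
    public static void main(String[] args) {
        // create(EnvironmentSettings) is defined above; streaming mode is the common case.
        CustomTableEnvironmentImpl tEnv = CustomTableEnvironmentImpl.create(
                EnvironmentSettings.newInstance().inStreamingMode().build());
        // Fragment handling is inherited from DlinkTableEnvironmentImpl (on by default),
        // so this statement is routed to SqlManager rather than the Flink parser.
        tEnv.executeSql("SHOW FRAGMENTS").print();
    }
}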
package com.dlink.executor.custom;
import org.apache.flink.annotation.Internal;
import org.apache.flink.core.execution.JobClient;
import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.api.ResultKind;
import org.apache.flink.table.api.TableException;
import org.apache.flink.table.api.TableResult;
import org.apache.flink.table.catalog.Column;
import org.apache.flink.table.catalog.ResolvedSchema;
import org.apache.flink.table.types.DataType;
import org.apache.flink.table.utils.PrintUtils;
import org.apache.flink.types.Row;
import org.apache.flink.util.CloseableIterator;
import org.apache.flink.util.Preconditions;
import javax.annotation.Nullable;
import java.io.PrintWriter;
import java.time.ZoneId;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;
import java.util.Optional;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
/**
* Customized TableResultImpl
* @author wenmo
* @since 2021/10/22 10:02
**/
@Internal
public class CustomTableResultImpl implements TableResult {
public static final TableResult TABLE_RESULT_OK;
private final JobClient jobClient;
private final ResolvedSchema resolvedSchema;
private final ResultKind resultKind;
private final CloseableRowIteratorWrapper data;
private final PrintStyle printStyle;
private final ZoneId sessionTimeZone;
private CustomTableResultImpl(@Nullable JobClient jobClient, ResolvedSchema resolvedSchema, ResultKind resultKind, CloseableIterator<Row> data, PrintStyle printStyle, ZoneId sessionTimeZone) {
this.jobClient = jobClient;
this.resolvedSchema = (ResolvedSchema)Preconditions.checkNotNull(resolvedSchema, "resolvedSchema should not be null");
this.resultKind = (ResultKind)Preconditions.checkNotNull(resultKind, "resultKind should not be null");
Preconditions.checkNotNull(data, "data should not be null");
this.data = new CloseableRowIteratorWrapper(data);
this.printStyle = (PrintStyle)Preconditions.checkNotNull(printStyle, "printStyle should not be null");
this.sessionTimeZone = (ZoneId)Preconditions.checkNotNull(sessionTimeZone, "sessionTimeZone should not be null");
}
public static TableResult buildTableResult(List<TableSchemaField> fields,List<Row> rows){
Builder builder = builder().resultKind(ResultKind.SUCCESS);
if(fields.size()>0) {
List<String> columnNames = new ArrayList<>();
List<DataType> columnTypes = new ArrayList<>();
for (int i = 0; i < fields.size(); i++) {
columnNames.add(fields.get(i).getName());
columnTypes.add(fields.get(i).getType());
}
builder.schema(ResolvedSchema.physical(columnNames,columnTypes)).data(rows);
}
return builder.build();
}
public Optional<JobClient> getJobClient() {
return Optional.ofNullable(this.jobClient);
}
public void await() throws InterruptedException, ExecutionException {
try {
this.awaitInternal(-1L, TimeUnit.MILLISECONDS);
} catch (TimeoutException e) {
// unreachable: a negative timeout makes awaitInternal wait indefinitely
}
}
public void await(long timeout, TimeUnit unit) throws InterruptedException, ExecutionException, TimeoutException {
this.awaitInternal(timeout, unit);
}
private void awaitInternal(long timeout, TimeUnit unit) throws InterruptedException, ExecutionException, TimeoutException {
if (this.jobClient != null) {
ExecutorService executor = Executors.newFixedThreadPool(1, (r) -> {
return new Thread(r, "TableResult-await-thread");
});
try {
CompletableFuture<Void> future = CompletableFuture.runAsync(() -> {
while(!this.data.isFirstRowReady()) {
try {
Thread.sleep(100L);
} catch (InterruptedException var2) {
throw new TableException("Thread is interrupted");
}
}
}, executor);
if (timeout >= 0L) {
future.get(timeout, unit);
} else {
future.get();
}
} finally {
executor.shutdown();
}
}
}
public ResolvedSchema getResolvedSchema() {
return this.resolvedSchema;
}
public ResultKind getResultKind() {
return this.resultKind;
}
public CloseableIterator<Row> collect() {
return this.data;
}
public void print() {
Iterator<Row> it = this.collect();
if (this.printStyle instanceof TableauStyle) {
int maxColumnWidth = ((TableauStyle)this.printStyle).getMaxColumnWidth();
String nullColumn = ((TableauStyle)this.printStyle).getNullColumn();
boolean deriveColumnWidthByType = ((TableauStyle)this.printStyle).isDeriveColumnWidthByType();
boolean printRowKind = ((TableauStyle)this.printStyle).isPrintRowKind();
PrintUtils.printAsTableauForm(this.getResolvedSchema(), it, new PrintWriter(System.out), maxColumnWidth, nullColumn, deriveColumnWidthByType, printRowKind, this.sessionTimeZone);
} else {
if (!(this.printStyle instanceof RawContentStyle)) {
throw new TableException("Unsupported print style: " + this.printStyle);
}
while(it.hasNext()) {
System.out.println(String.join(",", PrintUtils.rowToString((Row)it.next(), this.getResolvedSchema(), this.sessionTimeZone)));
}
}
}
public static Builder builder() {
return new Builder();
}
static {
TABLE_RESULT_OK = builder().resultKind(ResultKind.SUCCESS).schema(ResolvedSchema.of(new Column[]{Column.physical("result", DataTypes.STRING())})).data(Collections.singletonList(Row.of(new Object[]{"OK"}))).build();
}
private static final class CloseableRowIteratorWrapper implements CloseableIterator<Row> {
private final CloseableIterator<Row> iterator;
private boolean isFirstRowReady;
private CloseableRowIteratorWrapper(CloseableIterator<Row> iterator) {
this.isFirstRowReady = false;
this.iterator = iterator;
}
public void close() throws Exception {
this.iterator.close();
}
public boolean hasNext() {
boolean hasNext = this.iterator.hasNext();
this.isFirstRowReady = this.isFirstRowReady || hasNext;
return hasNext;
}
public Row next() {
Row next = (Row)this.iterator.next();
this.isFirstRowReady = true;
return next;
}
public boolean isFirstRowReady() {
return this.isFirstRowReady || this.hasNext();
}
}
private static final class RawContentStyle implements PrintStyle {
private RawContentStyle() {
}
}
private static final class TableauStyle implements PrintStyle {
private final boolean deriveColumnWidthByType;
private final int maxColumnWidth;
private final String nullColumn;
private final boolean printRowKind;
private TableauStyle(int maxColumnWidth, String nullColumn, boolean deriveColumnWidthByType, boolean printRowKind) {
this.deriveColumnWidthByType = deriveColumnWidthByType;
this.maxColumnWidth = maxColumnWidth;
this.nullColumn = nullColumn;
this.printRowKind = printRowKind;
}
public boolean isDeriveColumnWidthByType() {
return this.deriveColumnWidthByType;
}
int getMaxColumnWidth() {
return this.maxColumnWidth;
}
String getNullColumn() {
return this.nullColumn;
}
public boolean isPrintRowKind() {
return this.printRowKind;
}
}
public interface PrintStyle {
static PrintStyle tableau(int maxColumnWidth, String nullColumn, boolean deriveColumnWidthByType, boolean printRowKind) {
Preconditions.checkArgument(maxColumnWidth > 0, "maxColumnWidth should be greater than 0");
Preconditions.checkNotNull(nullColumn, "nullColumn should not be null");
return new TableauStyle(maxColumnWidth, nullColumn, deriveColumnWidthByType, printRowKind);
}
static PrintStyle rawContent() {
return new RawContentStyle();
}
}
public static class Builder {
private JobClient jobClient;
private ResolvedSchema resolvedSchema;
private ResultKind resultKind;
private CloseableIterator<Row> data;
private PrintStyle printStyle;
private ZoneId sessionTimeZone;
private Builder() {
this.jobClient = null;
this.resolvedSchema = null;
this.resultKind = null;
this.data = null;
this.printStyle = PrintStyle.tableau(Integer.MAX_VALUE, "(NULL)", false, false);
this.sessionTimeZone = ZoneId.of("UTC");
}
public Builder jobClient(JobClient jobClient) {
this.jobClient = jobClient;
return this;
}
public Builder schema(ResolvedSchema resolvedSchema) {
Preconditions.checkNotNull(resolvedSchema, "resolvedSchema should not be null");
this.resolvedSchema = resolvedSchema;
return this;
}
public Builder resultKind(ResultKind resultKind) {
Preconditions.checkNotNull(resultKind, "resultKind should not be null");
this.resultKind = resultKind;
return this;
}
public Builder data(CloseableIterator<Row> rowIterator) {
Preconditions.checkNotNull(rowIterator, "rowIterator should not be null");
this.data = rowIterator;
return this;
}
public Builder data(List<Row> rowList) {
Preconditions.checkNotNull(rowList, "listRows should not be null");
this.data = CloseableIterator.adapterForIterator(rowList.iterator());
return this;
}
public Builder setPrintStyle(PrintStyle printStyle) {
Preconditions.checkNotNull(printStyle, "printStyle should not be null");
this.printStyle = printStyle;
return this;
}
public Builder setSessionTimeZone(ZoneId sessionTimeZone) {
Preconditions.checkNotNull(sessionTimeZone, "sessionTimeZone should not be null");
this.sessionTimeZone = sessionTimeZone;
return this;
}
public TableResult build() {
return new CustomTableResultImpl(this.jobClient, this.resolvedSchema, this.resultKind, this.data, this.printStyle, this.sessionTimeZone);
}
}
}
\ No newline at end of file
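A hedged sketch of buildTableResult in use; the column name and row values below are illustrative, not part of the commit:
package com.dlink.executor.custom;

import java.util.Arrays;

import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.api.TableResult;
import org.apache.flink.types.Row;

public class TableResultDemo {
    public static void main(String[] args) {
        // One STRING column named "word" (hypothetical) and two rows.
        TableResult result = CustomTableResultImpl.buildTableResult(
                Arrays.asList(new TableSchemaField("word", DataTypes.STRING())),
                Arrays.asList(Row.of("hello"), Row.of("world")));
        // The Builder defaults to the tableau PrintStyle, so this prints an ASCII table.
        result.print();
    }
}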
package com.dlink.executor.custom;
import org.apache.flink.annotation.Internal;
import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.api.ExpressionParserException;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.TableResult;
import org.apache.flink.table.api.internal.DlinkTableEnvironmentImpl;
import org.apache.flink.table.catalog.exceptions.CatalogException;
import org.apache.flink.types.Row;
import org.apache.flink.util.StringUtils;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import static java.lang.String.format;
import static org.apache.flink.util.Preconditions.checkArgument;
import static org.apache.flink.util.Preconditions.checkNotNull;
/**
* Flink Sql Fragment Manager
* @author wenmo
* @since 2021/10/22 10:02
**/
@Internal
public final class SqlManager {
private Map<String, String> sqlFragments;
static final String SHOW_FRAGMENTS = "SHOW FRAGMENTS";
public SqlManager() {
sqlFragments = new HashMap<>();
}
/**
* Get names of sql fragments loaded.
*
* @return a list of names of sql fragments loaded
*/
public List<String> listSqlFragments() {
return new ArrayList<>(sqlFragments.keySet());
}
/**
* Registers a fragment of sql under the given name. The sql fragment name must be unique.
*
* @param sqlFragmentName name under which to register the given sql fragment
* @param sqlFragment a fragment of sql to register
* @throws CatalogException if a sql fragment is already registered under the given name.
* At the moment a CatalogException is thrown rather than a dedicated SqlException.
*/
public void registerSqlFragment(String sqlFragmentName, String sqlFragment) {
checkArgument(
!StringUtils.isNullOrWhitespaceOnly(sqlFragmentName),
"sql fragment name cannot be null or empty.");
checkNotNull(sqlFragment, "sql fragment cannot be null");
if (sqlFragments.containsKey(sqlFragmentName)) {
throw new CatalogException(
format("The fragment of sql %s already exists.", sqlFragmentName));
}
sqlFragments.put(sqlFragmentName, sqlFragment);
}
/**
* Unregisters the sql fragment registered under the given name. Unless ignoreIfNotExists is true, the sql fragment must exist.
*
* @param sqlFragmentName name under which to unregister the given sql fragment.
* @param ignoreIfNotExists if false, an exception is thrown when the sql fragment to be
* removed does not exist.
* @throws CatalogException if the sql fragment does not exist and ignoreIfNotExists is false.
* At the moment a CatalogException is thrown rather than a dedicated SqlException.
*/
public void unregisterSqlFragment(String sqlFragmentName, boolean ignoreIfNotExists) {
checkArgument(
!StringUtils.isNullOrWhitespaceOnly(sqlFragmentName),
"sql fragmentName name cannot be null or empty.");
if (sqlFragments.containsKey(sqlFragmentName)) {
sqlFragments.remove(sqlFragmentName);
} else if (!ignoreIfNotExists) {
throw new CatalogException(
format("The fragment of sql %s does not exist.", sqlFragmentName));
}
}
/**
* Get the sql fragment registered under the given name. The sql fragment must exist.
*
* @param sqlFragmentName name of the sql fragment to look up.
* @throws CatalogException if no sql fragment is registered under the given name.
* At the moment a CatalogException is thrown rather than a dedicated SqlException.
*/
public String getSqlFragment(String sqlFragmentName) {
checkArgument(
!StringUtils.isNullOrWhitespaceOnly(sqlFragmentName),
"sql fragmentName name cannot be null or empty.");
if (sqlFragments.containsKey(sqlFragmentName)) {
return sqlFragments.get(sqlFragmentName);
} else {
throw new CatalogException(
format("The fragment of sql %s does not exist.", sqlFragmentName));
}
}
/**
* Get all registered sql fragments as a map from fragment name to fragment text.
*/
public Map<String, String> getSqlFragment() {
return sqlFragments;
}
public TableResult getSqlFragments() {
List<Row> rows = new ArrayList<>();
for (String key : sqlFragments.keySet()) {
rows.add(Row.of(key));
}
return CustomTableResultImpl.buildTableResult(new ArrayList<>(Arrays.asList(new TableSchemaField("sql fragment name", DataTypes.STRING()))), rows);
}
public Iterator<Map.Entry<String, String>> getSqlFragmentsIterator() {
return sqlFragments.entrySet().iterator();
}
public Table getSqlFragmentsTable(DlinkTableEnvironmentImpl environment) {
List<String> keys = new ArrayList<>();
for (String key : sqlFragments.keySet()) {
keys.add(key);
}
return environment.fromValues(keys);
}
public boolean checkShowFragments(String sql){
return SHOW_FRAGMENTS.equals(sql.trim().toUpperCase());
}
/**
* Parse the given sql, registering every ":=" fragment definition and substituting
* "${...}" references with their registered fragment text.
*
* @param statement the sql to be parsed.
* @throws ExpressionParserException if a variable definition in the given sql is illegal.
*/
public String parseVariable(String statement) {
if (statement == null || "".equals(statement)) {
return statement;
}
String[] strs = statement.split(";");
StringBuilder sb = new StringBuilder();
for (int i = 0; i < strs.length; i++) {
String str = strs[i].trim();
if (str.length() == 0) {
continue;
}
if (str.contains(":=")) {
String[] strs2 = str.split(":=");
if (strs2.length >= 2) {
if (strs2[0].length() == 0) {
throw new ExpressionParserException("Illegal variable name.");
}
String valueString = str.substring(str.indexOf(":=") + 2);
this.registerSqlFragment(strs2[0], replaceVariable(valueString));
} else {
throw new ExpressionParserException("Illegal variable definition.");
}
} else {
sb.append(replaceVariable(str));
}
}
return sb.toString();
}
/**
* Replace "${...}" variable references in the given sql with their registered fragment text.
*
* @param statement the sql in which variables will be replaced.
*/
private String replaceVariable(String statement) {
String pattern = "\\$\\{(.+?)\\}";
Pattern p = Pattern.compile(pattern);
Matcher m = p.matcher(statement);
StringBuffer sb = new StringBuffer();
while (m.find()) {
String key = m.group(1);
String value = this.getSqlFragment(key);
m.appendReplacement(sb, value == null ? "" : value);
}
m.appendTail(sb);
return sb.toString();
}
}
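To make the ":=" definition and "${...}" substitution mechanics concrete, a small sketch; the fragment and table names are made up:
package com.dlink.executor.custom;

public class SqlManagerDemo {
    public static void main(String[] args) {
        SqlManager sqlManager = new SqlManager();
        // "tb:=student" registers the fragment "tb" -> "student";
        // the following statement then has "${tb}" replaced by the fragment text.
        String parsed = sqlManager.parseVariable("tb:=student;select * from ${tb}");
        System.out.println(parsed); // prints: select * from student
    }
}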
package com.dlink.executor.custom;
import org.apache.flink.table.types.DataType;
/**
* @author wenmo
* @since 2021/10/22 10:02
**/
public class TableSchemaField {
private String name;
private DataType type;
public TableSchemaField(String name, DataType type) {
this.name = name;
this.type = type;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public DataType getType() {
return type;
}
public void setType(DataType type) {
this.type = type;
}
}
package com.dlink.utils;
import org.apache.flink.table.catalog.CatalogManager;
import org.apache.flink.table.catalog.ObjectIdentifier;
import java.util.ArrayList;
import java.util.List;
import java.util.Optional;
/**
* FlinkUtil
*
* @author wenmo
* @since 2021/10/22 10:02
*/
public class FlinkUtil {
public static List<String> getFieldNamesFromCatalogManager(CatalogManager catalogManager, String catalog, String database, String table){
Optional<CatalogManager.TableLookupResult> tableOpt = catalogManager.getTable(
ObjectIdentifier.of(catalog, database, table)
);
if (tableOpt.isPresent()) {
return tableOpt.get().getResolvedSchema().getColumnNames();
}else{
return new ArrayList<String>();
}
}
}
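A brief usage sketch; it assumes tEnv is a TableEnvironmentImpl (whose getCatalogManager() accessor is public in Flink 1.14) and that the fully qualified table below is already registered. All names are illustrative:
// Resolve the column names of default_catalog.default_database.student (hypothetical table).
List<String> columns = FlinkUtil.getFieldNamesFromCatalogManager(
        tEnv.getCatalogManager(), "default_catalog", "default_database", "student");
columns.forEach(System.out::println);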
package org.apache.flink.table.api.internal;
import com.dlink.executor.custom.CustomTableResultImpl;
import com.dlink.executor.custom.SqlManager;
import com.dlink.result.SqlExplainResult;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.node.ObjectNode;
import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.api.dag.Transformation;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.streaming.api.graph.JSONGenerator;
import org.apache.flink.streaming.api.graph.StreamGraph;
import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.ExplainDetail;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.TableConfig;
import org.apache.flink.table.api.TableException;
import org.apache.flink.table.api.TableResult;
import org.apache.flink.table.catalog.CatalogManager;
import org.apache.flink.table.catalog.FunctionCatalog;
import org.apache.flink.table.catalog.GenericInMemoryCatalog;
import org.apache.flink.table.delegation.Executor;
import org.apache.flink.table.delegation.ExecutorFactory;
import org.apache.flink.table.delegation.Planner;
import org.apache.flink.table.factories.FactoryUtil;
import org.apache.flink.table.factories.PlannerFactoryUtil;
import org.apache.flink.table.functions.AggregateFunction;
import org.apache.flink.table.functions.TableAggregateFunction;
import org.apache.flink.table.functions.TableFunction;
import org.apache.flink.table.functions.UserDefinedFunctionHelper;
import org.apache.flink.table.module.ModuleManager;
import org.apache.flink.table.operations.ExplainOperation;
import org.apache.flink.table.operations.ModifyOperation;
import org.apache.flink.table.operations.Operation;
import org.apache.flink.table.operations.QueryOperation;
import org.apache.flink.table.planner.delegation.DefaultExecutor;
import java.util.ArrayList;
import java.util.List;
/**
* Customized TableEnvironmentImpl
*
* @author wenmo
* @since 2021/10/22 10:02
**/
public class DlinkTableEnvironmentImpl extends TableEnvironmentImpl {
private SqlManager sqlManager;
private boolean useSqlFragment = true;
protected DlinkTableEnvironmentImpl(CatalogManager catalogManager, SqlManager sqlManager, ModuleManager moduleManager, TableConfig tableConfig, Executor executor, FunctionCatalog functionCatalog, Planner planner, boolean isStreamingMode, ClassLoader userClassLoader) {
super(catalogManager, moduleManager, tableConfig, executor, functionCatalog, planner, isStreamingMode, userClassLoader);
this.sqlManager = sqlManager;
}
public static TableEnvironmentImpl create(Configuration configuration) {
return create(EnvironmentSettings.fromConfiguration(configuration), configuration);
}
public static TableEnvironmentImpl create(EnvironmentSettings settings) {
return create(settings, settings.toConfiguration());
}
private static TableEnvironmentImpl create(
EnvironmentSettings settings, Configuration configuration) {
// temporary solution until FLINK-15635 is fixed
final ClassLoader classLoader = Thread.currentThread().getContextClassLoader();
// use configuration to init table config
final TableConfig tableConfig = new TableConfig();
tableConfig.addConfiguration(configuration);
final ModuleManager moduleManager = new ModuleManager();
final SqlManager sqlManager = new SqlManager();
final CatalogManager catalogManager =
CatalogManager.newBuilder()
.classLoader(classLoader)
.config(tableConfig.getConfiguration())
.defaultCatalog(
settings.getBuiltInCatalogName(),
new GenericInMemoryCatalog(
settings.getBuiltInCatalogName(),
settings.getBuiltInDatabaseName()))
.build();
final FunctionCatalog functionCatalog =
new FunctionCatalog(tableConfig, catalogManager, moduleManager);
final ExecutorFactory executorFactory =
FactoryUtil.discoverFactory(
classLoader, ExecutorFactory.class, settings.getExecutor());
final Executor executor = executorFactory.create(configuration);
final Planner planner =
PlannerFactoryUtil.createPlanner(
settings.getPlanner(),
executor,
tableConfig,
catalogManager,
functionCatalog);
return new DlinkTableEnvironmentImpl(
catalogManager,
sqlManager,
moduleManager,
tableConfig,
executor,
functionCatalog,
planner,
settings.isStreamingMode(),
classLoader);
}
public void useSqlFragment() {
this.useSqlFragment = true;
}
public void unUseSqlFragment() {
this.useSqlFragment = false;
}
@Override
public String explainSql(String statement, ExplainDetail... extraDetails) {
if(useSqlFragment) {
statement = sqlManager.parseVariable(statement);
if (statement.length() == 0) {
return "This is a sql fragment.";
}
}
if (checkShowFragments(statement)) {
return "'SHOW FRAGMENTS' can't be explained.";
} else {
return super.explainSql(statement, extraDetails);
}
}
public ObjectNode getStreamGraph(String statement) {
if(useSqlFragment) {
statement = sqlManager.parseVariable(statement);
if (statement.length() == 0) {
throw new TableException("This is a sql fragment.");
}
}
if (checkShowFragments(statement)) {
throw new TableException("'SHOW FRAGMENTS' can't be explained.");
}
List<Operation> operations = super.getParser().parse(statement);
if (operations.size() != 1) {
throw new TableException("Unsupported SQL query! explainSql() only accepts a single SQL query.");
} else {
List<ModifyOperation> modifyOperations = new ArrayList<>();
for (int i = 0; i < operations.size(); i++) {
if(operations.get(i) instanceof ModifyOperation){
modifyOperations.add((ModifyOperation)operations.get(i));
}
}
List<Transformation<?>> trans = super.planner.translate(modifyOperations);
if(execEnv instanceof DefaultExecutor){
StreamGraph streamGraph = ((DefaultExecutor) execEnv).getExecutionEnvironment().generateStreamGraph(trans);
JSONGenerator jsonGenerator = new JSONGenerator(streamGraph);
String json = jsonGenerator.getJSON();
ObjectMapper mapper = new ObjectMapper();
ObjectNode objectNode = mapper.createObjectNode();
try {
objectNode = (ObjectNode) mapper.readTree(json);
} catch (JsonProcessingException e) {
// fall back to the empty node; returning from finally would swallow exceptions
e.printStackTrace();
}
return objectNode;
} else {
throw new TableException("Unsupported executor! getStreamGraph() requires the execution environment to be a DefaultExecutor.");
}
}
}
public SqlExplainResult explainSqlRecord(String statement, ExplainDetail... extraDetails) {
SqlExplainResult record = new SqlExplainResult();
if(useSqlFragment) {
String originSql = statement;
statement = sqlManager.parseVariable(statement);
if (statement.length() == 0) {
record.setParseTrue(true);
record.setType("Sql Fragment");
record.setExplain(originSql);
record.setExplainTrue(true);
return record;
}
}
List<Operation> operations = getParser().parse(statement);
record.setParseTrue(true);
if (operations.size() != 1) {
throw new TableException(
"Unsupported SQL query! explainSql() only accepts a single SQL query.");
}
List<Operation> operationlist = new ArrayList<>(operations);
for (int i = 0; i < operationlist.size(); i++) {
Operation operation = operationlist.get(i);
if (operation instanceof ModifyOperation) {
record.setType("Modify DML");
} else if (operation instanceof ExplainOperation) {
record.setType("Explain DML");
} else if (operation instanceof QueryOperation) {
record.setType("Query DML");
} else {
record.setExplain(operation.asSummaryString());
operationlist.remove(i);
record.setType("DDL");
i--;
}
}
record.setExplainTrue(true);
if (operationlist.isEmpty()) {
// record.setExplain("DDL statements are not explained.");
return record;
}
record.setExplain(planner.explain(operationlist, extraDetails));
return record;
}
@Override
public String[] getCompletionHints(String statement, int position) {
if(useSqlFragment) {
statement = sqlManager.parseVariable(statement);
if (statement.length() == 0) {
return new String[0];
}
}
return super.getCompletionHints(statement, position);
}
@Override
public Table sqlQuery(String query) {
if(useSqlFragment) {
query = sqlManager.parseVariable(query);
if (query.length() == 0) {
throw new TableException("Unsupported SQL query! The SQL query parsed is null.If it's a sql fragment, and please use executeSql().");
}
if (checkShowFragments(query)) {
return sqlManager.getSqlFragmentsTable(this);
} else {
return super.sqlQuery(query);
}
}else {
return super.sqlQuery(query);
}
}
@Override
public TableResult executeSql(String statement) {
if(useSqlFragment) {
statement = sqlManager.parseVariable(statement);
if (statement.length() == 0) {
return CustomTableResultImpl.TABLE_RESULT_OK;
}
if (checkShowFragments(statement)) {
return sqlManager.getSqlFragments();
} else {
return super.executeSql(statement);
}
}else{
return super.executeSql(statement);
}
}
@Override
public void sqlUpdate(String stmt) {
if(useSqlFragment) {
stmt = sqlManager.parseVariable(stmt);
if (stmt.length() == 0) {
throw new TableException("Unsupported SQL update! The SQL update parsed is null.If it's a sql fragment, and please use executeSql().");
}
}
super.sqlUpdate(stmt);
}
public boolean checkShowFragments(String sql){
return sqlManager.checkShowFragments(sql);
}
public <T> void registerFunction(String name, TableFunction<T> tableFunction) {
TypeInformation<T> typeInfo = UserDefinedFunctionHelper.getReturnTypeOfTableFunction(tableFunction);
this.functionCatalog.registerTempSystemTableFunction(name, tableFunction, typeInfo);
}
public <T, ACC> void registerFunction(String name, AggregateFunction<T, ACC> aggregateFunction) {
TypeInformation<T> typeInfo = UserDefinedFunctionHelper.getReturnTypeOfAggregateFunction(aggregateFunction);
TypeInformation<ACC> accTypeInfo = UserDefinedFunctionHelper.getAccumulatorTypeOfAggregateFunction(aggregateFunction);
this.functionCatalog.registerTempSystemAggregateFunction(name, aggregateFunction, typeInfo, accTypeInfo);
}
public <T, ACC> void registerFunction(String name, TableAggregateFunction<T, ACC> tableAggregateFunction) {
TypeInformation<T> typeInfo = UserDefinedFunctionHelper.getReturnTypeOfAggregateFunction(tableAggregateFunction);
TypeInformation<ACC> accTypeInfo = UserDefinedFunctionHelper.getAccumulatorTypeOfAggregateFunction(tableAggregateFunction);
this.functionCatalog.registerTempSystemAggregateFunction(name, tableAggregateFunction, typeInfo, accTypeInfo);
}
}
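Putting the fragment-aware overrides together, a usage sketch; tEnv stands for any subclass instance such as CustomTableEnvironmentImpl, and the student table is assumed to exist:
tEnv.useSqlFragment();                      // fragment parsing is already on by default
tEnv.executeSql("tb:=student");             // registers the fragment, returns TABLE_RESULT_OK
tEnv.executeSql("SHOW FRAGMENTS").print();  // routed to SqlManager#getSqlFragments()
tEnv.executeSql("select * from ${tb}");     // expands to "select * from student" before execution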
......@@ -16,5 +16,6 @@
<module>dlink-client-1.12</module>
<module>dlink-client-1.13</module>
<module>dlink-client-1.11</module>
<module>dlink-client-1.14</module>
</modules>
</project>
\ No newline at end of file
......@@ -158,10 +158,6 @@ public abstract class Executor {
return stEnvironment.explainSqlRecord(statement,extraDetails);
}
public String getStreamGraphString(String statement){
return stEnvironment.getStreamGraphString(statement);
}
public ObjectNode getStreamGraph(String statement){
return stEnvironment.getStreamGraph(statement);
}
......
......@@ -76,10 +76,6 @@ public class FlinkSqlPlus {
return explainer.explainSqlColumnCA(statement);
}
public String getStreamGraphString(String statement) {
return executor.getStreamGraphString(statement);
}
public ObjectNode getStreamGraph(String statement) {
return executor.getStreamGraph(statement);
}
......
......@@ -332,6 +332,12 @@ export default (): React.ReactNode => {
<li>
<Link>Support the SET syntax for configuring execution environment parameters</Link>
</li>
<li>
<Link>Upgraded Flink 1.13 support to 1.13.3</Link>
</li>
<li>
<Link>Added support for Flink 1.14</Link>
</li>
</ul>
</Paragraph>
</Timeline.Item>
......
......@@ -96,7 +96,7 @@
<artifactId>guava</artifactId>
<version>${guava.version}</version>
</dependency>
<dependency>
<!--<dependency>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-api</artifactId>
<version>${slf4j.version}</version>
......@@ -105,7 +105,7 @@
<groupId>org.slf4j</groupId>
<artifactId>slf4j-simple</artifactId>
<version>${slf4j.version}</version>
</dependency>
</dependency>-->
<dependency>
<groupId>mysql</groupId>
<artifactId>mysql-connector-java</artifactId>
......@@ -163,6 +163,11 @@
<artifactId>dlink-client-1.13</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>com.dlink</groupId>
<artifactId>dlink-client-1.14</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>com.dlink</groupId>
<artifactId>dlink-client-1.11</artifactId>
......