Commit 1001549f authored by wenmo

dlink-app improvements

parent 9f177ae7
package com.dlink.service;
-import com.dlink.common.result.Result;
import com.dlink.db.service.ISuperService;
import com.dlink.job.JobResult;
import com.dlink.model.Task;
-import com.dlink.result.SubmitResult;
/**
* Task service class
@@ -17,8 +15,6 @@ public interface TaskService extends ISuperService<Task> {
JobResult submitByTaskId(Integer id);
// Result submitApplicationByTaskId(Integer id);
Task getTaskInfoById(Integer id);
boolean saveOrUpdateTask(Task task);
......
@@ -36,6 +36,19 @@ public class TaskServiceImpl extends SuperServiceImpl<TaskMapper, Task> implemen
@Autowired
private ClusterConfigurationService clusterConfigurationService;
@Value("${spring.datasource.driver-class-name}")
private String driver;
@Value("${spring.datasource.url}")
private String url;
@Value("${spring.datasource.username}")
private String username;
@Value("${spring.datasource.password}")
private String password;
private String buildParas(Integer id) {
return "--id " + id + " --driver " + driver + " --url " + url + " --username " + username + " --password " + password;
}
@Override
public JobResult submitByTaskId(Integer id) {
Task task = this.getById(id);
@@ -43,15 +56,15 @@ public class TaskServiceImpl extends SuperServiceImpl<TaskMapper, Task> implemen
Statement statement = statementService.getById(id);
Assert.check(statement);
JobConfig config = task.buildSubmitConfig();
-if(!JobManager.useGateway(config.getType())) {
+if (!JobManager.useGateway(config.getType())) {
config.setAddress(clusterService.buildEnvironmentAddress(config.isUseRemote(), task.getClusterId()));
-}else{
+} else {
Map<String, String> gatewayConfig = clusterConfigurationService.getGatewayConfig(task.getClusterConfigurationId());
if("yarn-application".equals(config.getType())||"ya".equals(config.getType())){
if ("yarn-application".equals(config.getType()) || "ya".equals(config.getType())) {
SystemConfiguration systemConfiguration = SystemConfiguration.getInstances();
gatewayConfig.put("userJarPath",systemConfiguration.getSqlSubmitJarPath());
gatewayConfig.put("userJarParas",systemConfiguration.getSqlSubmitJarParas() + config.getTaskId());
gatewayConfig.put("userJarMainAppClass",systemConfiguration.getSqlSubmitJarMainAppClass());
gatewayConfig.put("userJarPath", systemConfiguration.getSqlSubmitJarPath());
gatewayConfig.put("userJarParas", systemConfiguration.getSqlSubmitJarParas() + buildParas(config.getTaskId()));
gatewayConfig.put("userJarMainAppClass", systemConfiguration.getSqlSubmitJarMainAppClass());
}
config.buildGatewayConfig(gatewayConfig);
}
@@ -59,34 +72,14 @@ public class TaskServiceImpl extends SuperServiceImpl<TaskMapper, Task> implemen
return jobManager.executeSql(statement.getStatement());
}
/*@Override
public Result submitApplicationByTaskId(Integer id) {
Task task = this.getById(id);
Assert.check(task);
Statement statement = statementService.getById(id);
Assert.check(statement);
JobConfig config = task.buildSubmitConfig();
GatewayConfig gatewayConfig = new GatewayConfig();
gatewayConfig.getFlinkConfig().setJobName(config.getJobName());
gatewayConfig.setType(GatewayType.YARN_PER_JOB);
ClusterConfig clusterConfig = ClusterConfig.build(
"/opt/src/flink-1.12.2_pj/conf",
"/opt/src/flink-1.12.2_pj/conf",
"/usr/local/hadoop/hadoop-2.7.7/etc/hadoop/yarn-site.xml");
gatewayConfig.setClusterConfig(clusterConfig);
JobManager jobManager = JobManager.build(config);
SubmitResult result = jobManager.submitGraph(statement.getStatement(), gatewayConfig);
return Result.succeed(result,"submit succeeded");
}*/
@Override
public Task getTaskInfoById(Integer id) {
Task task = this.getById(id);
if (task != null) {
Statement statement = statementService.getById(id);
-if(task.getClusterId()!=null) {
+if (task.getClusterId() != null) {
Cluster cluster = clusterService.getById(task.getClusterId());
-if(cluster!=null){
+if (cluster != null) {
task.setClusterName(cluster.getAlias());
}
}
@@ -108,13 +101,13 @@ public class TaskServiceImpl extends SuperServiceImpl<TaskMapper, Task> implemen
statementService.updateById(statement);
}
} else {
-if(task.getCheckPoint()==null){
+if (task.getCheckPoint() == null) {
task.setCheckPoint(0);
}
-if(task.getParallelism()==null){
+if (task.getParallelism() == null) {
task.setParallelism(1);
}
-if(task.getClusterId()==null){
+if (task.getClusterId() == null) {
task.setClusterId(0);
}
this.save(task);
......
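For illustration, a standalone sketch of the argument string that the new buildParas helper assembles from the injected Spring datasource properties. Every value below is made up; it only stands in for the real configuration.

public class BuildParasSketch {
    public static void main(String[] args) {
        String driver = "com.mysql.cj.jdbc.Driver";       // spring.datasource.driver-class-name
        String url = "jdbc:mysql://127.0.0.1:3306/dlink"; // spring.datasource.url
        String username = "dlink";                        // spring.datasource.username
        String password = "dlink";                        // spring.datasource.password
        int id = 1;                                       // task id passed to submitByTaskId
        // Mirrors TaskServiceImpl.buildParas(id):
        String paras = "--id " + id + " --driver " + driver + " --url " + url
                + " --username " + username + " --password " + password;
        System.out.println(paras);
        // --id 1 --driver com.mysql.cj.jdbc.Driver --url jdbc:mysql://127.0.0.1:3306/dlink --username dlink --password dlink
    }
}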
@@ -22,19 +22,19 @@
</properties>
<dependencies>
<!-- Apache Flink dependencies -->
-<dependency>
+<!--<dependency>
<groupId>org.apache.flink</groupId>
<artifactId>flink-core</artifactId>
<version>${flink.version}</version>
<scope>provided</scope>
-</dependency>
+</dependency>-->
<dependency>
<groupId>org.apache.flink</groupId>
<artifactId>flink-java</artifactId>
<version>${flink.version}</version>
<scope>provided</scope>
</dependency>
-<dependency>
+<!--<dependency>
<groupId>org.apache.flink</groupId>
<artifactId>flink-clients_${scala.binary.version}</artifactId>
<version>${flink.version}</version>
@@ -56,8 +56,8 @@
<artifactId>flink-streaming-scala_${scala.binary.version}</artifactId>
<version>${flink.version}</version>
<scope>provided</scope>
-</dependency>
-<dependency>
+</dependency>-->
+<!--<dependency>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-api</artifactId>
<version>1.7.25</version>
@@ -74,7 +74,7 @@
<artifactId>log4j</artifactId>
<version>${log4j.version}</version>
<scope>provided</scope>
-</dependency>
+</dependency>-->
<dependency>
<groupId>mysql</groupId>
@@ -82,6 +82,11 @@
<scope>provided</scope>
<version>8.0.21</version>
</dependency>
<dependency>
<groupId>com.dlink</groupId>
<artifactId>dlink-client-1.13</artifactId>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>com.dlink</groupId>
<artifactId>dlink-executor</artifactId>
......
@@ -8,16 +8,11 @@ import com.dlink.executor.ExecutorSetting;
import com.dlink.interceptor.FlinkInterceptor;
import com.dlink.parser.SqlType;
import com.dlink.trans.Operations;
-import org.apache.flink.table.api.StatementSet;
import java.io.IOException;
import java.sql.SQLException;
import java.time.LocalDateTime;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
+import java.util.*;
/**
* FlinkSQLFactory
@@ -76,7 +71,7 @@ public class FlinkSQLFactory {
public static void submit(Integer id,DBConfig dbConfig){
List<String> statements = FlinkSQLFactory.getStatements(Integer.valueOf(id), dbConfig);
ExecutorSetting executorSetting = ExecutorSetting.build(FlinkSQLFactory.getTaskConfig(Integer.valueOf(id),dbConfig));
-Executor executor = Executor.buildLocalExecutor(executorSetting);
+Executor executor = Executor.buildAppStreamExecutor(executorSetting);
List<StatementParam> ddl = new ArrayList<>();
List<StatementParam> trans = new ArrayList<>();
for (String item : statements) {
@@ -96,19 +91,15 @@
}
if(executorSetting.isUseStatementSet()) {
List<String> inserts = new ArrayList<>();
-StatementSet statementSet = executor.createStatementSet();
for (StatementParam item : trans) {
if(item.getType().equals(SqlType.INSERT)) {
-statementSet.addInsertSql(item.getValue());
inserts.add(item.getValue());
}
}
-if(inserts.size()>0) {
-statementSet.execute();
-}
+executor.submitStatementSet(inserts);
}else{
for (StatementParam item : trans) {
-executor.executeSql(item.getValue());
+executor.submitSql(item.getValue());
break;
}
}
......
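A hedged sketch of the application-side counterpart: dlink-app's entry point (not shown in this commit) would parse the --id/--driver/--url/--username/--password arguments built above and hand the task id, plus a DBConfig, to FlinkSQLFactory.submit. The class name and parsing below are illustrative assumptions, not the project's actual main class.

import java.util.HashMap;
import java.util.Map;

public class AppEntrySketch {
    public static void main(String[] args) {
        // Fold "--key value" pairs into a map: {id=1, driver=..., url=..., ...}
        Map<String, String> params = new HashMap<>();
        for (int i = 0; i + 1 < args.length; i += 2) {
            params.put(args[i].substring(2), args[i + 1]);
        }
        int id = Integer.parseInt(params.get("id"));
        // A DBConfig would be built from driver/url/username/password so that
        // FlinkSQLFactory.getStatements can read the task's SQL from the database:
        // FlinkSQLFactory.submit(id, dbConfig);
        System.out.println("submitting task " + id);
    }
}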
package com.dlink.executor.custom;
import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.TableConfig;
import org.apache.flink.table.api.TableException;
import org.apache.flink.table.api.internal.TableEnvironmentImpl;
import org.apache.flink.table.catalog.CatalogManager;
import org.apache.flink.table.catalog.FunctionCatalog;
import org.apache.flink.table.catalog.GenericInMemoryCatalog;
import org.apache.flink.table.delegation.Executor;
import org.apache.flink.table.delegation.ExecutorFactory;
import org.apache.flink.table.delegation.Planner;
import org.apache.flink.table.delegation.PlannerFactory;
import org.apache.flink.table.factories.ComponentFactoryService;
import org.apache.flink.table.functions.AggregateFunction;
import org.apache.flink.table.functions.TableAggregateFunction;
import org.apache.flink.table.functions.TableFunction;
import org.apache.flink.table.functions.UserDefinedFunctionHelper;
import org.apache.flink.table.module.ModuleManager;
import java.lang.reflect.Method;
import java.util.Map;
/**
* Customized TableEnvironmentImpl
*
* @author wenmo
* @since 2021/6/7 22:06
**/
public class CustomTableEnvironmentImpl extends TableEnvironmentImpl {
protected CustomTableEnvironmentImpl(CatalogManager catalogManager, ModuleManager moduleManager, TableConfig tableConfig, Executor executor, FunctionCatalog functionCatalog, Planner planner, boolean isStreamingMode, ClassLoader userClassLoader) {
super(catalogManager, moduleManager, tableConfig, executor, functionCatalog, planner, isStreamingMode, userClassLoader);
}
public static CustomTableEnvironmentImpl create(StreamExecutionEnvironment executionEnvironment) {
return create(executionEnvironment, EnvironmentSettings.newInstance().build());
}
static CustomTableEnvironmentImpl create(StreamExecutionEnvironment executionEnvironment, EnvironmentSettings settings) {
return create(executionEnvironment, settings, new TableConfig());
}
public static CustomTableEnvironmentImpl create(StreamExecutionEnvironment executionEnvironment, EnvironmentSettings settings, TableConfig tableConfig) {
if (!settings.isStreamingMode()) {
throw new TableException("StreamTableEnvironment can not run in batch mode for now, please use TableEnvironment.");
} else {
ClassLoader classLoader = Thread.currentThread().getContextClassLoader();
ModuleManager moduleManager = new ModuleManager();
CatalogManager catalogManager = CatalogManager.newBuilder().classLoader(classLoader).config(tableConfig.getConfiguration()).defaultCatalog(settings.getBuiltInCatalogName(), new GenericInMemoryCatalog(settings.getBuiltInCatalogName(), settings.getBuiltInDatabaseName())).executionConfig(executionEnvironment.getConfig()).build();
FunctionCatalog functionCatalog = new FunctionCatalog(tableConfig, catalogManager, moduleManager);
Map<String, String> executorProperties = settings.toExecutorProperties();
Executor executor = lookupExecutor(executorProperties, executionEnvironment);
Map<String, String> plannerProperties = settings.toPlannerProperties();
Planner planner = (ComponentFactoryService.find(PlannerFactory.class, plannerProperties)).create(plannerProperties, executor, tableConfig, functionCatalog, catalogManager);
return new CustomTableEnvironmentImpl(catalogManager, moduleManager, tableConfig, executor, functionCatalog, planner, settings.isStreamingMode(), classLoader);
}
}
private static Executor lookupExecutor(Map<String, String> executorProperties, StreamExecutionEnvironment executionEnvironment) {
try {
ExecutorFactory executorFactory = ComponentFactoryService.find(ExecutorFactory.class, executorProperties);
Method createMethod = executorFactory.getClass().getMethod("create", Map.class, StreamExecutionEnvironment.class);
return (Executor) createMethod.invoke(executorFactory, executorProperties, executionEnvironment);
} catch (Exception var4) {
throw new TableException("Could not instantiate the executor. Make sure a planner module is on the classpath", var4);
}
}
public <T> void registerFunction(String name, TableFunction<T> tableFunction) {
TypeInformation<T> typeInfo = UserDefinedFunctionHelper.getReturnTypeOfTableFunction(tableFunction);
this.functionCatalog.registerTempSystemTableFunction(name, tableFunction, typeInfo);
}
public <T, ACC> void registerFunction(String name, AggregateFunction<T, ACC> aggregateFunction) {
TypeInformation<T> typeInfo = UserDefinedFunctionHelper.getReturnTypeOfAggregateFunction(aggregateFunction);
TypeInformation<ACC> accTypeInfo = UserDefinedFunctionHelper.getAccumulatorTypeOfAggregateFunction(aggregateFunction);
this.functionCatalog.registerTempSystemAggregateFunction(name, aggregateFunction, typeInfo, accTypeInfo);
}
public <T, ACC> void registerFunction(String name, TableAggregateFunction<T, ACC> tableAggregateFunction) {
TypeInformation<T> typeInfo = UserDefinedFunctionHelper.getReturnTypeOfAggregateFunction(tableAggregateFunction);
TypeInformation<ACC> accTypeInfo = UserDefinedFunctionHelper.getAccumulatorTypeOfAggregateFunction(tableAggregateFunction);
this.functionCatalog.registerTempSystemAggregateFunction(name, tableAggregateFunction, typeInfo, accTypeInfo);
}
}
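A minimal usage sketch for the customized environment, assuming a streaming pipeline on the Flink 1.13 APIs imported above; the datagen table is only an example and would need the connector on the classpath.

package com.dlink.executor.custom; // placed here for the sketch so no import of the class is needed

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

public class CustomTableEnvSketch {
    public static void main(String[] args) {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        // Build the customized table environment with default (streaming) settings.
        CustomTableEnvironmentImpl tableEnv = CustomTableEnvironmentImpl.create(env);
        // Register an illustrative source table and run a query; an unbounded
        // datagen source streams results until the job is cancelled.
        tableEnv.executeSql("CREATE TABLE src (a STRING) WITH ('connector' = 'datagen')");
        tableEnv.executeSql("SELECT a FROM src").print();
    }
}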
package com.dlink.executor.custom;
import org.apache.flink.annotation.Internal;
import org.apache.flink.core.execution.JobClient;
import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.api.ResultKind;
import org.apache.flink.table.api.TableException;
import org.apache.flink.table.api.TableResult;
import org.apache.flink.table.catalog.Column;
import org.apache.flink.table.catalog.ResolvedSchema;
import org.apache.flink.table.types.DataType;
import org.apache.flink.table.utils.PrintUtils;
import org.apache.flink.types.Row;
import org.apache.flink.util.CloseableIterator;
import org.apache.flink.util.Preconditions;
import javax.annotation.Nullable;
import java.io.PrintWriter;
import java.time.ZoneId;
import java.util.*;
import java.util.concurrent.*;
/**
* Customized TableResultImpl
* @author wenmo
* @since 2021/6/7 22:06
**/
@Internal
public class CustomTableResultImpl implements TableResult {
public static final TableResult TABLE_RESULT_OK =
CustomTableResultImpl.builder()
.resultKind(ResultKind.SUCCESS)
.schema(ResolvedSchema.of(Column.physical("result", DataTypes.STRING())))
.data(Collections.singletonList(Row.of("OK")))
.build();
private final JobClient jobClient;
private final ResolvedSchema resolvedSchema;
private final ResultKind resultKind;
private final CloseableRowIteratorWrapper data;
private final PrintStyle printStyle;
private final ZoneId sessionTimeZone;
private CustomTableResultImpl(
@Nullable JobClient jobClient,
ResolvedSchema resolvedSchema,
ResultKind resultKind,
CloseableIterator<Row> data,
PrintStyle printStyle,
ZoneId sessionTimeZone) {
this.jobClient = jobClient;
this.resolvedSchema =
Preconditions.checkNotNull(resolvedSchema, "resolvedSchema should not be null");
this.resultKind = Preconditions.checkNotNull(resultKind, "resultKind should not be null");
Preconditions.checkNotNull(data, "data should not be null");
this.data = new CloseableRowIteratorWrapper(data);
this.printStyle = Preconditions.checkNotNull(printStyle, "printStyle should not be null");
this.sessionTimeZone =
Preconditions.checkNotNull(sessionTimeZone, "sessionTimeZone should not be null");
}
public static TableResult buildTableResult(List<TableSchemaField> fields, List<Row> rows){
Builder builder = builder().resultKind(ResultKind.SUCCESS);
if(fields.size()>0) {
List<String> columnNames = new ArrayList<>();
List<DataType> columnTypes = new ArrayList<>();
for (int i = 0; i < fields.size(); i++) {
columnNames.add(fields.get(i).getName());
columnTypes.add(fields.get(i).getType());
}
builder.schema(ResolvedSchema.physical(columnNames,columnTypes)).data(rows);
}
return builder.build();
}
@Override
public Optional<JobClient> getJobClient() {
return Optional.ofNullable(jobClient);
}
@Override
public void await() throws InterruptedException, ExecutionException {
try {
awaitInternal(-1, TimeUnit.MILLISECONDS);
} catch (TimeoutException e) {
// do nothing
}
}
@Override
public void await(long timeout, TimeUnit unit)
throws InterruptedException, ExecutionException, TimeoutException {
awaitInternal(timeout, unit);
}
private void awaitInternal(long timeout, TimeUnit unit)
throws InterruptedException, ExecutionException, TimeoutException {
if (jobClient == null) {
return;
}
ExecutorService executor =
Executors.newFixedThreadPool(1, r -> new Thread(r, "TableResult-await-thread"));
try {
CompletableFuture<Void> future =
CompletableFuture.runAsync(
() -> {
while (!data.isFirstRowReady()) {
try {
Thread.sleep(100);
} catch (InterruptedException e) {
throw new TableException("Thread is interrupted");
}
}
},
executor);
if (timeout >= 0) {
future.get(timeout, unit);
} else {
future.get();
}
} finally {
executor.shutdown();
}
}
@Override
public ResolvedSchema getResolvedSchema() {
return resolvedSchema;
}
@Override
public ResultKind getResultKind() {
return resultKind;
}
@Override
public CloseableIterator<Row> collect() {
return data;
}
@Override
public void print() {
Iterator<Row> it = collect();
if (printStyle instanceof TableauStyle) {
int maxColumnWidth = ((TableauStyle) printStyle).getMaxColumnWidth();
String nullColumn = ((TableauStyle) printStyle).getNullColumn();
boolean deriveColumnWidthByType =
((TableauStyle) printStyle).isDeriveColumnWidthByType();
boolean printRowKind = ((TableauStyle) printStyle).isPrintRowKind();
PrintUtils.printAsTableauForm(
getResolvedSchema(),
it,
new PrintWriter(System.out),
maxColumnWidth,
nullColumn,
deriveColumnWidthByType,
printRowKind,
sessionTimeZone);
} else if (printStyle instanceof RawContentStyle) {
while (it.hasNext()) {
System.out.println(
String.join(
",",
PrintUtils.rowToString(
it.next(), getResolvedSchema(), sessionTimeZone)));
}
} else {
throw new TableException("Unsupported print style: " + printStyle);
}
}
public static Builder builder() {
return new Builder();
}
/** Builder for creating a {@link CustomTableResultImpl}. */
public static class Builder {
private JobClient jobClient = null;
private ResolvedSchema resolvedSchema = null;
private ResultKind resultKind = null;
private CloseableIterator<Row> data = null;
private PrintStyle printStyle =
PrintStyle.tableau(Integer.MAX_VALUE, PrintUtils.NULL_COLUMN, false, false);
private ZoneId sessionTimeZone = ZoneId.of("UTC");
private Builder() {}
/**
* Specifies the job client that is associated with the submitted Flink job.
*
* @param jobClient a {@link JobClient} for the submitted Flink job.
*/
public Builder jobClient(JobClient jobClient) {
this.jobClient = jobClient;
return this;
}
/**
* Specifies schema of the execution result.
*
* @param resolvedSchema a {@link ResolvedSchema} for the execution result.
*/
public Builder schema(ResolvedSchema resolvedSchema) {
Preconditions.checkNotNull(resolvedSchema, "resolvedSchema should not be null");
this.resolvedSchema = resolvedSchema;
return this;
}
/**
* Specifies result kind of the execution result.
*
* @param resultKind a {@link ResultKind} for the execution result.
*/
public Builder resultKind(ResultKind resultKind) {
Preconditions.checkNotNull(resultKind, "resultKind should not be null");
this.resultKind = resultKind;
return this;
}
/**
* Specifies a row iterator as the execution result.
*
* @param rowIterator a row iterator as the execution result.
*/
public Builder data(CloseableIterator<Row> rowIterator) {
Preconditions.checkNotNull(rowIterator, "rowIterator should not be null");
this.data = rowIterator;
return this;
}
/**
* Specifies a row list as the execution result.
*
* @param rowList a row list as the execution result.
*/
public Builder data(List<Row> rowList) {
Preconditions.checkNotNull(rowList, "rowList should not be null");
this.data = CloseableIterator.adapterForIterator(rowList.iterator());
return this;
}
/** Specifies print style. Default is {@link TableauStyle} with max integer column width. */
public Builder setPrintStyle(PrintStyle printStyle) {
Preconditions.checkNotNull(printStyle, "printStyle should not be null");
this.printStyle = printStyle;
return this;
}
/** Specifies session time zone. */
public Builder setSessionTimeZone(ZoneId sessionTimeZone) {
Preconditions.checkNotNull(sessionTimeZone, "sessionTimeZone should not be null");
this.sessionTimeZone = sessionTimeZone;
return this;
}
/** Returns a {@link TableResult} instance. */
public TableResult build() {
return new CustomTableResultImpl(
jobClient, resolvedSchema, resultKind, data, printStyle, sessionTimeZone);
}
}
/** Root interface for all print styles. */
public interface PrintStyle {
/**
* Create a tableau print style with the given max column width, null column, change-mode
* indicator, and a flag indicating whether the column width is derived from type (true) or
* content (false); it prints the result schema and content in tableau form.
*/
static PrintStyle tableau(
int maxColumnWidth,
String nullColumn,
boolean deriveColumnWidthByType,
boolean printRowKind) {
Preconditions.checkArgument(
maxColumnWidth > 0, "maxColumnWidth should be greater than 0");
Preconditions.checkNotNull(nullColumn, "nullColumn should not be null");
return new TableauStyle(
maxColumnWidth, nullColumn, deriveColumnWidthByType, printRowKind);
}
/**
* Create a raw content print style, which only prints the result content in raw form; the
* column delimiter is "," and the row delimiter is "\n".
*/
static PrintStyle rawContent() {
return new RawContentStyle();
}
}
/** Prints the result schema and content in tableau form. */
private static final class TableauStyle implements PrintStyle {
/**
* A flag to indicate whether the column width is derived from type (true) or content
* (false).
*/
private final boolean deriveColumnWidthByType;
private final int maxColumnWidth;
private final String nullColumn;
/** A flag to indicate whether to print row kind info. */
private final boolean printRowKind;
private TableauStyle(
int maxColumnWidth,
String nullColumn,
boolean deriveColumnWidthByType,
boolean printRowKind) {
this.deriveColumnWidthByType = deriveColumnWidthByType;
this.maxColumnWidth = maxColumnWidth;
this.nullColumn = nullColumn;
this.printRowKind = printRowKind;
}
public boolean isDeriveColumnWidthByType() {
return deriveColumnWidthByType;
}
int getMaxColumnWidth() {
return maxColumnWidth;
}
String getNullColumn() {
return nullColumn;
}
public boolean isPrintRowKind() {
return printRowKind;
}
}
/**
* only print the result content as raw form. column delimiter is ",", row delimiter is "\n".
*/
private static final class RawContentStyle implements PrintStyle {}
/**
* A {@link CloseableIterator} wrapper class that can return whether the first row is ready.
*
* <p>The first row is ready when the {@link #hasNext} method returns true or the {@link #next()}
* method returns a row. The execution order of {@link TableResult#collect} and {@link
* TableResult#await()} may be arbitrary; this class records whether the first row is ready (or
* has been accessed).
*/
private static final class CloseableRowIteratorWrapper implements CloseableIterator<Row> {
private final CloseableIterator<Row> iterator;
private boolean isFirstRowReady = false;
private CloseableRowIteratorWrapper(CloseableIterator<Row> iterator) {
this.iterator = iterator;
}
@Override
public void close() throws Exception {
iterator.close();
}
@Override
public boolean hasNext() {
boolean hasNext = iterator.hasNext();
isFirstRowReady = isFirstRowReady || hasNext;
return hasNext;
}
@Override
public Row next() {
Row next = iterator.next();
isFirstRowReady = true;
return next;
}
public boolean isFirstRowReady() {
return isFirstRowReady || hasNext();
}
}
}
\ No newline at end of file
package com.dlink.executor.custom;
import org.apache.flink.table.types.DataType;
/**
* @author wenmo
* @since 2021/6/7 22:06
**/
public class TableSchemaField {
private String name;
private DataType type;
public TableSchemaField(String name, DataType type) {
this.name = name;
this.type = type;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public DataType getType() {
return type;
}
public void setType(DataType type) {
this.type = type;
}
}
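Together, TableSchemaField and CustomTableResultImpl.buildTableResult let callers assemble a TableResult from plain lists. A small sketch with made-up field names and data:

package com.dlink.executor.custom; // placed here for the sketch

import java.util.Arrays;
import java.util.List;
import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.api.TableResult;
import org.apache.flink.types.Row;

public class TableResultSketch {
    public static void main(String[] args) {
        List<TableSchemaField> fields = Arrays.asList(
                new TableSchemaField("name", DataTypes.STRING()),
                new TableSchemaField("cnt", DataTypes.BIGINT()));
        List<Row> rows = Arrays.asList(Row.of("a", 1L), Row.of("b", 2L));
        TableResult result = CustomTableResultImpl.buildTableResult(fields, rows);
        result.print(); // tableau-style output of the two rows
    }
}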
@@ -2,8 +2,6 @@ package com.dlink.model;
import com.fasterxml.jackson.databind.JsonNode;
import java.util.Map;
/**
* SystemConfiguration
*
@@ -28,7 +26,7 @@ public class SystemConfiguration {
"sqlSubmitJarParas",
"FlinkSQL提交Jar参数",
ValueType.STRING,
"--id ",
"",
"用于指定Applcation模式提交FlinkSQL的Jar的参数"
);
private Configuration sqlSubmitJarMainAppClass = new Configuration(
......
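The default parameter string is emptied here because buildParas (see TaskServiceImpl above) now supplies --id itself, along with the datasource flags. A before/after sketch of how userJarParas is assembled, with illustrative values:

public class JarParasSketch {
    public static void main(String[] args) {
        int taskId = 1;
        // Before this commit: default "--id " concatenated with the task id.
        String before = "--id " + taskId;               // "--id 1"
        // After: default "" concatenated with buildParas(taskId); values are made up.
        String after = "" + "--id " + taskId
                + " --driver com.mysql.cj.jdbc.Driver --url jdbc:mysql://127.0.0.1:3306/dlink"
                + " --username dlink --password dlink";
        System.out.println(before);
        System.out.println(after);
    }
}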
package com.dlink.executor;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
/**
* AppStreamExecutor
*
* @author wenmo
* @since 2021/11/18
*/
public class AppStreamExecutor extends Executor{
public AppStreamExecutor(ExecutorSetting executorSetting) {
this.executorSetting = executorSetting;
this.environment = StreamExecutionEnvironment.getExecutionEnvironment();
init();
}
}
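A hedged note on why this executor exists: unlike buildLocalExecutor, it calls StreamExecutionEnvironment.getExecutionEnvironment(), which inside a YARN application container resolves to the cluster-provided environment, so dlink-app can reuse the same Executor abstraction. A sketch with a placeholder setting:

package com.dlink.executor; // placed here for the sketch

import java.util.HashMap;

public class AppExecutorSketch {
    public static void main(String[] args) {
        // ExecutorSetting.build(Map) is how FlinkSQLFactory derives the setting;
        // an empty map here is only a placeholder assumption for the sketch.
        ExecutorSetting setting = ExecutorSetting.build(new HashMap<>());
        // Locally this yields a local environment; in application mode it picks up
        // the environment pre-configured by the Flink runtime.
        Executor executor = Executor.buildAppStreamExecutor(setting);
    }
}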
@@ -57,6 +57,10 @@ public abstract class Executor {
return new LocalStreamExecutor(executorSetting);
}
public static Executor buildAppStreamExecutor(ExecutorSetting executorSetting){
return new AppStreamExecutor(executorSetting);
}
public static Executor buildRemoteExecutor(EnvironmentSetting environmentSetting,ExecutorSetting executorSetting){
environmentSetting.setUseRemote(true);
return new RemoteStreamExecutor(environmentSetting,executorSetting);
@@ -207,4 +211,20 @@ public abstract class Executor {
public StatementSet createStatementSet(){
return stEnvironment.createStatementSet();
}
public TableResult executeStatementSet(List<String> statements){
StatementSet statementSet = stEnvironment.createStatementSet();
for (String item : statements) {
statementSet.addInsertSql(item);
}
return statementSet.execute();
}
public void submitSql(String statements){
executeSql(statements);
}
public void submitStatementSet(List<String> statements){
executeStatementSet(statements);
}
}
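Usage sketch for the new submit helpers, with hypothetical table names: submitStatementSet folds all INSERT statements into a single StatementSet (one Flink job graph), while submitSql executes one statement per job. In application mode both run on the cluster environment.

package com.dlink.executor; // placed here for the sketch

import java.util.Arrays;
import java.util.HashMap;
import java.util.List;

public class SubmitSketch {
    public static void main(String[] args) {
        // Placeholder setting, as in the AppStreamExecutor sketch above.
        Executor executor = Executor.buildAppStreamExecutor(ExecutorSetting.build(new HashMap<>()));
        List<String> inserts = Arrays.asList(
                "INSERT INTO sink_a SELECT * FROM src",
                "INSERT INTO sink_b SELECT * FROM src");
        executor.submitStatementSet(inserts); // all inserts in one job graph
        // executor.submitSql("INSERT INTO sink_a SELECT * FROM src"); // single statement
    }
}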
@@ -364,7 +364,7 @@ export default (): React.ReactNode => {
<Link>Update dlink's Flink major version to 1.13.3</Link>
</li>
<li>
-<Link>Add the yarn-application job submission mode</Link>
+<Link>Add the yarn-application SQL job submission mode</Link>
</li>
<li>
<Link>Add the yarn-perjob job submission mode</Link>
@@ -381,6 +381,12 @@ export default (): React.ReactNode => {
<li>
<Link>Make the executor module standalone and optimize its enhancement logic</Link>
</li>
<li>
<Link>Add system configuration management</Link>
</li>
<li>
<Link>Add the yarn-application SQL job submission mode</Link>
</li>
</ul>
</Paragraph>
</Timeline.Item>
......