props) {
- TableInfo clickhouseTableInfo = super.getTableInfo(tableName, fieldsInfo, props);
- clickhouseTableInfo.setType(CURR_TYPE);
- return clickhouseTableInfo;
- }
-
- @Override
- public Class dbTypeConvertToJavaType(String fieldType) {
- return ClickHouseDataType.fromTypeString(fieldType).getJavaClass();
- }
-
-}
\ No newline at end of file
diff --git a/clickhouse/pom.xml b/clickhouse/pom.xml
deleted file mode 100644
index 37589bb36..000000000
--- a/clickhouse/pom.xml
+++ /dev/null
@@ -1,41 +0,0 @@
-
-
-
- flink.sql
- com.dtstack.flink
- 1.0-SNAPSHOT
-
- 4.0.0
-
- sql.clickhouse
- pom
-
-
- clickhouse-side
- clickhouse-sink
-
-
-
-
- 1.0-SNAPSHOT
- 0.1.55
-
-
-
-
- com.dtstack.flink
- sql.core
- ${sql.core.version}
- provided
-
-
-
- ru.yandex.clickhouse
- clickhouse-jdbc
- ${clickhouse.jdbc.version}
-
-
-
-
\ No newline at end of file
diff --git a/console/console-sink/pom.xml b/console/console-sink/pom.xml
deleted file mode 100644
index 2f8ad9ef9..000000000
--- a/console/console-sink/pom.xml
+++ /dev/null
@@ -1,79 +0,0 @@
-
-
-
- sql.console
- com.dtstack.flink
- 1.0-SNAPSHOT
-
- 4.0.0
-
- console-sink
- jar
-
- console-sink
- http://maven.apache.org
-
-
-
- org.apache.maven.plugins
- maven-shade-plugin
- 1.4
-
-
- package
-
- shade
-
-
-
-
-
-
-
-
-
- *:*
-
- META-INF/*.SF
- META-INF/*.DSA
- META-INF/*.RSA
-
-
-
-
-
-
-
-
-
- maven-antrun-plugin
- 1.2
-
-
- copy-resources
-
- package
-
- run
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
\ No newline at end of file
diff --git a/console/console-sink/src/main/java/com/dtstack/flink/sql/sink/console/ConsoleOutputFormat.java b/console/console-sink/src/main/java/com/dtstack/flink/sql/sink/console/ConsoleOutputFormat.java
deleted file mode 100644
index 7658e9979..000000000
--- a/console/console-sink/src/main/java/com/dtstack/flink/sql/sink/console/ConsoleOutputFormat.java
+++ /dev/null
@@ -1,117 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package com.dtstack.flink.sql.sink.console;
-
-import com.dtstack.flink.sql.sink.MetricOutputFormat;
-import com.dtstack.flink.sql.sink.console.table.TablePrintUtil;
-import org.apache.flink.api.common.typeinfo.TypeInformation;
-import org.apache.flink.api.java.tuple.Tuple2;
-import org.apache.flink.configuration.Configuration;
-import org.apache.flink.types.Row;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-
-/**
- * Reason:
- * Date: 2018/12/19
- *
- * @author xuqianjin
- */
-public class ConsoleOutputFormat extends MetricOutputFormat {
-
- private static final Logger LOG = LoggerFactory.getLogger(ConsoleOutputFormat.class);
-
- protected String[] fieldNames;
- TypeInformation>[] fieldTypes;
-
- @Override
- public void configure(Configuration parameters) {
-
- }
-
- @Override
- public void open(int taskNumber, int numTasks) throws IOException {
- initMetric();
- }
-
- @Override
- public void writeRecord(Tuple2 tuple2) throws IOException {
- Tuple2 tupleTrans = tuple2;
- Boolean retract = tupleTrans.getField(0);
- if (!retract) {
- return;
- }
-
- Row record = tupleTrans.getField(1);
- if (record.getArity() != fieldNames.length) {
- return;
- }
-
- List data = new ArrayList<>();
- data.add(fieldNames);
- data.add(record.toString().split(","));
- TablePrintUtil.build(data).print();
-
- outRecords.inc();
- }
-
- @Override
- public void close() throws IOException {
-
- }
-
- private ConsoleOutputFormat() {
- }
-
- public static ConsoleOutputFormatBuilder buildOutputFormat() {
- return new ConsoleOutputFormatBuilder();
- }
-
- public static class ConsoleOutputFormatBuilder {
-
- private final ConsoleOutputFormat format;
-
- protected ConsoleOutputFormatBuilder() {
- this.format = new ConsoleOutputFormat();
- }
-
- public ConsoleOutputFormatBuilder setFieldNames(String[] fieldNames) {
- format.fieldNames = fieldNames;
- return this;
- }
-
- public ConsoleOutputFormatBuilder setFieldTypes(TypeInformation>[] fieldTypes) {
- format.fieldTypes = fieldTypes;
- return this;
- }
-
- /**
- * Finalizes the configuration and checks validity.
- *
- * @return Configured RetractConsoleCOutputFormat
- */
- public ConsoleOutputFormat finish() {
- return format;
- }
- }
-}
diff --git a/console/console-sink/src/main/java/com/dtstack/flink/sql/sink/console/ConsoleSink.java b/console/console-sink/src/main/java/com/dtstack/flink/sql/sink/console/ConsoleSink.java
deleted file mode 100644
index 77a3efea2..000000000
--- a/console/console-sink/src/main/java/com/dtstack/flink/sql/sink/console/ConsoleSink.java
+++ /dev/null
@@ -1,86 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package com.dtstack.flink.sql.sink.console;
-
-import com.dtstack.flink.sql.sink.IStreamSinkGener;
-import com.dtstack.flink.sql.table.TargetTableInfo;
-import org.apache.flink.api.common.typeinfo.TypeInformation;
-import org.apache.flink.api.java.tuple.Tuple2;
-import org.apache.flink.api.java.typeutils.RowTypeInfo;
-import org.apache.flink.api.java.typeutils.TupleTypeInfo;
-import org.apache.flink.streaming.api.datastream.DataStream;
-import org.apache.flink.streaming.api.functions.sink.OutputFormatSinkFunction;
-import org.apache.flink.streaming.api.functions.sink.RichSinkFunction;
-import org.apache.flink.table.sinks.RetractStreamTableSink;
-import org.apache.flink.table.sinks.TableSink;
-import org.apache.flink.types.Row;
-
-/**
- * Reason:
- * Date: 2018/12/19
- *
- * @author xuqianjin
- */
-public class ConsoleSink implements RetractStreamTableSink, IStreamSinkGener {
-
- protected String[] fieldNames;
- TypeInformation>[] fieldTypes;
-
- @Override
- public TableSink> configure(String[] fieldNames, TypeInformation>[] fieldTypes) {
- this.fieldNames = fieldNames;
- this.fieldTypes = fieldTypes;
- return this;
- }
-
- @Override
- public TupleTypeInfo> getOutputType() {
- return new TupleTypeInfo(org.apache.flink.table.api.Types.BOOLEAN(), getRecordType());
- }
-
- @Override
- public TypeInformation getRecordType() {
- return new RowTypeInfo(fieldTypes, fieldNames);
- }
-
- @Override
- public String[] getFieldNames() {
- return fieldNames;
- }
-
- @Override
- public TypeInformation>[] getFieldTypes() {
- return fieldTypes;
- }
-
- @Override
- public void emitDataStream(DataStream> dataStream) {
- ConsoleOutputFormat.ConsoleOutputFormatBuilder builder = ConsoleOutputFormat.buildOutputFormat();
- builder.setFieldNames(this.fieldNames)
- .setFieldTypes(this.fieldTypes);
- ConsoleOutputFormat outputFormat = builder.finish();
- RichSinkFunction richSinkFunction = new OutputFormatSinkFunction(outputFormat);
- dataStream.addSink(richSinkFunction);
- }
-
- @Override
- public ConsoleSink genStreamSink(TargetTableInfo targetTableInfo) {
- return this;
- }
-}
diff --git a/console/console-sink/src/main/java/com/dtstack/flink/sql/sink/console/table/ConsoleSinkParser.java b/console/console-sink/src/main/java/com/dtstack/flink/sql/sink/console/table/ConsoleSinkParser.java
deleted file mode 100644
index e77444bfd..000000000
--- a/console/console-sink/src/main/java/com/dtstack/flink/sql/sink/console/table/ConsoleSinkParser.java
+++ /dev/null
@@ -1,45 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package com.dtstack.flink.sql.sink.console.table;
-
-import com.dtstack.flink.sql.table.AbsTableParser;
-import com.dtstack.flink.sql.table.TableInfo;
-import com.dtstack.flink.sql.util.MathUtil;
-
-import java.util.Map;
-
-import static com.dtstack.flink.sql.table.TableInfo.PARALLELISM_KEY;
-
-/**
- * Reason:
- * Date: 2018/12/19
- *
- * @author xuqianjin
- */
-public class ConsoleSinkParser extends AbsTableParser {
- @Override
- public TableInfo getTableInfo(String tableName, String fieldsInfo, Map props) {
- ConsoleTableInfo consoleTableInfo = new ConsoleTableInfo();
- consoleTableInfo.setName(tableName);
- parseFieldsInfo(fieldsInfo, consoleTableInfo);
-
- consoleTableInfo.setParallelism(MathUtil.getIntegerVal(props.get(PARALLELISM_KEY.toLowerCase())));
- return consoleTableInfo;
- }
-}
diff --git a/console/console-sink/src/main/java/com/dtstack/flink/sql/sink/console/table/ConsoleTableInfo.java b/console/console-sink/src/main/java/com/dtstack/flink/sql/sink/console/table/ConsoleTableInfo.java
deleted file mode 100644
index 4b286c667..000000000
--- a/console/console-sink/src/main/java/com/dtstack/flink/sql/sink/console/table/ConsoleTableInfo.java
+++ /dev/null
@@ -1,46 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package com.dtstack.flink.sql.sink.console.table;
-
-import com.dtstack.flink.sql.table.TargetTableInfo;
-
-/**
- * Reason:
- * Date: 2018/12/19
- *
- * @author xuqianjin
- */
-public class ConsoleTableInfo extends TargetTableInfo {
-
- private static final String CURR_TYPE = "console";
-
- public ConsoleTableInfo() {
- setType(CURR_TYPE);
- }
-
- @Override
- public boolean check() {
- return true;
- }
-
- @Override
- public String getType() {
- return super.getType().toLowerCase();
- }
-}
diff --git a/console/console-sink/src/main/java/com/dtstack/flink/sql/sink/console/table/TablePrintUtil.java b/console/console-sink/src/main/java/com/dtstack/flink/sql/sink/console/table/TablePrintUtil.java
deleted file mode 100644
index 8813da619..000000000
--- a/console/console-sink/src/main/java/com/dtstack/flink/sql/sink/console/table/TablePrintUtil.java
+++ /dev/null
@@ -1,341 +0,0 @@
-package com.dtstack.flink.sql.sink.console.table;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.lang.reflect.InvocationTargetException;
-import java.lang.reflect.Method;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.List;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
-
-/**
- * Reason:
- * Date: 2018/12/19
- *
- * @author xuqianjin
- */
-public class TablePrintUtil {
- private static final Logger LOG = LoggerFactory.getLogger(TablePrintUtil.class);
- public static final int ALIGN_LEFT = 1;//左对齐
- public static final int ALIGN_RIGHT = 2;//右对齐
- public static final int ALIGN_CENTER = 3;//居中对齐
-
- private int align = ALIGN_CENTER;//默认居中对齐
- private boolean equilong = false;//默认不等宽
- private int padding = 1;//左右边距默认为1
- private char h = '-';//默认水平分隔符
- private char v = '|';//默认竖直分隔符
- private char o = '+';//默认交叉分隔符
- private char s = ' ';//默认空白填充符
- private List data;//数据
-
- private TablePrintUtil() {
- }
-
- /**
- * 链式调用入口方法
- *
- * @param data
- * @return
- */
- public static TablePrintUtil build(String[][] data) {
- TablePrintUtil self = new TablePrintUtil();
- self.data = new ArrayList<>(Arrays.asList(data));
- return self;
- }
-
- /**
- * 链式调用入口方法,T可以是String[]、List、任意实体类
- * 由于java泛型不同无法重载,所以这里要写if instanceof进行类型判断
- *
- * @param data
- * @param
- * @return
- */
- public static TablePrintUtil build(List data) {
- TablePrintUtil self = new TablePrintUtil();
- self.data = new ArrayList<>();
- if (data.size() <= 0) throw new RuntimeException("数据源至少得有一行吧");
- Object obj = data.get(0);
-
-
- if (obj instanceof String[]) {
- //如果泛型为String数组,则直接设置
- self.data = (List) data;
- } else if (obj instanceof List) {
- //如果泛型为List,则把list中的item依次转为String[],再设置
- int length = ((List) obj).size();
- for (Object item : data) {
- List col = (List) item;
- if (col.size() != length) throw new RuntimeException("数据源每列长度必须一致");
- self.data.add(col.toArray(new String[length]));
- }
- } else {
- //如果泛型为实体类,则利用反射获取get方法列表,从而推算出属性列表。
- //根据反射得来的属性列表设置表格第一行thead
- List colList = getColList(obj);
- String[] header = new String[colList.size()];
- for (int i = 0; i < colList.size(); i++) {
- header[i] = colList.get(i).colName;
- }
- self.data.add(header);
- //利用反射调用相应get方法获取属性值来设置表格tbody
- for (int i = 0; i < data.size(); i++) {
- String[] item = new String[colList.size()];
- for (int j = 0; j < colList.size(); j++) {
- String value = null;
- try {
- value = obj.getClass().getMethod(colList.get(j).getMethodName).invoke(data.get(i)).toString();
- } catch (IllegalAccessException | InvocationTargetException | NoSuchMethodException e) {
- e.printStackTrace();
- }
- item[j] = value == null ? "null" : value;
- }
- self.data.add(item);
- }
- }
- return self;
- }
-
- private static class Col {
- private String colName;//列名
- private String getMethodName;//get方法名
- }
-
- /**
- * 利用反射获取get方法名和属性名
- *
- * @return
- */
- private static List getColList(Object obj) {
- List colList = new ArrayList<>();
- Method[] methods = obj.getClass().getMethods();
- for (Method m : methods) {
- StringBuilder getMethodName = new StringBuilder(m.getName());
- if (getMethodName.substring(0, 3).equals("get") && !m.getName().equals("getClass")) {
- Col col = new Col();
- col.getMethodName = getMethodName.toString();
- char first = Character.toLowerCase(getMethodName.delete(0, 3).charAt(0));
- getMethodName.delete(0, 1).insert(0, first);
- col.colName = getMethodName.toString();
- colList.add(col);
- }
- }
- return colList;
- }
-
- /**
- * 获取字符串占的字符位数
- *
- * @param str
- * @return
- */
- private int getStringCharLength(String str) {
- Pattern p = Pattern.compile("[\u4e00-\u9fa5]");//利用正则找到中文
- Matcher m = p.matcher(str);
- int count = 0;
- while (m.find()) {
- count++;
- }
- return str.length() + count;
- }
-
- /**
- * 纵向遍历获取数据每列的长度
- *
- * @return
- */
- private int[] getColLengths() {
- int[] result = new int[data.get(0).length];
- for (int x = 0; x < result.length; x++) {
- int max = 0;
- for (int y = 0; y < data.size(); y++) {
- int len = getStringCharLength(data.get(y)[x]);
- if (len > max) {
- max = len;
- }
- }
- result[x] = max;
- }
- if (equilong) {//如果等宽表格
- int max = 0;
- for (int len : result) {
- if (len > max) max = len;
- }
- for (int i = 0; i < result.length; i++) {
- result[i] = max;
- }
- }
- return result;
- }
-
- /**
- * 取得表格字符串
- *
- * @return
- */
- public String getTableString() {
- StringBuilder sb = new StringBuilder();
- int[] colLengths = getColLengths();//获取每列文字宽度
- StringBuilder line = new StringBuilder();//表格横向分隔线
- line.append(o);
- for (int len : colLengths) {
- int allLen = len + padding * 2;//还需要加上边距和分隔符的长度
- for (int i = 0; i < allLen; i++) {
- line.append(h);
- }
- line.append(o);
- }
- sb.append(line).append("\r\n");
- for (int y = 0; y < data.size(); y++) {
- sb.append(v);
- for (int x = 0; x < data.get(y).length; x++) {
- String cell = data.get(y)[x];
- switch (align) {
- case ALIGN_LEFT:
- for (int i = 0; i < padding; i++) {sb.append(s);}
- sb.append(cell);
- for (int i = 0; i < colLengths[x] - getStringCharLength(cell) + padding; i++) {sb.append(s);}
- break;
- case ALIGN_RIGHT:
- for (int i = 0; i < colLengths[x] - getStringCharLength(cell) + padding; i++) {sb.append(s);}
- sb.append(cell);
- for (int i = 0; i < padding; i++) {sb.append(s);}
- break;
- case ALIGN_CENTER:
- int space = colLengths[x] - getStringCharLength(cell);
- int left = space / 2;
- int right = space - left;
- for (int i = 0; i < left + padding; i++) {sb.append(s);}
- sb.append(cell);
- for (int i = 0; i < right + padding; i++) {sb.append(s);}
- break;
- }
- sb.append(v);
- }
- sb.append("\r\n");
- sb.append(line).append("\r\n");
- }
- return sb.toString();
- }
-
- /**
- * 直接打印表格
- */
- public void print() {
- LOG.info("\n"+getTableString());
- System.out.println(getTableString());
- }
-
- //下面是链式调用的set方法
- public TablePrintUtil setAlign(int align) {
- this.align = align;
- return this;
- }
-
- public TablePrintUtil setEquilong(boolean equilong) {
- this.equilong = equilong;
- return this;
- }
-
- public TablePrintUtil setPadding(int padding) {
- this.padding = padding;
- return this;
- }
-
- public TablePrintUtil setH(char h) {
- this.h = h;
- return this;
- }
-
- public TablePrintUtil setV(char v) {
- this.v = v;
- return this;
- }
-
- public TablePrintUtil setO(char o) {
- this.o = o;
- return this;
- }
-
- public TablePrintUtil setS(char s) {
- this.s = s;
- return this;
- }
-
- /**
- * 使用示例
- *
- * @param args
- */
- public static void main(String[] args) {
- List data1 = new ArrayList<>();
- data1.add(new String[]{"用户名", "密码", "姓名"});
- data1.add(new String[]{"xiaoming", "xm123", "小明"});
- data1.add(new String[]{"xiaohong", "xh123", "小红"});
- TablePrintUtil.build(data1).print();
-
- List> data2 = new ArrayList<>();
- data2.add(new ArrayList<>());
- data2.add(new ArrayList<>());
- data2.add(new ArrayList<>());
- data2.get(0).add("用户名");
- data2.get(0).add("密码");
- data2.get(0).add("姓名");
- data2.get(1).add("xiaoming");
- data2.get(1).add("xm123");
- data2.get(1).add("小明");
- data2.get(2).add("xiaohong");
- data2.get(2).add("xh123");
- data2.get(2).add("小红");
- TablePrintUtil.build(data2)
- .setAlign(TablePrintUtil.ALIGN_LEFT)
- .setPadding(5)
- .setEquilong(true)
- .print();
-
-
- class User {
- String username;
- String password;
- String name;
-
- User(String username, String password, String name) {
- this.username = username;
- this.password = password;
- this.name = name;
- }
-
- public String getUsername() {
- return username;
- }
-
- public void setUsername(String username) {
- this.username = username;
- }
-
- public String getPassword() {
- return password;
- }
-
- public void setPassword(String password) {
- this.password = password;
- }
-
- public String getName() {
- return name;
- }
-
- public void setName(String name) {
- this.name = name;
- }
- }
- List data3 = new ArrayList<>();
- data3.add(new User("xiaoming", "xm123", "小明"));
- data3.add(new User("xiaohong", "xh123", "小红"));
- TablePrintUtil.build(data3).setH('=').setV('!').print();
- }
-}
diff --git a/console/console-sink/src/test/java/com/dtstack/flinkx/AppTest.java b/console/console-sink/src/test/java/com/dtstack/flinkx/AppTest.java
deleted file mode 100644
index e03e5451f..000000000
--- a/console/console-sink/src/test/java/com/dtstack/flinkx/AppTest.java
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-
-
-package com.dtstack.flinkx;
-
-import junit.framework.Test;
-import junit.framework.TestCase;
-import junit.framework.TestSuite;
-
-/**
- * Unit test for simple App.
- */
-public class AppTest
- extends TestCase
-{
- /**
- * Create the test case
- *
- * @param testName name of the test case
- */
- public AppTest(String testName )
- {
- super( testName );
- }
-
- /**
- * @return the suite of tests being tested
- */
- public static Test suite()
- {
- return new TestSuite( AppTest.class );
- }
-
- /**
- * Rigourous Test :-)
- */
- public void testApp()
- {
- assertTrue( true );
- }
-}
diff --git a/console/pom.xml b/console/pom.xml
deleted file mode 100644
index 983e1c185..000000000
--- a/console/pom.xml
+++ /dev/null
@@ -1,34 +0,0 @@
-
-
-
- flink.sql
- com.dtstack.flink
- 1.0-SNAPSHOT
-
- 4.0.0
- sql.console
- pom
-
-
- console-sink
-
-
-
-
- junit
- junit
- 3.8.1
- test
-
-
- com.dtstack.flink
- sql.core
- 1.0-SNAPSHOT
- provided
-
-
-
-
-
\ No newline at end of file
diff --git a/core/pom.xml b/core/pom.xml
index c357b4c99..cbcc8eaae 100644
--- a/core/pom.xml
+++ b/core/pom.xml
@@ -17,7 +17,6 @@
UTF-8
core
- 1.16.0
@@ -28,58 +27,21 @@
test
-
- joda-time
- joda-time
- 2.5
-
-
-
- org.apache.flink
- flink-core
- ${flink.version}
-
-
-
- org.apache.flink
- flink-streaming-java_2.11
- ${flink.version}
-
-
- org.apache.flink
- flink-streaming-scala_2.11
- ${flink.version}
-
-
-
-
-
- org.apache.flink
- flink-table-planner_2.11
${flink.version}
org.apache.flink
- flink-table-common
+ flink-core
${flink.version}
-
- org.apache.calcite
- calcite-server
-
- ${calcite.server.version}
-
-
org.apache.flink
- flink-cep-scala_2.11
+ flink-streaming-java_2.11
${flink.version}
@@ -91,7 +53,7 @@
org.apache.flink
- flink-yarn_2.11
+ flink-streaming-scala_2.11
${flink.version}
diff --git a/core/src/main/java/com/dtstack/flink/sql/enums/ClusterMode.java b/core/src/main/java/com/dtstack/flink/sql/ClusterMode.java
similarity index 91%
rename from core/src/main/java/com/dtstack/flink/sql/enums/ClusterMode.java
rename to core/src/main/java/com/dtstack/flink/sql/ClusterMode.java
index 341258a43..024a31854 100644
--- a/core/src/main/java/com/dtstack/flink/sql/enums/ClusterMode.java
+++ b/core/src/main/java/com/dtstack/flink/sql/ClusterMode.java
@@ -17,7 +17,7 @@
* limitations under the License.
*/
-package com.dtstack.flink.sql.enums;
+package com.dtstack.flink.sql;
/**
* Created by sishu.yss on 2018/10/10.
@@ -31,8 +31,4 @@ public enum ClusterMode {
ClusterMode(int type){
this.type = type;
}
-
- public int getType(){
- return this.type;
- }
}
diff --git a/core/src/main/java/com/dtstack/flink/sql/Main.java b/core/src/main/java/com/dtstack/flink/sql/Main.java
index a08df9b05..c662f6881 100644
--- a/core/src/main/java/com/dtstack/flink/sql/Main.java
+++ b/core/src/main/java/com/dtstack/flink/sql/Main.java
@@ -20,15 +20,9 @@
package com.dtstack.flink.sql;
-import com.dtstack.flink.sql.config.CalciteConfig;
-import com.dtstack.flink.sql.classloader.ClassLoaderManager;
-import com.dtstack.flink.sql.constrant.ConfigConstrant;
-import com.dtstack.flink.sql.enums.ClusterMode;
+import com.dtstack.flink.sql.classloader.DtClassLoader;
import com.dtstack.flink.sql.enums.ECacheType;
-import com.dtstack.flink.sql.enums.EPluginLoadMode;
-import com.dtstack.flink.sql.environment.MyLocalStreamEnvironment;
import com.dtstack.flink.sql.exec.FlinkSQLExec;
-import com.dtstack.flink.sql.option.OptionParser;
import com.dtstack.flink.sql.parser.CreateFuncParser;
import com.dtstack.flink.sql.parser.CreateTmpTableParser;
import com.dtstack.flink.sql.parser.InsertSqlParser;
@@ -42,27 +36,31 @@
import com.dtstack.flink.sql.sink.StreamSinkFactory;
import com.dtstack.flink.sql.source.StreamSourceFactory;
import com.dtstack.flink.sql.util.DtStringUtil;
-import com.dtstack.flink.sql.util.PropertiesUtils;
import com.dtstack.flink.sql.watermarker.WaterMarkerAssigner;
import com.dtstack.flink.sql.util.FlinkUtil;
import com.dtstack.flink.sql.util.PluginUtil;
+import org.apache.calcite.config.Lex;
import org.apache.calcite.sql.SqlInsert;
import org.apache.calcite.sql.SqlNode;
+import org.apache.commons.cli.CommandLine;
+import org.apache.commons.cli.CommandLineParser;
+import org.apache.commons.cli.DefaultParser;
+import org.apache.commons.cli.Options;
import org.apache.commons.io.Charsets;
-import org.apache.commons.lang3.StringUtils;
import org.apache.flink.api.common.ExecutionConfig;
import org.apache.flink.api.common.restartstrategy.RestartStrategies;
import org.apache.flink.api.common.time.Time;
import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.api.java.typeutils.RowTypeInfo;
+import org.apache.flink.calcite.shaded.com.google.common.base.Preconditions;
+import org.apache.flink.calcite.shaded.com.google.common.base.Strings;
+import org.apache.flink.calcite.shaded.com.google.common.collect.Lists;
+import org.apache.flink.calcite.shaded.com.google.common.collect.Maps;
+import org.apache.flink.calcite.shaded.com.google.common.collect.Sets;
import org.apache.flink.client.program.ContextEnvironment;
import org.apache.flink.configuration.Configuration;
-import com.google.common.base.Strings;
-import com.google.common.collect.Lists;
-import com.google.common.collect.Maps;
-import com.google.common.collect.Sets;
-import com.fasterxml.jackson.databind.ObjectMapper;
+import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.ObjectMapper;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamContextEnvironment;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
@@ -72,19 +70,22 @@
import org.apache.flink.types.Row;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+
import java.io.File;
+import java.io.IOException;
import java.lang.reflect.Field;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.net.URL;
import java.net.URLClassLoader;
import java.net.URLDecoder;
+import java.util.ArrayList;
+import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.Set;
import java.util.concurrent.TimeUnit;
-import com.dtstack.flink.sql.option.Options;
/**
* Date: 2018/6/26
@@ -100,19 +101,44 @@ public class Main {
private static final Logger LOG = LoggerFactory.getLogger(Main.class);
+ private static final int failureRate = 3;
+
+ private static final int failureInterval = 6; //min
+
+ private static final int delayInterval = 10; //sec
+
+ private static org.apache.calcite.sql.parser.SqlParser.Config config = org.apache.calcite.sql.parser.SqlParser
+ .configBuilder()
+ .setLex(Lex.MYSQL)
+ .build();
public static void main(String[] args) throws Exception {
- OptionParser optionParser = new OptionParser(args);
- Options options = optionParser.getOptions();
- String sql = options.getSql();
- String name = options.getName();
- String addJarListStr = options.getAddjar();
- String localSqlPluginPath = options.getLocalSqlPluginPath();
- String remoteSqlPluginPath = options.getRemoteSqlPluginPath();
- String pluginLoadMode = options.getPluginLoadMode();
- String deployMode = options.getMode();
- String confProp = options.getConfProp();
+ Options options = new Options();
+ options.addOption("sql", true, "sql config");
+ options.addOption("name", true, "job name");
+ options.addOption("addjar", true, "add jar");
+ options.addOption("localSqlPluginPath", true, "local sql plugin path");
+ options.addOption("remoteSqlPluginPath", true, "remote sql plugin path");
+ options.addOption("confProp", true, "env properties");
+ options.addOption("mode", true, "deploy mode");
+
+ options.addOption("savePointPath", true, "Savepoint restore path");
+ options.addOption("allowNonRestoredState", true, "Flag indicating whether non restored state is allowed if the savepoint");
+
+ CommandLineParser parser = new DefaultParser();
+ CommandLine cl = parser.parse(options, args);
+ String sql = cl.getOptionValue("sql");
+ String name = cl.getOptionValue("name");
+ String addJarListStr = cl.getOptionValue("addjar");
+ String localSqlPluginPath = cl.getOptionValue("localSqlPluginPath");
+ String remoteSqlPluginPath = cl.getOptionValue("remoteSqlPluginPath");
+ String deployMode = cl.getOptionValue("mode");
+ String confProp = cl.getOptionValue("confProp");
+
+ Preconditions.checkNotNull(sql, "parameters of sql is required");
+ Preconditions.checkNotNull(name, "parameters of name is required");
+ Preconditions.checkNotNull(localSqlPluginPath, "parameters of localSqlPluginPath is required");
sql = URLDecoder.decode(sql, Charsets.UTF_8.name());
SqlParser.setLocalSqlPluginRoot(localSqlPluginPath);
@@ -123,6 +149,10 @@ public static void main(String[] args) throws Exception {
addJarFileList = objMapper.readValue(addJarListStr, List.class);
}
+ ClassLoader threadClassLoader = Thread.currentThread().getContextClassLoader();
+ DtClassLoader parentClassloader = new DtClassLoader(new URL[]{}, threadClassLoader);
+ Thread.currentThread().setContextClassLoader(parentClassloader);
+
confProp = URLDecoder.decode(confProp, Charsets.UTF_8.toString());
Properties confProperties = PluginUtil.jsonStrToObject(confProp, Properties.class);
StreamExecutionEnvironment env = getStreamExeEnv(confProperties, deployMode);
@@ -141,22 +171,13 @@ public static void main(String[] args) throws Exception {
Map registerTableCache = Maps.newHashMap();
//register udf
- registerUDF(sqlTree, jarURList, tableEnv);
+ registerUDF(sqlTree, jarURList, parentClassloader, tableEnv);
//register table schema
- registerTable(sqlTree, env, tableEnv, localSqlPluginPath, remoteSqlPluginPath, pluginLoadMode, sideTableMap, registerTableCache);
-
- sqlTranslation(localSqlPluginPath, tableEnv,sqlTree,sideTableMap,registerTableCache);
-
- if(env instanceof MyLocalStreamEnvironment) {
- ((MyLocalStreamEnvironment) env).setClasspaths(ClassLoaderManager.getClassPath());
- }
-
- env.execute(name);
- }
+ registerTable(sqlTree, env, tableEnv, localSqlPluginPath, remoteSqlPluginPath, sideTableMap, registerTableCache);
- private static void sqlTranslation(String localSqlPluginPath, StreamTableEnvironment tableEnv,SqlTree sqlTree,Map sideTableMap,Map registerTableCache) throws Exception {
SideSqlExec sideSqlExec = new SideSqlExec();
sideSqlExec.setLocalSqlPluginPath(localSqlPluginPath);
+
for (CreateTmpTableParser.SqlParserResult result : sqlTree.getTmpSqlList()) {
sideSqlExec.registerTmpTable(result, sideTableMap, tableEnv, registerTableCache);
}
@@ -165,13 +186,14 @@ private static void sqlTranslation(String localSqlPluginPath, StreamTableEnviron
if(LOG.isInfoEnabled()){
LOG.info("exe-sql:\n" + result.getExecSql());
}
+
boolean isSide = false;
+
for (String tableName : result.getTargetTableList()) {
if (sqlTree.getTmpTableMap().containsKey(tableName)) {
CreateTmpTableParser.SqlParserResult tmp = sqlTree.getTmpTableMap().get(tableName);
String realSql = DtStringUtil.replaceIgnoreQuota(result.getExecSql(), "`", "");
-
- SqlNode sqlNode = org.apache.calcite.sql.parser.SqlParser.create(realSql, CalciteConfig.MYSQL_LEX_CONFIG).parseStmt();
+ SqlNode sqlNode = org.apache.calcite.sql.parser.SqlParser.create(realSql, config).parseStmt();
String tmpSql = ((SqlInsert) sqlNode).getSource().toString();
tmp.setExecSql(tmpSql);
sideSqlExec.registerTmpTable(tmp, sideTableMap, tableEnv, registerTableCache);
@@ -182,6 +204,7 @@ private static void sqlTranslation(String localSqlPluginPath, StreamTableEnviron
break;
}
}
+
if(isSide){
//sql-dimensional table contains the dimension table of execution
sideSqlExec.exec(result.getExecSql(), sideTableMap, tableEnv, registerTableCache);
@@ -195,8 +218,15 @@ private static void sqlTranslation(String localSqlPluginPath, StreamTableEnviron
}
}
+ if(env instanceof MyLocalStreamEnvironment) {
+ List urlList = new ArrayList<>();
+ urlList.addAll(Arrays.asList(parentClassloader.getURLs()));
+ ((MyLocalStreamEnvironment) env).setClasspaths(urlList);
+ }
+ env.execute(name);
}
+
/**
* This part is just to add classpath for the jar when reading remote execution, and will not submit jar from a local
* @param env
@@ -215,25 +245,26 @@ private static void addEnvClassPath(StreamExecutionEnvironment env, Set cla
}
}
- private static void registerUDF(SqlTree sqlTree, List jarURList, StreamTableEnvironment tableEnv)
- throws NoSuchMethodException, IllegalAccessException, InvocationTargetException {
+ private static void registerUDF(SqlTree sqlTree, List jarURList, URLClassLoader parentClassloader,
+ StreamTableEnvironment tableEnv)
+ throws ClassNotFoundException, NoSuchMethodException, IllegalAccessException, InvocationTargetException {
//register urf
- // udf和tableEnv须由同一个类加载器加载
- ClassLoader levelClassLoader = tableEnv.getClass().getClassLoader();
URLClassLoader classLoader = null;
List funcList = sqlTree.getFunctionList();
for (CreateFuncParser.SqlParserResult funcInfo : funcList) {
//classloader
if (classLoader == null) {
- classLoader = FlinkUtil.loadExtraJar(jarURList, (URLClassLoader)levelClassLoader);
+ classLoader = FlinkUtil.loadExtraJar(jarURList, parentClassloader);
}
- FlinkUtil.registerUDF(funcInfo.getType(), funcInfo.getClassName(), funcInfo.getName(), tableEnv, classLoader);
+ FlinkUtil.registerUDF(funcInfo.getType(), funcInfo.getClassName(), funcInfo.getName(),
+ tableEnv, classLoader);
}
}
- private static void registerTable(SqlTree sqlTree, StreamExecutionEnvironment env, StreamTableEnvironment tableEnv, String localSqlPluginPath,
- String remoteSqlPluginPath, String pluginLoadMode, Map sideTableMap, Map registerTableCache) throws Exception {
+ private static void registerTable(SqlTree sqlTree, StreamExecutionEnvironment env, StreamTableEnvironment tableEnv,
+ String localSqlPluginPath, String remoteSqlPluginPath,
+ Map sideTableMap, Map registerTableCache) throws Exception {
Set classPathSet = Sets.newHashSet();
WaterMarkerAssigner waterMarkerAssigner = new WaterMarkerAssigner();
for (TableInfo tableInfo : sqlTree.getTableInfoMap().values()) {
@@ -268,18 +299,18 @@ private static void registerTable(SqlTree sqlTree, StreamExecutionEnvironment en
LOG.info("registe table {} success.", tableInfo.getName());
}
registerTableCache.put(tableInfo.getName(), regTable);
- classPathSet.add(buildSourceAndSinkPathByLoadMode(tableInfo.getType(), SourceTableInfo.SOURCE_SUFFIX, localSqlPluginPath, remoteSqlPluginPath, pluginLoadMode));
+ classPathSet.add(PluginUtil.getRemoteJarFilePath(tableInfo.getType(), SourceTableInfo.SOURCE_SUFFIX, remoteSqlPluginPath, localSqlPluginPath));
} else if (tableInfo instanceof TargetTableInfo) {
TableSink tableSink = StreamSinkFactory.getTableSink((TargetTableInfo) tableInfo, localSqlPluginPath);
TypeInformation[] flinkTypes = FlinkUtil.transformTypes(tableInfo.getFieldClasses());
tableEnv.registerTableSink(tableInfo.getName(), tableInfo.getFields(), flinkTypes, tableSink);
- classPathSet.add(buildSourceAndSinkPathByLoadMode(tableInfo.getType(), TargetTableInfo.TARGET_SUFFIX, localSqlPluginPath, remoteSqlPluginPath, pluginLoadMode));
+ classPathSet.add( PluginUtil.getRemoteJarFilePath(tableInfo.getType(), TargetTableInfo.TARGET_SUFFIX, remoteSqlPluginPath, localSqlPluginPath));
} else if(tableInfo instanceof SideTableInfo){
String sideOperator = ECacheType.ALL.name().equals(((SideTableInfo) tableInfo).getCacheType()) ? "all" : "async";
sideTableMap.put(tableInfo.getName(), (SideTableInfo) tableInfo);
- classPathSet.add(buildSidePathByLoadMode(tableInfo.getType(), sideOperator, SideTableInfo.TARGET_SUFFIX, localSqlPluginPath, remoteSqlPluginPath, pluginLoadMode));
+ classPathSet.add(PluginUtil.getRemoteSideJarFilePath(tableInfo.getType(), sideOperator, SideTableInfo.TARGET_SUFFIX, remoteSqlPluginPath, localSqlPluginPath));
}else {
throw new RuntimeException("not support table type:" + tableInfo.getType());
}
@@ -295,36 +326,25 @@ private static void registerTable(SqlTree sqlTree, StreamExecutionEnvironment en
}
}
- private static URL buildSourceAndSinkPathByLoadMode(String type, String suffix, String localSqlPluginPath, String remoteSqlPluginPath, String pluginLoadMode) throws Exception {
- if (StringUtils.equalsIgnoreCase(pluginLoadMode, EPluginLoadMode.CLASSPATH.name())) {
- return PluginUtil.getRemoteJarFilePath(type, suffix, remoteSqlPluginPath, localSqlPluginPath);
- }
- return PluginUtil.getLocalJarFilePath(type, suffix, localSqlPluginPath);
- }
-
- private static URL buildSidePathByLoadMode(String type, String operator, String suffix, String localSqlPluginPath, String remoteSqlPluginPath, String pluginLoadMode) throws Exception {
- if (StringUtils.equalsIgnoreCase(pluginLoadMode, EPluginLoadMode.CLASSPATH.name())) {
- return PluginUtil.getRemoteSideJarFilePath(type, operator, suffix, remoteSqlPluginPath, localSqlPluginPath);
- }
- return PluginUtil.getLocalSideJarFilePath(type, operator, suffix, localSqlPluginPath);
- }
-
- private static StreamExecutionEnvironment getStreamExeEnv(Properties confProperties, String deployMode) throws Exception {
- confProperties = PropertiesUtils.propertiesTrim(confProperties);
-
+ private static StreamExecutionEnvironment getStreamExeEnv(Properties confProperties, String deployMode) throws IOException, NoSuchMethodException {
StreamExecutionEnvironment env = !ClusterMode.local.name().equals(deployMode) ?
StreamExecutionEnvironment.getExecutionEnvironment() :
new MyLocalStreamEnvironment();
- env.getConfig().disableClosureCleaner();
- env.setParallelism(FlinkUtil.getEnvParallelism(confProperties));
+ env.setParallelism(FlinkUtil.getEnvParallelism(confProperties));
Configuration globalJobParameters = new Configuration();
- //Configuration unsupported set properties key-value
Method method = Configuration.class.getDeclaredMethod("setValueInternal", String.class, Object.class);
method.setAccessible(true);
- for (Map.Entry prop : confProperties.entrySet()) {
- method.invoke(globalJobParameters, prop.getKey(), prop.getValue());
- }
+
+ confProperties.forEach((key,val) -> {
+ try {
+ method.invoke(globalJobParameters, key, val);
+ } catch (IllegalAccessException e) {
+ e.printStackTrace();
+ } catch (InvocationTargetException e) {
+ e.printStackTrace();
+ }
+ });
ExecutionConfig exeConfig = env.getConfig();
if(exeConfig.getGlobalJobParameters() == null){
@@ -332,20 +352,25 @@ private static StreamExecutionEnvironment getStreamExeEnv(Properties confPropert
}else if(exeConfig.getGlobalJobParameters() instanceof Configuration){
((Configuration) exeConfig.getGlobalJobParameters()).addAll(globalJobParameters);
}
+
+
if(FlinkUtil.getMaxEnvParallelism(confProperties) > 0){
env.setMaxParallelism(FlinkUtil.getMaxEnvParallelism(confProperties));
}
+
if(FlinkUtil.getBufferTimeoutMillis(confProperties) > 0){
env.setBufferTimeout(FlinkUtil.getBufferTimeoutMillis(confProperties));
}
+
env.setRestartStrategy(RestartStrategies.failureRateRestart(
- ConfigConstrant.failureRate,
- Time.of(ConfigConstrant.failureInterval, TimeUnit.MINUTES),
- Time.of(ConfigConstrant.delayInterval, TimeUnit.SECONDS)
+ failureRate,
+ Time.of(failureInterval, TimeUnit.MINUTES),
+ Time.of(delayInterval, TimeUnit.SECONDS)
));
+
FlinkUtil.setStreamTimeCharacteristic(env, confProperties);
FlinkUtil.openCheckpoint(env, confProperties);
+
return env;
}
-
}
diff --git a/core/src/main/java/com/dtstack/flink/sql/environment/MyLocalStreamEnvironment.java b/core/src/main/java/com/dtstack/flink/sql/MyLocalStreamEnvironment.java
similarity index 85%
rename from core/src/main/java/com/dtstack/flink/sql/environment/MyLocalStreamEnvironment.java
rename to core/src/main/java/com/dtstack/flink/sql/MyLocalStreamEnvironment.java
index 8ac1edd41..54ddaa647 100644
--- a/core/src/main/java/com/dtstack/flink/sql/environment/MyLocalStreamEnvironment.java
+++ b/core/src/main/java/com/dtstack/flink/sql/MyLocalStreamEnvironment.java
@@ -16,16 +16,16 @@
* limitations under the License.
*/
-package com.dtstack.flink.sql.environment;
+package com.dtstack.flink.sql;
import org.apache.flink.api.common.InvalidProgramException;
import org.apache.flink.api.common.JobExecutionResult;
import org.apache.flink.api.java.ExecutionEnvironment;
+import org.apache.flink.configuration.ConfigConstants;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.configuration.TaskManagerOptions;
import org.apache.flink.runtime.jobgraph.JobGraph;
-import org.apache.flink.runtime.minicluster.MiniCluster;
-import org.apache.flink.runtime.minicluster.MiniClusterConfiguration;
+import org.apache.flink.runtime.minicluster.LocalFlinkMiniCluster;
import org.apache.flink.streaming.api.environment.LocalStreamEnvironment;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.graph.StreamGraph;
@@ -100,27 +100,24 @@ public JobExecutionResult execute(String jobName) throws Exception {
Configuration configuration = new Configuration();
configuration.addAll(jobGraph.getJobConfiguration());
- configuration.setString(TaskManagerOptions.MANAGED_MEMORY_SIZE, "512M");
- configuration.setInteger(TaskManagerOptions.NUM_TASK_SLOTS, jobGraph.getMaximumParallelism());
+ configuration.setLong(TaskManagerOptions.MANAGED_MEMORY_SIZE, -1L);
+ configuration.setInteger(ConfigConstants.TASK_MANAGER_NUM_TASK_SLOTS, jobGraph.getMaximumParallelism());
// add (and override) the settings with what the user defined
configuration.addAll(this.conf);
- MiniClusterConfiguration.Builder configBuilder = new MiniClusterConfiguration.Builder();
- configBuilder.setConfiguration(configuration);
-
if (LOG.isInfoEnabled()) {
LOG.info("Running job on local embedded Flink mini cluster");
}
- MiniCluster exec = new MiniCluster(configBuilder.build());
+ LocalFlinkMiniCluster exec = new LocalFlinkMiniCluster(configuration, true);
try {
exec.start();
- return exec.executeJobBlocking(jobGraph);
+ return exec.submitJobAndWait(jobGraph, getConfig().isSysoutLoggingEnabled());
}
finally {
transformations.clear();
- exec.closeAsync();
+ exec.stop();
}
}
}
diff --git a/core/src/main/java/com/dtstack/flink/sql/classloader/ClassLoaderManager.java b/core/src/main/java/com/dtstack/flink/sql/classloader/ClassLoaderManager.java
deleted file mode 100644
index 6db1058e5..000000000
--- a/core/src/main/java/com/dtstack/flink/sql/classloader/ClassLoaderManager.java
+++ /dev/null
@@ -1,94 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package com.dtstack.flink.sql.classloader;
-
-import com.dtstack.flink.sql.util.PluginUtil;
-import org.apache.commons.lang3.StringUtils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.net.URL;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Comparator;
-import java.util.List;
-import java.util.Map;
-import java.util.concurrent.ConcurrentHashMap;
-
-/**
- * company: www.dtstack.com
- * author: toutian
- * create: 2019/10/14
- */
-public class ClassLoaderManager {
-
- private static final Logger LOG = LoggerFactory.getLogger(ClassLoaderManager.class);
-
- private static Map pluginClassLoader = new ConcurrentHashMap<>();
-
- public static R newInstance(String pluginJarPath, ClassLoaderSupplier supplier) throws Exception {
- ClassLoader classLoader = retrieveClassLoad(pluginJarPath);
- return ClassLoaderSupplierCallBack.callbackAndReset(supplier, classLoader);
- }
-
- public static R newInstance(List jarUrls, ClassLoaderSupplier supplier) throws Exception {
- ClassLoader classLoader = retrieveClassLoad(jarUrls);
- return ClassLoaderSupplierCallBack.callbackAndReset(supplier, classLoader);
- }
-
- private static DtClassLoader retrieveClassLoad(String pluginJarPath) {
- return pluginClassLoader.computeIfAbsent(pluginJarPath, k -> {
- try {
- URL[] urls = PluginUtil.getPluginJarUrls(pluginJarPath);
- ClassLoader parentClassLoader = Thread.currentThread().getContextClassLoader();
- DtClassLoader classLoader = new DtClassLoader(urls, parentClassLoader);
- LOG.info("pluginJarPath:{} create ClassLoad successful...", pluginJarPath);
- return classLoader;
- } catch (Throwable e) {
- LOG.error("retrieve ClassLoad happens error:{}", e);
- throw new RuntimeException("retrieve ClassLoad happens error");
- }
- });
- }
-
- private static DtClassLoader retrieveClassLoad(List jarUrls) {
- jarUrls.sort(Comparator.comparing(URL::toString));
- String jarUrlkey = StringUtils.join(jarUrls, "_");
- return pluginClassLoader.computeIfAbsent(jarUrlkey, k -> {
- try {
- URL[] urls = jarUrls.toArray(new URL[jarUrls.size()]);
- ClassLoader parentClassLoader = Thread.currentThread().getContextClassLoader();
- DtClassLoader classLoader = new DtClassLoader(urls, parentClassLoader);
- LOG.info("jarUrl:{} create ClassLoad successful...", jarUrlkey);
- return classLoader;
- } catch (Throwable e) {
- LOG.error("retrieve ClassLoad happens error:{}", e);
- throw new RuntimeException("retrieve ClassLoad happens error");
- }
- });
- }
-
- public static List getClassPath() {
- List classPaths = new ArrayList<>();
- for (Map.Entry entry : pluginClassLoader.entrySet()) {
- classPaths.addAll(Arrays.asList(entry.getValue().getURLs()));
- }
- return classPaths;
- }
-}
diff --git a/core/src/main/java/com/dtstack/flink/sql/classloader/ClassLoaderSupplier.java b/core/src/main/java/com/dtstack/flink/sql/classloader/ClassLoaderSupplier.java
deleted file mode 100644
index 859aa75f4..000000000
--- a/core/src/main/java/com/dtstack/flink/sql/classloader/ClassLoaderSupplier.java
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-
-package com.dtstack.flink.sql.classloader;
-
-/**
- * Represents a supplier of results.
- *
- * There is no requirement that a new or distinct result be returned each
- * time the supplier is invoked.
- *
- *
This is a functional interface
- * whose functional method is {@link #get()}.
- *
- * @param the type of results supplied by this supplier
- *
- * @since 1.8
- */
-@FunctionalInterface
-public interface ClassLoaderSupplier {
-
- /**
- * Gets a result.
- *
- * @return a result
- */
- T get(ClassLoader cl) throws Exception;
-}
diff --git a/core/src/main/java/com/dtstack/flink/sql/classloader/ClassLoaderSupplierCallBack.java b/core/src/main/java/com/dtstack/flink/sql/classloader/ClassLoaderSupplierCallBack.java
deleted file mode 100644
index 51d37ef5e..000000000
--- a/core/src/main/java/com/dtstack/flink/sql/classloader/ClassLoaderSupplierCallBack.java
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-
-package com.dtstack.flink.sql.classloader;
-
-/**
- * company: www.dtstack.com
- * author: toutian
- * create: 2019/10/14
- */
-public class ClassLoaderSupplierCallBack {
-
- public static R callbackAndReset(ClassLoaderSupplier supplier, ClassLoader toSetClassLoader) throws Exception {
- ClassLoader oldClassLoader = Thread.currentThread().getContextClassLoader();
- Thread.currentThread().setContextClassLoader(toSetClassLoader);
- try {
- return supplier.get(toSetClassLoader);
- } finally {
- Thread.currentThread().setContextClassLoader(oldClassLoader);
- }
- }
-
-
-}
diff --git a/core/src/main/java/com/dtstack/flink/sql/config/CalciteConfig.java b/core/src/main/java/com/dtstack/flink/sql/config/CalciteConfig.java
deleted file mode 100644
index 54ae66bbc..000000000
--- a/core/src/main/java/com/dtstack/flink/sql/config/CalciteConfig.java
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-
-package com.dtstack.flink.sql.config;
-
-import org.apache.calcite.config.Lex;
-import org.apache.calcite.sql.parser.SqlParser;
-import org.apache.calcite.sql.parser.SqlParser.Config;
-
-public class CalciteConfig {
-
- public static Config MYSQL_LEX_CONFIG = SqlParser
- .configBuilder()
- .setLex(Lex.MYSQL)
- .build();
-
-
-
-}
diff --git a/core/src/main/java/com/dtstack/flink/sql/enums/EPluginLoadMode.java b/core/src/main/java/com/dtstack/flink/sql/enums/EPluginLoadMode.java
deleted file mode 100644
index 6cb027ac3..000000000
--- a/core/src/main/java/com/dtstack/flink/sql/enums/EPluginLoadMode.java
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package com.dtstack.flink.sql.enums;
-
-/**
- *
- * CLASSPATH: plugin jar depends on each machine node.
- * SHIPFILE: plugin jar only depends on the client submitted by the task.
- *
- */
-public enum EPluginLoadMode {
-
- CLASSPATH(0),
- SHIPFILE(1);
-
- private int type;
-
- EPluginLoadMode(int type){
- this.type = type;
- }
-
- public int getType(){
- return this.type;
- }
-}
diff --git a/core/src/main/java/com/dtstack/flink/sql/exec/FlinkSQLExec.java b/core/src/main/java/com/dtstack/flink/sql/exec/FlinkSQLExec.java
index 6bcc25251..d0191aec3 100644
--- a/core/src/main/java/com/dtstack/flink/sql/exec/FlinkSQLExec.java
+++ b/core/src/main/java/com/dtstack/flink/sql/exec/FlinkSQLExec.java
@@ -1,21 +1,3 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
package com.dtstack.flink.sql.exec;
import org.apache.calcite.sql.SqlIdentifier;
@@ -29,8 +11,6 @@
import org.apache.flink.table.calcite.FlinkPlannerImpl;
import org.apache.flink.table.plan.logical.LogicalRelNode;
import org.apache.flink.table.plan.schema.TableSinkTable;
-import org.apache.flink.table.plan.schema.TableSourceSinkTable;
-import scala.Option;
import java.lang.reflect.Method;
@@ -59,17 +39,12 @@ public static void sqlUpdate(StreamTableEnvironment tableEnv, String stmt) throw
Method method = TableEnvironment.class.getDeclaredMethod("getTable", String.class);
method.setAccessible(true);
- Option sinkTab = (Option)method.invoke(tableEnv, targetTableName);
- if (sinkTab.isEmpty()) {
- throw new ValidationException("Sink table " + targetTableName + "not found in flink");
- }
-
- TableSourceSinkTable targetTable = (TableSourceSinkTable) sinkTab.get();
- TableSinkTable tableSinkTable = (TableSinkTable)targetTable.tableSinkTable().get();
- String[] fieldNames = tableSinkTable.tableSink().getFieldNames();
+ TableSinkTable targetTable = (TableSinkTable) method.invoke(tableEnv, targetTableName);
+ String[] fieldNames = targetTable.tableSink().getFieldNames();
Table newTable = null;
+
try {
newTable = queryResult.select(String.join(",", fieldNames));
} catch (Exception e) {
diff --git a/core/src/main/java/com/dtstack/flink/sql/metric/MetricConstant.java b/core/src/main/java/com/dtstack/flink/sql/metric/MetricConstant.java
index 17bb75e82..89c411479 100644
--- a/core/src/main/java/com/dtstack/flink/sql/metric/MetricConstant.java
+++ b/core/src/main/java/com/dtstack/flink/sql/metric/MetricConstant.java
@@ -45,8 +45,6 @@ public class MetricConstant {
public static final String DT_NUM_RECORDS_OUT = "dtNumRecordsOut";
- public static final String DT_NUM_DIRTY_RECORDS_OUT = "dtNumDirtyRecordsOut";
-
public static final String DT_NUM_RECORDS_OUT_RATE = "dtNumRecordsOutRate";
public static final String DT_EVENT_DELAY_GAUGE = "dtEventDelay";
diff --git a/core/src/main/java/com/dtstack/flink/sql/option/OptionParser.java b/core/src/main/java/com/dtstack/flink/sql/option/OptionParser.java
deleted file mode 100644
index 07860b608..000000000
--- a/core/src/main/java/com/dtstack/flink/sql/option/OptionParser.java
+++ /dev/null
@@ -1,120 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package com.dtstack.flink.sql.option;
-
-import com.google.common.collect.Lists;
-import com.dtstack.flink.sql.util.PluginUtil;
-import org.apache.commons.cli.BasicParser;
-import org.apache.commons.cli.CommandLine;
-import org.apache.commons.cli.ParseException;
-import org.apache.commons.lang.StringUtils;
-import java.lang.reflect.InvocationTargetException;
-import java.lang.reflect.Field;
-import java.util.List;
-import java.util.Map;
-import java.io.File;
-import java.io.FileInputStream;
-import java.net.URLEncoder;
-import org.apache.commons.codec.Charsets;
-
-
-/**
- * The Parser of Launcher commandline options
- *
- * Company: www.dtstack.com
- * @author sishu.yss
- */
-public class OptionParser {
-
- public static final String OPTION_SQL = "sql";
-
- private org.apache.commons.cli.Options options = new org.apache.commons.cli.Options();
-
- private BasicParser parser = new BasicParser();
-
- private Options properties = new Options();
-
- public OptionParser(String[] args) throws Exception {
- initOptions(addOptions(args));
- }
-
- private CommandLine addOptions(String[] args) throws NoSuchMethodException, InvocationTargetException, IllegalAccessException, ParseException {
- Class cla = properties.getClass();
- Field[] fields = cla.getDeclaredFields();
- for(Field field:fields){
- String name = field.getName();
- OptionRequired optionRequired = field.getAnnotation(OptionRequired.class);
- if(optionRequired != null){
- options.addOption(name,optionRequired.hasArg(),optionRequired.description());
- }
- }
- CommandLine cl = parser.parse(options, args);
- return cl;
- }
-
- private void initOptions(CommandLine cl) throws NoSuchMethodException, InvocationTargetException, IllegalAccessException, ParseException {
- Class cla = properties.getClass();
- Field[] fields = cla.getDeclaredFields();
- for(Field field:fields){
- String name = field.getName();
- String value = cl.getOptionValue(name);
- OptionRequired optionRequired = field.getAnnotation(OptionRequired.class);
- if(optionRequired != null){
- if(optionRequired.required()&&StringUtils.isBlank(value)){
- throw new RuntimeException(String.format("parameters of %s is required",name));
- }
- }
- if(StringUtils.isNotBlank(value)){
- field.setAccessible(true);
- field.set(properties,value);
- }
- }
- }
-
- public Options getOptions(){
- return properties;
- }
-
- public List getProgramExeArgList() throws Exception {
- Map mapConf = PluginUtil.ObjectToMap(properties);
- List args = Lists.newArrayList();
- for(Map.Entry one : mapConf.entrySet()){
- String key = one.getKey();
- Object value = one.getValue();
- if(value == null){
- continue;
- }else if(OPTION_SQL.equalsIgnoreCase(key)){
- File file = new File(value.toString());
- FileInputStream in = new FileInputStream(file);
- byte[] filecontent = new byte[(int) file.length()];
- in.read(filecontent);
- String content = new String(filecontent, Charsets.UTF_8.name());
- value = URLEncoder.encode(content, Charsets.UTF_8.name());
- }
- args.add("-" + key);
- args.add(value.toString());
- }
- return args;
- }
-
- public static void main(String[] args) throws Exception {
- OptionParser OptionParser = new OptionParser(args);
- System.out.println(OptionParser.getOptions());
- }
-}
diff --git a/core/src/main/java/com/dtstack/flink/sql/option/OptionRequired.java b/core/src/main/java/com/dtstack/flink/sql/option/OptionRequired.java
deleted file mode 100644
index b70c4424b..000000000
--- a/core/src/main/java/com/dtstack/flink/sql/option/OptionRequired.java
+++ /dev/null
@@ -1,42 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package com.dtstack.flink.sql.option;
-
-import java.lang.annotation.ElementType;
-import java.lang.annotation.Retention;
-import java.lang.annotation.RetentionPolicy;
-import java.lang.annotation.Target;
-
-/**
- *
- * Reason: TODO ADD REASON(可选)
- * Date: 2019年9月16日 下午1:24:39
- * Company: www.dtstack.com
- * @author sishu.yss
- *
- */
-@Target({ElementType.FIELD})
-@Retention(RetentionPolicy.RUNTIME)
-public @interface OptionRequired {
-
- boolean required() default false;
-
- boolean hasArg() default true;
-
- String description() default "";
-}
diff --git a/core/src/main/java/com/dtstack/flink/sql/parser/CreateFuncParser.java b/core/src/main/java/com/dtstack/flink/sql/parser/CreateFuncParser.java
index 670d98a7e..9b7017743 100644
--- a/core/src/main/java/com/dtstack/flink/sql/parser/CreateFuncParser.java
+++ b/core/src/main/java/com/dtstack/flink/sql/parser/CreateFuncParser.java
@@ -24,7 +24,7 @@
import java.util.regex.Pattern;
/**
- * parser register udf sql
+ * 解析创建自定义方法sql
* Date: 2018/6/26
* Company: www.dtstack.com
* @author xuchao
@@ -32,7 +32,7 @@
public class CreateFuncParser implements IParser {
- private static final String funcPatternStr = "(?i)\\s*create\\s+(scala|table|aggregate)\\s+function\\s+(\\S+)\\s+WITH\\s+(\\S+)";
+ private static final String funcPatternStr = "(?i)\\s*create\\s+(scala|table)\\s+function\\s+(\\S+)\\s+WITH\\s+(\\S+)";
private static final Pattern funcPattern = Pattern.compile(funcPatternStr);
diff --git a/core/src/main/java/com/dtstack/flink/sql/parser/CreateTableParser.java b/core/src/main/java/com/dtstack/flink/sql/parser/CreateTableParser.java
index ae6e1f708..b5e4a4aa4 100644
--- a/core/src/main/java/com/dtstack/flink/sql/parser/CreateTableParser.java
+++ b/core/src/main/java/com/dtstack/flink/sql/parser/CreateTableParser.java
@@ -21,7 +21,7 @@
package com.dtstack.flink.sql.parser;
import com.dtstack.flink.sql.util.DtStringUtil;
-import com.google.common.collect.Maps;
+import org.apache.flink.calcite.shaded.com.google.common.collect.Maps;
import java.util.List;
import java.util.Map;
@@ -29,7 +29,7 @@
import java.util.regex.Pattern;
/**
- * parser create table sql
+ * 解析创建表结构sql
* Date: 2018/6/26
* Company: www.dtstack.com
* @author xuchao
diff --git a/core/src/main/java/com/dtstack/flink/sql/parser/CreateTmpTableParser.java b/core/src/main/java/com/dtstack/flink/sql/parser/CreateTmpTableParser.java
index de7141eb5..cb5620d0a 100644
--- a/core/src/main/java/com/dtstack/flink/sql/parser/CreateTmpTableParser.java
+++ b/core/src/main/java/com/dtstack/flink/sql/parser/CreateTmpTableParser.java
@@ -25,19 +25,14 @@
import org.apache.calcite.sql.*;
import org.apache.calcite.sql.parser.SqlParseException;
import org.apache.calcite.sql.parser.SqlParser;
-import com.google.common.collect.Lists;
+import org.apache.flink.shaded.guava18.com.google.common.collect.Lists;
+
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import static org.apache.calcite.sql.SqlKind.IDENTIFIER;
-/**
- * parser create tmp table sql
- * Date: 2018/6/26
- * Company: www.dtstack.com
- * @author yanxi
- */
public class CreateTmpTableParser implements IParser {
//select table tableName as select
@@ -71,13 +66,11 @@ public void parseSql(String sql, SqlTree sqlTree) {
tableName = matcher.group(1);
selectSql = "select " + matcher.group(2);
}
-
SqlParser.Config config = SqlParser
.configBuilder()
.setLex(Lex.MYSQL)
.build();
SqlParser sqlParser = SqlParser.create(selectSql,config);
-
SqlNode sqlNode = null;
try {
sqlNode = sqlParser.parseStmt();
diff --git a/core/src/main/java/com/dtstack/flink/sql/parser/InsertSqlParser.java b/core/src/main/java/com/dtstack/flink/sql/parser/InsertSqlParser.java
index a7c6db9eb..52541385f 100644
--- a/core/src/main/java/com/dtstack/flink/sql/parser/InsertSqlParser.java
+++ b/core/src/main/java/com/dtstack/flink/sql/parser/InsertSqlParser.java
@@ -21,11 +21,16 @@
package com.dtstack.flink.sql.parser;
import org.apache.calcite.config.Lex;
-import org.apache.calcite.sql.*;
+import org.apache.calcite.sql.SqlBasicCall;
+import org.apache.calcite.sql.SqlInsert;
+import org.apache.calcite.sql.SqlJoin;
+import org.apache.calcite.sql.SqlKind;
+import org.apache.calcite.sql.SqlNode;
+import org.apache.calcite.sql.SqlSelect;
import org.apache.calcite.sql.parser.SqlParseException;
import org.apache.calcite.sql.parser.SqlParser;
import org.apache.commons.lang3.StringUtils;
-import com.google.common.collect.Lists;
+import org.apache.flink.shaded.guava18.com.google.common.collect.Lists;
import java.util.List;
@@ -113,10 +118,6 @@ private static void parseNode(SqlNode sqlNode, SqlParseResult sqlParseResult){
sqlParseResult.addSourceTable(identifierNode.toString());
}
break;
- case MATCH_RECOGNIZE:
- SqlMatchRecognize node = (SqlMatchRecognize) sqlNode;
- sqlParseResult.addSourceTable(node.getTableRef().toString());
- break;
case UNION:
SqlNode unionLeft = ((SqlBasicCall)sqlNode).getOperands()[0];
SqlNode unionRight = ((SqlBasicCall)sqlNode).getOperands()[1];
@@ -131,10 +132,6 @@ private static void parseNode(SqlNode sqlNode, SqlParseResult sqlParseResult){
parseNode(unionRight, sqlParseResult);
}
break;
- case ORDER_BY:
- SqlOrderBy sqlOrderBy = (SqlOrderBy) sqlNode;
- parseNode(sqlOrderBy.query, sqlParseResult);
- break;
default:
//do nothing
break;
diff --git a/core/src/main/java/com/dtstack/flink/sql/parser/SqlParser.java b/core/src/main/java/com/dtstack/flink/sql/parser/SqlParser.java
index a76c1b31a..0f9f8ffd7 100644
--- a/core/src/main/java/com/dtstack/flink/sql/parser/SqlParser.java
+++ b/core/src/main/java/com/dtstack/flink/sql/parser/SqlParser.java
@@ -25,8 +25,8 @@
import com.dtstack.flink.sql.table.TableInfoParser;
import com.dtstack.flink.sql.util.DtStringUtil;
import org.apache.commons.lang3.StringUtils;
-import com.google.common.collect.Lists;
-import com.google.common.base.Strings;
+import org.apache.flink.shaded.curator.org.apache.curator.shaded.com.google.common.collect.Lists;
+import org.apache.flink.shaded.guava18.com.google.common.base.Strings;
import java.util.List;
import java.util.Set;
@@ -88,7 +88,6 @@ public static SqlTree parseSql(String sql) throws Exception {
sqlParser.parseSql(childSql, sqlTree);
result = true;
- break;
}
if(!result){
diff --git a/core/src/main/java/com/dtstack/flink/sql/parser/SqlTree.java b/core/src/main/java/com/dtstack/flink/sql/parser/SqlTree.java
index 1b64b7c68..3ed37c51e 100644
--- a/core/src/main/java/com/dtstack/flink/sql/parser/SqlTree.java
+++ b/core/src/main/java/com/dtstack/flink/sql/parser/SqlTree.java
@@ -22,14 +22,14 @@
import com.dtstack.flink.sql.table.TableInfo;
-import com.google.common.collect.Maps;
-import com.google.common.collect.Lists;
+import org.apache.flink.shaded.curator.org.apache.curator.shaded.com.google.common.collect.Maps;
+import org.apache.flink.shaded.guava18.com.google.common.collect.Lists;
import java.util.List;
import java.util.Map;
/**
- * parser sql to get the Sql Tree structure
+ * 解析sql获得的对象结构
* Date: 2018/6/25
* Company: www.dtstack.com
* @author xuchao
diff --git a/core/src/main/java/com/dtstack/flink/sql/side/AllReqRow.java b/core/src/main/java/com/dtstack/flink/sql/side/AllReqRow.java
index e788cf139..a185da1bd 100644
--- a/core/src/main/java/com/dtstack/flink/sql/side/AllReqRow.java
+++ b/core/src/main/java/com/dtstack/flink/sql/side/AllReqRow.java
@@ -20,7 +20,7 @@
package com.dtstack.flink.sql.side;
-import com.dtstack.flink.sql.factory.DTThreadFactory;
+import com.dtstack.flink.sql.threadFactory.DTThreadFactory;
import org.apache.flink.api.common.functions.RichFlatMapFunction;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.types.Row;
diff --git a/core/src/main/java/com/dtstack/flink/sql/side/FieldReplaceInfo.java b/core/src/main/java/com/dtstack/flink/sql/side/FieldReplaceInfo.java
index 37b23d046..bc716ddaa 100644
--- a/core/src/main/java/com/dtstack/flink/sql/side/FieldReplaceInfo.java
+++ b/core/src/main/java/com/dtstack/flink/sql/side/FieldReplaceInfo.java
@@ -20,7 +20,7 @@
package com.dtstack.flink.sql.side;
-import com.google.common.collect.HashBasedTable;
+import org.apache.flink.calcite.shaded.com.google.common.collect.HashBasedTable;
/**
* Reason:
diff --git a/core/src/main/java/com/dtstack/flink/sql/side/JoinInfo.java b/core/src/main/java/com/dtstack/flink/sql/side/JoinInfo.java
index 6fde02493..03dbde5a6 100644
--- a/core/src/main/java/com/dtstack/flink/sql/side/JoinInfo.java
+++ b/core/src/main/java/com/dtstack/flink/sql/side/JoinInfo.java
@@ -22,10 +22,9 @@
import org.apache.calcite.sql.JoinType;
import org.apache.calcite.sql.SqlNode;
-import com.google.common.base.Strings;
+import org.apache.flink.calcite.shaded.com.google.common.base.Strings;
import java.io.Serializable;
-import java.util.Map;
/**
* Join信息
@@ -41,8 +40,6 @@ public class JoinInfo implements Serializable {
//左表是否是维表
private boolean leftIsSideTable;
- //左表是 转换后的中间表
- private boolean leftIsMidTable;
//右表是否是维表
private boolean rightIsSideTable;
@@ -66,8 +63,6 @@ public class JoinInfo implements Serializable {
private SqlNode selectNode;
private JoinType joinType;
- // 左边是中间转换表,做表映射关系,给替换属性名称使用
- private Map leftTabMapping;
public String getSideTableName(){
if(leftIsSideTable){
@@ -92,22 +87,6 @@ public String getNewTableName(){
return leftStr + "_" + rightTableName;
}
- public boolean isLeftIsMidTable() {
- return leftIsMidTable;
- }
-
- public void setLeftIsMidTable(boolean leftIsMidTable) {
- this.leftIsMidTable = leftIsMidTable;
- }
-
- public Map getLeftTabMapping() {
- return leftTabMapping;
- }
-
- public void setLeftTabMapping(Map leftTabMapping) {
- this.leftTabMapping = leftTabMapping;
- }
-
public String getNewTableAlias(){
return leftTableAlias + "_" + rightTableAlias;
}
diff --git a/core/src/main/java/com/dtstack/flink/sql/side/JoinScope.java b/core/src/main/java/com/dtstack/flink/sql/side/JoinScope.java
index c7a73e0d7..ba07e714a 100644
--- a/core/src/main/java/com/dtstack/flink/sql/side/JoinScope.java
+++ b/core/src/main/java/com/dtstack/flink/sql/side/JoinScope.java
@@ -23,8 +23,8 @@
import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.api.java.typeutils.RowTypeInfo;
-import com.google.common.collect.Lists;
-import com.google.common.collect.Maps;
+import org.apache.flink.calcite.shaded.com.google.common.collect.Lists;
+import org.apache.flink.calcite.shaded.com.google.common.collect.Maps;
import java.util.List;
import java.util.Map;
diff --git a/core/src/main/java/com/dtstack/flink/sql/side/ParserJoinField.java b/core/src/main/java/com/dtstack/flink/sql/side/ParserJoinField.java
index 74d303c24..df242a390 100644
--- a/core/src/main/java/com/dtstack/flink/sql/side/ParserJoinField.java
+++ b/core/src/main/java/com/dtstack/flink/sql/side/ParserJoinField.java
@@ -27,7 +27,7 @@
import org.apache.calcite.sql.SqlSelect;
import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.api.java.typeutils.RowTypeInfo;
-import com.google.common.collect.Lists;
+import org.apache.flink.calcite.shaded.com.google.common.collect.Lists;
import java.util.Iterator;
import java.util.List;
@@ -41,12 +41,8 @@
public class ParserJoinField {
-
/**
- * build row by field
- * @param sqlNode select node
- * @param scope join left and right table all info
- * @param getAll true,get all fields from two tables; false, extract useful field from select node
+ * Need to parse the fields of information and where selectlist
* @return
*/
public static List getRowTypeInfo(SqlNode sqlNode, JoinScope scope, boolean getAll){
diff --git a/core/src/main/java/com/dtstack/flink/sql/side/SideInfo.java b/core/src/main/java/com/dtstack/flink/sql/side/SideInfo.java
index df41e1663..063bfd2fd 100644
--- a/core/src/main/java/com/dtstack/flink/sql/side/SideInfo.java
+++ b/core/src/main/java/com/dtstack/flink/sql/side/SideInfo.java
@@ -27,8 +27,8 @@
import org.apache.calcite.sql.SqlKind;
import org.apache.calcite.sql.SqlNode;
import org.apache.flink.api.java.typeutils.RowTypeInfo;
-import com.google.common.collect.Lists;
-import com.google.common.collect.Maps;
+import org.apache.flink.calcite.shaded.com.google.common.collect.Lists;
+import org.apache.flink.calcite.shaded.com.google.common.collect.Maps;
import java.io.Serializable;
import java.util.List;
@@ -109,8 +109,8 @@ public void parseSelectFields(JoinInfo joinInfo){
}
public void dealOneEqualCon(SqlNode sqlNode, String sideTableName){
- if(!SqlKind.COMPARISON.contains(sqlNode.getKind())){
- throw new RuntimeException("not compare operator.");
+ if(sqlNode.getKind() != SqlKind.EQUALS){
+ throw new RuntimeException("not equal operator.");
}
SqlIdentifier left = (SqlIdentifier)((SqlBasicCall)sqlNode).getOperands()[0];
diff --git a/core/src/main/java/com/dtstack/flink/sql/side/SideSQLParser.java b/core/src/main/java/com/dtstack/flink/sql/side/SideSQLParser.java
index c881d6344..6eb242255 100644
--- a/core/src/main/java/com/dtstack/flink/sql/side/SideSQLParser.java
+++ b/core/src/main/java/com/dtstack/flink/sql/side/SideSQLParser.java
@@ -20,8 +20,8 @@
package com.dtstack.flink.sql.side;
-import com.dtstack.flink.sql.config.CalciteConfig;
-import com.dtstack.flink.sql.util.ParseUtils;
+import com.dtstack.flink.sql.util.DtStringUtil;
+import org.apache.calcite.config.Lex;
import org.apache.calcite.sql.JoinType;
import org.apache.calcite.sql.SqlAsOperator;
import org.apache.calcite.sql.SqlBasicCall;
@@ -30,26 +30,14 @@
import org.apache.calcite.sql.SqlJoin;
import org.apache.calcite.sql.SqlKind;
import org.apache.calcite.sql.SqlNode;
-import org.apache.calcite.sql.SqlNodeList;
import org.apache.calcite.sql.SqlOperator;
-import org.apache.calcite.sql.SqlOrderBy;
import org.apache.calcite.sql.SqlSelect;
-import org.apache.calcite.sql.SqlWith;
-import org.apache.calcite.sql.SqlWithItem;
import org.apache.calcite.sql.parser.SqlParseException;
import org.apache.calcite.sql.parser.SqlParser;
import org.apache.calcite.sql.parser.SqlParserPos;
-import org.apache.commons.collections.CollectionUtils;
-import org.apache.flink.api.java.tuple.Tuple2;
-import com.google.common.base.Strings;
-import com.google.common.collect.Lists;
-import com.google.common.collect.Maps;
-import com.google.common.collect.Queues;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
+import org.apache.flink.calcite.shaded.com.google.common.base.Strings;
+import org.apache.flink.calcite.shaded.com.google.common.collect.Queues;
-import java.util.List;
-import java.util.Map;
import java.util.Queue;
import java.util.Set;
@@ -63,20 +51,16 @@
*/
public class SideSQLParser {
- private static final Logger LOG = LoggerFactory.getLogger(SideSQLParser.class);
-
- private final char SPLIT = '_';
-
- private String tempSQL = "SELECT * FROM TMP";
public Queue getExeQueue(String exeSql, Set sideTableSet) throws SqlParseException {
System.out.println("---exeSql---");
System.out.println(exeSql);
- LOG.info("---exeSql---");
- LOG.info(exeSql);
-
Queue queueInfo = Queues.newLinkedBlockingQueue();
- SqlParser sqlParser = SqlParser.create(exeSql, CalciteConfig.MYSQL_LEX_CONFIG);
+ SqlParser.Config config = SqlParser
+ .configBuilder()
+ .setLex(Lex.MYSQL)
+ .build();
+ SqlParser sqlParser = SqlParser.create(exeSql,config);
SqlNode sqlNode = sqlParser.parseStmt();
parseSql(sqlNode, sideTableSet, queueInfo);
queueInfo.offer(sqlNode);
@@ -86,17 +70,6 @@ public Queue getExeQueue(String exeSql, Set sideTableSet) throws
private Object parseSql(SqlNode sqlNode, Set sideTableSet, Queue queueInfo){
SqlKind sqlKind = sqlNode.getKind();
switch (sqlKind){
- case WITH: {
- SqlWith sqlWith = (SqlWith) sqlNode;
- SqlNodeList sqlNodeList = sqlWith.withList;
- for (SqlNode withAsTable : sqlNodeList) {
- SqlWithItem sqlWithItem = (SqlWithItem) withAsTable;
- parseSql(sqlWithItem.query, sideTableSet, queueInfo);
- queueInfo.add(sqlWithItem);
- }
- parseSql(sqlWith.body, sideTableSet, queueInfo);
- break;
- }
case INSERT:
SqlNode sqlSource = ((SqlInsert)sqlNode).getSource();
return parseSql(sqlSource, sideTableSet, queueInfo);
@@ -137,66 +110,23 @@ private Object parseSql(SqlNode sqlNode, Set sideTableSet, Queue
aliasInfo.setAlias(alias.toString());
return aliasInfo;
-
- case UNION:
- SqlNode unionLeft = ((SqlBasicCall)sqlNode).getOperands()[0];
- SqlNode unionRight = ((SqlBasicCall)sqlNode).getOperands()[1];
-
- parseSql(unionLeft, sideTableSet, queueInfo);
-
- parseSql(unionRight, sideTableSet, queueInfo);
-
- break;
-
- case ORDER_BY:
- SqlOrderBy sqlOrderBy = (SqlOrderBy) sqlNode;
- parseSql(sqlOrderBy.query, sideTableSet, queueInfo);
}
return "";
}
- private JoinInfo dealJoinNode(SqlJoin joinNode, Set sideTableSet, Queue queueInfo) {
+ private JoinInfo dealJoinNode(SqlJoin joinNode, Set sideTableSet, Queue queueInfo){
SqlNode leftNode = joinNode.getLeft();
SqlNode rightNode = joinNode.getRight();
JoinType joinType = joinNode.getJoinType();
String leftTbName = "";
String leftTbAlias = "";
- String rightTableName = "";
- String rightTableAlias = "";
- Map midTableMapping = null ;
- boolean leftIsMidTable = false;
- // 右节点已经被解析
- boolean rightIsParse = false;
- Tuple2 rightTableNameAndAlias = null;
-
if(leftNode.getKind() == IDENTIFIER){
leftTbName = leftNode.toString();
}else if(leftNode.getKind() == JOIN){
- JoinInfo leftNodeJoinInfo = (JoinInfo)parseSql(leftNode, sideTableSet, queueInfo);//解析多JOIN
-
- rightTableNameAndAlias = parseRightNode(rightNode, sideTableSet, queueInfo);
- rightIsParse = true;
- if (checkIsSideTable(rightTableNameAndAlias.f0, sideTableSet)) {
- // select * from xxx
- SqlNode sqlNode = buildSelectByLeftNode(leftNode);
- // ( select * from xxx) as xxx_0
- SqlBasicCall newAsNode = buildAsNodeByJoinInfo(leftNodeJoinInfo, sqlNode);
- leftNode = newAsNode;
- joinNode.setLeft(leftNode);
-
- leftIsMidTable = true;
- midTableMapping = saveTabMapping(leftNodeJoinInfo);
-
- AliasInfo aliasInfo = (AliasInfo) parseSql(newAsNode, sideTableSet, queueInfo);
- leftTbName = aliasInfo.getName();
- leftTbAlias = aliasInfo.getAlias();
- } else {
- leftTbName = leftNodeJoinInfo.getRightTableName();
- leftTbAlias = leftNodeJoinInfo.getRightTableAlias();
- }
-
+ Object leftNodeJoinInfo = parseSql(leftNode, sideTableSet, queueInfo);
+ System.out.println(leftNodeJoinInfo);
}else if(leftNode.getKind() == AS){
AliasInfo aliasInfo = (AliasInfo) parseSql(leftNode, sideTableSet, queueInfo);
leftTbName = aliasInfo.getName();
@@ -210,23 +140,22 @@ private JoinInfo dealJoinNode(SqlJoin joinNode, Set sideTableSet, Queue<
throw new RuntimeException("side-table must be at the right of join operator");
}
- if (!rightIsParse) {
- rightTableNameAndAlias = parseRightNode(rightNode, sideTableSet, queueInfo);
+ String rightTableName = "";
+ String rightTableAlias = "";
+
+ if(rightNode.getKind() == IDENTIFIER){
+ rightTableName = rightNode.toString();
+ }else{
+ AliasInfo aliasInfo = (AliasInfo)parseSql(rightNode, sideTableSet, queueInfo);
+ rightTableName = aliasInfo.getName();
+ rightTableAlias = aliasInfo.getAlias();
}
- rightTableName = rightTableNameAndAlias.f0;
- rightTableAlias = rightTableNameAndAlias.f1;
boolean rightIsSide = checkIsSideTable(rightTableName, sideTableSet);
if(joinType == JoinType.RIGHT){
throw new RuntimeException("side join not support join type of right[current support inner join and left join]");
}
- if (leftIsMidTable) {
- // 替换右边 on语句 中的字段别名
- SqlNode afterReplaceNameCondition = ParseUtils.replaceJoinConditionTabName(joinNode.getCondition(), midTableMapping);
- joinNode.setOperand(5, afterReplaceNameCondition);
- }
-
JoinInfo tableInfo = new JoinInfo();
tableInfo.setLeftTableName(leftTbName);
tableInfo.setRightTableName(rightTableName);
@@ -247,105 +176,11 @@ private JoinInfo dealJoinNode(SqlJoin joinNode, Set sideTableSet, Queue<
tableInfo.setJoinType(joinType);
tableInfo.setCondition(joinNode.getCondition());
- tableInfo.setLeftIsMidTable(leftIsMidTable);
- tableInfo.setLeftTabMapping(midTableMapping);
-
return tableInfo;
}
- private Tuple2 parseRightNode(SqlNode sqlNode, Set sideTableSet, Queue queueInfo) {
- Tuple2 tabName = new Tuple2<>("", "");
- if(sqlNode.getKind() == IDENTIFIER){
- tabName.f0 = sqlNode.toString();
- }else{
- AliasInfo aliasInfo = (AliasInfo)parseSql(sqlNode, sideTableSet, queueInfo);
- tabName.f0 = aliasInfo.getName();
- tabName.f1 = aliasInfo.getAlias();
- }
- return tabName;
- }
-
- private Map saveTabMapping(JoinInfo leftNodeJoinInfo) {
- Map midTableMapping = Maps.newHashMap();;
-
- String midTab = buidTableName(leftNodeJoinInfo.getLeftTableAlias(), SPLIT, leftNodeJoinInfo.getRightTableAlias());
- String finalMidTab = midTab + "_0";
-
- if(leftNodeJoinInfo.isLeftIsMidTable()) {
- midTableMapping.putAll(leftNodeJoinInfo.getLeftTabMapping());
- }
- fillLeftAllTable(leftNodeJoinInfo, midTableMapping, finalMidTab);
- return midTableMapping;
- }
-
- private void fillLeftAllTable(JoinInfo leftNodeJoinInfo, Map midTableMapping, String finalMidTab) {
- List tablesName = Lists.newArrayList();
- ParseUtils.parseLeftNodeTableName(leftNodeJoinInfo.getLeftNode(), tablesName);
-
- tablesName.forEach(tab ->{
- midTableMapping.put(tab, finalMidTab);
- });
- midTableMapping.put(leftNodeJoinInfo.getRightTableAlias(), finalMidTab);
- }
-
-
- private SqlNode buildSelectByLeftNode(SqlNode leftNode) {
- SqlParser sqlParser = SqlParser.create(tempSQL, CalciteConfig.MYSQL_LEX_CONFIG);
- SqlNode sqlNode = null;
- try {
- sqlNode = sqlParser.parseStmt();
- }catch (Exception e) {
- LOG.error("tmp sql parse error..", e);
- }
-
- ((SqlSelect) sqlNode).setFrom(leftNode);
- return sqlNode;
- }
-
private void dealSelectResultWithJoinInfo(JoinInfo joinInfo, SqlSelect sqlNode, Queue queueInfo){
- // 中间虚拟表进行表名称替换
- if (joinInfo.isLeftIsMidTable()){
- SqlNode whereNode = sqlNode.getWhere();
- SqlNodeList sqlGroup = sqlNode.getGroup();
- SqlNodeList sqlSelectList = sqlNode.getSelectList();
- List newSelectNodeList = Lists.newArrayList();
-
- for( int i=0; i sideTableList){
if(sideTableList.contains(tableName)){
return true;
}
+
return false;
}
}
diff --git a/core/src/main/java/com/dtstack/flink/sql/side/SideSqlExec.java b/core/src/main/java/com/dtstack/flink/sql/side/SideSqlExec.java
index bfccd02f7..ac727ec05 100644
--- a/core/src/main/java/com/dtstack/flink/sql/side/SideSqlExec.java
+++ b/core/src/main/java/com/dtstack/flink/sql/side/SideSqlExec.java
@@ -17,7 +17,6 @@
*/
-
package com.dtstack.flink.sql.side;
import com.dtstack.flink.sql.enums.ECacheType;
@@ -38,9 +37,7 @@
import org.apache.calcite.sql.SqlNode;
import org.apache.calcite.sql.SqlNodeList;
import org.apache.calcite.sql.SqlOperator;
-import org.apache.calcite.sql.SqlOrderBy;
import org.apache.calcite.sql.SqlSelect;
-import org.apache.calcite.sql.SqlWithItem;
import org.apache.calcite.sql.fun.SqlCase;
import org.apache.calcite.sql.parser.SqlParseException;
import org.apache.calcite.sql.parser.SqlParserPos;
@@ -48,9 +45,9 @@
import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.api.java.typeutils.RowTypeInfo;
-import com.google.common.collect.HashBasedTable;
-import com.google.common.collect.Lists;
-import com.google.common.collect.Maps;
+import org.apache.flink.calcite.shaded.com.google.common.collect.HashBasedTable;
+import org.apache.flink.calcite.shaded.com.google.common.collect.Lists;
+import org.apache.flink.calcite.shaded.com.google.common.collect.Maps;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.java.StreamTableEnvironment;
@@ -113,22 +110,15 @@ public void exec(String sql, Map sideTableMap, StreamTabl
}
if(pollSqlNode.getKind() == INSERT){
- System.out.println("----------real exec sql-----------" );
- System.out.println(pollSqlNode.toString());
FlinkSQLExec.sqlUpdate(tableEnv, pollSqlNode.toString());
if(LOG.isInfoEnabled()){
LOG.info("exec sql: " + pollSqlNode.toString());
}
}else if(pollSqlNode.getKind() == AS){
AliasInfo aliasInfo = parseASNode(pollSqlNode);
- Table table = tableEnv.sqlQuery(aliasInfo.getName());
+ Table table = tableEnv.sql(aliasInfo.getName());
tableEnv.registerTable(aliasInfo.getAlias(), table);
localTableCache.put(aliasInfo.getAlias(), table);
- } else if (pollSqlNode.getKind() == WITH_ITEM) {
- SqlWithItem sqlWithItem = (SqlWithItem) pollSqlNode;
- String TableAlias = sqlWithItem.name.toString();
- Table table = tableEnv.sqlQuery(sqlWithItem.query.toString());
- tableEnv.registerTable(TableAlias, table);
}
}else if (pollObj instanceof JoinInfo){
@@ -317,60 +307,15 @@ private void replaceFieldName(SqlNode sqlNode, HashBasedTable mappingTable, String tableAlias){
if(groupNode.getKind() == IDENTIFIER){
SqlIdentifier sqlIdentifier = (SqlIdentifier) groupNode;
- if(sqlIdentifier.names.size() == 1){
- return sqlIdentifier;
- }
String mappingFieldName = mappingTable.get(sqlIdentifier.getComponent(0).getSimple(), sqlIdentifier.getComponent(1).getSimple());
-
- if(mappingFieldName == null){
- throw new RuntimeException("can't find mapping fieldName:" + sqlIdentifier.toString() );
- }
sqlIdentifier = sqlIdentifier.setName(0, tableAlias);
return sqlIdentifier.setName(1, mappingFieldName);
}else if(groupNode instanceof SqlBasicCall){
@@ -481,7 +426,7 @@ private SqlNode replaceSelectFieldName(SqlNode selectNode, HashBasedTable getConditionFields(SqlNode conditionNode, String specifyTabl
ParseUtils.parseAnd(conditionNode, sqlNodeList);
List conditionFields = Lists.newArrayList();
for(SqlNode sqlNode : sqlNodeList){
- if (!SqlKind.COMPARISON.contains(sqlNode.getKind())) {
- throw new RuntimeException("not compare operator.");
+ if(sqlNode.getKind() != SqlKind.EQUALS){
+ throw new RuntimeException("not equal operator.");
}
SqlIdentifier left = (SqlIdentifier)((SqlBasicCall)sqlNode).getOperands()[0];
@@ -664,7 +613,7 @@ public void registerTmpTable(CreateTmpTableParser.SqlParserResult result,
tableEnv.sqlUpdate(pollSqlNode.toString());
}else if(pollSqlNode.getKind() == AS){
AliasInfo aliasInfo = parseASNode(pollSqlNode);
- Table table = tableEnv.sqlQuery(aliasInfo.getName());
+ Table table = tableEnv.sql(aliasInfo.getName());
tableEnv.registerTable(aliasInfo.getAlias(), table);
if(LOG.isInfoEnabled()){
LOG.info("Register Table {} by {}", aliasInfo.getAlias(), aliasInfo.getName());
@@ -791,7 +740,7 @@ private boolean checkFieldsInfo(CreateTmpTableParser.SqlParserResult result, Tab
fieldNames.add(fieldName);
String fieldType = filed[filed.length - 1 ].trim();
Class fieldClass = ClassUtil.stringConvertClass(fieldType);
- Class tableField = table.getSchema().getFieldType(i).get().getTypeClass();
+ Class tableField = table.getSchema().getType(i).get().getTypeClass();
if (fieldClass == tableField){
continue;
} else {
diff --git a/core/src/main/java/com/dtstack/flink/sql/side/SideTableInfo.java b/core/src/main/java/com/dtstack/flink/sql/side/SideTableInfo.java
index e21389ea7..0abd55a92 100644
--- a/core/src/main/java/com/dtstack/flink/sql/side/SideTableInfo.java
+++ b/core/src/main/java/com/dtstack/flink/sql/side/SideTableInfo.java
@@ -45,26 +45,14 @@ public abstract class SideTableInfo extends TableInfo implements Serializable {
public static final String PARTITIONED_JOIN_KEY = "partitionedJoin";
- public static final String CACHE_MODE_KEY = "cacheMode";
-
- public static final String ASYNC_CAP_KEY = "asyncCapacity";
-
- public static final String ASYNC_TIMEOUT_KEY = "asyncTimeout";
-
private String cacheType = "none";//None or LRU or ALL
private int cacheSize = 10000;
private long cacheTimeout = 60 * 1000;//
- private int asyncCapacity=100;
-
- private int asyncTimeout=10000;
-
private boolean partitionedJoin = false;
- private String cacheMode="ordered";
-
public RowTypeInfo getRowTypeInfo(){
Class[] fieldClass = getFieldClasses();
TypeInformation>[] types = new TypeInformation[fieldClass.length];
@@ -107,28 +95,4 @@ public boolean isPartitionedJoin() {
public void setPartitionedJoin(boolean partitionedJoin) {
this.partitionedJoin = partitionedJoin;
}
-
- public String getCacheMode() {
- return cacheMode;
- }
-
- public void setCacheMode(String cacheMode) {
- this.cacheMode = cacheMode;
- }
-
- public int getAsyncCapacity() {
- return asyncCapacity;
- }
-
- public void setAsyncCapacity(int asyncCapacity) {
- this.asyncCapacity = asyncCapacity;
- }
-
- public int getAsyncTimeout() {
- return asyncTimeout;
- }
-
- public void setAsyncTimeout(int asyncTimeout) {
- this.asyncTimeout = asyncTimeout;
- }
}
diff --git a/core/src/main/java/com/dtstack/flink/sql/side/StreamSideFactory.java b/core/src/main/java/com/dtstack/flink/sql/side/StreamSideFactory.java
index 185911c7e..4eb78b4c4 100644
--- a/core/src/main/java/com/dtstack/flink/sql/side/StreamSideFactory.java
+++ b/core/src/main/java/com/dtstack/flink/sql/side/StreamSideFactory.java
@@ -16,10 +16,11 @@
* limitations under the License.
*/
+
package com.dtstack.flink.sql.side;
-import com.dtstack.flink.sql.classloader.ClassLoaderManager;
+import com.dtstack.flink.sql.classloader.DtClassLoader;
import com.dtstack.flink.sql.enums.ECacheType;
import com.dtstack.flink.sql.table.AbsSideTableParser;
import com.dtstack.flink.sql.table.AbsTableParser;
@@ -29,7 +30,6 @@
* get specify side parser
* Date: 2018/7/25
* Company: www.dtstack.com
- *
* @author xuchao
*/
@@ -40,15 +40,18 @@ public class StreamSideFactory {
public static AbsTableParser getSqlParser(String pluginType, String sqlRootDir, String cacheType) throws Exception {
String sideOperator = ECacheType.ALL.name().equals(cacheType) ? "all" : "async";
+ ClassLoader classLoader = Thread.currentThread().getContextClassLoader();
String pluginJarPath = PluginUtil.getSideJarFileDirPath(pluginType, sideOperator, "side", sqlRootDir);
+
+ DtClassLoader dtClassLoader = (DtClassLoader) classLoader;
+ PluginUtil.addPluginJar(pluginJarPath, dtClassLoader);
String className = PluginUtil.getSqlParserClassName(pluginType, CURR_TYPE);
- return ClassLoaderManager.newInstance(pluginJarPath, (cl) -> {
- Class> sideParser = cl.loadClass(className);
- if (!AbsSideTableParser.class.isAssignableFrom(sideParser)) {
- throw new RuntimeException("class " + sideParser.getName() + " not subClass of AbsSideTableParser");
- }
- return sideParser.asSubclass(AbsTableParser.class).newInstance();
- });
+ Class> sideParser = dtClassLoader.loadClass(className);
+ if(!AbsSideTableParser.class.isAssignableFrom(sideParser)){
+ throw new RuntimeException("class " + sideParser.getName() + " not subClass of AbsSideTableParser");
+ }
+
+ return sideParser.asSubclass(AbsTableParser.class).newInstance();
}
}
diff --git a/core/src/main/java/com/dtstack/flink/sql/side/cache/LRUSideCache.java b/core/src/main/java/com/dtstack/flink/sql/side/cache/LRUSideCache.java
index 700e13bb2..475536a4c 100644
--- a/core/src/main/java/com/dtstack/flink/sql/side/cache/LRUSideCache.java
+++ b/core/src/main/java/com/dtstack/flink/sql/side/cache/LRUSideCache.java
@@ -21,8 +21,8 @@
package com.dtstack.flink.sql.side.cache;
import com.dtstack.flink.sql.side.SideTableInfo;
-import com.google.common.cache.Cache;
-import com.google.common.cache.CacheBuilder;
+import org.apache.flink.calcite.shaded.com.google.common.cache.Cache;
+import org.apache.flink.calcite.shaded.com.google.common.cache.CacheBuilder;
import java.util.concurrent.TimeUnit;
diff --git a/core/src/main/java/com/dtstack/flink/sql/side/operator/SideAsyncOperator.java b/core/src/main/java/com/dtstack/flink/sql/side/operator/SideAsyncOperator.java
index 290804200..df9a1c175 100644
--- a/core/src/main/java/com/dtstack/flink/sql/side/operator/SideAsyncOperator.java
+++ b/core/src/main/java/com/dtstack/flink/sql/side/operator/SideAsyncOperator.java
@@ -19,7 +19,7 @@
package com.dtstack.flink.sql.side.operator;
-import com.dtstack.flink.sql.classloader.ClassLoaderManager;
+import com.dtstack.flink.sql.classloader.DtClassLoader;
import com.dtstack.flink.sql.side.AsyncReqRow;
import com.dtstack.flink.sql.side.FieldInfo;
import com.dtstack.flink.sql.side.JoinInfo;
@@ -44,34 +44,26 @@ public class SideAsyncOperator {
private static final String PATH_FORMAT = "%sasyncside";
- private static final String OPERATOR_TYPE = "Async";
-
- private static final String ORDERED = "ordered";
-
+ //TODO need to set by create table task
+ private static int asyncCapacity = 100;
private static AsyncReqRow loadAsyncReq(String sideType, String sqlRootDir, RowTypeInfo rowTypeInfo,
JoinInfo joinInfo, List outFieldInfoList, SideTableInfo sideTableInfo) throws Exception {
+ ClassLoader classLoader = Thread.currentThread().getContextClassLoader();
String pathOfType = String.format(PATH_FORMAT, sideType);
String pluginJarPath = PluginUtil.getJarFileDirPath(pathOfType, sqlRootDir);
- String className = PluginUtil.getSqlSideClassName(sideType, "side", OPERATOR_TYPE);
- return ClassLoaderManager.newInstance(pluginJarPath, (cl) ->
- cl.loadClass(className).asSubclass(AsyncReqRow.class)
- .getConstructor(RowTypeInfo.class, JoinInfo.class, List.class, SideTableInfo.class)
- .newInstance(rowTypeInfo, joinInfo, outFieldInfoList, sideTableInfo));
+ DtClassLoader dtClassLoader = (DtClassLoader) classLoader;
+ PluginUtil.addPluginJar(pluginJarPath, dtClassLoader);
+ String className = PluginUtil.getSqlSideClassName(sideType, "side", "Async");
+ return dtClassLoader.loadClass(className).asSubclass(AsyncReqRow.class)
+ .getConstructor(RowTypeInfo.class, JoinInfo.class, List.class, SideTableInfo.class).newInstance(rowTypeInfo, joinInfo, outFieldInfoList, sideTableInfo);
}
public static DataStream getSideJoinDataStream(DataStream inputStream, String sideType, String sqlRootDir, RowTypeInfo rowTypeInfo, JoinInfo joinInfo,
List outFieldInfoList, SideTableInfo sideTableInfo) throws Exception {
AsyncReqRow asyncDbReq = loadAsyncReq(sideType, sqlRootDir, rowTypeInfo, joinInfo, outFieldInfoList, sideTableInfo);
-
//TODO How much should be set for the degree of parallelism? Timeout? capacity settings?
- if (ORDERED.equals(sideTableInfo.getCacheMode())){
- return AsyncDataStream.orderedWait(inputStream, asyncDbReq, sideTableInfo.getAsyncTimeout(), TimeUnit.MILLISECONDS, sideTableInfo.getAsyncCapacity())
- .setParallelism(sideTableInfo.getParallelism());
- }else {
- return AsyncDataStream.unorderedWait(inputStream, asyncDbReq, sideTableInfo.getAsyncTimeout(), TimeUnit.MILLISECONDS, sideTableInfo.getAsyncCapacity())
- .setParallelism(sideTableInfo.getParallelism());
- }
-
+ return AsyncDataStream.orderedWait(inputStream, asyncDbReq, 10000, TimeUnit.MILLISECONDS, asyncCapacity)
+ .setParallelism(sideTableInfo.getParallelism());
}
}
diff --git a/core/src/main/java/com/dtstack/flink/sql/side/operator/SideWithAllCacheOperator.java b/core/src/main/java/com/dtstack/flink/sql/side/operator/SideWithAllCacheOperator.java
index 5aa810b0f..72a67d00b 100644
--- a/core/src/main/java/com/dtstack/flink/sql/side/operator/SideWithAllCacheOperator.java
+++ b/core/src/main/java/com/dtstack/flink/sql/side/operator/SideWithAllCacheOperator.java
@@ -19,7 +19,7 @@
package com.dtstack.flink.sql.side.operator;
-import com.dtstack.flink.sql.classloader.ClassLoaderManager;
+import com.dtstack.flink.sql.classloader.DtClassLoader;
import com.dtstack.flink.sql.side.AllReqRow;
import com.dtstack.flink.sql.side.FieldInfo;
import com.dtstack.flink.sql.side.JoinInfo;
@@ -28,6 +28,7 @@
import org.apache.flink.api.java.typeutils.RowTypeInfo;
import org.apache.flink.streaming.api.datastream.DataStream;
+import java.net.MalformedURLException;
import java.util.List;
/**
@@ -42,19 +43,22 @@ public class SideWithAllCacheOperator {
private static final String PATH_FORMAT = "%sallside";
- private static final String OPERATOR_TYPE = "All";
-
private static AllReqRow loadFlatMap(String sideType, String sqlRootDir, RowTypeInfo rowTypeInfo,
JoinInfo joinInfo, List outFieldInfoList,
SideTableInfo sideTableInfo) throws Exception {
+ ClassLoader classLoader = Thread.currentThread().getContextClassLoader();
String pathOfType = String.format(PATH_FORMAT, sideType);
String pluginJarPath = PluginUtil.getJarFileDirPath(pathOfType, sqlRootDir);
- String className = PluginUtil.getSqlSideClassName(sideType, "side", OPERATOR_TYPE);
- return ClassLoaderManager.newInstance(pluginJarPath, (cl) -> cl.loadClass(className).asSubclass(AllReqRow.class)
- .getConstructor(RowTypeInfo.class, JoinInfo.class, List.class, SideTableInfo.class)
- .newInstance(rowTypeInfo, joinInfo, outFieldInfoList, sideTableInfo));
+ DtClassLoader dtClassLoader = (DtClassLoader) classLoader;
+ PluginUtil.addPluginJar(pluginJarPath, dtClassLoader);
+ String className = PluginUtil.getSqlSideClassName(sideType, "side", "All");
+
+ return dtClassLoader.loadClass(className).asSubclass(AllReqRow.class).getConstructor(RowTypeInfo.class, JoinInfo.class, List.class, SideTableInfo.class)
+ .newInstance(rowTypeInfo, joinInfo, outFieldInfoList, sideTableInfo);
+
+
}
public static DataStream getSideJoinDataStream(DataStream inputStream, String sideType, String sqlRootDir, RowTypeInfo rowTypeInfo, JoinInfo joinInfo,
diff --git a/core/src/main/java/com/dtstack/flink/sql/sink/MetricOutputFormat.java b/core/src/main/java/com/dtstack/flink/sql/sink/MetricOutputFormat.java
index f56f531e0..4e11280e7 100644
--- a/core/src/main/java/com/dtstack/flink/sql/sink/MetricOutputFormat.java
+++ b/core/src/main/java/com/dtstack/flink/sql/sink/MetricOutputFormat.java
@@ -32,13 +32,10 @@ public abstract class MetricOutputFormat extends RichOutputFormat{
protected transient Counter outRecords;
- protected transient Counter outDirtyRecords;
-
protected transient Meter outRecordsRate;
public void initMetric() {
outRecords = getRuntimeContext().getMetricGroup().counter(MetricConstant.DT_NUM_RECORDS_OUT);
- outDirtyRecords = getRuntimeContext().getMetricGroup().counter(MetricConstant.DT_NUM_DIRTY_RECORDS_OUT);
outRecordsRate = getRuntimeContext().getMetricGroup().meter(MetricConstant.DT_NUM_RECORDS_OUT_RATE, new MeterView(outRecords, 20));
}
diff --git a/core/src/main/java/com/dtstack/flink/sql/sink/StreamSinkFactory.java b/core/src/main/java/com/dtstack/flink/sql/sink/StreamSinkFactory.java
index 53460081d..ebda80c8a 100644
--- a/core/src/main/java/com/dtstack/flink/sql/sink/StreamSinkFactory.java
+++ b/core/src/main/java/com/dtstack/flink/sql/sink/StreamSinkFactory.java
@@ -20,11 +20,12 @@
package com.dtstack.flink.sql.sink;
-import com.dtstack.flink.sql.classloader.ClassLoaderManager;
+import com.dtstack.flink.sql.classloader.DtClassLoader;
import com.dtstack.flink.sql.table.AbsTableParser;
import com.dtstack.flink.sql.table.TargetTableInfo;
import com.dtstack.flink.sql.util.DtStringUtil;
import com.dtstack.flink.sql.util.PluginUtil;
+import org.apache.flink.calcite.shaded.com.google.common.collect.Lists;
import org.apache.flink.table.sinks.TableSink;
/**
@@ -41,33 +42,51 @@ public class StreamSinkFactory {
private static final String DIR_NAME_FORMAT = "%ssink";
public static AbsTableParser getSqlParser(String pluginType, String sqlRootDir) throws Exception {
+ ClassLoader classLoader = Thread.currentThread().getContextClassLoader();
+
+ if(!(classLoader instanceof DtClassLoader)){
+ throw new RuntimeException("it's not a correct classLoader instance, it's type must be DtClassLoader!");
+ }
+
+ DtClassLoader dtClassLoader = (DtClassLoader) classLoader;
+
String pluginJarPath = PluginUtil.getJarFileDirPath(String.format(DIR_NAME_FORMAT, pluginType), sqlRootDir);
+ PluginUtil.addPluginJar(pluginJarPath, dtClassLoader);
String typeNoVersion = DtStringUtil.getPluginTypeWithoutVersion(pluginType);
String className = PluginUtil.getSqlParserClassName(typeNoVersion, CURR_TYPE);
+ Class> targetParser = dtClassLoader.loadClass(className);
- return ClassLoaderManager.newInstance(pluginJarPath, (cl) -> {
- Class> targetParser = cl.loadClass(className);
- if(!AbsTableParser.class.isAssignableFrom(targetParser)){
- throw new RuntimeException("class " + targetParser.getName() + " not subClass of AbsTableParser");
- }
- return targetParser.asSubclass(AbsTableParser.class).newInstance();
- });
+ if(!AbsTableParser.class.isAssignableFrom(targetParser)){
+ throw new RuntimeException("class " + targetParser.getName() + " not subClass of AbsTableParser");
+ }
+
+ return targetParser.asSubclass(AbsTableParser.class).newInstance();
}
public static TableSink getTableSink(TargetTableInfo targetTableInfo, String localSqlRootDir) throws Exception {
+
+ ClassLoader classLoader = Thread.currentThread().getContextClassLoader();
+ if(!(classLoader instanceof DtClassLoader)){
+ throw new RuntimeException("it's not a correct classLoader instance, it's type must be DtClassLoader!");
+ }
+
+ DtClassLoader dtClassLoader = (DtClassLoader) classLoader;
+
String pluginType = targetTableInfo.getType();
String pluginJarDirPath = PluginUtil.getJarFileDirPath(String.format(DIR_NAME_FORMAT, pluginType), localSqlRootDir);
+
+ PluginUtil.addPluginJar(pluginJarDirPath, dtClassLoader);
+
String typeNoVersion = DtStringUtil.getPluginTypeWithoutVersion(pluginType);
String className = PluginUtil.getGenerClassName(typeNoVersion, CURR_TYPE);
+ Class> sinkClass = dtClassLoader.loadClass(className);
+
+ if(!IStreamSinkGener.class.isAssignableFrom(sinkClass)){
+ throw new RuntimeException("class " + sinkClass + " not subClass of IStreamSinkGener");
+ }
- return ClassLoaderManager.newInstance(pluginJarDirPath, (cl) -> {
- Class> sinkClass = cl.loadClass(className);
- if(!IStreamSinkGener.class.isAssignableFrom(sinkClass)){
- throw new RuntimeException("class " + sinkClass + " not subClass of IStreamSinkGener");
- }
- IStreamSinkGener streamSinkGener = sinkClass.asSubclass(IStreamSinkGener.class).newInstance();
- Object result = streamSinkGener.genStreamSink(targetTableInfo);
- return (TableSink) result;
- });
+ IStreamSinkGener streamSinkGener = sinkClass.asSubclass(IStreamSinkGener.class).newInstance();
+ Object result = streamSinkGener.genStreamSink(targetTableInfo);
+ return (TableSink) result;
}
}
diff --git a/core/src/main/java/com/dtstack/flink/sql/source/StreamSourceFactory.java b/core/src/main/java/com/dtstack/flink/sql/source/StreamSourceFactory.java
index 1057fb0ed..b8dfe66e2 100644
--- a/core/src/main/java/com/dtstack/flink/sql/source/StreamSourceFactory.java
+++ b/core/src/main/java/com/dtstack/flink/sql/source/StreamSourceFactory.java
@@ -21,11 +21,12 @@
package com.dtstack.flink.sql.source;
-import com.dtstack.flink.sql.classloader.ClassLoaderManager;
+import com.dtstack.flink.sql.classloader.DtClassLoader;
import com.dtstack.flink.sql.table.AbsSourceParser;
import com.dtstack.flink.sql.table.SourceTableInfo;
import com.dtstack.flink.sql.util.DtStringUtil;
import com.dtstack.flink.sql.util.PluginUtil;
+import org.apache.flink.calcite.shaded.com.google.common.collect.Lists;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.java.StreamTableEnvironment;
@@ -45,16 +46,21 @@ public class StreamSourceFactory {
public static AbsSourceParser getSqlParser(String pluginType, String sqlRootDir) throws Exception {
+ ClassLoader classLoader = Thread.currentThread().getContextClassLoader();
+
String pluginJarPath = PluginUtil.getJarFileDirPath(String.format(DIR_NAME_FORMAT, pluginType), sqlRootDir);
+
+ DtClassLoader dtClassLoader = (DtClassLoader) classLoader;
+ PluginUtil.addPluginJar(pluginJarPath, dtClassLoader);
+
String typeNoVersion = DtStringUtil.getPluginTypeWithoutVersion(pluginType);
String className = PluginUtil.getSqlParserClassName(typeNoVersion, CURR_TYPE);
- return ClassLoaderManager.newInstance(pluginJarPath, (cl) -> {
- Class> sourceParser = cl.loadClass(className);
- if(!AbsSourceParser.class.isAssignableFrom(sourceParser)){
- throw new RuntimeException("class " + sourceParser.getName() + " not subClass of AbsSourceParser");
- }
- return sourceParser.asSubclass(AbsSourceParser.class).newInstance();
- });
+ Class> sourceParser = dtClassLoader.loadClass(className);
+ if(!AbsSourceParser.class.isAssignableFrom(sourceParser)){
+ throw new RuntimeException("class " + sourceParser.getName() + " not subClass of AbsSourceParser");
+ }
+
+ return sourceParser.asSubclass(AbsSourceParser.class).newInstance();
}
/**
@@ -67,17 +73,21 @@ public static Table getStreamSource(SourceTableInfo sourceTableInfo, StreamExecu
String sourceTypeStr = sourceTableInfo.getType();
String typeNoVersion = DtStringUtil.getPluginTypeWithoutVersion(sourceTypeStr);
+ ClassLoader classLoader = Thread.currentThread().getContextClassLoader();
+
String pluginJarPath = PluginUtil.getJarFileDirPath(String.format(DIR_NAME_FORMAT, sourceTypeStr), sqlRootDir);
String className = PluginUtil.getGenerClassName(typeNoVersion, CURR_TYPE);
- return ClassLoaderManager.newInstance(pluginJarPath, (cl) -> {
- Class> sourceClass = cl.loadClass(className);
- if(!IStreamSourceGener.class.isAssignableFrom(sourceClass)){
- throw new RuntimeException("class " + sourceClass.getName() + " not subClass of IStreamSourceGener");
- }
+ DtClassLoader dtClassLoader = (DtClassLoader) classLoader;
+ PluginUtil.addPluginJar(pluginJarPath, dtClassLoader);
+ Class> sourceClass = dtClassLoader.loadClass(className);
+
+ if(!IStreamSourceGener.class.isAssignableFrom(sourceClass)){
+ throw new RuntimeException("class " + sourceClass.getName() + " not subClass of IStreamSourceGener");
+ }
- IStreamSourceGener sourceGener = sourceClass.asSubclass(IStreamSourceGener.class).newInstance();
- return (Table) sourceGener.genStreamSource(sourceTableInfo, env, tableEnv);
- });
+ IStreamSourceGener sourceGener = sourceClass.asSubclass(IStreamSourceGener.class).newInstance();
+ Object object = sourceGener.genStreamSource(sourceTableInfo, env, tableEnv);
+ return (Table) object;
}
}
diff --git a/core/src/main/java/com/dtstack/flink/sql/table/AbsSideTableParser.java b/core/src/main/java/com/dtstack/flink/sql/table/AbsSideTableParser.java
index 3c4199c8c..f8ede801b 100644
--- a/core/src/main/java/com/dtstack/flink/sql/table/AbsSideTableParser.java
+++ b/core/src/main/java/com/dtstack/flink/sql/table/AbsSideTableParser.java
@@ -16,13 +16,14 @@
* limitations under the License.
*/
-
+
package com.dtstack.flink.sql.table;
import com.dtstack.flink.sql.enums.ECacheType;
import com.dtstack.flink.sql.side.SideTableInfo;
import com.dtstack.flink.sql.util.MathUtil;
+
import java.util.Map;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
@@ -84,31 +85,6 @@ protected void parseCacheProp(SideTableInfo sideTableInfo, Map p
sideTableInfo.setPartitionedJoin(true);
}
}
-
- if(props.containsKey(SideTableInfo.CACHE_MODE_KEY.toLowerCase())){
- String cachemode = MathUtil.getString(props.get(SideTableInfo.CACHE_MODE_KEY.toLowerCase()));
-
- if(!cachemode.equalsIgnoreCase("ordered") && !cachemode.equalsIgnoreCase("unordered")){
- throw new RuntimeException("cachemode must ordered or unordered!");
- }
- sideTableInfo.setCacheMode(cachemode.toLowerCase());
- }
-
- if(props.containsKey(SideTableInfo.ASYNC_CAP_KEY.toLowerCase())){
- Integer asyncCap = MathUtil.getIntegerVal(props.get(SideTableInfo.ASYNC_CAP_KEY.toLowerCase()));
- if(asyncCap < 0){
- throw new RuntimeException("asyncCapacity size need > 0.");
- }
- sideTableInfo.setAsyncCapacity(asyncCap);
- }
-
- if(props.containsKey(SideTableInfo.ASYNC_TIMEOUT_KEY.toLowerCase())){
- Integer asyncTimeout = MathUtil.getIntegerVal(props.get(SideTableInfo.ASYNC_TIMEOUT_KEY.toLowerCase()));
- if (asyncTimeout<0){
- throw new RuntimeException("asyncTimeout size need > 0.");
- }
- sideTableInfo.setAsyncTimeout(asyncTimeout);
- }
}
}
}
diff --git a/core/src/main/java/com/dtstack/flink/sql/table/AbsSourceParser.java b/core/src/main/java/com/dtstack/flink/sql/table/AbsSourceParser.java
index 82000b386..1b676ff8d 100644
--- a/core/src/main/java/com/dtstack/flink/sql/table/AbsSourceParser.java
+++ b/core/src/main/java/com/dtstack/flink/sql/table/AbsSourceParser.java
@@ -20,7 +20,6 @@
package com.dtstack.flink.sql.table;
-import com.dtstack.flink.sql.util.ClassUtil;
import com.dtstack.flink.sql.util.MathUtil;
import java.util.regex.Matcher;
@@ -37,21 +36,19 @@
public abstract class AbsSourceParser extends AbsTableParser {
private static final String VIRTUAL_KEY = "virtualFieldKey";
+
private static final String WATERMARK_KEY = "waterMarkKey";
- private static final String NOTNULL_KEY = "notNullKey";
private static Pattern virtualFieldKeyPattern = Pattern.compile("(?i)^(\\S+\\([^\\)]+\\))\\s+AS\\s+(\\w+)$");
+
private static Pattern waterMarkKeyPattern = Pattern.compile("(?i)^\\s*WATERMARK\\s+FOR\\s+(\\S+)\\s+AS\\s+withOffset\\(\\s*(\\S+)\\s*,\\s*(\\d+)\\s*\\)$");
- private static Pattern notNullKeyPattern = Pattern.compile("(?i)^(\\w+)\\s+(\\w+)\\s+NOT\\s+NULL?$");
static {
keyPatternMap.put(VIRTUAL_KEY, virtualFieldKeyPattern);
keyPatternMap.put(WATERMARK_KEY, waterMarkKeyPattern);
- keyPatternMap.put(NOTNULL_KEY, notNullKeyPattern);
keyHandlerMap.put(VIRTUAL_KEY, AbsSourceParser::dealVirtualField);
keyHandlerMap.put(WATERMARK_KEY, AbsSourceParser::dealWaterMark);
- keyHandlerMap.put(NOTNULL_KEY, AbsSourceParser::dealNotNull);
}
static void dealVirtualField(Matcher matcher, TableInfo tableInfo){
@@ -69,18 +66,4 @@ static void dealWaterMark(Matcher matcher, TableInfo tableInfo){
sourceTableInfo.setEventTimeField(eventTimeField);
sourceTableInfo.setMaxOutOrderness(offset);
}
-
- static void dealNotNull(Matcher matcher, TableInfo tableInfo) {
- String fieldName = matcher.group(1);
- String fieldType = matcher.group(2);
- Class fieldClass= ClassUtil.stringConvertClass(fieldType);
- TableInfo.FieldExtraInfo fieldExtraInfo = new TableInfo.FieldExtraInfo();
- fieldExtraInfo.setNotNull(true);
-
- tableInfo.addPhysicalMappings(fieldName, fieldName);
- tableInfo.addField(fieldName);
- tableInfo.addFieldClass(fieldClass);
- tableInfo.addFieldType(fieldType);
- tableInfo.addFieldExtraInfo(fieldExtraInfo);
- }
}
diff --git a/core/src/main/java/com/dtstack/flink/sql/table/AbsTableParser.java b/core/src/main/java/com/dtstack/flink/sql/table/AbsTableParser.java
index aa0639c8a..cc92b092a 100644
--- a/core/src/main/java/com/dtstack/flink/sql/table/AbsTableParser.java
+++ b/core/src/main/java/com/dtstack/flink/sql/table/AbsTableParser.java
@@ -22,8 +22,8 @@
import com.dtstack.flink.sql.util.ClassUtil;
import com.dtstack.flink.sql.util.DtStringUtil;
-import com.google.common.collect.Lists;
-import com.google.common.collect.Maps;
+import org.apache.flink.calcite.shaded.com.google.common.collect.Lists;
+import org.apache.flink.shaded.curator.org.apache.curator.shaded.com.google.common.collect.Maps;
import java.util.List;
import java.util.Map;
@@ -82,29 +82,27 @@ public void parseFieldsInfo(String fieldsInfo, TableInfo tableInfo){
List fieldRows = DtStringUtil.splitIgnoreQuota(fieldsInfo, ',');
for(String fieldRow : fieldRows){
fieldRow = fieldRow.trim();
-
- String[] filedInfoArr = fieldRow.split("\\s+");
- if(filedInfoArr.length < 2 ){
- throw new RuntimeException(String.format("table [%s] field [%s] format error.", tableInfo.getName(), fieldRow));
- }
-
boolean isMatcherKey = dealKeyPattern(fieldRow, tableInfo);
+
if(isMatcherKey){
continue;
}
+ String[] filedInfoArr = fieldRow.split("\\s+");
+ if(filedInfoArr.length < 2){
+ throw new RuntimeException(String.format("table [%s] field [%s] format error.", tableInfo.getName(), fieldRow));
+ }
+
//Compatible situation may arise in space in the fieldName
String[] filedNameArr = new String[filedInfoArr.length - 1];
System.arraycopy(filedInfoArr, 0, filedNameArr, 0, filedInfoArr.length - 1);
String fieldName = String.join(" ", filedNameArr);
String fieldType = filedInfoArr[filedInfoArr.length - 1 ].trim();
- Class fieldClass = dbTypeConvertToJavaType(fieldType);
+ Class fieldClass = ClassUtil.stringConvertClass(fieldType);
- tableInfo.addPhysicalMappings(filedInfoArr[0],filedInfoArr[0]);
tableInfo.addField(fieldName);
tableInfo.addFieldClass(fieldClass);
tableInfo.addFieldType(fieldType);
- tableInfo.addFieldExtraInfo(null);
}
tableInfo.finish();
@@ -116,9 +114,4 @@ public static void dealPrimaryKey(Matcher matcher, TableInfo tableInfo){
List primaryKes = Lists.newArrayList(splitArry);
tableInfo.setPrimaryKeys(primaryKes);
}
-
- public Class dbTypeConvertToJavaType(String fieldType) {
- return ClassUtil.stringConvertClass(fieldType);
- }
-
}
diff --git a/core/src/main/java/com/dtstack/flink/sql/table/SourceTableInfo.java b/core/src/main/java/com/dtstack/flink/sql/table/SourceTableInfo.java
index 9a41fa0a1..a92aa9fb1 100644
--- a/core/src/main/java/com/dtstack/flink/sql/table/SourceTableInfo.java
+++ b/core/src/main/java/com/dtstack/flink/sql/table/SourceTableInfo.java
@@ -20,9 +20,9 @@
package com.dtstack.flink.sql.table;
-import com.google.common.base.Strings;
-import com.google.common.collect.Lists;
-import com.google.common.collect.Maps;
+import org.apache.flink.calcite.shaded.com.google.common.base.Strings;
+import org.apache.flink.calcite.shaded.com.google.common.collect.Lists;
+import org.apache.flink.calcite.shaded.com.google.common.collect.Maps;
import org.apache.flink.util.StringUtils;
import java.util.ArrayList;
@@ -71,7 +71,6 @@ public void setMaxOutOrderness(Integer maxOutOrderness) {
if(maxOutOrderness == null){
return;
}
-
this.maxOutOrderness = maxOutOrderness;
}
diff --git a/core/src/main/java/com/dtstack/flink/sql/table/TableInfo.java b/core/src/main/java/com/dtstack/flink/sql/table/TableInfo.java
index a2454b893..57f94e2e0 100644
--- a/core/src/main/java/com/dtstack/flink/sql/table/TableInfo.java
+++ b/core/src/main/java/com/dtstack/flink/sql/table/TableInfo.java
@@ -20,8 +20,8 @@
package com.dtstack.flink.sql.table;
-import com.google.common.collect.Lists;
-import com.google.common.collect.Maps;
+import org.apache.flink.calcite.shaded.com.google.common.collect.Lists;
+import org.apache.flink.shaded.guava18.com.google.common.collect.Maps;
import java.io.Serializable;
import java.util.List;
@@ -57,8 +57,6 @@ public abstract class TableInfo implements Serializable {
private final List fieldClassList = Lists.newArrayList();
- private final List fieldExtraInfoList = Lists.newArrayList();
-
private List primaryKeys;
private Integer parallelism = 1;
@@ -145,10 +143,6 @@ public void setFieldClasses(Class>[] fieldClasses) {
this.fieldClasses = fieldClasses;
}
- public void addFieldExtraInfo(FieldExtraInfo extraInfo) {
- fieldExtraInfoList.add(extraInfo);
- }
-
public List getFieldList() {
return fieldList;
}
@@ -165,10 +159,6 @@ public Map getPhysicalFields() {
return physicalFields;
}
- public List getFieldExtraInfoList() {
- return fieldExtraInfoList;
- }
-
public void setPhysicalFields(Map physicalFields) {
this.physicalFields = physicalFields;
}
@@ -178,25 +168,4 @@ public void finish(){
this.fieldClasses = fieldClassList.toArray(new Class[fieldClassList.size()]);
this.fieldTypes = fieldTypeList.toArray(new String[fieldTypeList.size()]);
}
-
- /**
- * field extra info,used to store `not null` `default 0`...,
- *
- * now, only support not null
- */
- public static class FieldExtraInfo implements Serializable {
-
- /**
- * default false:allow field is null
- */
- boolean notNull = false;
-
- public boolean getNotNull() {
- return notNull;
- }
-
- public void setNotNull(boolean notNull) {
- this.notNull = notNull;
- }
- }
}
diff --git a/core/src/main/java/com/dtstack/flink/sql/table/TableInfoParser.java b/core/src/main/java/com/dtstack/flink/sql/table/TableInfoParser.java
index ae98d90ae..b3a07d6d5 100644
--- a/core/src/main/java/com/dtstack/flink/sql/table/TableInfoParser.java
+++ b/core/src/main/java/com/dtstack/flink/sql/table/TableInfoParser.java
@@ -20,6 +20,7 @@
package com.dtstack.flink.sql.table;
+import com.dtstack.flink.sql.enums.ECacheType;
import com.dtstack.flink.sql.enums.ETableType;
import com.dtstack.flink.sql.parser.CreateTableParser;
import com.dtstack.flink.sql.side.SideTableInfo;
@@ -27,8 +28,8 @@
import com.dtstack.flink.sql.sink.StreamSinkFactory;
import com.dtstack.flink.sql.source.StreamSourceFactory;
import com.dtstack.flink.sql.util.MathUtil;
-import com.google.common.base.Strings;
-import com.google.common.collect.Maps;
+import org.apache.flink.calcite.shaded.com.google.common.base.Strings;
+import org.apache.flink.shaded.curator.org.apache.curator.shaded.com.google.common.collect.Maps;
import java.util.Map;
import java.util.regex.Matcher;
diff --git a/core/src/main/java/com/dtstack/flink/sql/table/TargetTableInfo.java b/core/src/main/java/com/dtstack/flink/sql/table/TargetTableInfo.java
index 70b625b5c..a9a846707 100644
--- a/core/src/main/java/com/dtstack/flink/sql/table/TargetTableInfo.java
+++ b/core/src/main/java/com/dtstack/flink/sql/table/TargetTableInfo.java
@@ -30,16 +30,4 @@
public abstract class TargetTableInfo extends TableInfo {
public static final String TARGET_SUFFIX = "Sink";
-
- public static final String SINK_DATA_TYPE = "sinkdatatype";
-
- private String sinkDataType = "json";
-
- public String getSinkDataType() {
- return sinkDataType;
- }
-
- public void setSinkDataType(String sinkDataType) {
- this.sinkDataType = sinkDataType;
- }
}
diff --git a/core/src/main/java/com/dtstack/flink/sql/factory/DTThreadFactory.java b/core/src/main/java/com/dtstack/flink/sql/threadFactory/DTThreadFactory.java
similarity index 97%
rename from core/src/main/java/com/dtstack/flink/sql/factory/DTThreadFactory.java
rename to core/src/main/java/com/dtstack/flink/sql/threadFactory/DTThreadFactory.java
index e9f9dfa64..1c16581ef 100644
--- a/core/src/main/java/com/dtstack/flink/sql/factory/DTThreadFactory.java
+++ b/core/src/main/java/com/dtstack/flink/sql/threadFactory/DTThreadFactory.java
@@ -18,7 +18,7 @@
-package com.dtstack.flink.sql.factory;
+package com.dtstack.flink.sql.threadFactory;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.atomic.AtomicInteger;
diff --git a/core/src/main/java/com/dtstack/flink/sql/udf/TimestampUdf.java b/core/src/main/java/com/dtstack/flink/sql/udf/TimestampUdf.java
new file mode 100644
index 000000000..9f605dde3
--- /dev/null
+++ b/core/src/main/java/com/dtstack/flink/sql/udf/TimestampUdf.java
@@ -0,0 +1,24 @@
+package com.dtstack.flink.sql.udf;
+
+import org.apache.flink.table.functions.FunctionContext;
+import org.apache.flink.table.functions.ScalarFunction;
+
+import java.sql.Timestamp;
+
+public class TimestampUdf extends ScalarFunction {
+ @Override
+ public void open(FunctionContext context) {
+ }
+ public static Timestamp eval(String timestamp) {
+ if (timestamp.length() == 13){
+ return new Timestamp(Long.parseLong(timestamp));
+ }else if (timestamp.length() == 10){
+ return new Timestamp(Long.parseLong(timestamp)*1000);
+ } else{
+ return Timestamp.valueOf(timestamp);
+ }
+ }
+ @Override
+ public void close() {
+ }
+}
diff --git a/core/src/main/java/com/dtstack/flink/sql/constrant/ConfigConstrant.java b/core/src/main/java/com/dtstack/flink/sql/util/ConfigConstrant.java
similarity index 79%
rename from core/src/main/java/com/dtstack/flink/sql/constrant/ConfigConstrant.java
rename to core/src/main/java/com/dtstack/flink/sql/util/ConfigConstrant.java
index 76f5996c3..160d8f411 100644
--- a/core/src/main/java/com/dtstack/flink/sql/constrant/ConfigConstrant.java
+++ b/core/src/main/java/com/dtstack/flink/sql/util/ConfigConstrant.java
@@ -18,7 +18,7 @@
-package com.dtstack.flink.sql.constrant;
+package com.dtstack.flink.sql.util;
/**
@@ -29,9 +29,7 @@
*/
public class ConfigConstrant {
- public static final String SQL_CHECKPOINT_INTERVAL_KEY = "sql.checkpoint.interval";
- // 兼容上层
- public static final String FLINK_CHECKPOINT_INTERVAL_KEY = "flink.checkpoint.interval";
+ public static final String FLINK_CHECKPOINT_INTERVAL_KEY = "sql.checkpoint.interval";
public static final String FLINK_CHECKPOINT_MODE_KEY = "sql.checkpoint.mode";
@@ -39,11 +37,7 @@ public class ConfigConstrant {
public static final String FLINK_MAXCONCURRENTCHECKPOINTS_KEY = "sql.max.concurrent.checkpoints";
- public static final String SQL_CHECKPOINT_CLEANUPMODE_KEY = "sql.checkpoint.cleanup.mode";
-
- public static final String FLINK_CHECKPOINT_CLEANUPMODE_KEY = "flink.checkpoint.cleanup.mode";
-
-
+ public static final String FLINK_CHECKPOINT_CLEANUPMODE_KEY = "sql.checkpoint.cleanup.mode";
public static final String FLINK_CHECKPOINT_DATAURI_KEY = "flinkCheckpointDataURI";
@@ -57,11 +51,4 @@ public class ConfigConstrant {
public static final String FLINK_TIME_CHARACTERISTIC_KEY = "time.characteristic";
- // restart plocy
- public static final int failureRate = 3;
-
- public static final int failureInterval = 6; //min
-
- public static final int delayInterval = 10; //sec
-
}
diff --git a/core/src/main/java/com/dtstack/flink/sql/util/DtStringUtil.java b/core/src/main/java/com/dtstack/flink/sql/util/DtStringUtil.java
index 862b0700e..9d23afad6 100644
--- a/core/src/main/java/com/dtstack/flink/sql/util/DtStringUtil.java
+++ b/core/src/main/java/com/dtstack/flink/sql/util/DtStringUtil.java
@@ -16,16 +16,16 @@
* limitations under the License.
*/
-
+
package com.dtstack.flink.sql.util;
import com.dtstack.flink.sql.enums.ColumnType;
import org.apache.commons.lang3.StringUtils;
-import com.google.common.base.Strings;
-import com.google.common.collect.Maps;
-import com.fasterxml.jackson.databind.ObjectMapper;
-import java.sql.Timestamp;
+import org.apache.flink.calcite.shaded.com.google.common.base.Strings;
+import org.apache.flink.calcite.shaded.com.google.common.collect.Maps;
+import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.ObjectMapper;
+
import java.math.BigDecimal;
import java.util.ArrayList;
import java.util.List;
@@ -59,40 +59,33 @@ public static List splitIgnoreQuota(String str, char delimiter){
boolean inSingleQuotes = false;
int bracketLeftNum = 0;
StringBuilder b = new StringBuilder();
- char[] chars = str.toCharArray();
- int idx = 0;
- for (char c : chars) {
- char flag = 0;
- if (idx > 0) {
- flag = chars[idx - 1];
- }
- if (c == delimiter) {
+ for (char c : str.toCharArray()) {
+ if(c == delimiter){
if (inQuotes) {
b.append(c);
- } else if (inSingleQuotes) {
+ } else if(inSingleQuotes){
b.append(c);
- } else if (bracketLeftNum > 0) {
+ } else if(bracketLeftNum > 0){
b.append(c);
- } else {
+ }else {
tokensList.add(b.toString());
b = new StringBuilder();
}
- } else if (c == '\"' && '\\' != flag && !inSingleQuotes) {
+ }else if(c == '\"'){
inQuotes = !inQuotes;
b.append(c);
- } else if (c == '\'' && '\\' != flag && !inQuotes) {
+ }else if(c == '\''){
inSingleQuotes = !inSingleQuotes;
b.append(c);
- } else if (c == '(' && !inSingleQuotes && !inQuotes) {
+ }else if(c == '('){
bracketLeftNum++;
b.append(c);
- } else if (c == ')' && !inSingleQuotes && !inQuotes) {
+ }else if(c == ')'){
bracketLeftNum--;
b.append(c);
- } else {
+ }else{
b.append(c);
}
- idx++;
}
tokensList.add(b.toString());
@@ -231,54 +224,4 @@ public static boolean isJosn(String str){
}
return flag;
}
-
- public static Object parse(String str,Class clazz){
- String fieldType = clazz.getName();
- Object object = null;
- if(fieldType.equals(Integer.class.getName())){
- object = Integer.parseInt(str);
- }else if(fieldType.equals(Long.class.getName())){
- object = Long.parseLong(str);
- }else if(fieldType.equals(Byte.class.getName())){
- object = str.getBytes()[0];
- }else if(fieldType.equals(String.class.getName())){
- object = str;
- }else if(fieldType.equals(Float.class.getName())){
- object = Float.parseFloat(str);
- }else if(fieldType.equals(Double.class.getName())){
- object = Double.parseDouble(str);
- }else if (fieldType.equals(Timestamp.class.getName())){
- object = Timestamp.valueOf(str);
- }else{
- throw new RuntimeException("no support field type for sql. the input type:" + fieldType);
- }
- return object;
- }
-
-
- public static String firstUpperCase(String str) {
- return str.substring(0, 1).toUpperCase() + str.substring(1);
- }
-
- public static String getTableFullPath(String schema, String tableName) {
- if (StringUtils.isEmpty(schema)){
- return addQuoteForStr(tableName);
- }
- String schemaAndTabName = addQuoteForStr(schema) + "." + addQuoteForStr(tableName);
- return schemaAndTabName;
- }
-
-
-
- public static String addQuoteForStr(String column) {
- return getStartQuote() + column + getEndQuote();
- }
-
- public static String getStartQuote() {
- return "\"";
- }
-
- public static String getEndQuote() {
- return "\"";
- }
}
diff --git a/core/src/main/java/com/dtstack/flink/sql/util/FlinkUtil.java b/core/src/main/java/com/dtstack/flink/sql/util/FlinkUtil.java
index 5811105e4..0f71c1dce 100644
--- a/core/src/main/java/com/dtstack/flink/sql/util/FlinkUtil.java
+++ b/core/src/main/java/com/dtstack/flink/sql/util/FlinkUtil.java
@@ -16,13 +16,11 @@
* limitations under the License.
*/
-
+
package com.dtstack.flink.sql.util;
-import com.dtstack.flink.sql.classloader.ClassLoaderManager;
-import com.dtstack.flink.sql.constrant.ConfigConstrant;
import org.apache.commons.lang3.StringUtils;
import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.runtime.state.filesystem.FsStateBackend;
@@ -33,10 +31,10 @@
import org.apache.flink.table.api.TableEnvironment;
import org.apache.flink.table.api.java.BatchTableEnvironment;
import org.apache.flink.table.api.java.StreamTableEnvironment;
+import org.apache.flink.table.functions.AggregateFunction;
import org.apache.flink.table.functions.ScalarFunction;
import org.apache.flink.table.functions.TableFunction;
-import org.apache.flink.table.functions.AggregateFunction;
-
+import org.apache.flink.table.functions.UserDefinedFunction;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -71,14 +69,12 @@ public static void openCheckpoint(StreamExecutionEnvironment env, Properties pro
}
//设置了时间间隔才表明开启了checkpoint
- if(properties.getProperty(ConfigConstrant.SQL_CHECKPOINT_INTERVAL_KEY) == null && properties.getProperty(ConfigConstrant.FLINK_CHECKPOINT_INTERVAL_KEY) == null){
+ if(properties.getProperty(ConfigConstrant.FLINK_CHECKPOINT_INTERVAL_KEY) == null){
return;
}else{
- Long sql_interval = Long.valueOf(properties.getProperty(ConfigConstrant.SQL_CHECKPOINT_INTERVAL_KEY,"0"));
- Long flink_interval = Long.valueOf(properties.getProperty(ConfigConstrant.FLINK_CHECKPOINT_INTERVAL_KEY, "0"));
- long checkpointInterval = Math.max(sql_interval, flink_interval);
+ Long interval = Long.valueOf(properties.getProperty(ConfigConstrant.FLINK_CHECKPOINT_INTERVAL_KEY));
//start checkpoint every ${interval}
- env.enableCheckpointing(checkpointInterval);
+ env.enableCheckpointing(interval);
}
String checkMode = properties.getProperty(ConfigConstrant.FLINK_CHECKPOINT_MODE_KEY);
@@ -106,14 +102,7 @@ public static void openCheckpoint(StreamExecutionEnvironment env, Properties pro
env.getCheckpointConfig().setMaxConcurrentCheckpoints(maxConcurrCheckpoints);
}
- Boolean sqlCleanMode = MathUtil.getBoolean(properties.getProperty(ConfigConstrant.SQL_CHECKPOINT_CLEANUPMODE_KEY), false);
- Boolean flinkCleanMode = MathUtil.getBoolean(properties.getProperty(ConfigConstrant.FLINK_CHECKPOINT_CLEANUPMODE_KEY), false);
-
- String cleanupModeStr = "false";
- if (sqlCleanMode || flinkCleanMode ){
- cleanupModeStr = "true";
- }
-
+ String cleanupModeStr = properties.getProperty(ConfigConstrant.FLINK_CHECKPOINT_CLEANUPMODE_KEY);
if ("true".equalsIgnoreCase(cleanupModeStr)){
env.getCheckpointConfig().enableExternalizedCheckpoints(
CheckpointConfig.ExternalizedCheckpointCleanup.DELETE_ON_CANCELLATION);
@@ -149,7 +138,6 @@ public static void setStreamTimeCharacteristic(StreamExecutionEnvironment env, P
if(characteristicStr.equalsIgnoreCase(tmp.toString())){
env.setStreamTimeCharacteristic(tmp);
flag = true;
- break;
}
}
@@ -159,19 +147,20 @@ public static void setStreamTimeCharacteristic(StreamExecutionEnvironment env, P
}
+
/**
- * TABLE|SCALA|AGGREGATE
+ * FIXME 暂时不支持 UDF 实现类--有参构造方法
+ * TABLE|SCALA
* 注册UDF到table env
*/
- public static void registerUDF(String type, String classPath, String funcName, TableEnvironment tableEnv, ClassLoader classLoader){
+ public static void registerUDF(String type, String classPath, String funcName, TableEnvironment tableEnv,
+ ClassLoader classLoader){
if("SCALA".equalsIgnoreCase(type)){
registerScalaUDF(classPath, funcName, tableEnv, classLoader);
}else if("TABLE".equalsIgnoreCase(type)){
registerTableUDF(classPath, funcName, tableEnv, classLoader);
- }else if("AGGREGATE".equalsIgnoreCase(type)){
- registerAggregateUDF(classPath, funcName, tableEnv, classLoader);
}else{
- throw new RuntimeException("not support of UDF which is not in (TABLE, SCALA, AGGREGATE)");
+ throw new RuntimeException("not support of UDF which is not in (TABLE, SCALA)");
}
}
@@ -182,7 +171,8 @@ public static void registerUDF(String type, String classPath, String funcName, T
* @param funcName
* @param tableEnv
*/
- public static void registerScalaUDF(String classPath, String funcName, TableEnvironment tableEnv, ClassLoader classLoader){
+ public static void registerScalaUDF(String classPath, String funcName, TableEnvironment tableEnv,
+ ClassLoader classLoader){
try{
ScalarFunction udfFunc = Class.forName(classPath, false, classLoader)
.asSubclass(ScalarFunction.class).newInstance();
@@ -196,23 +186,34 @@ public static void registerScalaUDF(String classPath, String funcName, TableEnvi
/**
* 注册自定义TABLEFFUNC方法到env上
- *
+ * TODO 对User-Defined Aggregate Functions的支持
* @param classPath
* @param funcName
* @param tableEnv
*/
- public static void registerTableUDF(String classPath, String funcName, TableEnvironment tableEnv, ClassLoader classLoader){
+ public static void registerTableUDF(String classPath, String funcName, TableEnvironment tableEnv,
+ ClassLoader classLoader){
try {
- TableFunction udfFunc = Class.forName(classPath, false, classLoader)
- .asSubclass(TableFunction.class).newInstance();
+ UserDefinedFunction udfFunc = Class.forName(classPath,false, classLoader).asSubclass(UserDefinedFunction.class).newInstance();
if(tableEnv instanceof StreamTableEnvironment){
- ((StreamTableEnvironment)tableEnv).registerFunction(funcName, udfFunc);
+ if (udfFunc instanceof AggregateFunction){
+ ((StreamTableEnvironment) tableEnv).registerFunction(funcName, (AggregateFunction)udfFunc);
+ }else if (udfFunc instanceof TableFunction) {
+ ((StreamTableEnvironment) tableEnv).registerFunction(funcName, (TableFunction)udfFunc);
+ }else{
+ throw new RuntimeException("no support UserDefinedFunction class for " + udfFunc.getClass().getName());
+ }
}else if(tableEnv instanceof BatchTableEnvironment){
- ((BatchTableEnvironment)tableEnv).registerFunction(funcName, udfFunc);
+ if (udfFunc instanceof AggregateFunction){
+ ((BatchTableEnvironment) tableEnv).registerFunction(funcName, (AggregateFunction)udfFunc);
+ }else if (udfFunc instanceof TableFunction) {
+ ((BatchTableEnvironment) tableEnv).registerFunction(funcName, (TableFunction)udfFunc);
+ }else{
+ throw new RuntimeException("no support UserDefinedFunction class for " + udfFunc.getClass().getName());
+ }
}else{
throw new RuntimeException("no support tableEnvironment class for " + tableEnv.getClass().getName());
}
-
logger.info("register table function:{} success.", funcName);
}catch (Exception e){
logger.error("", e);
@@ -220,31 +221,6 @@ public static void registerTableUDF(String classPath, String funcName, TableEnvi
}
}
- /**
- * 注册自定义Aggregate FUNC方法到env上
- *
- * @param classPath
- * @param funcName
- * @param tableEnv
- */
- public static void registerAggregateUDF(String classPath, String funcName, TableEnvironment tableEnv, ClassLoader classLoader) {
- try {
- AggregateFunction udfFunc = Class.forName(classPath, false, classLoader)
- .asSubclass(AggregateFunction.class).newInstance();
- if (tableEnv instanceof StreamTableEnvironment) {
- ((StreamTableEnvironment) tableEnv).registerFunction(funcName, udfFunc);
- } else if (tableEnv instanceof BatchTableEnvironment) {
- ((BatchTableEnvironment) tableEnv).registerFunction(funcName, udfFunc);
- } else {
- throw new RuntimeException("no support tableEnvironment class for " + tableEnv.getClass().getName());
- }
-
- logger.info("register Aggregate function:{} success.", funcName);
- } catch (Exception e) {
- logger.error("", e);
- throw new RuntimeException("register Aggregate UDF exception:", e);
- }
- }
/**
*
@@ -279,9 +255,21 @@ public static long getBufferTimeoutMillis(Properties properties){
}
public static URLClassLoader loadExtraJar(List jarURLList, URLClassLoader classLoader) throws NoSuchMethodException, IllegalAccessException, InvocationTargetException {
+
+ int size = 0;
+ for(URL url : jarURLList){
+ if(url.toString().endsWith(".jar")){
+ size++;
+ }
+ }
+
+ URL[] urlArray = new URL[size];
+ int i=0;
for(URL url : jarURLList){
if(url.toString().endsWith(".jar")){
+ urlArray[i] = url;
urlClassLoaderAddUrl(classLoader, url);
+ i++;
}
}
@@ -309,4 +297,4 @@ public static TypeInformation[] transformTypes(Class[] fieldTypes){
return types;
}
-}
\ No newline at end of file
+}
diff --git a/core/src/main/java/com/dtstack/flink/sql/util/JDBCUtils.java b/core/src/main/java/com/dtstack/flink/sql/util/JDBCUtils.java
deleted file mode 100644
index fde2f166e..000000000
--- a/core/src/main/java/com/dtstack/flink/sql/util/JDBCUtils.java
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-
-package com.dtstack.flink.sql.util;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.sql.DriverManager;
-
-public class JDBCUtils {
-
- private static final Logger LOG = LoggerFactory.getLogger(ClassUtil.class);
-
- public final static String lock_str = "jdbc_lock_str";
-
- public static void forName(String clazz, ClassLoader classLoader) {
- synchronized (lock_str){
- try {
- Class.forName(clazz, true, classLoader);
- DriverManager.setLoginTimeout(10);
- } catch (Exception e) {
- throw new RuntimeException(e);
- }
- }
- }
-
-
- public synchronized static void forName(String clazz) {
- try {
- Class> driverClass = Class.forName(clazz);
- driverClass.newInstance();
- } catch (Exception e) {
- throw new RuntimeException(e);
- }
- }
-}
diff --git a/core/src/main/java/com/dtstack/flink/sql/util/ParseUtils.java b/core/src/main/java/com/dtstack/flink/sql/util/ParseUtils.java
index 73f0e9baa..280594d17 100644
--- a/core/src/main/java/com/dtstack/flink/sql/util/ParseUtils.java
+++ b/core/src/main/java/com/dtstack/flink/sql/util/ParseUtils.java
@@ -1,50 +1,10 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
package com.dtstack.flink.sql.util;
-import com.google.common.collect.Lists;
-import org.apache.calcite.sql.*;
-import org.apache.calcite.sql.fun.SqlCase;
-import org.apache.calcite.sql.parser.SqlParserPos;
-import org.apache.commons.lang3.StringUtils;
-import java.util.List;
-import java.util.Map;
+import org.apache.calcite.sql.SqlBasicCall;
+import org.apache.calcite.sql.SqlKind;
+import org.apache.calcite.sql.SqlNode;
-import static org.apache.calcite.sql.SqlKind.*;
+import java.util.List;
/**
* @Auther: jiangjunjie
@@ -52,56 +12,6 @@
* @Description:
*/
public class ParseUtils {
- public static void parseSideWhere(SqlNode whereNode, Map physicalFields, List whereConditionList) {
- SqlKind sqlKind = whereNode.getKind();
- if ((sqlKind == SqlKind.OR || sqlKind == SqlKind.AND) && ((SqlBasicCall) whereNode).getOperandList().size() == 2) {
- SqlNode[] sqlOperandsList = ((SqlBasicCall) whereNode).getOperands();
- // whereNode是一颗先解析or再解析and的二叉树。二叉树中序遍历,先左子树,其次中间节点,最后右子树
- parseSideWhere(sqlOperandsList[0], physicalFields, whereConditionList);
- whereConditionList.add(sqlKind.name());
- parseSideWhere(sqlOperandsList[1], physicalFields, whereConditionList);
- } else {
- SqlIdentifier sqlIdentifier = (SqlIdentifier) ((SqlBasicCall) whereNode).getOperands()[0];
- String fieldName = null;
- if (sqlIdentifier.names.size() == 1) {
- fieldName = sqlIdentifier.getComponent(0).getSimple();
- } else {
- fieldName = sqlIdentifier.getComponent(1).getSimple();
- }
- if (physicalFields.containsKey(fieldName)) {
- String sideFieldName = physicalFields.get(fieldName);
- // clone SqlIdentifier node
- SqlParserPos sqlParserPos = new SqlParserPos(0, 0);
- SqlIdentifier sqlIdentifierClone = new SqlIdentifier("", null, sqlParserPos);
- List namesClone = Lists.newArrayList();
- for(String name :sqlIdentifier.names){
- namesClone.add(name);
- }
- sqlIdentifierClone.setNames(namesClone,null);
- // clone SqlBasicCall node
- SqlBasicCall sqlBasicCall = (SqlBasicCall)whereNode;
- SqlNode[] sqlNodes = sqlBasicCall.getOperands();
- SqlNode[] sqlNodesClone = new SqlNode[sqlNodes.length];
- for (int i = 0; i < sqlNodes.length; i++) {
- sqlNodesClone[i] = sqlNodes[i];
- }
- SqlBasicCall sqlBasicCallClone = new SqlBasicCall(sqlBasicCall.getOperator(), sqlNodesClone, sqlParserPos);
- // 替换维表中真实字段名
- List names = Lists.newArrayList();
- names.add(sideFieldName);
- sqlIdentifierClone.setNames(names, null);
-
- sqlBasicCallClone.setOperand(0, sqlIdentifierClone);
- whereConditionList.add(sqlBasicCallClone.toString());
- } else {
- // 如果字段不是维表中字段,删除字段前的链接符
- if (whereConditionList.size() >= 1) {
- whereConditionList.remove(whereConditionList.size() - 1);
- }
- }
- }
- }
-
public static void parseAnd(SqlNode conditionNode, List sqlNodeList){
if(conditionNode.getKind() == SqlKind.AND && ((SqlBasicCall)conditionNode).getOperandList().size()==2){
parseAnd(((SqlBasicCall)conditionNode).getOperands()[0], sqlNodeList);
@@ -110,195 +20,4 @@ public static void parseAnd(SqlNode conditionNode, List sqlNodeList){
sqlNodeList.add(conditionNode);
}
}
-
- public static void parseJoinCompareOperate(SqlNode condition, List sqlJoinCompareOperate) {
- SqlBasicCall joinCondition = (SqlBasicCall) condition;
- if (joinCondition.getKind() == SqlKind.AND) {
- List operandList = joinCondition.getOperandList();
- for (SqlNode sqlNode : operandList) {
- parseJoinCompareOperate(sqlNode, sqlJoinCompareOperate);
- }
- } else {
- String operator = transformNotEqualsOperator(joinCondition.getKind());
- sqlJoinCompareOperate.add(operator);
- }
- }
-
- public static String transformNotEqualsOperator(SqlKind sqlKind) {
- if (StringUtils.equalsIgnoreCase(sqlKind.toString(), "NOT_EQUALS")){
- return "!=";
- }
- return sqlKind.sql;
- }
-
- public static SqlNode replaceJoinConditionTabName(SqlNode conditionNode, Map mappingTable) {
- SqlNode[] operands = ((SqlBasicCall) conditionNode).getOperands();
-
- for (int i = 0; i < operands.length; i++) {
- SqlNode sqlNode = operands[i];
- SqlNode replaceNode = replaceNodeInfo(sqlNode, mappingTable);
- operands[i] = replaceNode;
- }
- return conditionNode;
- }
-
- /**
- * m.id covert m_x_0.id
- * @param selectNode
- * @param mapTab
- * @return
- */
- public static SqlNode replaceSelectFieldTabName(SqlNode selectNode, Map mapTab) {
- if (selectNode.getKind() == AS) {
- SqlNode leftNode = ((SqlBasicCall) selectNode).getOperands()[0];
- SqlNode replaceNode = replaceSelectFieldTabName(leftNode, mapTab);
- if (replaceNode != null) {
- ((SqlBasicCall) selectNode).getOperands()[0] = replaceNode;
- }
-
- return selectNode;
- }else if(selectNode.getKind() == IDENTIFIER){
- SqlIdentifier sqlIdentifier = (SqlIdentifier) selectNode;
-
- if(sqlIdentifier.names.size() == 1){
- return selectNode;
- }
-
- String newTableName = ParseUtils.getRootName(mapTab, sqlIdentifier.getComponent(0).getSimple());
-
- if(newTableName == null){
- return selectNode;
- }
- sqlIdentifier = sqlIdentifier.setName(0, newTableName);
- return sqlIdentifier;
-
- }else if(selectNode.getKind() == LITERAL || selectNode.getKind() == LITERAL_CHAIN){//字面含义
- return selectNode;
- }else if( AGGREGATE.contains(selectNode.getKind())
- || AVG_AGG_FUNCTIONS.contains(selectNode.getKind())
- || COMPARISON.contains(selectNode.getKind())
- || selectNode.getKind() == OTHER_FUNCTION
- || selectNode.getKind() == DIVIDE
- || selectNode.getKind() == CAST
- || selectNode.getKind() == TRIM
- || selectNode.getKind() == TIMES
- || selectNode.getKind() == PLUS
- || selectNode.getKind() == NOT_IN
- || selectNode.getKind() == OR
- || selectNode.getKind() == AND
- || selectNode.getKind() == MINUS
- || selectNode.getKind() == TUMBLE
- || selectNode.getKind() == TUMBLE_START
- || selectNode.getKind() == TUMBLE_END
- || selectNode.getKind() == SESSION
- || selectNode.getKind() == SESSION_START
- || selectNode.getKind() == SESSION_END
- || selectNode.getKind() == HOP
- || selectNode.getKind() == HOP_START
- || selectNode.getKind() == HOP_END
- || selectNode.getKind() == BETWEEN
- || selectNode.getKind() == IS_NULL
- || selectNode.getKind() == IS_NOT_NULL
- || selectNode.getKind() == CONTAINS
- || selectNode.getKind() == TIMESTAMP_ADD
- || selectNode.getKind() == TIMESTAMP_DIFF
-
- ){
- SqlBasicCall sqlBasicCall = (SqlBasicCall) selectNode;
- for(int i=0; i mapTab, SqlNodeList thenOperands) {
- for(int i=0; i mapTab) {
- if (parseNode.getKind() == IDENTIFIER) {
- SqlIdentifier sqlIdentifier = (SqlIdentifier) parseNode;
-
- String newTableName = ParseUtils.getRootName(mapTab, sqlIdentifier.getComponent(0).getSimple());;
-
- if (newTableName == null || sqlIdentifier.names.size() == 1) {
- return sqlIdentifier;
- }
- sqlIdentifier = sqlIdentifier.setName(0, newTableName);
- return sqlIdentifier;
- } else if (parseNode instanceof SqlBasicCall) {
- SqlBasicCall sqlBasicCall = (SqlBasicCall) parseNode;
- for (int i = 0; i < sqlBasicCall.getOperandList().size(); i++) {
- SqlNode sqlNode = sqlBasicCall.getOperandList().get(i);
- SqlNode replaceNode = replaceSelectFieldTabName(sqlNode, mapTab);
- sqlBasicCall.getOperands()[i] = replaceNode;
- }
-
- return sqlBasicCall;
- } else {
- return parseNode;
- }
- }
-
-
- public static String getRootName(Map maps, String key) {
- String res = null;
- while (maps.get(key) !=null) {
- res = maps.get(key);
- key = res;
- }
- return res;
- }
-
- public static void parseLeftNodeTableName(SqlNode leftJoin, List tablesName) {
- if (leftJoin.getKind() == IDENTIFIER) {
- SqlIdentifier sqlIdentifier = (SqlIdentifier) leftJoin;
- tablesName.add(sqlIdentifier.names.get(0));
- } else if (leftJoin.getKind() == AS) {
- SqlNode sqlNode = ((SqlBasicCall) leftJoin).getOperands()[1];
- tablesName.add(sqlNode.toString());
- } else if (leftJoin.getKind() == JOIN) {
- parseLeftNodeTableName(((SqlJoin) leftJoin).getLeft(), tablesName);
- parseLeftNodeTableName(((SqlJoin) leftJoin).getRight(), tablesName);
- }
- }
}
diff --git a/core/src/main/java/com/dtstack/flink/sql/util/PluginUtil.java b/core/src/main/java/com/dtstack/flink/sql/util/PluginUtil.java
index f1c26b047..682df169e 100644
--- a/core/src/main/java/com/dtstack/flink/sql/util/PluginUtil.java
+++ b/core/src/main/java/com/dtstack/flink/sql/util/PluginUtil.java
@@ -22,10 +22,10 @@
import com.dtstack.flink.sql.classloader.DtClassLoader;
import org.apache.commons.lang3.StringUtils;
-import com.fasterxml.jackson.core.JsonGenerationException;
-import com.fasterxml.jackson.core.JsonParseException;
-import com.fasterxml.jackson.databind.JsonMappingException;
-import com.fasterxml.jackson.databind.ObjectMapper;
+import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.core.JsonGenerationException;
+import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.core.JsonParseException;
+import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.JsonMappingException;
+import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.ObjectMapper;
import java.io.ByteArrayInputStream;
import java.io.File;
@@ -33,8 +33,6 @@
import java.io.IOException;
import java.net.MalformedURLException;
import java.net.URL;
-import java.util.ArrayList;
-import java.util.List;
import java.util.Map;
import java.util.Properties;
@@ -110,37 +108,19 @@ public static Properties stringToProperties(String str) throws IOException{
}
public static URL getRemoteJarFilePath(String pluginType, String tableType, String remoteSqlRootDir, String localSqlPluginPath) throws Exception {
- return buildFinalJarFilePath(pluginType, tableType, remoteSqlRootDir, localSqlPluginPath);
- }
-
- public static URL getLocalJarFilePath(String pluginType, String tableType, String localSqlPluginPath) throws Exception {
- return buildFinalJarFilePath(pluginType, tableType, null, localSqlPluginPath);
- }
-
- public static URL buildFinalJarFilePath(String pluginType, String tableType, String remoteSqlRootDir, String localSqlPluginPath) throws Exception {
String dirName = pluginType + tableType.toLowerCase();
String prefix = String.format("%s-%s", pluginType, tableType.toLowerCase());
String jarPath = localSqlPluginPath + SP + dirName;
String jarName = getCoreJarFileName(jarPath, prefix);
- String sqlRootDir = remoteSqlRootDir == null ? localSqlPluginPath : remoteSqlRootDir;
- return new URL("file:" + sqlRootDir + SP + dirName + SP + jarName);
+ return new URL("file:" + remoteSqlRootDir + SP + dirName + SP + jarName);
}
public static URL getRemoteSideJarFilePath(String pluginType, String sideOperator, String tableType, String remoteSqlRootDir, String localSqlPluginPath) throws Exception {
- return buildFinalSideJarFilePath(pluginType, sideOperator, tableType, remoteSqlRootDir, localSqlPluginPath);
- }
-
- public static URL getLocalSideJarFilePath(String pluginType, String sideOperator, String tableType, String localSqlPluginPath) throws Exception {
- return buildFinalSideJarFilePath(pluginType, sideOperator, tableType, null, localSqlPluginPath);
- }
-
- public static URL buildFinalSideJarFilePath(String pluginType, String sideOperator, String tableType, String remoteSqlRootDir, String localSqlPluginPath) throws Exception {
String dirName = pluginType + sideOperator + tableType.toLowerCase();
String prefix = String.format("%s-%s-%s", pluginType, sideOperator, tableType.toLowerCase());
String jarPath = localSqlPluginPath + SP + dirName;
String jarName = getCoreJarFileName(jarPath, prefix);
- String sqlRootDir = remoteSqlRootDir == null ? localSqlPluginPath : remoteSqlRootDir;
- return new URL("file:" + sqlRootDir + SP + dirName + SP + jarName);
+ return new URL("file:" + remoteSqlRootDir + SP + dirName + SP + jarName);
}
public static String upperCaseFirstChar(String str){
@@ -164,25 +144,6 @@ public static void addPluginJar(String pluginDir, DtClassLoader classLoader) thr
}
}
- public static URL[] getPluginJarUrls(String pluginDir) throws MalformedURLException {
- List urlList = new ArrayList<>();
- File dirFile = new File(pluginDir);
- if(!dirFile.exists() || !dirFile.isDirectory()){
- throw new RuntimeException("plugin path:" + pluginDir + "is not exist.");
- }
-
- File[] files = dirFile.listFiles(tmpFile -> tmpFile.isFile() && tmpFile.getName().endsWith(JAR_SUFFIX));
- if(files == null || files.length == 0){
- throw new RuntimeException("plugin path:" + pluginDir + " is null.");
- }
-
- for(File file : files){
- URL pluginJarURL = file.toURI().toURL();
- urlList.add(pluginJarURL);
- }
- return urlList.toArray(new URL[urlList.size()]);
- }
-
public static String getCoreJarFileName (String path, String prefix) throws Exception {
String coreJarFileName = null;
File pluginDir = new File(path);
diff --git a/core/src/main/java/com/dtstack/flink/sql/util/PropertiesUtils.java b/core/src/main/java/com/dtstack/flink/sql/util/PropertiesUtils.java
deleted file mode 100644
index dcb2a081a..000000000
--- a/core/src/main/java/com/dtstack/flink/sql/util/PropertiesUtils.java
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-
-package com.dtstack.flink.sql.util;
-
-import java.util.Properties;
-
-public class PropertiesUtils {
- public static Properties propertiesTrim(Properties confProperties) {
- Properties properties = new Properties();
- confProperties.forEach(
- (k, v) -> {
- properties.put(k.toString().trim(), v.toString().trim());
- }
- );
- return properties;
- }
-}
diff --git a/core/src/main/java/com/dtstack/flink/sql/watermarker/AbsCustomerWaterMarker.java b/core/src/main/java/com/dtstack/flink/sql/watermarker/AbsCustomerWaterMarker.java
index c9fbc0f44..b990bdd82 100644
--- a/core/src/main/java/com/dtstack/flink/sql/watermarker/AbsCustomerWaterMarker.java
+++ b/core/src/main/java/com/dtstack/flink/sql/watermarker/AbsCustomerWaterMarker.java
@@ -1,22 +1,3 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-
package com.dtstack.flink.sql.watermarker;
import com.dtstack.flink.sql.metric.EventDelayGauge;
diff --git a/core/src/main/java/com/dtstack/flink/sql/watermarker/WaterMarkerAssigner.java b/core/src/main/java/com/dtstack/flink/sql/watermarker/WaterMarkerAssigner.java
index d444c1bac..9288ecb87 100644
--- a/core/src/main/java/com/dtstack/flink/sql/watermarker/WaterMarkerAssigner.java
+++ b/core/src/main/java/com/dtstack/flink/sql/watermarker/WaterMarkerAssigner.java
@@ -21,16 +21,16 @@
package com.dtstack.flink.sql.watermarker;
import com.dtstack.flink.sql.table.SourceTableInfo;
+import org.apache.commons.lang3.StringUtils;
import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.api.java.typeutils.RowTypeInfo;
-import com.google.common.base.Strings;
+import org.apache.flink.shaded.guava18.com.google.common.base.Strings;
import org.apache.flink.streaming.api.datastream.DataStream;
+import org.apache.flink.streaming.api.functions.timestamps.BoundedOutOfOrdernessTimestampExtractor;
import org.apache.flink.streaming.api.windowing.time.Time;
import org.apache.flink.types.Row;
import org.apache.flink.util.Preconditions;
-import java.util.TimeZone;
-
/**
* define watermarker
* Date: 2018/6/29
diff --git a/core/src/main/scala/com/dtstack/flink/App.scala b/core/src/main/scala/com/dtstack/flink/App.scala
deleted file mode 100644
index e74ccc28a..000000000
--- a/core/src/main/scala/com/dtstack/flink/App.scala
+++ /dev/null
@@ -1,11 +0,0 @@
-package com.dtstack.flink
-
-/**
- * Hello world!
- *
- */
-object App {
- def main(args: Array[String]): Unit = {
- println( "Hello World!" )
- }
-}
diff --git a/core/src/test/java/com/dtstack/flink/sql/side/SideSqlExecTest.java b/core/src/test/java/com/dtstack/flink/sql/side/SideSqlExecTest.java
index 3bab778ae..8eb09ee18 100644
--- a/core/src/test/java/com/dtstack/flink/sql/side/SideSqlExecTest.java
+++ b/core/src/test/java/com/dtstack/flink/sql/side/SideSqlExecTest.java
@@ -22,10 +22,12 @@
import com.dtstack.flink.sql.Main;
import com.dtstack.flink.sql.parser.SqlParser;
import com.dtstack.flink.sql.parser.SqlTree;
-import com.google.common.base.Charsets;
-import com.google.common.collect.Lists;
+import org.apache.flink.calcite.shaded.com.google.common.base.Charsets;
+import org.apache.flink.calcite.shaded.com.google.common.collect.Lists;
import org.junit.Test;
+import java.io.UnsupportedEncodingException;
+import java.net.URLDecoder;
import java.net.URLEncoder;
import java.util.List;
diff --git a/core/src/test/java/com/dtstack/flink/sql/side/TestSideSqlParser.java b/core/src/test/java/com/dtstack/flink/sql/side/TestSideSqlParser.java
index 18a7d2d86..01e33a3b6 100644
--- a/core/src/test/java/com/dtstack/flink/sql/side/TestSideSqlParser.java
+++ b/core/src/test/java/com/dtstack/flink/sql/side/TestSideSqlParser.java
@@ -21,7 +21,7 @@
package com.dtstack.flink.sql.side;
import org.apache.calcite.sql.parser.SqlParseException;
-import com.google.common.collect.Sets;
+import org.apache.flink.calcite.shaded.com.google.common.collect.Sets;
import org.junit.Test;
import java.util.Set;
diff --git a/docs/clickhouseSink.md b/docs/clickhouseSink.md
deleted file mode 100644
index d9774727f..000000000
--- a/docs/clickhouseSink.md
+++ /dev/null
@@ -1,53 +0,0 @@
-## 1.格式:
-```
-CREATE TABLE tableName(
- colName colType,
- ...
- colNameX colType
- )WITH(
- type ='clickhouse',
- url ='jdbcUrl',
- userName ='userName',
- password ='pwd',
- tableName ='tableName',
- parallelism ='parllNum'
- );
-
-```
-
-## 2.支持版本
- 19.14.x、19.15.x、19.16.x
-
-## 3.表结构定义
-
-|参数名称|含义|
-|----|---|
-| tableName| clickhouse表名称|
-| colName | 列名称|
-| colType | 列类型 [colType支持的类型](colType.md)|
-
-## 4.参数:
-
-|参数名称|含义|是否必填|默认值|
-|----|----|----|----|
-|type |表明 输出表类型 clickhouse |是||
-|url | 连接clickhouse 数据库 jdbcUrl |是||
-|userName | clickhouse 连接用户名 |是||
-| password | clickhouse 连接密码|是||
-| tableName | clickhouse 表名称|是||
-| parallelism | 并行度设置|否|1|
-
-## 5.样例:
-```
-CREATE TABLE MyResult(
- channel VARCHAR,
- pv VARCHAR
- )WITH(
- type ='clickhouse',
- url ='jdbc:clickhouse://172.16.8.104:3306/test?charset=utf8',
- userName ='dtstack',
- password ='abc123',
- tableName ='pv2',
- parallelism ='1'
- )
- ```
\ No newline at end of file
diff --git a/docs/clickhouseSide.md b/docs/cockroachSide.md
similarity index 66%
rename from docs/clickhouseSide.md
rename to docs/cockroachSide.md
index 63d3cc3da..569ad0abc 100644
--- a/docs/clickhouseSide.md
+++ b/docs/cockroachSide.md
@@ -7,7 +7,7 @@
PRIMARY KEY(keyInfo),
PERIOD FOR SYSTEM_TIME
)WITH(
- type='clickhouse',
+ type='postgresql',
url='jdbcUrl',
userName='dbUserName',
password='dbPwd',
@@ -21,28 +21,29 @@
```
# 2.支持版本
- 19.14.x、19.15.x、19.16.x
+ Cockroach-2.0+
## 3.表结构定义
|参数名称|含义|
|----|---|
- | tableName | clickhouse表名称|
+ | tableName | 注册到flink的表名称(可选填;不填默认和hbase对应的表名称相同)|
| colName | 列名称|
| colType | 列类型 [colType支持的类型](colType.md)|
| PERIOD FOR SYSTEM_TIME | 关键字表明该定义的表为维表信息|
| PRIMARY KEY(keyInfo) | 维表主键定义;多个列之间用逗号隔开|
## 4.参数
+注意:因为cockroachDB数据库访问协议使用了postgresql,所以type使用postgresql。
|参数名称|含义|是否必填|默认值|
|----|---|---|----|
- | type | 表明维表的类型 clickhouse |是||
- | url | 连接clickhouse数据库 jdbcUrl |是||
- | userName | clickhouse连接用户名 |是||
- | password | clickhouse连接密码|是||
- | tableName | clickhouse表名称|是||
- | tableName | clickhouse 的表名称|是||
+ | type | 表明维表的类型[postgresql] |是||
+ | url | 连接postgresql数据库 jdbcUrl |是||
+ | userName | postgresql连接用户名 |是||
+ | password | postgresql连接密码|是||
+ | tableName | postgresql表名称|是||
+ | tableName | postgresql 的表名称|是||
| cache | 维表缓存策略(NONE/LRU)|否|NONE|
| partitionedJoin | 是否在維表join之前先根据 設定的key 做一次keyby操作(可以減少维表的数据缓存量)|否|false|
@@ -52,9 +53,7 @@
* LRU:
* cacheSize: 缓存的条目数量
* cacheTTLMs:缓存的过期时间(ms)
- * cacheMode: (unordered|ordered)异步加载是有序还是无序,默认有序。
- * asyncCapacity:异步请求容量,默认1000
- * asyncTimeout:异步请求超时时间,默认10000毫秒
+
## 5.样例
```
@@ -64,17 +63,14 @@ create table sideTable(
PRIMARY KEY(channel),
PERIOD FOR SYSTEM_TIME
)WITH(
- type='clickhouse',
- url='jdbc:clickhouse://172.16.8.104:3306/test?charset=utf8',
+ type='postgresql',
+ url='jdbc:postgresql://localhost:9001/test?sslmode=disable',
userName='dtstack',
password='abc123',
tableName='sidetest',
cache ='LRU',
cacheSize ='10000',
cacheTTLMs ='60000',
- cacheMode='unordered',
- asyncCapacity='1000',
- asyncTimeout='10000'
parallelism ='1',
partitionedJoin='false'
);
diff --git a/docs/cockroachSink.md b/docs/cockroachSink.md
new file mode 100644
index 000000000..2dca26091
--- /dev/null
+++ b/docs/cockroachSink.md
@@ -0,0 +1,57 @@
+## 1.格式:
+```
+CREATE TABLE tableName(
+ colName colType,
+ ...
+ colNameX colType
+ )WITH(
+ type ='postgresql',
+ url ='jdbcUrl',
+ userName ='userName',
+ password ='pwd',
+ tableName ='tableName',
+ parallelism ='parllNum',
+ isCockroach='true'
+ );
+
+```
+
+## 2.支持版本
+ Cockroach-2.0+
+
+## 3.表结构定义
+
+|参数名称|含义|
+|----|---|
+| tableName| 在 sql 中使用的名称;即注册到flink-table-env上的名称|
+| colName | 列名称|
+| colType | 列类型 [colType支持的类型](colType.md)|
+
+## 4.参数:
+注意:因为cockroachDB数据库访问协议使用了postgresql,所以type使用postgresql。
+
+|参数名称|含义|是否必填|默认值|
+|----|----|----|----|
+|type |表明 输出表类型[postgresql]|是||
+|url | 连接postgresql数据库 jdbcUrl |是||
+|userName | postgresql连接用户名 |是||
+| password | postgresql连接密码|是||
+| tableName | postgresql表名称|是||
+| parallelism | 并行度设置|否|1|
+| isCockroach | 显示声明数据库为Cockroach[true/false]|是|false|
+
+## 5.样例:
+```
+CREATE TABLE MyResult(
+ channel VARCHAR,
+ pv VARCHAR
+ )WITH(
+ type ='postgresql',
+ url ='jdbc:postgresql://localhost:9001/test?sslmode=disable',
+ userName ='dtstack',
+ password ='abc123',
+ tableName ='pv2',
+ parallelism ='1',
+ isCockroach='true'
+ )
+ ```
diff --git a/docs/consoleSink.md b/docs/consoleSink.md
deleted file mode 100644
index 206d7faaa..000000000
--- a/docs/consoleSink.md
+++ /dev/null
@@ -1,50 +0,0 @@
-## 1.格式:
-```
-CREATE TABLE tableName(
- colName colType,
- ...
- colNameX colType
- )WITH(
- type ='console',
- parallelism ='parllNum'
- );
-
-```
-
-## 2.支持版本
-没有限制
-
-## 3.表结构定义
-
-|参数名称|含义|
-|----|---|
-| tableName| 在 sql 中使用的名称;即注册到flink-table-env上的名称|
-| colName | 列名称|
-| colType | 列类型 [colType支持的类型](colType.md)|
-
-## 4.参数:
-
-|参数名称|含义|是否必填|默认值|
-|----|----|----|----|
-|type |表明 输出表类型[console]|是||
-| parallelism | 并行度设置|否|1|
-
-## 5.样例:
-```
-CREATE TABLE MyResult(
- name VARCHAR,
- channel VARCHAR
- )WITH(
- type ='console',
- parallelism ='1'
- )
- ```
-
- ## 6.输出结果:
- ```
- +------+---------+
- | name | channel |
- +------+---------+
- | aa | 02 |
- +------+---------+
- ```
\ No newline at end of file
diff --git a/docs/createView.md b/docs/createView.md
index 6f81fa8cb..42232ee0e 100644
--- a/docs/createView.md
+++ b/docs/createView.md
@@ -1,4 +1,4 @@
-##语法
+## 1.语法
```
CREATE VIEW viewName
[ (columnName[ , columnName]*) ]
@@ -7,7 +7,7 @@
CREATE VIEW viewName [ (columnName[ , columnName]*) ];
INSERT INTO viewName queryStatement;
```
-##样例
+## 2.样例
```
CREATE TABLE MyTable(
name varchar,
diff --git a/docs/elasticsearchSink.md b/docs/elasticsearchSink.md
index 69d69af75..9a406245a 100644
--- a/docs/elasticsearchSink.md
+++ b/docs/elasticsearchSink.md
@@ -27,7 +27,7 @@ CREATE TABLE tableName(
## 4.参数:
|参数名称|含义|是否必填|默认值|
|----|---|---|----|
-|type|表明 输出表类型[mysq|hbase|elasticsearch]|是||
+|type|表明 输出表类型[mysql\|hbase\|elasticsearch]|是||
|address | 连接ES Transport地址(tcp地址)|是||
|cluster | ES 集群名称 |是||
|index | 选择的ES上的index名称|是||
diff --git a/docs/hbaseSide.md b/docs/hbaseSide.md
index 0c4e545f9..07b03026a 100644
--- a/docs/hbaseSide.md
+++ b/docs/hbaseSide.md
@@ -35,7 +35,7 @@
|参数名称|含义|是否必填|默认值|
|----|---|---|----|
-| type | 表明维表的类型[hbase|mysql]|是||
+| type | 表明维表的类型[hbase\|mysql]|是||
| zookeeperQuorum | hbase 的zk地址;格式ip:port[;ip:port]|是||
| zookeeperParent | hbase 的zk parent路径|是||
| tableName | hbase 的表名称|是||
diff --git a/docs/hbaseSink.md b/docs/hbaseSink.md
index c6a15e766..b41abb281 100644
--- a/docs/hbaseSink.md
+++ b/docs/hbaseSink.md
@@ -30,7 +30,7 @@ hbase2.0
|参数名称|含义|是否必填|默认值|
|----|---|---|-----|
-|type | 表明 输出表类型[mysq|hbase|elasticsearch]|是||
+|type | 表明 输出表类型[mysql\|hbase\|elasticsearch]|是||
|zookeeperQuorum | hbase zk地址,多个直接用逗号隔开|是||
|zookeeperParent | zkParent 路径|是||
|tableName | 关联的hbase表名称|是||
diff --git a/docs/kafkaSource.md b/docs/kafkaSource.md
index 580eda6b4..d859bc219 100644
--- a/docs/kafkaSource.md
+++ b/docs/kafkaSource.md
@@ -9,20 +9,16 @@ CREATE TABLE tableName(
WATERMARK FOR colName AS withOffset( colName , delayTime )
)WITH(
type ='kafka09',
- kafka.bootstrap.servers ='ip:port,ip:port...',
- kafka.zookeeper.quorum ='ip:port,ip:port/zkparent',
- kafka.auto.offset.reset ='latest',
- kafka.topic ='topicName',
- parallelism ='parllNum',
- --timezone='America/Los_Angeles',
- timezone='Asia/Shanghai',
- sourcedatatype ='json' #可不设置
+ bootstrapServers ='ip:port,ip:port...',
+ zookeeperQuorum ='ip:port,ip:port/zkparent',
+ offsetReset ='latest',
+ topic ='topicName',
+ parallelism ='parllNum'
);
```
## 2.支持的版本
- kafka08,kafka09,kafka10,kafka11及以上版本
- **kafka读取和写入的版本必须一致,否则会有兼容性错误。**
+ kafka09,kafka10,kafka11
## 3.表结构定义
@@ -32,327 +28,76 @@ CREATE TABLE tableName(
| colName | 列名称|
| colType | 列类型 [colType支持的类型](colType.md)|
| function(colNameX) as aliasName | 支持在定义列信息的时候根据已有列类型生成新的列(函数可以使用系统函数和已经注册的UDF)|
-| WATERMARK FOR colName AS withOffset( colName , delayTime ) | 标识输入流生的watermake生成规则,根据指定的colName(当前支持列的类型为Long | Timestamp) 和delayTime生成waterMark 同时会在注册表的使用附带上rowtime字段(如果未指定则默认添加proctime字段);注意:添加该标识的使用必须设置系统参数 time.characteristic:EventTime; delayTime: 数据最大延迟时间(ms)|
+| WATERMARK FOR colName AS withOffset( colName , delayTime ) | 标识输入流生的watermake生成规则,根据指定的colName(当前支持列的类型为Long \| Timestamp) 和delayTime生成waterMark 同时会在注册表的使用附带上rowtime字段(如果未指定则默认添加proctime字段);注意:添加该标识的使用必须设置系统参数 time.characteristic:EventTime; delayTime: 数据最大延迟时间(ms)|
## 4.参数:
|参数名称|含义|是否必填|默认值|
|----|---|---|---|
-|type | kafka09 | 是|kafka08、kafka09、kafka10、kafka11、kafka(对应kafka1.0及以上版本)|
-|kafka.group.id | 需要读取的 groupId 名称|否||
-|kafka.bootstrap.servers | kafka bootstrap-server 地址信息(多个用逗号隔开)|是||
-|kafka.zookeeper.quorum | kafka zk地址信息(多个之间用逗号分隔)|是||
-|kafka.topic | 需要读取的 topic 名称|是||
-|patterntopic | topic是否是正则表达式格式(true|false) |否| false
-|kafka.auto.offset.reset | 读取的topic 的offset初始位置[latest|earliest|指定offset值({"0":12312,"1":12321,"2":12312},{"partition_no":offset_value})]|否|latest|
+|type | kafka09 | 是||
+|bootstrapServers | kafka bootstrap-server 地址信息(多个用逗号隔开)|是||
+|zookeeperQuorum | kafka zk地址信息(多个之间用逗号分隔)|是||
+|topic | 需要读取的 topic 名称|是||
+|topicIsPattern | topic是否是正则表达式格式|否| false
+|groupId | 需要读取的 groupId 名称|否||
+|offsetReset | 读取的topic 的offset初始位置[latest\|earliest\|指定offset值({"0":12312,"1":12321,"2":12312},{"partition_no":offset_value})]|否|latest|
|parallelism | 并行度设置|否|1|
-|sourcedatatype | 数据类型|否|json|
-|timezone|时区设置[timezone支持的参数](timeZone.md)|否|'Asia/Shanghai'
-**kafka相关参数可以自定义,使用kafka.开头即可。**
-
+
## 5.样例:
```
-CREATE TABLE MyTable(
- name varchar,
- channel varchar,
- pv INT,
- xctime bigint,
- CHARACTER_LENGTH(channel) AS timeLeng
- )WITH(
- type ='kafka09',
- kafka.bootstrap.servers ='172.16.8.198:9092',
- kafka.zookeeper.quorum ='172.16.8.198:2181/kafka',
- kafka.auto.offset.reset ='latest',
- kafka.topic ='nbTest1,nbTest2,nbTest3',
- --kafka.topic ='mqTest.*',
- --patterntopic='true'
- parallelism ='1',
- sourcedatatype ='json' #可不设置
- );
-```
-## 6.支持嵌套json、数据类型字段解析
-
-嵌套json解析示例
-
-json: {"name":"tom", "obj":{"channel": "root"}, "pv": 4, "xctime":1572932485}
-```
-CREATE TABLE MyTable(
- name varchar,
- obj.channel varchar as channel,
- pv INT,
- xctime bigint,
- CHARACTER_LENGTH(channel) AS timeLeng
+JSON嵌套:
+CREATE TABLE pft_report_order_two(
+ message.after.id int AS id,
+ message.after.date int AS oper_date,
+ message.after.fid int AS fid,
+ message.after.reseller_id int AS reseller_id,
+ message.after.lid int AS lid,
+ message.after.tid int AS tid,
+ message.after.pid int AS pid,
+ message.after.level int AS level,
+ message.after.operate_id int AS operate_id,
+ message.after.order_num int AS order_num,
+ message.after.order_ticket int AS order_ticket,
+ message.after.cancel_num int AS cancel_num,
+ message.after.cancel_ticket int AS cancel_ticket,
+ message.after.revoke_num int AS revoke_num,
+ message.after.revoke_ticket int AS revoke_ticket,
+ message.after.cost_money int AS cost_money,
+ message.after.sale_money int AS sale_money,
+ message.after.cancel_cost_money int AS cancel_cost_money,
+ message.after.cancel_sale_money int AS cancel_sale_money,
+ message.after.revoke_cost_money int AS revoke_cost_money,
+ message.after.revoke_sale_money int AS revoke_sale_money,
+ message.after.service_money int AS service_money,
+ message.after.orders_info varchar AS orders_info,
+ message.after.cancel_orders_info varchar AS cancel_orders_info,
+ message.after.revoke_orders_info varchar AS revoke_orders_info,
+ message.after.pay_way int AS pay_way,
+ message.after.channel int AS channel,
+ message.after.update_time int AS update_time,
+ message.after.site_id int AS site_id
)WITH(
- type ='kafka09',
- bootstrapServers ='172.16.8.198:9092',
- zookeeperQuorum ='172.16.8.198:2181/kafka',
- offsetReset ='latest',
- groupId='nbTest',
- topic ='nbTest1,nbTest2,nbTest3',
- --- topic ='mqTest.*',
- ---topicIsPattern='true',
+ type ='kafka10',
+ bootstrapServers = 'xxx:9092',
+ zookeeperQuorum = 'xx:2181/kafka',
+ offsetReset = 'latest',
+ topic ='mqTest01',
parallelism ='1'
);
-```
-数组类型字段解析示例
+WATERMARK:
-json: {"name":"tom", "obj":{"channel": "root"}, "user": [{"pv": 4}, {"pv": 10}], "xctime":1572932485}
-```
CREATE TABLE MyTable(
- name varchar,
- obj.channel varchar as channel,
- user[1].pv INT as pv,
- xctime bigint,
- CHARACTER_LENGTH(channel) AS timeLeng
- )WITH(
- type ='kafka09',
- bootstrapServers ='172.16.8.198:9092',
- zookeeperQuorum ='172.16.8.198:2181/kafka',
- offsetReset ='latest',
- groupId='nbTest',
- topic ='nbTest1,nbTest2,nbTest3',
- --- topic ='mqTest.*',
- ---topicIsPattern='true',
- parallelism ='1'
- );
-```
-or
-
-json: {"name":"tom", "obj":{"channel": "root"}, "pv": [4, 7, 10], "xctime":1572932485}
-```
-CREATE TABLE MyTable(
- name varchar,
- obj.channel varchar as channel,
- pv[1] INT as pv,
- xctime bigint,
- CHARACTER_LENGTH(channel) AS timeLeng
- )WITH(
- type ='kafka09',
- bootstrapServers ='172.16.8.198:9092',
- zookeeperQuorum ='172.16.8.198:2181/kafka',
- offsetReset ='latest',
- groupId='nbTest',
- topic ='nbTest1,nbTest2,nbTest3',
- --- topic ='mqTest.*',
- ---topicIsPattern='true',
- parallelism ='1'
- );
-```
-# 二、csv格式数据源
-根据字段分隔符进行数据分隔,按顺序匹配sql中配置的列。如数据分隔列数和sql中配置的列数相等直接匹配;如不同参照lengthcheckpolicy策略处理。
-## 1.参数:
-
-|参数名称|含义|是否必填|默认值|
-|----|---|---|---|
-|type | kafka09 | 是||
-|kafka.bootstrap.servers | kafka bootstrap-server 地址信息(多个用逗号隔开)|是||
-|kafka.zookeeper.quorum | kafka zk地址信息(多个之间用逗号分隔)|是||
-|kafka.topic | 需要读取的 topic 名称|是||
-|kafka.auto.offset.reset | 读取的topic 的offset初始位置[latest|earliest]|否|latest|
-|parallelism | 并行度设置 |否|1|
-|sourcedatatype | 数据类型|是 |csv|
-|fielddelimiter | 字段分隔符|是 ||
-|lengthcheckpolicy | 单行字段条数检查策略 |否|可选,默认为SKIP,其它可选值为EXCEPTION、PAD。SKIP:字段数目不符合时跳过 。EXCEPTION:字段数目不符合时抛出异常。PAD:按顺序填充,不存在的置为null。|
-**kafka相关参数可以自定义,使用kafka.开头即可。**
-
-## 2.样例:
-```
-CREATE TABLE MyTable(
- name varchar,
channel varchar,
pv INT,
- xctime bigint,
- CHARACTER_LENGTH(channel) AS timeLeng
+ xctime bigint
+ WATERMARK FOR xctime AS withOffset(xctime,3000)
)WITH(
- type ='kafka09',
- kafka.bootstrap.servers ='172.16.8.198:9092',
- kafka.zookeeper.quorum ='172.16.8.198:2181/kafka',
- kafka.auto.offset.reset ='latest',
- kafka.topic ='nbTest1',
- --kafka.topic ='mqTest.*',
- --kafka.topicIsPattern='true'
- parallelism ='1',
- sourcedatatype ='csv',
- fielddelimiter ='\|',
- lengthcheckpolicy = 'PAD'
+ type='kafka11',
+ bootstrapServers='172.16.8.107:9092',
+ groupId='mqTest',
+ offsetReset='latest',
+ topic='mqTest01'
);
- ```
-# 三、text格式数据源UDF自定义拆分
-Kafka源表数据解析流程:Kafka Source Table -> UDTF ->Realtime Compute -> SINK。从Kakfa读入的数据,都是VARBINARY(二进制)格式,对读入的每条数据,都需要用UDTF将其解析成格式化数据。
- 与其他格式不同,本格式定义DDL必须与以下SQL一摸一样,表中的五个字段顺序务必保持一致:
-
-## 1. 定义源表,注意:kafka源表DDL字段必须与以下例子一模一样。WITH中参数可改。
-```
-create table kafka_stream(
- _topic STRING,
- _messageKey STRING,
- _message STRING,
- _partition INT,
- _offset BIGINT,
-) with (
- type ='kafka09',
- kafka.bootstrap.servers ='172.16.8.198:9092',
- kafka.zookeeper.quorum ='172.16.8.198:2181/kafka',
- kafka.auto.offset.reset ='latest',
- kafka.topic ='nbTest1',
- parallelism ='1',
- sourcedatatype='text'
- )
-```
-## 2.参数:
-
-|参数名称|含义|是否必填|默认值|
-|----|---|---|---|
-|type | kafka09 | 是||
-|kafka.bootstrap.servers | kafka bootstrap-server 地址信息(多个用逗号隔开)|是||
-|kafka.zookeeper.quorum | kafka zk地址信息(多个之间用逗号分隔)|是||
-|kafka.topic | 需要读取的 topic 名称|是||
-|kafka.auto.offset.reset | 读取的topic 的offset初始位置[latest|earliest]|否|latest|
-|parallelism | 并行度设置|否|1|
-|sourcedatatype | 数据类型|否|text|
-**kafka相关参数可以自定义,使用kafka.开头即可。**
-## 2.自定义:
-从kafka读出的数据,需要进行窗口计算。 按照实时计算目前的设计,滚窗/滑窗等窗口操作,需要(且必须)在源表DDL上定义Watermark。Kafka源表比较特殊。如果要以kafka中message字段中的的Event Time进行窗口操作,
-需要先从message字段,使用UDX解析出event time,才能定义watermark。 在kafka源表场景中,需要使用计算列。 假设,kafka中写入的数据如下:
-2018-11-11 00:00:00|1|Anna|female整个计算流程为:Kafka SOURCE->UDTF->Realtime Compute->RDS SINK(单一分隔符可直接使用类csv格式模板,自定义适用于更复杂的数据类型,本说明只做参考)
-
-**SQL**
-```
--- 定义解析Kakfa message的UDTF
- CREATE FUNCTION kafkapaser AS 'com.XXXX.kafkaUDTF';
- CREATE FUNCTION kafkaUDF AS 'com.XXXX.kafkaUDF';
- -- 定义源表,注意:kafka源表DDL字段必须与以下例子一模一样。WITH中参数可改。
- create table kafka_src (
- _topic STRING,
- _messageKey STRING,
- _message STRING,
- _partition INT,
- _offset BIGINT,
- ctime AS TO_TIMESTAMP(kafkaUDF(_message)), -- 定义计算列,计算列可理解为占位符,源表中并没有这一列,其中的数据可经过下游计算得出。注意计算里的类型必须为timestamp才能在做watermark。
- watermark for ctime as withoffset(ctime,0) -- 在计算列上定义watermark
- ) WITH (
- type = 'kafka010', -- Kafka Source类型,与Kafka版本强相关,目前支持的Kafka版本请参考本文档
- topic = 'test_kafka_topic',
- ...
- );
- create table rds_sink (
- name VARCHAR,
- age INT,
- grade VARCHAR,
- updateTime TIMESTAMP
- ) WITH(
- type='mysql',
- url='jdbc:mysql://localhost:3306/test',
- tableName='test4',
- userName='test',
- password='XXXXXX'
- );
- -- 使用UDTF,将二进制数据解析成格式化数据
- CREATE VIEW input_view (
- name,
- age,
- grade,
- updateTime
- ) AS
- SELECT
- COUNT(*) as cnt,
- T.ctime,
- T.order,
- T.name,
- T.sex
- from
- kafka_src as S,
- LATERAL TABLE (kafkapaser _message)) as T (
- ctime,
- order,
- name,
- sex
- )
- Group BY T.sex,
- TUMBLE(ctime, INTERVAL '1' MINUTE);
- -- 对input_view中输出的数据做计算
- CREATE VIEW view2 (
- cnt,
- sex
- ) AS
- SELECT
- COUNT(*) as cnt,
- T.sex
- from
- input_view
- Group BY sex, TUMBLE(ctime, INTERVAL '1' MINUTE);
- -- 使用解析出的格式化数据进行计算,并将结果输出到RDS中
- insert into rds_sink
- SELECT
- cnt,sex
- from view2;
- ```
-**UDF&UDTF**
```
-package com.XXXX;
- import com.XXXX.fastjson.JSONObject;
- import org.apache.flink.table.functions.TableFunction;
- import org.apache.flink.table.types.DataType;
- import org.apache.flink.table.types.DataTypes;
- import org.apache.flink.types.Row;
- import java.io.UnsupportedEncodingException;
- /**
- 以下例子解析输入Kafka中的JSON字符串,并将其格式化输出
- **/
- public class kafkaUDTF extends TableFunction {
- public void eval(byte[] message) {
- try {
- // 读入一个二进制数据,并将其转换为String格式
- String msg = new String(message, "UTF-8");
- // 提取JSON Object中各字段
- String ctime = Timestamp.valueOf(data.split('\\|')[0]);
- String order = data.split('\\|')[1];
- String name = data.split('\\|')[2];
- String sex = data.split('\\|')[3];
- // 将解析出的字段放到要输出的Row()对象
- Row row = new Row(4);
- row.setField(0, ctime);
- row.setField(1, age);
- row.setField(2, grade);
- row.setField(3, updateTime);
- System.out.println("Kafka message str ==>" + row.toString());
- // 输出一行
- collect(row);
- } catch (ClassCastException e) {
- System.out.println("Input data format error. Input data " + msg + "is not json string");
- }
- } catch (UnsupportedEncodingException e) {
- e.printStackTrace();
- }
- }
- @Override
- // 如果返回值是Row,就必须重载实现这个方法,显式地告诉系统返回的字段类型
- // 定义输出Row()对象的字段类型
- public DataType getResultType(Object[] arguments, Class[] argTypes) {
- return DataTypes.createRowType(DataTypes.TIMESTAMP,DataTypes.STRING, DataTypes.Integer, DataTypes.STRING,DataTypes.STRING);
- }
- }
-
- package com.dp58;
- package com.dp58.sql.udx;
- import org.apache.flink.table.functions.FunctionContext;
- import org.apache.flink.table.functions.ScalarFunction;
- public class KafkaUDF extends ScalarFunction {
- // 可选,open方法可以不写
- // 需要import org.apache.flink.table.functions.FunctionContext;
- public String eval(byte[] message) {
- // 读入一个二进制数据,并将其转换为String格式
- String msg = new String(message, "UTF-8");
- return msg.split('\\|')[0];
- }
- public long eval(String b, String c) {
- return eval(b) + eval(c);
- }
- //可选,close方法可以不写
- @Override
- public void close() {
- }
- }
- ```
diff --git a/docs/kuduSide.md b/docs/kuduSide.md
index 5a73596ad..fece4a480 100644
--- a/docs/kuduSide.md
+++ b/docs/kuduSide.md
@@ -64,21 +64,21 @@ kudu 1.9.0+cdh6.2.0
|参数名称|含义|是否必填|默认值|
|----|---|---|-----|
-|type | 表明维表的类型[hbase|mysql|kudu]|是||
+|type | 表明维表的类型[hbase\|mysql\|kudu]|是||
| kuduMasters | kudu master节点的地址;格式ip[ip,ip2]|是||
| tableName | kudu 的表名称|是||
-| workerCount | 工作线程数 |否||
-| defaultOperationTimeoutMs | 写入操作超时时间 |否||
-| defaultSocketReadTimeoutMs | socket读取超时时间 |否||
-| primaryKey | 需要过滤的主键 ALL模式独有 |否||
-| lowerBoundPrimaryKey | 需要过滤的主键的最小值 ALL模式独有 |否||
-| upperBoundPrimaryKey | 需要过滤的主键的最大值(不包含) ALL模式独有 |否||
-| workerCount | 工作线程数 |否||
-| defaultOperationTimeoutMs | 写入操作超时时间 |否||
-| defaultSocketReadTimeoutMs | socket读取超时时间 |否||
-| batchSizeBytes |返回数据的大小 | 否||
-| limitNum |返回数据的条数 | 否||
-| isFaultTolerant |查询是否容错 查询失败是否扫描第二个副本 默认false 容错 | 否||
+| workerCount | 工作线程数 |否|
+| defaultOperationTimeoutMs | 写入操作超时时间 |否|
+| defaultSocketReadTimeoutMs | socket读取超时时间 |否|
+| primaryKey | 需要过滤的主键 ALL模式独有 |否|
+| lowerBoundPrimaryKey | 需要过滤的主键的最小值 ALL模式独有 |否|
+| upperBoundPrimaryKey | 需要过滤的主键的最大值(不包含) ALL模式独有 |否|
+| workerCount | 工作线程数 |否|
+| defaultOperationTimeoutMs | 写入操作超时时间 |否|
+| defaultSocketReadTimeoutMs | socket读取超时时间 |否|
+| batchSizeBytes |返回数据的大小 | 否|
+| limitNum |返回数据的条数 | 否|
+| isFaultTolerant |查询是否容错 查询失败是否扫描第二个副本 默认false 容错 | 否|
| cache | 维表缓存策略(NONE/LRU/ALL)|否|NONE|
| partitionedJoin | 是否在維表join之前先根据 設定的key 做一次keyby操作(可以減少维表的数据缓存量)|否|false|
diff --git a/docs/kuduSink.md b/docs/kuduSink.md
index 990dfdd7d..ba607c7ec 100644
--- a/docs/kuduSink.md
+++ b/docs/kuduSink.md
@@ -34,10 +34,10 @@ kudu 1.9.0+cdh6.2.0
|参数名称|含义|是否必填|默认值|
|----|---|---|-----|
-|type | 表名 输出表类型[mysq|hbase|elasticsearch|redis|kudu]|是||
+|type | 表明 输出表类型[mysql\|hbase\|elasticsearch\|redis\|kudu]|是||
| kuduMasters | kudu master节点的地址;格式ip[ip,ip2]|是||
| tableName | kudu 的表名称|是||
-| writeMode | 写入kudu的模式 insert|update|upsert |否 |upsert
+| writeMode | 写入kudu的模式 insert\|update\|upsert |否 |upsert
| workerCount | 工作线程数 |否|
| defaultOperationTimeoutMs | 写入操作超时时间 |否|
| defaultSocketReadTimeoutMs | socket读取超时时间 |否|
diff --git a/docs/mysqlSide.md b/docs/mysqlSide.md
index f0eb16090..d0fec5832 100644
--- a/docs/mysqlSide.md
+++ b/docs/mysqlSide.md
@@ -27,7 +27,7 @@
|参数名称|含义|
|----|---|
- | tableName | mysql表名称|
+ | tableName | 注册到flink的表名称(可选填;不填默认和hbase对应的表名称相同)|
| colName | 列名称|
| colType | 列类型 [colType支持的类型](colType.md)|
| PERIOD FOR SYSTEM_TIME | 关键字表明该定义的表为维表信息|
@@ -37,7 +37,7 @@
|参数名称|含义|是否必填|默认值|
|----|---|---|----|
- | type | 表明维表的类型 mysql |是||
+ | type | 表明维表的类型[hbase\|mysql] |是||
| url | 连接mysql数据库 jdbcUrl |是||
| userName | mysql连接用户名 |是||
| password | mysql连接密码|是||
@@ -52,9 +52,7 @@
* LRU:
* cacheSize: 缓存的条目数量
* cacheTTLMs:缓存的过期时间(ms)
- * cacheMode: (unordered|ordered)异步加载是有序还是无序,默认有序。
- * asyncCapacity:异步请求容量,默认1000
- * asyncTimeout:异步请求超时时间,默认10000毫秒
+
## 5.样例
```
@@ -72,9 +70,6 @@ create table sideTable(
cache ='LRU',
cacheSize ='10000',
cacheTTLMs ='60000',
- cacheMode='unordered',
- asyncCapacity='1000',
- asyncTimeout='10000'
parallelism ='1',
partitionedJoin='false'
);
diff --git a/docs/mysqlSink.md b/docs/mysqlSink.md
index 3218f9371..192c4944a 100644
--- a/docs/mysqlSink.md
+++ b/docs/mysqlSink.md
@@ -22,7 +22,7 @@ CREATE TABLE tableName(
|参数名称|含义|
|----|---|
-| tableName| mysql表名称|
+| tableName| 在 sql 中使用的名称;即注册到flink-table-env上的名称|
| colName | 列名称|
| colType | 列类型 [colType支持的类型](colType.md)|
@@ -30,7 +30,7 @@ CREATE TABLE tableName(
|参数名称|含义|是否必填|默认值|
|----|----|----|----|
-|type |表名 输出表类型[mysq|hbase|elasticsearch]|是||
+|type |表明 输出表类型[mysql\|hbase\|elasticsearch]|是||
|url | 连接mysql数据库 jdbcUrl |是||
|userName | mysql连接用户名 |是||
| password | mysql连接密码|是||
diff --git a/docs/oracleSide.md b/docs/oracleSide.md
deleted file mode 100644
index 74fc56680..000000000
--- a/docs/oracleSide.md
+++ /dev/null
@@ -1,85 +0,0 @@
-
-## 1.格式:
-```
- CREATE TABLE tableName(
- colName cloType,
- ...
- PRIMARY KEY(keyInfo),
- PERIOD FOR SYSTEM_TIME
- )WITH(
- type='oracle',
- url='jdbcUrl',
- userName='dbUserName',
- password='dbPwd',
- tableName='tableName',
- cache ='LRU',
- schema = 'MQTEST',
- parallelism ='1',
- partitionedJoin='false'
- );
-```
-
-# 2.支持版本
- 10g 11g
-
-## 3.表结构定义
-
- |参数名称|含义|
- |----|---|
- | tableName | oracle表名称|
- | colName | 列名称|
- | colType | 列类型 [colType支持的类型](colType.md)|
- | PERIOD FOR SYSTEM_TIME | 关键字表明该定义的表为维表信息|
- | PRIMARY KEY(keyInfo) | 维表主键定义;多个列之间用逗号隔开|
-
-## 4.参数
-
- |参数名称|含义|是否必填|默认值|
- |----|---|---|----|
- | type | 表明维表的类型 oracle |是||
- | url | 连接oracle数据库 jdbcUrl |是||
- | userName | oracle连接用户名 |是||
- | password | oracle连接密码|是||
- | tableName | oracle表名称|是||
- | schema | oracle 的schema|否|当前登录用户|
- | cache | 维表缓存策略(NONE/LRU)|否|NONE|
- | partitionedJoin | 是否在維表join之前先根据 設定的key 做一次keyby操作(可以減少维表的数据缓存量)|否|false|
-
- ----------
- > 缓存策略
- * NONE: 不做内存缓存
- * LRU:
- * cacheSize: 缓存的条目数量
- * cacheTTLMs:缓存的过期时间(ms)
- * cacheMode: (unordered|ordered)异步加载是有序还是无序,默认有序。
- * asyncCapacity:异步请求容量,默认1000
- * asyncTimeout:异步请求超时时间,默认10000毫秒
-
-## 5.样例
-```
-create table sideTable(
- channel varchar,
- xccount int,
- PRIMARY KEY(channel),
- PERIOD FOR SYSTEM_TIME
- )WITH(
- type='oracle',
- url='jdbc:oracle:thin:@xx.xx.xx.xx:1521:orcl',
- userName='xx',
- password='xx',
- tableName='sidetest',
- cache ='LRU',
- cacheSize ='10000',
- cacheTTLMs ='60000',
- cacheMode='unordered',
- asyncCapacity='1000',
- asyncTimeout='10000'
- parallelism ='1',
- partitionedJoin='false',
- schema = 'MQTEST'
- );
-
-
-```
-
-
diff --git a/docs/oracleSink.md b/docs/oracleSink.md
deleted file mode 100644
index 47ddd8371..000000000
--- a/docs/oracleSink.md
+++ /dev/null
@@ -1,55 +0,0 @@
-## 1.格式:
-```
-CREATE TABLE tableName(
- colName colType,
- ...
- colNameX colType
- )WITH(
- type ='oracle',
- url ='jdbcUrl',
- userName ='userName',
- password ='pwd',
- tableName ='tableName',
- parallelism ='parllNum'
- );
-
-```
-
-## 2.支持版本
- 10g 11g
-
-## 3.表结构定义
-
-|参数名称|含义|
-|----|---|
-| tableName| oracle表名称|
-| colName | 列名称|
-| colType | 列类型 [colType支持的类型](colType.md)|
-
-## 4.参数:
-
-|参数名称|含义|是否必填|默认值|
-|----|----|----|----|
-|type |表名 输出表类型[mysq|hbase|elasticsearch|oracle]|是||
-|url | 连接oracle数据库 jdbcUrl |是||
-|userName | oracle连接用户名 |是||
-| password | oracle连接密码|是||
-| tableName | oracle表名称|是||
-| schema | oracle 的schema|否|当前登录用户|
-| parallelism | 并行度设置|否|1|
-
-## 5.样例:
-```
-CREATE TABLE MyResult(
- channel VARCHAR,
- pv VARCHAR
- )WITH(
- type ='oracle',
- url ='jdbc:oracle:thin:@xx.xx.xx.xx:1521:orcl',
- userName ='dtstack',
- password ='abc123',
- tableName ='pv2',
- schema = 'MQTEST',
- parallelism ='1'
- )
- ```
\ No newline at end of file
diff --git a/docs/postgresqlSide.md b/docs/postgresqlSide.md
index 68d10b869..dc56a9c72 100644
--- a/docs/postgresqlSide.md
+++ b/docs/postgresqlSide.md
@@ -43,7 +43,7 @@
| password | postgresql连接密码|是||
| tableName | postgresql表名称|是||
| tableName | postgresql 的表名称|是||
- | cache | 维表缓存策略(NONE/LRU/ALL)|否|NONE|
+ | cache | 维表缓存策略(NONE/LRU)|否|NONE|
| partitionedJoin | 是否在維表join之前先根据 設定的key 做一次keyby操作(可以減少维表的数据缓存量)|否|false|
----------
diff --git a/docs/postgresqlSink.md b/docs/postgresqlSink.md
index da09c4c34..e382bfccf 100644
--- a/docs/postgresqlSink.md
+++ b/docs/postgresqlSink.md
@@ -10,7 +10,8 @@ CREATE TABLE tableName(
userName ='userName',
password ='pwd',
tableName ='tableName',
- parallelism ='parllNum'
+ parallelism ='parllNum',
+ keyField='id_'
);
```
@@ -30,15 +31,13 @@ CREATE TABLE tableName(
|参数名称|含义|是否必填|默认值|
|----|----|----|----|
-| type |表明 输出表类型[postgresql]|是||
-| url | 连接postgresql数据库 jdbcUrl |是||
-| userName | postgresql连接用户名 |是||
+|type |表明 输出表类型[postgresql]|是||
+|url | 连接postgresql数据库 jdbcUrl |是||
+|userName | postgresql连接用户名 |是||
| password | postgresql连接密码|是||
| tableName | postgresqll表名称|是||
| parallelism | 并行度设置|否|1|
-| isUpsert | 使用upsert模式插入数据(版本9.5之后才支持upsert) |否|false
-| keyField | 设置更新主键字段名(isupsert为true时为必填项)|否|
-
+| keyField | 设置更新主键,当insert表存在主键时,需要按此主键执行upsert操作|否|1|
## 5.样例:
```
CREATE TABLE MyResult(
@@ -50,6 +49,7 @@ CREATE TABLE MyResult(
userName ='dtstack',
password ='abc123',
tableName ='pv2',
- parallelism ='1'
+ parallelism ='1',
+ keyField='id_'
)
```
diff --git a/docs/redisSide.md b/docs/redisSide.md
index 9c7f4b47e..52d637b70 100644
--- a/docs/redisSide.md
+++ b/docs/redisSide.md
@@ -11,7 +11,6 @@
password = 'redisPwd',
database = 'dbName',
tableName ='sideTableName',
- redisType = '1',
cache ='LRU',
cacheSize ='10000',
cacheTTLMs ='60000'
@@ -33,11 +32,9 @@
|参数名称|含义|是否必填|默认值|
|----|---|---|----|
-| type | 表明维表的类型[hbase|mysql|redis]|是||
+| type | 表明维表的类型[hbase\|mysql\|redis]|是||
| url | redis 的地址;格式ip:port[,ip:port]|是||
| password | redis 的密码 |是||
-| redisType | redis模式(1 单机,2 哨兵, 3 集群)| 是 |
-| masterName | 主节点名称(哨兵模式下为必填项) | 否 |
| database | reids 的数据库地址|否||
| tableName | redis 的表名称|是||
| cache | 维表缓存策略(NONE/LRU/ALL)|否|NONE|
@@ -63,7 +60,6 @@ create table sideTable(
url='172.16.10.79:6379',
password='abc123',
database='0',
- redisType = '1',
tableName='sidetest',
cache = 'LRU',
cacheTTLMs='10000'
diff --git a/docs/redisSink.md b/docs/redisSink.md
index 6a754e5c6..a3c80914e 100644
--- a/docs/redisSink.md
+++ b/docs/redisSink.md
@@ -9,7 +9,6 @@ CREATE TABLE tableName(
url = 'ip:port',
database ='dbName',
password ='pwd',
- redisType='1',
tableName ='tableName',
parallelism ='parllNum'
);
@@ -33,14 +32,12 @@ redis5.0
|参数名称|含义|是否必填|默认值|
|----|---|---|-----|
-| type | 表名 输出表类型[mysq|hbase|elasticsearch|redis]|是||
+|type | 表明 输出表类型[mysql\|hbase\|elasticsearch\|redis]|是||
| url | redis 的地址;格式ip:port[,ip:port]|是||
| password | redis 的密码 |是||
-| redisType | redis模式(1 单机,2 哨兵, 3 集群)| 是 |
-| masterName | 主节点名称(哨兵模式下为必填项) | 否 |
| database | reids 的数据库地址|否||
| tableName | redis 的表名称|是||
-| parallelism | 并行度设置|否|1|
+|parallelism | 并行度设置|否|1|
## 5.样例:
@@ -54,8 +51,7 @@ redis5.0
url='172.16.10.79:6379',
password='abc123',
database='0',
- redisType='1',
- tableName='sinktoredis'
+      tableName='sinktoredis'
);
```
\ No newline at end of file
diff --git a/docs/serverSocketSource.md b/docs/serverSocketSource.md
deleted file mode 100644
index 4f889d829..000000000
--- a/docs/serverSocketSource.md
+++ /dev/null
@@ -1,37 +0,0 @@
-
-## 1.数据格式:
-```
-数据现在只支持json格式 {"xx":"bb","cc":"dd"}
-
-CREATE TABLE MyTable(
- channel varchar,
- pv int,
- xctime date,
- xtime date
-
- )WITH(
- type='serversocket',
- host='127.0.0.1',
- port='8888',
- delimiter=';',
- maxNumRetries='100'
- );
-```
-
-
-## 2.参数:
-
-|参数名称|含义|是否必填|默认值|
-|----|---|---|---|
-|type | serversocket | 是||
-|host | server host|是||
-|port | server port|是||
-|delimiter| 每条json数据的分割符(比如:;)|是||
-|maxNumRetries| 最大重连次数 (大于0)|是||
-
-
-## 3.Server端样例:
-```
-String JsonStr = "{\"CHANNEL\":\"xc3\",\"pv\":1234567,\"xdate\":\"2018-12-07\",\"xtime\":\"2018-12-15\"};";
-
-```
diff --git a/docs/timeZone.md b/docs/timeZone.md
deleted file mode 100644
index ec6aa0ca3..000000000
--- a/docs/timeZone.md
+++ /dev/null
@@ -1,601 +0,0 @@
-* Africa/Abidjan
-* Africa/Accra
-* Africa/Addis_Ababa
-* Africa/Algiers
-* Africa/Asmara
-* Africa/Asmera
-* Africa/Bamako
-* Africa/Bangui
-* Africa/Banjul
-* Africa/Bissau
-* Africa/Blantyre
-* Africa/Brazzaville
-* Africa/Bujumbura
-* Africa/Cairo
-* Africa/Casablanca
-* Africa/Ceuta
-* Africa/Conakry
-* Africa/Dakar
-* Africa/Dar_es_Salaam
-* Africa/Djibouti
-* Africa/Douala
-* Africa/El_Aaiun
-* Africa/Freetown
-* Africa/Gaborone
-* Africa/Harare
-* Africa/Johannesburg
-* Africa/Juba
-* Africa/Kampala
-* Africa/Khartoum
-* Africa/Kigali
-* Africa/Kinshasa
-* Africa/Lagos
-* Africa/Libreville
-* Africa/Lome
-* Africa/Luanda
-* Africa/Lubumbashi
-* Africa/Lusaka
-* Africa/Malabo
-* Africa/Maputo
-* Africa/Maseru
-* Africa/Mbabane
-* Africa/Mogadishu
-* Africa/Monrovia
-* Africa/Nairobi
-* Africa/Ndjamena
-* Africa/Niamey
-* Africa/Nouakchott
-* Africa/Ouagadougou
-* Africa/Porto-Novo
-* Africa/Sao_Tome
-* Africa/Timbuktu
-* Africa/Tripoli
-* Africa/Tunis
-* Africa/Windhoek
-* America/Adak
-* America/Anchorage
-* America/Anguilla
-* America/Antigua
-* America/Araguaina
-* America/Argentina/Buenos_Aires
-* America/Argentina/Catamarca
-* America/Argentina/ComodRivadavia
-* America/Argentina/Cordoba
-* America/Argentina/Jujuy
-* America/Argentina/La_Rioja
-* America/Argentina/Mendoza
-* America/Argentina/Rio_Gallegos
-* America/Argentina/Salta
-* America/Argentina/San_Juan
-* America/Argentina/San_Luis
-* America/Argentina/Tucuman
-* America/Argentina/Ushuaia
-* America/Aruba
-* America/Asuncion
-* America/Atikokan
-* America/Atka
-* America/Bahia
-* America/Bahia_Banderas
-* America/Barbados
-* America/Belem
-* America/Belize
-* America/Blanc-Sablon
-* America/Boa_Vista
-* America/Bogota
-* America/Boise
-* America/Buenos_Aires
-* America/Cambridge_Bay
-* America/Campo_Grande
-* America/Cancun
-* America/Caracas
-* America/Catamarca
-* America/Cayenne
-* America/Cayman
-* America/Chicago
-* America/Chihuahua
-* America/Coral_Harbour
-* America/Cordoba
-* America/Costa_Rica
-* America/Creston
-* America/Cuiaba
-* America/Curacao
-* America/Danmarkshavn
-* America/Dawson
-* America/Dawson_Creek
-* America/Denver
-* America/Detroit
-* America/Dominica
-* America/Edmonton
-* America/Eirunepe
-* America/El_Salvador
-* America/Ensenada
-* America/Fort_Nelson
-* America/Fort_Wayne
-* America/Fortaleza
-* America/Glace_Bay
-* America/Godthab
-* America/Goose_Bay
-* America/Grand_Turk
-* America/Grenada
-* America/Guadeloupe
-* America/Guatemala
-* America/Guayaquil
-* America/Guyana
-* America/Halifax
-* America/Havana
-* America/Hermosillo
-* America/Indiana/Indianapolis
-* America/Indiana/Knox
-* America/Indiana/Marengo
-* America/Indiana/Petersburg
-* America/Indiana/Tell_City
-* America/Indiana/Vevay
-* America/Indiana/Vincennes
-* America/Indiana/Winamac
-* America/Indianapolis
-* America/Inuvik
-* America/Iqaluit
-* America/Jamaica
-* America/Jujuy
-* America/Juneau
-* America/Kentucky/Louisville
-* America/Kentucky/Monticello
-* America/Knox_IN
-* America/Kralendijk
-* America/La_Paz
-* America/Lima
-* America/Los_Angeles
-* America/Louisville
-* America/Lower_Princes
-* America/Maceio
-* America/Managua
-* America/Manaus
-* America/Marigot
-* America/Martinique
-* America/Matamoros
-* America/Mazatlan
-* America/Mendoza
-* America/Menominee
-* America/Merida
-* America/Metlakatla
-* America/Mexico_City
-* America/Miquelon
-* America/Moncton
-* America/Monterrey
-* America/Montevideo
-* America/Montreal
-* America/Montserrat
-* America/Nassau
-* America/New_York
-* America/Nipigon
-* America/Nome
-* America/Noronha
-* America/North_Dakota/Beulah
-* America/North_Dakota/Center
-* America/North_Dakota/New_Salem
-* America/Ojinaga
-* America/Panama
-* America/Pangnirtung
-* America/Paramaribo
-* America/Phoenix
-* America/Port-au-Prince
-* America/Port_of_Spain
-* America/Porto_Acre
-* America/Porto_Velho
-* America/Puerto_Rico
-* America/Punta_Arenas
-* America/Rainy_River
-* America/Rankin_Inlet
-* America/Recife
-* America/Regina
-* America/Resolute
-* America/Rio_Branco
-* America/Rosario
-* America/Santa_Isabel
-* America/Santarem
-* America/Santiago
-* America/Santo_Domingo
-* America/Sao_Paulo
-* America/Scoresbysund
-* America/Shiprock
-* America/Sitka
-* America/St_Barthelemy
-* America/St_Johns
-* America/St_Kitts
-* America/St_Lucia
-* America/St_Thomas
-* America/St_Vincent
-* America/Swift_Current
-* America/Tegucigalpa
-* America/Thule
-* America/Thunder_Bay
-* America/Tijuana
-* America/Toronto
-* America/Tortola
-* America/Vancouver
-* America/Virgin
-* America/Whitehorse
-* America/Winnipeg
-* America/Yakutat
-* America/Yellowknife
-* Antarctica/Casey
-* Antarctica/Davis
-* Antarctica/DumontDUrville
-* Antarctica/Macquarie
-* Antarctica/Mawson
-* Antarctica/McMurdo
-* Antarctica/Palmer
-* Antarctica/Rothera
-* Antarctica/South_Pole
-* Antarctica/Syowa
-* Antarctica/Troll
-* Antarctica/Vostok
-* Arctic/Longyearbyen
-* Asia/Aden
-* Asia/Almaty
-* Asia/Amman
-* Asia/Anadyr
-* Asia/Aqtau
-* Asia/Aqtobe
-* Asia/Ashgabat
-* Asia/Ashkhabad
-* Asia/Atyrau
-* Asia/Baghdad
-* Asia/Bahrain
-* Asia/Baku
-* Asia/Bangkok
-* Asia/Barnaul
-* Asia/Beirut
-* Asia/Bishkek
-* Asia/Brunei
-* Asia/Calcutta
-* Asia/Chita
-* Asia/Choibalsan
-* Asia/Chongqing
-* Asia/Chungking
-* Asia/Colombo
-* Asia/Dacca
-* Asia/Damascus
-* Asia/Dhaka
-* Asia/Dili
-* Asia/Dubai
-* Asia/Dushanbe
-* Asia/Famagusta
-* Asia/Gaza
-* Asia/Harbin
-* Asia/Hebron
-* Asia/Ho_Chi_Minh
-* Asia/Hong_Kong
-* Asia/Hovd
-* Asia/Irkutsk
-* Asia/Istanbul
-* Asia/Jakarta
-* Asia/Jayapura
-* Asia/Jerusalem
-* Asia/Kabul
-* Asia/Kamchatka
-* Asia/Karachi
-* Asia/Kashgar
-* Asia/Kathmandu
-* Asia/Katmandu
-* Asia/Khandyga
-* Asia/Kolkata
-* Asia/Krasnoyarsk
-* Asia/Kuala_Lumpur
-* Asia/Kuching
-* Asia/Kuwait
-* Asia/Macao
-* Asia/Macau
-* Asia/Magadan
-* Asia/Makassar
-* Asia/Manila
-* Asia/Muscat
-* Asia/Nicosia
-* Asia/Novokuznetsk
-* Asia/Novosibirsk
-* Asia/Omsk
-* Asia/Oral
-* Asia/Phnom_Penh
-* Asia/Pontianak
-* Asia/Pyongyang
-* Asia/Qatar
-* Asia/Qyzylorda
-* Asia/Rangoon
-* Asia/Riyadh
-* Asia/Saigon
-* Asia/Sakhalin
-* Asia/Samarkand
-* Asia/Seoul
-* Asia/Shanghai
-* Asia/Singapore
-* Asia/Srednekolymsk
-* Asia/Taipei
-* Asia/Tashkent
-* Asia/Tbilisi
-* Asia/Tehran
-* Asia/Tel_Aviv
-* Asia/Thimbu
-* Asia/Thimphu
-* Asia/Tokyo
-* Asia/Tomsk
-* Asia/Ujung_Pandang
-* Asia/Ulaanbaatar
-* Asia/Ulan_Bator
-* Asia/Urumqi
-* Asia/Ust-Nera
-* Asia/Vientiane
-* Asia/Vladivostok
-* Asia/Yakutsk
-* Asia/Yangon
-* Asia/Yekaterinburg
-* Asia/Yerevan
-* Atlantic/Azores
-* Atlantic/Bermuda
-* Atlantic/Canary
-* Atlantic/Cape_Verde
-* Atlantic/Faeroe
-* Atlantic/Faroe
-* Atlantic/Jan_Mayen
-* Atlantic/Madeira
-* Atlantic/Reykjavik
-* Atlantic/South_Georgia
-* Atlantic/St_Helena
-* Atlantic/Stanley
-* Australia/ACT
-* Australia/Adelaide
-* Australia/Brisbane
-* Australia/Broken_Hill
-* Australia/Canberra
-* Australia/Currie
-* Australia/Darwin
-* Australia/Eucla
-* Australia/Hobart
-* Australia/LHI
-* Australia/Lindeman
-* Australia/Lord_Howe
-* Australia/Melbourne
-* Australia/NSW
-* Australia/North
-* Australia/Perth
-* Australia/Queensland
-* Australia/South
-* Australia/Sydney
-* Australia/Tasmania
-* Australia/Victoria
-* Australia/West
-* Australia/Yancowinna
-* Brazil/Acre
-* Brazil/DeNoronha
-* Brazil/East
-* Brazil/West
-* CET
-* CST6CDT
-* Canada/Atlantic
-* Canada/Central
-* Canada/Eastern
-* Canada/Mountain
-* Canada/Newfoundland
-* Canada/Pacific
-* Canada/Saskatchewan
-* Canada/Yukon
-* Chile/Continental
-* Chile/EasterIsland
-* Cuba
-* EET
-* EST5EDT
-* Egypt
-* Eire
-* Etc/GMT
-* Etc/GMT+0
-* Etc/GMT+1
-* Etc/GMT+10
-* Etc/GMT+11
-* Etc/GMT+12
-* Etc/GMT+2
-* Etc/GMT+3
-* Etc/GMT+4
-* Etc/GMT+5
-* Etc/GMT+6
-* Etc/GMT+7
-* Etc/GMT+8
-* Etc/GMT+9
-* Etc/GMT-0
-* Etc/GMT-1
-* Etc/GMT-10
-* Etc/GMT-11
-* Etc/GMT-12
-* Etc/GMT-13
-* Etc/GMT-14
-* Etc/GMT-2
-* Etc/GMT-3
-* Etc/GMT-4
-* Etc/GMT-5
-* Etc/GMT-6
-* Etc/GMT-7
-* Etc/GMT-8
-* Etc/GMT-9
-* Etc/GMT0
-* Etc/Greenwich
-* Etc/UCT
-* Etc/UTC
-* Etc/Universal
-* Etc/Zulu
-* Europe/Amsterdam
-* Europe/Andorra
-* Europe/Astrakhan
-* Europe/Athens
-* Europe/Belfast
-* Europe/Belgrade
-* Europe/Berlin
-* Europe/Bratislava
-* Europe/Brussels
-* Europe/Bucharest
-* Europe/Budapest
-* Europe/Busingen
-* Europe/Chisinau
-* Europe/Copenhagen
-* Europe/Dublin
-* Europe/Gibraltar
-* Europe/Guernsey
-* Europe/Helsinki
-* Europe/Isle_of_Man
-* Europe/Istanbul
-* Europe/Jersey
-* Europe/Kaliningrad
-* Europe/Kiev
-* Europe/Kirov
-* Europe/Lisbon
-* Europe/Ljubljana
-* Europe/London
-* Europe/Luxembourg
-* Europe/Madrid
-* Europe/Malta
-* Europe/Mariehamn
-* Europe/Minsk
-* Europe/Monaco
-* Europe/Moscow
-* Europe/Nicosia
-* Europe/Oslo
-* Europe/Paris
-* Europe/Podgorica
-* Europe/Prague
-* Europe/Riga
-* Europe/Rome
-* Europe/Samara
-* Europe/San_Marino
-* Europe/Sarajevo
-* Europe/Saratov
-* Europe/Simferopol
-* Europe/Skopje
-* Europe/Sofia
-* Europe/Stockholm
-* Europe/Tallinn
-* Europe/Tirane
-* Europe/Tiraspol
-* Europe/Ulyanovsk
-* Europe/Uzhgorod
-* Europe/Vaduz
-* Europe/Vatican
-* Europe/Vienna
-* Europe/Vilnius
-* Europe/Volgograd
-* Europe/Warsaw
-* Europe/Zagreb
-* Europe/Zaporozhye
-* Europe/Zurich
-* GB
-* GB-Eire
-* GMT
-* GMT0
-* Greenwich
-* Hongkong
-* Iceland
-* Indian/Antananarivo
-* Indian/Chagos
-* Indian/Christmas
-* Indian/Cocos
-* Indian/Comoro
-* Indian/Kerguelen
-* Indian/Mahe
-* Indian/Maldives
-* Indian/Mauritius
-* Indian/Mayotte
-* Indian/Reunion
-* Iran
-* Israel
-* Jamaica
-* Japan
-* Kwajalein
-* Libya
-* MET
-* MST7MDT
-* Mexico/BajaNorte
-* Mexico/BajaSur
-* Mexico/General
-* NZ
-* NZ-CHAT
-* Navajo
-* PRC
-* PST8PDT
-* Pacific/Apia
-* Pacific/Auckland
-* Pacific/Bougainville
-* Pacific/Chatham
-* Pacific/Chuuk
-* Pacific/Easter
-* Pacific/Efate
-* Pacific/Enderbury
-* Pacific/Fakaofo
-* Pacific/Fiji
-* Pacific/Funafuti
-* Pacific/Galapagos
-* Pacific/Gambier
-* Pacific/Guadalcanal
-* Pacific/Guam
-* Pacific/Honolulu
-* Pacific/Johnston
-* Pacific/Kiritimati
-* Pacific/Kosrae
-* Pacific/Kwajalein
-* Pacific/Majuro
-* Pacific/Marquesas
-* Pacific/Midway
-* Pacific/Nauru
-* Pacific/Niue
-* Pacific/Norfolk
-* Pacific/Noumea
-* Pacific/Pago_Pago
-* Pacific/Palau
-* Pacific/Pitcairn
-* Pacific/Pohnpei
-* Pacific/Ponape
-* Pacific/Port_Moresby
-* Pacific/Rarotonga
-* Pacific/Saipan
-* Pacific/Samoa
-* Pacific/Tahiti
-* Pacific/Tarawa
-* Pacific/Tongatapu
-* Pacific/Truk
-* Pacific/Wake
-* Pacific/Wallis
-* Pacific/Yap
-* Poland
-* Portugal
-* ROK
-* Singapore
-* SystemV/AST4
-* SystemV/AST4ADT
-* SystemV/CST6
-* SystemV/CST6CDT
-* SystemV/EST5
-* SystemV/EST5EDT
-* SystemV/HST10
-* SystemV/MST7
-* SystemV/MST7MDT
-* SystemV/PST8
-* SystemV/PST8PDT
-* SystemV/YST9
-* SystemV/YST9YDT
-* Turkey
-* UCT
-* US/Alaska
-* US/Aleutian
-* US/Arizona
-* US/Central
-* US/East-Indiana
-* US/Eastern
-* US/Hawaii
-* US/Indiana-Starke
-* US/Michigan
-* US/Mountain
-* US/Pacific
-* US/Pacific-New
-* US/Samoa
-* UTC
-* Universal
-* W-SU
-* WET
-* Zulu
-
-
diff --git a/elasticsearch5/elasticsearch5-sink/src/main/java/com/dtstack/flink/sql/sink/elasticsearch/table/ElasticsearchTableInfo.java b/elasticsearch5/elasticsearch5-sink/src/main/java/com/dtstack/flink/sql/sink/elasticsearch/table/ElasticsearchTableInfo.java
index e616163ec..19f158c3a 100644
--- a/elasticsearch5/elasticsearch5-sink/src/main/java/com/dtstack/flink/sql/sink/elasticsearch/table/ElasticsearchTableInfo.java
+++ b/elasticsearch5/elasticsearch5-sink/src/main/java/com/dtstack/flink/sql/sink/elasticsearch/table/ElasticsearchTableInfo.java
@@ -22,7 +22,7 @@
import com.dtstack.flink.sql.table.TargetTableInfo;
-import com.google.common.base.Preconditions;
+import org.apache.flink.calcite.shaded.com.google.common.base.Preconditions;
/**
* @date 2018/09/12
diff --git a/hbase/hbase-side/hbase-all-side/src/main/java/com/dtstack/flink/sql/side/hbase/HbaseAllReqRow.java b/hbase/hbase-side/hbase-all-side/src/main/java/com/dtstack/flink/sql/side/hbase/HbaseAllReqRow.java
index 0fe2d1720..c9adc87b9 100644
--- a/hbase/hbase-side/hbase-all-side/src/main/java/com/dtstack/flink/sql/side/hbase/HbaseAllReqRow.java
+++ b/hbase/hbase-side/hbase-all-side/src/main/java/com/dtstack/flink/sql/side/hbase/HbaseAllReqRow.java
@@ -24,7 +24,7 @@
import com.dtstack.flink.sql.side.hbase.table.HbaseSideTableInfo;
import org.apache.commons.collections.map.HashedMap;
import org.apache.flink.api.java.typeutils.RowTypeInfo;
-import com.google.common.collect.Maps;
+import org.apache.flink.calcite.shaded.com.google.common.collect.Maps;
import org.apache.flink.table.typeutils.TimeIndicatorTypeInfo;
import org.apache.flink.types.Row;
import org.apache.flink.util.Collector;
diff --git a/hbase/hbase-side/hbase-all-side/src/main/java/com/dtstack/flink/sql/side/hbase/HbaseAllSideInfo.java b/hbase/hbase-side/hbase-all-side/src/main/java/com/dtstack/flink/sql/side/hbase/HbaseAllSideInfo.java
index ea51f46e4..dde3f41ba 100644
--- a/hbase/hbase-side/hbase-all-side/src/main/java/com/dtstack/flink/sql/side/hbase/HbaseAllSideInfo.java
+++ b/hbase/hbase-side/hbase-all-side/src/main/java/com/dtstack/flink/sql/side/hbase/HbaseAllSideInfo.java
@@ -25,9 +25,11 @@
import com.dtstack.flink.sql.side.SideInfo;
import com.dtstack.flink.sql.side.SideTableInfo;
import com.dtstack.flink.sql.util.ParseUtils;
+import org.apache.calcite.sql.SqlBasicCall;
+import org.apache.calcite.sql.SqlKind;
import org.apache.calcite.sql.SqlNode;
import org.apache.flink.api.java.typeutils.RowTypeInfo;
-import com.google.common.collect.Lists;
+import org.apache.flink.shaded.guava18.com.google.common.collect.Lists;
import java.util.List;
diff --git a/hbase/hbase-side/hbase-async-side/pom.xml b/hbase/hbase-side/hbase-async-side/pom.xml
index d02498881..0b206a33d 100644
--- a/hbase/hbase-side/hbase-async-side/pom.xml
+++ b/hbase/hbase-side/hbase-async-side/pom.xml
@@ -101,4 +101,4 @@
-
\ No newline at end of file
+
diff --git a/hbase/hbase-side/hbase-async-side/src/main/java/com/dtstack/flink/sql/side/hbase/HbaseAsyncReqRow.java b/hbase/hbase-side/hbase-async-side/src/main/java/com/dtstack/flink/sql/side/hbase/HbaseAsyncReqRow.java
index 05751d61e..6e82e4109 100644
--- a/hbase/hbase-side/hbase-async-side/src/main/java/com/dtstack/flink/sql/side/hbase/HbaseAsyncReqRow.java
+++ b/hbase/hbase-side/hbase-async-side/src/main/java/com/dtstack/flink/sql/side/hbase/HbaseAsyncReqRow.java
@@ -30,7 +30,7 @@
import com.dtstack.flink.sql.side.hbase.rowkeydealer.PreRowKeyModeDealerDealer;
import com.dtstack.flink.sql.side.hbase.rowkeydealer.RowKeyEqualModeDealer;
import com.dtstack.flink.sql.side.hbase.table.HbaseSideTableInfo;
-import com.dtstack.flink.sql.factory.DTThreadFactory;
+import com.dtstack.flink.sql.threadFactory.DTThreadFactory;
import com.google.common.collect.Maps;
import com.stumbleupon.async.Deferred;
import org.apache.flink.api.java.typeutils.RowTypeInfo;
@@ -129,7 +129,6 @@ public void asyncInvoke(Row input, ResultFuture resultFuture) throws Except
Object equalObj = input.getField(conValIndex);
if(equalObj == null){
resultFuture.complete(null);
- return;
}
refData.put(sideInfo.getEqualFieldList().get(i), equalObj);
diff --git a/hbase/hbase-side/hbase-async-side/src/main/java/com/dtstack/flink/sql/side/hbase/HbaseAsyncSideInfo.java b/hbase/hbase-side/hbase-async-side/src/main/java/com/dtstack/flink/sql/side/hbase/HbaseAsyncSideInfo.java
index 2bfdd0d44..84a6358d1 100644
--- a/hbase/hbase-side/hbase-async-side/src/main/java/com/dtstack/flink/sql/side/hbase/HbaseAsyncSideInfo.java
+++ b/hbase/hbase-side/hbase-async-side/src/main/java/com/dtstack/flink/sql/side/hbase/HbaseAsyncSideInfo.java
@@ -6,10 +6,12 @@
import com.dtstack.flink.sql.side.SideTableInfo;
import com.dtstack.flink.sql.side.hbase.table.HbaseSideTableInfo;
import com.dtstack.flink.sql.util.ParseUtils;
+import org.apache.calcite.sql.SqlBasicCall;
+import org.apache.calcite.sql.SqlKind;
import org.apache.calcite.sql.SqlNode;
import org.apache.flink.api.java.typeutils.RowTypeInfo;
-import com.google.common.collect.Lists;
-import com.google.common.collect.Maps;
+import org.apache.flink.shaded.guava18.com.google.common.collect.Lists;
+import org.apache.flink.shaded.guava18.com.google.common.collect.Maps;
import java.util.List;
import java.util.Map;
diff --git a/hbase/hbase-side/hbase-async-side/src/main/java/com/dtstack/flink/sql/side/hbase/rowkeydealer/AbsRowKeyModeDealer.java b/hbase/hbase-side/hbase-async-side/src/main/java/com/dtstack/flink/sql/side/hbase/rowkeydealer/AbsRowKeyModeDealer.java
index d73b31e75..5fe61af4f 100644
--- a/hbase/hbase-side/hbase-async-side/src/main/java/com/dtstack/flink/sql/side/hbase/rowkeydealer/AbsRowKeyModeDealer.java
+++ b/hbase/hbase-side/hbase-async-side/src/main/java/com/dtstack/flink/sql/side/hbase/rowkeydealer/AbsRowKeyModeDealer.java
@@ -23,7 +23,7 @@
import com.dtstack.flink.sql.side.FieldInfo;
import com.dtstack.flink.sql.side.cache.AbsSideCache;
import org.apache.calcite.sql.JoinType;
-import com.google.common.collect.Maps;
+import org.apache.flink.calcite.shaded.com.google.common.collect.Maps;
import org.apache.flink.streaming.api.functions.async.ResultFuture;
import org.apache.flink.types.Row;
import org.hbase.async.HBaseClient;
diff --git a/hbase/hbase-side/hbase-async-side/src/main/java/com/dtstack/flink/sql/side/hbase/rowkeydealer/PreRowKeyModeDealerDealer.java b/hbase/hbase-side/hbase-async-side/src/main/java/com/dtstack/flink/sql/side/hbase/rowkeydealer/PreRowKeyModeDealerDealer.java
index b75bca40c..d79f67ec5 100644
--- a/hbase/hbase-side/hbase-async-side/src/main/java/com/dtstack/flink/sql/side/hbase/rowkeydealer/PreRowKeyModeDealerDealer.java
+++ b/hbase/hbase-side/hbase-async-side/src/main/java/com/dtstack/flink/sql/side/hbase/rowkeydealer/PreRowKeyModeDealerDealer.java
@@ -28,7 +28,7 @@
import com.dtstack.flink.sql.side.hbase.utils.HbaseUtils;
import com.google.common.collect.Maps;
import org.apache.calcite.sql.JoinType;
-import com.google.common.collect.Lists;
+import org.apache.flink.calcite.shaded.com.google.common.collect.Lists;
import org.apache.flink.streaming.api.functions.async.ResultFuture;
import org.apache.flink.types.Row;
import org.hbase.async.BinaryPrefixComparator;
@@ -88,7 +88,6 @@ private String dealOneRow(ArrayList> args, String rowKeyStr,
}
List cacheContent = Lists.newArrayList();
- List rowList = Lists.newArrayList();
for(List oneRow : args){
try {
@@ -121,7 +120,8 @@ private String dealOneRow(ArrayList> args, String rowKeyStr,
if (openCache) {
cacheContent.add(sideVal);
}
- rowList.add(row);
+
+ resultFuture.complete(Collections.singleton(row));
}
} catch (Exception e) {
resultFuture.complete(null);
@@ -130,10 +130,6 @@ private String dealOneRow(ArrayList> args, String rowKeyStr,
}
}
- if (rowList.size() > 0){
- resultFuture.complete(rowList);
- }
-
if(openCache){
sideCache.putCache(rowKeyStr, CacheObj.buildCacheObj(ECacheContentType.MultiLine, cacheContent));
}
diff --git a/hbase/hbase-side/hbase-async-side/src/main/java/com/dtstack/flink/sql/side/hbase/rowkeydealer/RowKeyEqualModeDealer.java b/hbase/hbase-side/hbase-async-side/src/main/java/com/dtstack/flink/sql/side/hbase/rowkeydealer/RowKeyEqualModeDealer.java
index 8604db18e..fe3149e3d 100644
--- a/hbase/hbase-side/hbase-async-side/src/main/java/com/dtstack/flink/sql/side/hbase/rowkeydealer/RowKeyEqualModeDealer.java
+++ b/hbase/hbase-side/hbase-async-side/src/main/java/com/dtstack/flink/sql/side/hbase/rowkeydealer/RowKeyEqualModeDealer.java
@@ -28,7 +28,7 @@
import com.dtstack.flink.sql.side.hbase.utils.HbaseUtils;
import com.google.common.collect.Maps;
import org.apache.calcite.sql.JoinType;
-import com.google.common.collect.Lists;
+import org.apache.flink.calcite.shaded.com.google.common.collect.Lists;
import org.apache.flink.streaming.api.functions.async.ResultFuture;
import org.apache.flink.types.Row;
import org.hbase.async.GetRequest;
diff --git a/hbase/hbase-side/hbase-side-core/src/main/java/com/dtstack/flink/sql/side/hbase/RowKeyBuilder.java b/hbase/hbase-side/hbase-side-core/src/main/java/com/dtstack/flink/sql/side/hbase/RowKeyBuilder.java
index 114b7fa6a..af2053fe6 100644
--- a/hbase/hbase-side/hbase-side-core/src/main/java/com/dtstack/flink/sql/side/hbase/RowKeyBuilder.java
+++ b/hbase/hbase-side/hbase-side-core/src/main/java/com/dtstack/flink/sql/side/hbase/RowKeyBuilder.java
@@ -21,7 +21,7 @@
package com.dtstack.flink.sql.side.hbase;
import com.dtstack.flink.sql.side.hbase.enums.EReplaceType;
-import com.google.common.collect.Lists;
+import org.apache.flink.shaded.curator.org.apache.curator.shaded.com.google.common.collect.Lists;
import java.io.Serializable;
import java.util.List;
diff --git a/hbase/hbase-sink/pom.xml b/hbase/hbase-sink/pom.xml
index 11ecf0a35..26ead3574 100644
--- a/hbase/hbase-sink/pom.xml
+++ b/hbase/hbase-sink/pom.xml
@@ -68,7 +68,7 @@
+ tofile="${basedir}/../../plugins/hbasesink/${project.name}.jar" />
diff --git a/hbase/hbase-sink/src/main/java/com/dtstack/flink/sql/sink/hbase/table/HbaseTableInfo.java b/hbase/hbase-sink/src/main/java/com/dtstack/flink/sql/sink/hbase/table/HbaseTableInfo.java
index ed28f781d..4e286fd45 100644
--- a/hbase/hbase-sink/src/main/java/com/dtstack/flink/sql/sink/hbase/table/HbaseTableInfo.java
+++ b/hbase/hbase-sink/src/main/java/com/dtstack/flink/sql/sink/hbase/table/HbaseTableInfo.java
@@ -22,7 +22,7 @@
import com.dtstack.flink.sql.table.TargetTableInfo;
-import com.google.common.base.Preconditions;
+import org.apache.flink.calcite.shaded.com.google.common.base.Preconditions;
import java.util.Map;
/**
diff --git a/kafka/kafka-sink/pom.xml b/kafka/kafka-sink/pom.xml
deleted file mode 100644
index 41aa899c2..000000000
--- a/kafka/kafka-sink/pom.xml
+++ /dev/null
@@ -1,89 +0,0 @@
-
-
-
- sql.kafka
- com.dtstack.flink
- 1.0-SNAPSHOT
- ../pom.xml
-
- 4.0.0
-
- sql.sink.kafka
- 1.0-SNAPSHOT
- kafka-sink
- jar
-
-
-
-
- org.apache.flink
- flink-json
- ${flink.version}
-
-
-
-
-
-
- org.apache.maven.plugins
- maven-shade-plugin
- 1.4
-
-
- package
-
- shade
-
-
-
-
- org.slf4j
-
-
-
-
- *:*
-
- META-INF/*.SF
- META-INF/*.DSA
- META-INF/*.RSA
-
-
-
-
-
-
-
-
-
- maven-antrun-plugin
- 1.2
-
-
- copy-resources
-
- package
-
- run
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
\ No newline at end of file
diff --git a/kafka/kafka-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/CustomerFlinkKafkaProducer.java b/kafka/kafka-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/CustomerFlinkKafkaProducer.java
deleted file mode 100644
index b7976a30e..000000000
--- a/kafka/kafka-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/CustomerFlinkKafkaProducer.java
+++ /dev/null
@@ -1,60 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package com.dtstack.flink.sql.sink.kafka;
-
-import com.dtstack.flink.sql.metric.MetricConstant;
-import org.apache.flink.api.common.functions.RuntimeContext;
-import org.apache.flink.api.common.serialization.SerializationSchema;
-import org.apache.flink.configuration.Configuration;
-import org.apache.flink.metrics.Counter;
-import org.apache.flink.metrics.MeterView;
-import org.apache.flink.streaming.connectors.kafka.FlinkKafkaProducer;
-
-import java.util.Properties;
-
-/**
- * @author: chuixue
- * @create: 2019-11-05 11:54
- * @description:
- **/
-public class CustomerFlinkKafkaProducer extends FlinkKafkaProducer {
-
- CustomerJsonRowSerializationSchema schema;
-
- public CustomerFlinkKafkaProducer(String topicId, SerializationSchema serializationSchema, Properties producerConfig) {
- super(topicId, serializationSchema, producerConfig);
- this.schema = (CustomerJsonRowSerializationSchema) serializationSchema;
- }
-
- @Override
- public void open(Configuration configuration) {
- RuntimeContext ctx = getRuntimeContext();
- Counter counter = ctx.getMetricGroup().counter(MetricConstant.DT_NUM_RECORDS_OUT);
- MeterView meter = ctx.getMetricGroup().meter(MetricConstant.DT_NUM_RECORDS_OUT_RATE, new MeterView(counter, 20));
-
- schema.setCounter(counter);
-
- try {
- super.open(configuration);
- } catch (Exception e) {
- throw new RuntimeException("",e);
- }
- }
-
-}
diff --git a/kafka/kafka-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/CustomerJsonRowSerializationSchema.java b/kafka/kafka-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/CustomerJsonRowSerializationSchema.java
deleted file mode 100644
index d698d3ee2..000000000
--- a/kafka/kafka-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/CustomerJsonRowSerializationSchema.java
+++ /dev/null
@@ -1,222 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package com.dtstack.flink.sql.sink.kafka;
-
-import org.apache.flink.annotation.PublicEvolving;
-import org.apache.flink.api.common.serialization.SerializationSchema;
-import org.apache.flink.api.common.typeinfo.BasicArrayTypeInfo;
-import org.apache.flink.api.common.typeinfo.PrimitiveArrayTypeInfo;
-import org.apache.flink.api.common.typeinfo.TypeInformation;
-import org.apache.flink.api.common.typeinfo.Types;
-import org.apache.flink.api.java.typeutils.ObjectArrayTypeInfo;
-import org.apache.flink.api.java.typeutils.RowTypeInfo;
-import org.apache.flink.formats.json.JsonRowDeserializationSchema;
-import org.apache.flink.formats.json.JsonRowSchemaConverter;
-import org.apache.flink.metrics.Counter;
-import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.JsonNode;
-import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.ObjectMapper;
-import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.node.ArrayNode;
-import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.node.ContainerNode;
-import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.node.ObjectNode;
-import org.apache.flink.types.Row;
-import org.apache.flink.util.Preconditions;
-
-import java.math.BigDecimal;
-import java.math.BigInteger;
-import java.sql.Time;
-import java.sql.Timestamp;
-import java.text.SimpleDateFormat;
-
-/**
- * Serialization schema that serializes an object of Flink types into a JSON bytes.
- *
- * Serializes the input Flink object into a JSON string and
- * converts it into byte[].
- *
- *
Result byte[] messages can be deserialized using {@link JsonRowDeserializationSchema}.
- */
-@PublicEvolving
-public class CustomerJsonRowSerializationSchema implements SerializationSchema {
-
- private static final long serialVersionUID = -2885556750743978636L;
-
- /** Type information describing the input type. */
- private final TypeInformation typeInfo;
-
- /** Object mapper that is used to create output JSON objects. */
- private final ObjectMapper mapper = new ObjectMapper();
-
- /** Formatter for RFC 3339-compliant string representation of a time value (with UTC timezone, without milliseconds). */
- private SimpleDateFormat timeFormat = new SimpleDateFormat("HH:mm:ss'Z'");
-
- /** Formatter for RFC 3339-compliant string representation of a time value (with UTC timezone). */
- private SimpleDateFormat timeFormatWithMillis = new SimpleDateFormat("HH:mm:ss.SSS'Z'");
-
- /** Formatter for RFC 3339-compliant string representation of a timestamp value (with UTC timezone). */
- private SimpleDateFormat timestampFormat = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSS'Z'");
-
- /** Reusable object node. */
- private transient ObjectNode node;
-
- private Counter counter;
-
- /**
- * Creates a JSON serialization schema for the given type information.
- *
- * @param typeInfo The field names of {@link Row} are used to map to JSON properties.
- */
- public CustomerJsonRowSerializationSchema(TypeInformation typeInfo) {
- Preconditions.checkNotNull(typeInfo, "Type information");
- this.typeInfo = typeInfo;
- }
-
- /**
- * Creates a JSON serialization schema for the given JSON schema.
- *
- * @param jsonSchema JSON schema describing the result type
- *
- * @see http://json-schema.org/
- */
- public CustomerJsonRowSerializationSchema(String jsonSchema) {
- this(JsonRowSchemaConverter.convert(jsonSchema));
- }
-
- @Override
- public byte[] serialize(Row row) {
- if (node == null) {
- node = mapper.createObjectNode();
- }
-
- try {
- convertRow(node, (RowTypeInfo) typeInfo, row);
- counter.inc();
- return mapper.writeValueAsBytes(node);
- } catch (Throwable t) {
- throw new RuntimeException("Could not serialize row '" + row + "'. " +
- "Make sure that the schema matches the input.", t);
- }
- }
-
- // --------------------------------------------------------------------------------------------
-
- private ObjectNode convertRow(ObjectNode reuse, RowTypeInfo info, Row row) {
- if (reuse == null) {
- reuse = mapper.createObjectNode();
- }
- final String[] fieldNames = info.getFieldNames();
- final TypeInformation>[] fieldTypes = info.getFieldTypes();
-
- // validate the row
- if (row.getArity() != fieldNames.length) {
- throw new IllegalStateException(String.format(
- "Number of elements in the row '%s' is different from number of field names: %d", row, fieldNames.length));
- }
-
- for (int i = 0; i < fieldNames.length; i++) {
- final String name = fieldNames[i];
-
- final JsonNode fieldConverted = convert(reuse, reuse.get(name), fieldTypes[i], row.getField(i));
- reuse.set(name, fieldConverted);
- }
-
- return reuse;
- }
-
- private JsonNode convert(ContainerNode> container, JsonNode reuse, TypeInformation> info, Object object) {
- if (info == Types.VOID || object == null) {
- return container.nullNode();
- } else if (info == Types.BOOLEAN) {
- return container.booleanNode((Boolean) object);
- } else if (info == Types.STRING) {
- return container.textNode((String) object);
- } else if (info == Types.BIG_DEC) {
- // convert decimal if necessary
- if (object instanceof BigDecimal) {
- return container.numberNode((BigDecimal) object);
- }
- return container.numberNode(BigDecimal.valueOf(((Number) object).doubleValue()));
- } else if (info == Types.BIG_INT) {
- // convert integer if necessary
- if (object instanceof BigInteger) {
- return container.numberNode((BigInteger) object);
- }
- return container.numberNode(BigInteger.valueOf(((Number) object).longValue()));
- } else if (info == Types.SQL_DATE) {
- return container.textNode(object.toString());
- } else if (info == Types.SQL_TIME) {
- final Time time = (Time) object;
- // strip milliseconds if possible
- if (time.getTime() % 1000 > 0) {
- return container.textNode(timeFormatWithMillis.format(time));
- }
- return container.textNode(timeFormat.format(time));
- } else if (info == Types.SQL_TIMESTAMP) {
- return container.textNode(timestampFormat.format((Timestamp) object));
- } else if (info instanceof RowTypeInfo) {
- if (reuse != null && reuse instanceof ObjectNode) {
- return convertRow((ObjectNode) reuse, (RowTypeInfo) info, (Row) object);
- } else {
- return convertRow(null, (RowTypeInfo) info, (Row) object);
- }
- } else if (info instanceof ObjectArrayTypeInfo) {
- if (reuse != null && reuse instanceof ArrayNode) {
- return convertObjectArray((ArrayNode) reuse, ((ObjectArrayTypeInfo) info).getComponentInfo(), (Object[]) object);
- } else {
- return convertObjectArray(null, ((ObjectArrayTypeInfo) info).getComponentInfo(), (Object[]) object);
- }
- } else if (info instanceof BasicArrayTypeInfo) {
- if (reuse != null && reuse instanceof ArrayNode) {
- return convertObjectArray((ArrayNode) reuse, ((BasicArrayTypeInfo) info).getComponentInfo(), (Object[]) object);
- } else {
- return convertObjectArray(null, ((BasicArrayTypeInfo) info).getComponentInfo(), (Object[]) object);
- }
- } else if (info instanceof PrimitiveArrayTypeInfo && ((PrimitiveArrayTypeInfo) info).getComponentType() == Types.BYTE) {
- return container.binaryNode((byte[]) object);
- } else {
- // for types that were specified without JSON schema
- // e.g. POJOs
- try {
- return mapper.valueToTree(object);
- } catch (IllegalArgumentException e) {
- throw new IllegalStateException("Unsupported type information '" + info + "' for object: " + object, e);
- }
- }
- }
-
- private ArrayNode convertObjectArray(ArrayNode reuse, TypeInformation> info, Object[] array) {
- if (reuse == null) {
- reuse = mapper.createArrayNode();
- } else {
- reuse.removeAll();
- }
-
- for (Object object : array) {
- reuse.add(convert(reuse, null, info, object));
- }
- return reuse;
- }
-
- public Counter getCounter() {
- return counter;
- }
-
- public void setCounter(Counter counter) {
- this.counter = counter;
- }
-}
diff --git a/kafka/kafka-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/CustomerKafkaJsonTableSink.java b/kafka/kafka-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/CustomerKafkaJsonTableSink.java
deleted file mode 100644
index af6e54854..000000000
--- a/kafka/kafka-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/CustomerKafkaJsonTableSink.java
+++ /dev/null
@@ -1,65 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package com.dtstack.flink.sql.sink.kafka;
-
-import org.apache.flink.api.common.serialization.SerializationSchema;
-import org.apache.flink.streaming.api.datastream.DataStream;
-import org.apache.flink.streaming.api.functions.sink.SinkFunction;
-import org.apache.flink.streaming.connectors.kafka.KafkaTableSink;
-import org.apache.flink.streaming.connectors.kafka.partitioner.FlinkKafkaPartitioner;
-import org.apache.flink.table.api.TableSchema;
-import org.apache.flink.table.utils.TableConnectorUtils;
-import org.apache.flink.types.Row;
-
-import java.util.Optional;
-import java.util.Properties;
-
-/**
- * @author: chuixue
- * @create: 2019-11-05 11:54
- * @description:
- **/
-public class CustomerKafkaJsonTableSink extends KafkaTableSink {
-
- protected SerializationSchema schema;
-
-
- public CustomerKafkaJsonTableSink(TableSchema schema,
- String topic,
- Properties properties,
- Optional> partitioner,
- SerializationSchema serializationSchema) {
-
- super(schema, topic, properties, partitioner, serializationSchema);
- this.schema = serializationSchema;
- }
-
- @Override
- protected SinkFunction createKafkaProducer(String topic, Properties properties, SerializationSchema serializationSchema, Optional> optional) {
- return new CustomerFlinkKafkaProducer(topic, serializationSchema, properties);
- }
-
- @Override
- public void emitDataStream(DataStream dataStream) {
- SinkFunction kafkaProducer = createKafkaProducer(topic, properties, schema, partitioner);
- // always enable flush on checkpoint to achieve at-least-once if query runs with checkpointing enabled.
- //kafkaProducer.setFlushOnCheckpoint(true);
- dataStream.addSink(kafkaProducer).name(TableConnectorUtils.generateRuntimeName(this.getClass(), getFieldNames()));
- }
-}
diff --git a/kafka/kafka-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/KafkaSink.java b/kafka/kafka-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/KafkaSink.java
deleted file mode 100644
index 44bf9f98b..000000000
--- a/kafka/kafka-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/KafkaSink.java
+++ /dev/null
@@ -1,144 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package com.dtstack.flink.sql.sink.kafka;
-
-import com.dtstack.flink.sql.sink.IStreamSinkGener;
-import com.dtstack.flink.sql.sink.kafka.table.KafkaSinkTableInfo;
-import com.dtstack.flink.sql.table.TargetTableInfo;
-import org.apache.flink.api.common.serialization.SerializationSchema;
-import org.apache.flink.api.common.typeinfo.TypeInformation;
-import org.apache.flink.api.java.tuple.Tuple2;
-import org.apache.flink.api.java.typeutils.RowTypeInfo;
-import org.apache.flink.api.java.typeutils.TupleTypeInfo;
-import org.apache.flink.streaming.api.datastream.DataStream;
-import org.apache.flink.streaming.connectors.kafka.KafkaTableSinkBase;
-import org.apache.flink.streaming.connectors.kafka.partitioner.FlinkFixedPartitioner;
-import org.apache.flink.streaming.connectors.kafka.partitioner.FlinkKafkaPartitioner;
-import org.apache.flink.table.api.TableSchema;
-import org.apache.flink.table.sinks.RetractStreamTableSink;
-import org.apache.flink.table.sinks.TableSink;
-import org.apache.flink.types.Row;
-
-import java.util.Optional;
-import java.util.Properties;
-
-/**
- * @author: chuixue
- * @create: 2019-11-05 11:45
- * @description:
- **/
-public class KafkaSink implements RetractStreamTableSink, IStreamSinkGener {
-
- protected String[] fieldNames;
-
- protected TypeInformation>[] fieldTypes;
-
- protected String topic;
-
- protected int parallelism;
-
- protected Properties properties;
-
- /** Serialization schema for encoding records to Kafka. */
- protected SerializationSchema serializationSchema;
-
- /** The schema of the table. */
- private TableSchema schema;
-
- /** Partitioner to select Kafka partition for each item. */
- protected Optional> partitioner;
-
- @Override
- public KafkaSink genStreamSink(TargetTableInfo targetTableInfo) {
- KafkaSinkTableInfo kafkaSinkTableInfo = (KafkaSinkTableInfo) targetTableInfo;
- this.topic = kafkaSinkTableInfo.getTopic();
-
- properties = new Properties();
- properties.setProperty("bootstrap.servers", kafkaSinkTableInfo.getBootstrapServers());
-
- for (String key : kafkaSinkTableInfo.getKafkaParamKeys()) {
- properties.setProperty(key, kafkaSinkTableInfo.getKafkaParam(key));
- }
- this.partitioner = Optional.of(new FlinkFixedPartitioner<>());
- this.fieldNames = kafkaSinkTableInfo.getFields();
- TypeInformation[] types = new TypeInformation[kafkaSinkTableInfo.getFields().length];
- for (int i = 0; i < kafkaSinkTableInfo.getFieldClasses().length; i++) {
- types[i] = TypeInformation.of(kafkaSinkTableInfo.getFieldClasses()[i]);
- }
- this.fieldTypes = types;
-
- TableSchema.Builder schemaBuilder = TableSchema.builder();
- for (int i=0;i getRecordType() {
- return new RowTypeInfo(fieldTypes, fieldNames);
- }
-
- @Override
- public void emitDataStream(DataStream> dataStream) {
- KafkaTableSinkBase kafkaTableSink = new CustomerKafkaJsonTableSink(
- schema,
- topic,
- properties,
- partitioner,
- serializationSchema
- );
-
- DataStream ds = dataStream.map((Tuple2 record) -> {
- return record.f1;
- }).returns(getOutputType().getTypeAt(1)).setParallelism(parallelism);
-
- kafkaTableSink.emitDataStream(ds);
- }
-
- @Override
- public TupleTypeInfo> getOutputType() {
- return new TupleTypeInfo(org.apache.flink.table.api.Types.BOOLEAN(), new RowTypeInfo(fieldTypes, fieldNames));
- }
-
- @Override
- public String[] getFieldNames() {
- return fieldNames;
- }
-
- @Override
- public TypeInformation>[] getFieldTypes() {
- return fieldTypes;
- }
-
- @Override
- public TableSink> configure(String[] fieldNames, TypeInformation>[] fieldTypes) {
- this.fieldNames = fieldNames;
- this.fieldTypes = fieldTypes;
- return this;
- }
-}
diff --git a/kafka/kafka-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/table/KafkaSinkParser.java b/kafka/kafka-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/table/KafkaSinkParser.java
deleted file mode 100644
index f633c8112..000000000
--- a/kafka/kafka-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/table/KafkaSinkParser.java
+++ /dev/null
@@ -1,63 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package com.dtstack.flink.sql.sink.kafka.table;
-
-import com.dtstack.flink.sql.table.AbsTableParser;
-import com.dtstack.flink.sql.table.TableInfo;
-import com.dtstack.flink.sql.util.MathUtil;
-
-import java.util.Map;
-
-/**
- * @author: chuixue
- * @create: 2019-11-05 11:46
- * @description:
- **/
-public class KafkaSinkParser extends AbsTableParser {
- @Override
- public TableInfo getTableInfo(String tableName, String fieldsInfo, Map props) throws Exception {
- KafkaSinkTableInfo kafkaSinkTableInfo = new KafkaSinkTableInfo();
- kafkaSinkTableInfo.setName(tableName);
- parseFieldsInfo(fieldsInfo, kafkaSinkTableInfo);
- kafkaSinkTableInfo.setParallelism(MathUtil.getIntegerVal(props.get(KafkaSinkTableInfo.PARALLELISM_KEY.toLowerCase())));
-
- if (props.get(KafkaSinkTableInfo.SINK_DATA_TYPE) != null) {
- kafkaSinkTableInfo.setSinkDataType(props.get(KafkaSinkTableInfo.SINK_DATA_TYPE).toString());
- }
-
-// if (props.get(KafkaSinkTableInfo.FIELD_DELINITER) != null) {
-// kafka11SinkTableInfo.setFieldDelimiter(props.get(KafkaSinkTableInfo.FIELD_DELINITER).toString());
-// }
-
- kafkaSinkTableInfo.setBootstrapServers(MathUtil.getString(props.get(KafkaSinkTableInfo.BOOTSTRAPSERVERS_KEY.toLowerCase())));
- kafkaSinkTableInfo.setTopic(MathUtil.getString(props.get(KafkaSinkTableInfo.TOPIC_KEY.toLowerCase())));
-
- Integer parallelism = MathUtil.getIntegerVal(props.get(KafkaSinkTableInfo.PARALLELISM_KEY.toLowerCase()));
- kafkaSinkTableInfo.setParallelism(parallelism);
-
- for (String key : props.keySet()) {
- if (!key.isEmpty() && key.startsWith("kafka.")) {
- kafkaSinkTableInfo.addKafkaParam(key.substring(6), props.get(key).toString());
- }
- }
- kafkaSinkTableInfo.check();
-
- return kafkaSinkTableInfo;
- }
-}
diff --git a/kafka/kafka-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/table/KafkaSinkTableInfo.java b/kafka/kafka-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/table/KafkaSinkTableInfo.java
deleted file mode 100644
index 1d23932c1..000000000
--- a/kafka/kafka-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/table/KafkaSinkTableInfo.java
+++ /dev/null
@@ -1,93 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package com.dtstack.flink.sql.sink.kafka.table;
-
-import com.dtstack.flink.sql.table.TargetTableInfo;
-import com.google.common.base.Preconditions;
-
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Set;
-
-/**
- * @author: chuixue
- * @create: 2019-11-05 11:46
- * @description:
- **/
-public class KafkaSinkTableInfo extends TargetTableInfo {
- //version
- private static final String CURR_TYPE = "kafka";
-
- public KafkaSinkTableInfo(){
- super.setType(CURR_TYPE);
- }
- public static final String BOOTSTRAPSERVERS_KEY = "bootstrapServers";
-
- public static final String TOPIC_KEY = "topic";
-
- private String bootstrapServers;
-
- public Map kafkaParam = new HashMap();
-
- private String topic;
-
-
- public void addKafkaParam(String key,String value){
- kafkaParam.put(key,value);
- }
-
- public String getKafkaParam(String key){
- return kafkaParam.get(key);
- }
-
- public Set getKafkaParamKeys(){
- return kafkaParam.keySet();
- }
-
-
- public String getBootstrapServers() {
- return bootstrapServers;
- }
-
- public void setBootstrapServers(String bootstrapServers) {
- this.bootstrapServers = bootstrapServers;
- }
-
- public String getTopic() {
- return topic;
- }
-
- public void setTopic(String topic) {
- this.topic = topic;
- }
-
-
- @Override
- public boolean check() {
- Preconditions.checkNotNull(bootstrapServers, "kafka of bootstrapServers is required");
- Preconditions.checkNotNull(topic, "kafka of topic is required");
- //Preconditions.checkNotNull(kafkaParam.get("groupId"), "kafka of groupId is required");
- return false;
- }
-
- @Override
- public String getType() {
- return super.getType();
- }
-}
diff --git a/kafka/kafka-source/pom.xml b/kafka/kafka-source/pom.xml
deleted file mode 100644
index 55ca950fb..000000000
--- a/kafka/kafka-source/pom.xml
+++ /dev/null
@@ -1,81 +0,0 @@
-
-
-
- sql.kafka
- com.dtstack.flink
- 1.0-SNAPSHOT
- ../pom.xml
-
- 4.0.0
-
- sql.source.kafka
- jar
-
- kafka-source
- http://maven.apache.org
-
-
-
-
-
-
- org.apache.maven.plugins
- maven-shade-plugin
- 1.4
-
-
- package
-
- shade
-
-
-
-
- org.slf4j
-
-
-
-
- *:*
-
- META-INF/*.SF
- META-INF/*.DSA
- META-INF/*.RSA
-
-
-
-
-
-
-
-
-
- maven-antrun-plugin
- 1.2
-
-
- copy-resources
-
- package
-
- run
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
\ No newline at end of file
diff --git a/kafka/kafka-source/src/main/java/com/dtstack/flink/sql/source/kafka/CustomerJsonDeserialization.java b/kafka/kafka-source/src/main/java/com/dtstack/flink/sql/source/kafka/CustomerJsonDeserialization.java
deleted file mode 100644
index 6d3e57957..000000000
--- a/kafka/kafka-source/src/main/java/com/dtstack/flink/sql/source/kafka/CustomerJsonDeserialization.java
+++ /dev/null
@@ -1,280 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package com.dtstack.flink.sql.source.kafka;
-
-import com.dtstack.flink.sql.source.AbsDeserialization;
-import com.dtstack.flink.sql.source.kafka.metric.KafkaTopicPartitionLagMetric;
-import com.dtstack.flink.sql.table.TableInfo;
-import org.apache.flink.api.common.typeinfo.TypeInformation;
-import org.apache.flink.api.common.typeinfo.Types;
-import org.apache.flink.api.java.typeutils.RowTypeInfo;
-import org.apache.flink.calcite.shaded.com.google.common.base.Strings;
-import org.apache.flink.metrics.MetricGroup;
-import org.apache.flink.shaded.guava18.com.google.common.collect.Maps;
-import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.core.JsonProcessingException;
-import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.JsonNode;
-import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.ObjectMapper;
-import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.node.ArrayNode;
-import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.node.JsonNodeType;
-import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.node.TextNode;
-import org.apache.flink.streaming.connectors.kafka.internal.KafkaConsumerThread;
-import org.apache.flink.streaming.connectors.kafka.internals.AbstractFetcher;
-import org.apache.flink.types.Row;
-import org.apache.kafka.clients.consumer.KafkaConsumer;
-import org.apache.kafka.clients.consumer.internals.SubscriptionState;
-import org.apache.kafka.common.TopicPartition;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.lang.reflect.Field;
-import java.sql.Date;
-import java.sql.Time;
-import java.sql.Timestamp;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
-import static com.dtstack.flink.sql.metric.MetricConstant.*;
-
-/**
- * @author: chuixue
- * @create: 2019-11-05 10:57
- * @description:
- **/
-public class CustomerJsonDeserialization extends AbsDeserialization {
-
- private static final Logger LOG = LoggerFactory.getLogger(CustomerJsonDeserialization.class);
-
- private static final long serialVersionUID = 2385115520960444192L;
-
- private static int dirtyDataFrequency = 1000;
-
- private final ObjectMapper objectMapper = new ObjectMapper();
-
- /** Type information describing the result type. */
- private final TypeInformation typeInfo;
-
- /** Field names to parse. Indices match fieldTypes indices. */
- private final String[] fieldNames;
-
- /** Types to parse fields as. Indices match fieldNames indices. */
- private final TypeInformation>[] fieldTypes;
-
- private AbstractFetcher fetcher;
-
- private boolean firstMsg = true;
-
- private Map nodeAndJsonNodeMapping = Maps.newHashMap();
-
- private Map rowAndFieldMapping;
-
- private List fieldExtraInfos;
-
- public CustomerJsonDeserialization(TypeInformation typeInfo, Map rowAndFieldMapping, List fieldExtraInfos){
- this.typeInfo = typeInfo;
- this.fieldNames = ((RowTypeInfo) typeInfo).getFieldNames();
- this.fieldTypes = ((RowTypeInfo) typeInfo).getFieldTypes();
- this.rowAndFieldMapping= rowAndFieldMapping;
- this.fieldExtraInfos = fieldExtraInfos;
- }
-
- @Override
- public Row deserialize(byte[] message) throws IOException {
-
- if(firstMsg){
- try {
- registerPtMetric(fetcher);
- } catch (Exception e) {
- LOG.error("register topic partition metric error.", e);
- }
-
- firstMsg = false;
- }
-
- try {
- JsonNode root = objectMapper.readTree(message);
-
- if (numInRecord.getCount() % dirtyDataFrequency == 0) {
- LOG.info(root.toString());
- }
-
- numInRecord.inc();
- numInBytes.inc(message.length);
-
- parseTree(root, null);
- Row row = new Row(fieldNames.length);
-
- for (int i = 0; i < fieldNames.length; i++) {
- JsonNode node = getIgnoreCase(fieldNames[i]);
- TableInfo.FieldExtraInfo fieldExtraInfo = fieldExtraInfos.get(i);
-
- if (node == null) {
- if (fieldExtraInfo != null && fieldExtraInfo.getNotNull()) {
- throw new IllegalStateException("Failed to find field with name '"
- + fieldNames[i] + "'.");
- } else {
- row.setField(i, null);
- }
- } else {
- // Read the value as specified type
-
- Object value = convert(node, fieldTypes[i]);
- row.setField(i, value);
- }
- }
-
- numInResolveRecord.inc();
- return row;
- } catch (Exception e) {
- //add metric of dirty data
- if (dirtyDataCounter.getCount() % dirtyDataFrequency == 0) {
- LOG.info("dirtyData: " + new String(message));
- LOG.error("" , e);
- }
- dirtyDataCounter.inc();
- return null;
- }finally {
- nodeAndJsonNodeMapping.clear();
- }
- }
-
- public JsonNode getIgnoreCase(String key) {
- String nodeMappingKey = rowAndFieldMapping.getOrDefault(key, key);
- return nodeAndJsonNodeMapping.get(nodeMappingKey);
- }
-
- private void parseTree(JsonNode jsonNode, String prefix){
- if (jsonNode.isArray()) {
- ArrayNode array = (ArrayNode) jsonNode;
- for (int i = 0; i < array.size(); i++) {
- JsonNode child = array.get(i);
- String nodeKey = getNodeKey(prefix, i);
-
- if (child.isValueNode()) {
- nodeAndJsonNodeMapping.put(nodeKey, child);
- } else {
- if (rowAndFieldMapping.containsValue(nodeKey)) {
- nodeAndJsonNodeMapping.put(nodeKey, child);
- }
- parseTree(child, nodeKey);
- }
- }
- return;
- }
-
- Iterator iterator = jsonNode.fieldNames();
- while (iterator.hasNext()){
- String next = iterator.next();
- JsonNode child = jsonNode.get(next);
- String nodeKey = getNodeKey(prefix, next);
-
- if (child.isValueNode()){
- nodeAndJsonNodeMapping.put(nodeKey, child);
- }else if(child.isArray()){
- parseTree(child, nodeKey);
- }else {
- parseTree(child, nodeKey);
- }
- }
- }
-
- private String getNodeKey(String prefix, String nodeName){
- if(Strings.isNullOrEmpty(prefix)){
- return nodeName;
- }
-
- return prefix + "." + nodeName;
- }
-
- private String getNodeKey(String prefix, int i) {
- if (Strings.isNullOrEmpty(prefix)) {
- return "[" + i + "]";
- }
- return prefix + "[" + i + "]";
- }
-
- public void setFetcher(AbstractFetcher fetcher) {
- this.fetcher = fetcher;
- }
-
- protected void registerPtMetric(AbstractFetcher fetcher) throws Exception {
-
- Field consumerThreadField = fetcher.getClass().getSuperclass().getDeclaredField("consumerThread");
- consumerThreadField.setAccessible(true);
- KafkaConsumerThread consumerThread = (KafkaConsumerThread) consumerThreadField.get(fetcher);
-
- Field hasAssignedPartitionsField = consumerThread.getClass().getDeclaredField("hasAssignedPartitions");
- hasAssignedPartitionsField.setAccessible(true);
-
- //wait until assignedPartitions
-
- boolean hasAssignedPartitions = (boolean) hasAssignedPartitionsField.get(consumerThread);
-
- if(!hasAssignedPartitions){
- throw new RuntimeException("wait 50 secs, but not assignedPartitions");
- }
-
- Field consumerField = consumerThread.getClass().getDeclaredField("consumer");
- consumerField.setAccessible(true);
-
- KafkaConsumer kafkaConsumer = (KafkaConsumer) consumerField.get(consumerThread);
- Field subscriptionStateField = kafkaConsumer.getClass().getDeclaredField("subscriptions");
- subscriptionStateField.setAccessible(true);
-
- //topic partitions lag
- SubscriptionState subscriptionState = (SubscriptionState) subscriptionStateField.get(kafkaConsumer);
- Set assignedPartitions = subscriptionState.assignedPartitions();
- for(TopicPartition topicPartition : assignedPartitions){
- MetricGroup metricGroup = getRuntimeContext().getMetricGroup().addGroup(DT_TOPIC_GROUP, topicPartition.topic())
- .addGroup(DT_PARTITION_GROUP, topicPartition.partition() + "");
- metricGroup.gauge(DT_TOPIC_PARTITION_LAG_GAUGE, new KafkaTopicPartitionLagMetric(subscriptionState, topicPartition));
- }
-
- }
-
- private static String partitionLagMetricName(TopicPartition tp) {
- return tp + ".records-lag";
- }
-
- private Object convert(JsonNode node, TypeInformation> info) {
- if (info.getTypeClass().equals(Types.BOOLEAN.getTypeClass())) {
- return node.asBoolean();
- } else if (info.getTypeClass().equals(Types.STRING.getTypeClass())) {
- return node.asText();
- } else if (info.getTypeClass().equals(Types.SQL_DATE.getTypeClass())) {
- return Date.valueOf(node.asText());
- } else if (info.getTypeClass().equals(Types.SQL_TIME.getTypeClass())) {
- // local zone
- return Time.valueOf(node.asText());
- } else if (info.getTypeClass().equals(Types.SQL_TIMESTAMP.getTypeClass())) {
- // local zone
- return Timestamp.valueOf(node.asText());
- } else {
- // for types that were specified without JSON schema
- // e.g. POJOs
- try {
- return objectMapper.treeToValue(node, info.getTypeClass());
- } catch (JsonProcessingException e) {
- throw new IllegalStateException("Unsupported type information '" + info + "' for node: " + node);
- }
- }
- }
-}
diff --git a/kafka/kafka-source/src/main/java/com/dtstack/flink/sql/source/kafka/CustomerKafkaConsumer.java b/kafka/kafka-source/src/main/java/com/dtstack/flink/sql/source/kafka/CustomerKafkaConsumer.java
deleted file mode 100644
index 7d35a35b3..000000000
--- a/kafka/kafka-source/src/main/java/com/dtstack/flink/sql/source/kafka/CustomerKafkaConsumer.java
+++ /dev/null
@@ -1,72 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package com.dtstack.flink.sql.source.kafka;
-
-import com.dtstack.flink.sql.source.AbsDeserialization;
-import org.apache.flink.metrics.MetricGroup;
-import org.apache.flink.streaming.api.functions.AssignerWithPeriodicWatermarks;
-import org.apache.flink.streaming.api.functions.AssignerWithPunctuatedWatermarks;
-import org.apache.flink.streaming.api.operators.StreamingRuntimeContext;
-import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer;
-import org.apache.flink.streaming.connectors.kafka.config.OffsetCommitMode;
-import org.apache.flink.streaming.connectors.kafka.internals.AbstractFetcher;
-import org.apache.flink.streaming.connectors.kafka.internals.KafkaTopicPartition;
-import org.apache.flink.types.Row;
-import org.apache.flink.util.SerializedValue;
-
-import java.util.Arrays;
-import java.util.Map;
-import java.util.Properties;
-import java.util.regex.Pattern;
-
-/**
- * @author: chuixue
- * @create: 2019-11-05 10:58
- * @description:
- **/
-public class CustomerKafkaConsumer extends FlinkKafkaConsumer {
-
- private static final long serialVersionUID = -2265366268827807739L;
-
- private CustomerJsonDeserialization customerJsonDeserialization;
-
- public CustomerKafkaConsumer(String topic, AbsDeserialization valueDeserializer, Properties props) {
- super(Arrays.asList(topic.split(",")), valueDeserializer, props);
- this.customerJsonDeserialization = (CustomerJsonDeserialization) valueDeserializer;
- }
-
- public CustomerKafkaConsumer(Pattern subscriptionPattern, AbsDeserialization valueDeserializer, Properties props) {
- super(subscriptionPattern, valueDeserializer, props);
- this.customerJsonDeserialization = (CustomerJsonDeserialization) valueDeserializer;
- }
-
- @Override
- public void run(SourceContext sourceContext) throws Exception {
- customerJsonDeserialization.setRuntimeContext(getRuntimeContext());
- customerJsonDeserialization.initMetric();
- super.run(sourceContext);
- }
-
- @Override
- protected AbstractFetcher createFetcher(SourceContext sourceContext, Map assignedPartitionsWithInitialOffsets, SerializedValue> watermarksPeriodic, SerializedValue> watermarksPunctuated, StreamingRuntimeContext runtimeContext, OffsetCommitMode offsetCommitMode, MetricGroup consumerMetricGroup, boolean useMetrics) throws Exception {
- AbstractFetcher fetcher = super.createFetcher(sourceContext, assignedPartitionsWithInitialOffsets, watermarksPeriodic, watermarksPunctuated, runtimeContext, offsetCommitMode, consumerMetricGroup, useMetrics);
- customerJsonDeserialization.setFetcher(fetcher);
- return fetcher;
- }
-}
diff --git a/kafka/kafka-source/src/main/java/com/dtstack/flink/sql/source/kafka/KafkaSource.java b/kafka/kafka-source/src/main/java/com/dtstack/flink/sql/source/kafka/KafkaSource.java
deleted file mode 100644
index c26d99b62..000000000
--- a/kafka/kafka-source/src/main/java/com/dtstack/flink/sql/source/kafka/KafkaSource.java
+++ /dev/null
@@ -1,122 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package com.dtstack.flink.sql.source.kafka;
-
-import com.dtstack.flink.sql.source.IStreamSourceGener;
-import com.dtstack.flink.sql.source.kafka.table.KafkaSourceTableInfo;
-import com.dtstack.flink.sql.table.SourceTableInfo;
-import com.dtstack.flink.sql.util.DtStringUtil;
-import com.dtstack.flink.sql.util.PluginUtil;
-import org.apache.commons.lang3.BooleanUtils;
-import org.apache.commons.lang3.StringUtils;
-import org.apache.flink.api.common.typeinfo.TypeInformation;
-import org.apache.flink.api.java.typeutils.RowTypeInfo;
-import org.apache.flink.streaming.api.datastream.DataStreamSource;
-import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
-import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer;
-import org.apache.flink.streaming.connectors.kafka.internals.KafkaTopicPartition;
-import org.apache.flink.table.api.Table;
-import org.apache.flink.table.api.java.StreamTableEnvironment;
-import org.apache.flink.types.Row;
-
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Properties;
-import java.util.regex.Pattern;
-
-/**
- * @author: chuixue
- * @create: 2019-11-05 10:55
- * @description:
- **/
-public class KafkaSource implements IStreamSourceGener {
-
- private static final String SOURCE_OPERATOR_NAME_TPL = "${topic}_${table}";
-
- /**
- * Get kafka data source, you need to provide the data field names, data types
- * If you do not specify auto.offset.reset, the default use groupoffset
- *
- * @param sourceTableInfo
- * @return
- */
- @SuppressWarnings("rawtypes")
- @Override
- public Table genStreamSource(SourceTableInfo sourceTableInfo, StreamExecutionEnvironment env, StreamTableEnvironment tableEnv) {
-
- KafkaSourceTableInfo kafkaSourceTableInfo = (KafkaSourceTableInfo) sourceTableInfo;
- String topicName = kafkaSourceTableInfo.getTopic();
-
- Properties props = new Properties();
- props.setProperty("bootstrap.servers", kafkaSourceTableInfo.getBootstrapServers());
- if (DtStringUtil.isJosn(kafkaSourceTableInfo.getOffsetReset())) {
- props.setProperty("auto.offset.reset", "none");
- } else {
- props.setProperty("auto.offset.reset", kafkaSourceTableInfo.getOffsetReset());
- }
- if (StringUtils.isNotBlank(kafkaSourceTableInfo.getGroupId())) {
- props.setProperty("group.id", kafkaSourceTableInfo.getGroupId());
- }
-
- TypeInformation[] types = new TypeInformation[kafkaSourceTableInfo.getFields().length];
- for (int i = 0; i < kafkaSourceTableInfo.getFieldClasses().length; i++) {
- types[i] = TypeInformation.of(kafkaSourceTableInfo.getFieldClasses()[i]);
- }
-
- TypeInformation typeInformation = new RowTypeInfo(types, kafkaSourceTableInfo.getFields());
-
- FlinkKafkaConsumer kafkaSrc;
- if (BooleanUtils.isTrue(kafkaSourceTableInfo.getTopicIsPattern())) {
- kafkaSrc = new CustomerKafkaConsumer(Pattern.compile(topicName),
- new CustomerJsonDeserialization(typeInformation, kafkaSourceTableInfo.getPhysicalFields(), kafkaSourceTableInfo.getFieldExtraInfoList()), props);
- } else {
- kafkaSrc = new CustomerKafkaConsumer(topicName,
- new CustomerJsonDeserialization(typeInformation, kafkaSourceTableInfo.getPhysicalFields(), kafkaSourceTableInfo.getFieldExtraInfoList()), props);
- }
-
- //earliest,latest
- if ("earliest".equalsIgnoreCase(kafkaSourceTableInfo.getOffsetReset())) {
- kafkaSrc.setStartFromEarliest();
- } else if (DtStringUtil.isJosn(kafkaSourceTableInfo.getOffsetReset())) {// {"0":12312,"1":12321,"2":12312}
- try {
- Properties properties = PluginUtil.jsonStrToObject(kafkaSourceTableInfo.getOffsetReset(), Properties.class);
- Map offsetMap = PluginUtil.ObjectToMap(properties);
- Map specificStartupOffsets = new HashMap<>();
- for (Map.Entry entry : offsetMap.entrySet()) {
- specificStartupOffsets.put(new KafkaTopicPartition(topicName, Integer.valueOf(entry.getKey())), Long.valueOf(entry.getValue().toString()));
- }
- kafkaSrc.setStartFromSpecificOffsets(specificStartupOffsets);
- } catch (Exception e) {
- throw new RuntimeException("not support offsetReset type:" + kafkaSourceTableInfo.getOffsetReset());
- }
- } else {
- kafkaSrc.setStartFromLatest();
- }
-
- String fields = StringUtils.join(kafkaSourceTableInfo.getFields(), ",");
- String sourceOperatorName = SOURCE_OPERATOR_NAME_TPL.replace("${topic}", topicName).replace("${table}", sourceTableInfo.getName());
-
- DataStreamSource kafkaSource = env.addSource(kafkaSrc, sourceOperatorName, typeInformation);
- Integer parallelism = kafkaSourceTableInfo.getParallelism();
- if (parallelism != null) {
- kafkaSource.setParallelism(parallelism);
- }
- return tableEnv.fromDataStream(kafkaSource, fields);
- }
-}
diff --git a/kafka/kafka-source/src/main/java/com/dtstack/flink/sql/source/kafka/metric/KafkaTopicPartitionLagMetric.java b/kafka/kafka-source/src/main/java/com/dtstack/flink/sql/source/kafka/metric/KafkaTopicPartitionLagMetric.java
deleted file mode 100644
index 7810056d7..000000000
--- a/kafka/kafka-source/src/main/java/com/dtstack/flink/sql/source/kafka/metric/KafkaTopicPartitionLagMetric.java
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package com.dtstack.flink.sql.source.kafka.metric;
-
-import org.apache.flink.metrics.Gauge;
-import org.apache.kafka.clients.consumer.internals.SubscriptionState;
-import org.apache.kafka.common.TopicPartition;
-import org.apache.kafka.common.requests.IsolationLevel;
-
-/**
- * @author: chuixue
- * @create: 2019-11-05 11:09
- * @description:
- **/
-public class KafkaTopicPartitionLagMetric implements Gauge {
-
- private SubscriptionState subscriptionState;
-
- private TopicPartition tp;
-
- public KafkaTopicPartitionLagMetric(SubscriptionState subscriptionState, TopicPartition tp){
- this.subscriptionState = subscriptionState;
- this.tp = tp;
- }
-
- @Override
- public Long getValue() {
- return subscriptionState.partitionLag(tp, IsolationLevel.READ_UNCOMMITTED);
- }
-}
diff --git a/kafka/kafka-source/src/main/java/com/dtstack/flink/sql/source/kafka/table/KafkaSourceParser.java b/kafka/kafka-source/src/main/java/com/dtstack/flink/sql/source/kafka/table/KafkaSourceParser.java
deleted file mode 100644
index a99f49298..000000000
--- a/kafka/kafka-source/src/main/java/com/dtstack/flink/sql/source/kafka/table/KafkaSourceParser.java
+++ /dev/null
@@ -1,97 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package com.dtstack.flink.sql.source.kafka.table;
-
-import com.dtstack.flink.sql.table.AbsSourceParser;
-import com.dtstack.flink.sql.table.TableInfo;
-import com.dtstack.flink.sql.util.ClassUtil;
-import com.dtstack.flink.sql.util.MathUtil;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.Map;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
-
-/**
- * @author: chuixue
- * @create: 2019-11-05 11:08
- * @description:
- **/
-public class KafkaSourceParser extends AbsSourceParser {
-
- private static final Logger LOG = LoggerFactory.getLogger(KafkaSourceParser.class);
-
- private static final String KAFKA_NEST_FIELD_KEY = "nestFieldKey";
-
- private static Pattern kafkaNestFieldKeyPattern = Pattern.compile("(?i)((@*\\S+\\.)*\\S+)\\s+(\\w+)\\s+AS\\s+(\\w+)(\\s+NOT\\s+NULL)?$");
-
- static {
- keyPatternMap.put(KAFKA_NEST_FIELD_KEY, kafkaNestFieldKeyPattern);
-
- keyHandlerMap.put(KAFKA_NEST_FIELD_KEY, KafkaSourceParser::dealNestField);
- }
-
- /**
- * add parser for alias field
- *
- * @param matcher
- * @param tableInfo
- */
- static void dealNestField(Matcher matcher, TableInfo tableInfo) {
- String physicalField = matcher.group(1);
- String fieldType = matcher.group(3);
- String mappingField = matcher.group(4);
- Class fieldClass = ClassUtil.stringConvertClass(fieldType);
- boolean notNull = matcher.group(5) != null;
- TableInfo.FieldExtraInfo fieldExtraInfo = new TableInfo.FieldExtraInfo();
- fieldExtraInfo.setNotNull(notNull);
-
- tableInfo.addPhysicalMappings(mappingField, physicalField);
- tableInfo.addField(mappingField);
- tableInfo.addFieldClass(fieldClass);
- tableInfo.addFieldType(fieldType);
- tableInfo.addFieldExtraInfo(fieldExtraInfo);
- if (LOG.isInfoEnabled()) {
- LOG.info(physicalField + "--->" + mappingField + " Class: " + fieldClass.toString());
- }
- }
-
- @Override
- public TableInfo getTableInfo(String tableName, String fieldsInfo, Map props) throws Exception {
- KafkaSourceTableInfo kafkaSourceTableInfo = new KafkaSourceTableInfo();
- kafkaSourceTableInfo.setName(tableName);
- parseFieldsInfo(fieldsInfo, kafkaSourceTableInfo);
-
- kafkaSourceTableInfo.setParallelism(MathUtil.getIntegerVal(props.get(KafkaSourceTableInfo.PARALLELISM_KEY.toLowerCase())));
- String bootstrapServer = MathUtil.getString(props.get(KafkaSourceTableInfo.BOOTSTRAPSERVERS_KEY.toLowerCase()));
- if (bootstrapServer == null || bootstrapServer.trim().equals("")) {
- throw new Exception("BootstrapServers can not be empty!");
- } else {
- kafkaSourceTableInfo.setBootstrapServers(bootstrapServer);
- }
- kafkaSourceTableInfo.setGroupId(MathUtil.getString(props.get(KafkaSourceTableInfo.GROUPID_KEY.toLowerCase())));
- kafkaSourceTableInfo.setTopic(MathUtil.getString(props.get(KafkaSourceTableInfo.TOPIC_KEY.toLowerCase())));
- kafkaSourceTableInfo.setOffsetReset(MathUtil.getString(props.get(KafkaSourceTableInfo.OFFSETRESET_KEY.toLowerCase())));
- kafkaSourceTableInfo.setTopicIsPattern(MathUtil.getBoolean(props.get(KafkaSourceTableInfo.TOPICISPATTERN_KEY.toLowerCase())));
- kafkaSourceTableInfo.setTimeZone(MathUtil.getString(props.get(KafkaSourceTableInfo.TIME_ZONE_KEY.toLowerCase())));
- kafkaSourceTableInfo.check();
- return kafkaSourceTableInfo;
- }
-}
diff --git a/kafka/kafka-source/src/main/java/com/dtstack/flink/sql/source/kafka/table/KafkaSourceTableInfo.java b/kafka/kafka-source/src/main/java/com/dtstack/flink/sql/source/kafka/table/KafkaSourceTableInfo.java
deleted file mode 100644
index 33b704ac0..000000000
--- a/kafka/kafka-source/src/main/java/com/dtstack/flink/sql/source/kafka/table/KafkaSourceTableInfo.java
+++ /dev/null
@@ -1,128 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package com.dtstack.flink.sql.source.kafka.table;
-
-import com.dtstack.flink.sql.table.SourceTableInfo;
-import org.apache.flink.calcite.shaded.com.google.common.base.Preconditions;
-
-/**
- * @author: chuixue
- * @create: 2019-11-05 11:09
- * @description:
- **/
-public class KafkaSourceTableInfo extends SourceTableInfo {
-
- //version
- private static final String CURR_TYPE = "kafka";
-
- public static final String BOOTSTRAPSERVERS_KEY = "bootstrapServers";
-
- public static final String TOPIC_KEY = "topic";
-
- public static final String GROUPID_KEY = "groupId";
-
- public static final String OFFSETRESET_KEY = "offsetReset";
-
- public static final String TOPICISPATTERN_KEY = "topicIsPattern";
-
- private String bootstrapServers;
-
- private String topic;
-
- private String groupId;
-
- private Boolean topicIsPattern = false;
-
- public Boolean getTopicIsPattern() {
- return topicIsPattern;
- }
-
- public void setTopicIsPattern(Boolean topicIsPattern) {
- if (topicIsPattern == null) return;
-
- this.topicIsPattern = topicIsPattern;
- }
-
- //latest, earliest
- private String offsetReset = "latest";
-
- private String offset;
-
- public KafkaSourceTableInfo() {
- super.setType(CURR_TYPE);
- }
-
-
- public String getBootstrapServers() {
- return bootstrapServers;
- }
-
- public void setBootstrapServers(String bootstrapServers) {
- this.bootstrapServers = bootstrapServers;
- }
-
- public String getTopic() {
- return topic;
- }
-
- public void setTopic(String topic) {
- this.topic = topic;
- }
-
- public String getGroupId() {
- return groupId;
- }
-
- public void setGroupId(String groupId) {
- this.groupId = groupId;
- }
-
- public String getOffsetReset() {
- return offsetReset;
- }
-
- public void setOffsetReset(String offsetReset) {
- if (offsetReset == null) {
- return;
- }
-
- this.offsetReset = offsetReset;
- }
-
- public String getOffset() {
- return offset;
- }
-
- public void setOffset(String offset) {
- this.offset = offset;
- }
-
- @Override
- public boolean check() {
- Preconditions.checkNotNull(bootstrapServers, "kafka of bootstrapServers is required");
- Preconditions.checkNotNull(topic, "kafka of topic is required");
- return false;
- }
-
- @Override
- public String getType() {
-// return super.getType() + SOURCE_SUFFIX;
- return super.getType();
- }
-}
diff --git a/kafka/pom.xml b/kafka/pom.xml
deleted file mode 100644
index 772671ff6..000000000
--- a/kafka/pom.xml
+++ /dev/null
@@ -1,43 +0,0 @@
-
-
-
- flink.sql
- com.dtstack.flink
- 1.0-SNAPSHOT
-
- 4.0.0
-
- sql.kafka
- pom
-
-
- kafka-source
- kafka-sink
-
-
-
-
- org.apache.flink
- flink-connector-kafka_2.11
- ${flink.version}
-
-
-
- junit
- junit
- 3.8.1
- test
-
-
-
- com.dtstack.flink
- sql.core
- 1.0-SNAPSHOT
- provided
-
-
-
-
-
\ No newline at end of file
diff --git a/kafka08/kafka08-sink/pom.xml b/kafka08/kafka08-sink/pom.xml
deleted file mode 100644
index 47391d182..000000000
--- a/kafka08/kafka08-sink/pom.xml
+++ /dev/null
@@ -1,94 +0,0 @@
-
-
-
- sql.kafka08
- com.dtstack.flink
- 1.0-SNAPSHOT
- ../pom.xml
-
- 4.0.0
-
- sql.sink.kafka08
- jar
-
- kafka08-sink
- http://maven.apache.org
-
-
-
- org.apache.flink
- flink-connector-kafka-0.8_2.11
- ${flink.version}
-
-
- org.apache.flink
- flink-json
- ${flink.version}
-
-
-
-
-
-
- org.apache.maven.plugins
- maven-shade-plugin
- 1.4
-
-
- package
-
- shade
-
-
-
-
-
-
-
-
-
- *:*
-
- META-INF/*.SF
- META-INF/*.DSA
- META-INF/*.RSA
-
-
-
-
-
-
-
-
-
- maven-antrun-plugin
- 1.2
-
-
- copy-resources
-
- package
-
- run
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
\ No newline at end of file
diff --git a/kafka08/kafka08-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/CustomerCsvSerialization.java b/kafka08/kafka08-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/CustomerCsvSerialization.java
deleted file mode 100644
index 4168edbd1..000000000
--- a/kafka08/kafka08-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/CustomerCsvSerialization.java
+++ /dev/null
@@ -1,123 +0,0 @@
-package com.dtstack.flink.sql.sink.kafka;
-
-import org.apache.commons.lang3.StringEscapeUtils;
-import org.apache.flink.annotation.Internal;
-import org.apache.flink.api.common.ExecutionConfig;
-import org.apache.flink.api.common.typeinfo.TypeInformation;
-import org.apache.flink.api.common.typeutils.TypeSerializer;
-import org.apache.flink.api.common.typeutils.TypeSerializerSnapshot;
-import org.apache.flink.api.common.typeutils.base.TypeSerializerSingleton;
-import org.apache.flink.core.memory.DataInputView;
-import org.apache.flink.core.memory.DataOutputView;
-import org.apache.flink.types.Row;
-import org.apache.flink.types.StringValue;
-
-import java.io.IOException;
-
-import static org.apache.flink.api.java.typeutils.runtime.NullMaskUtils.writeNullMask;
-
-/**
- *
- * Date: 2018/12/18
- * Company: www.dtstack.com
- * @author DocLi
- *
- * @modifyer maqi
- */
-@Internal
-public final class CustomerCsvSerialization extends TypeSerializerSingleton {
-
- private static final long serialVersionUID = 1L;
-
- private String fieldDelimiter = "\u0001";
- private TypeInformation>[] fieldTypes;
- private TypeSerializer[] fieldSerializers;
- private static final Row EMPTY = null;
-
- public CustomerCsvSerialization(String fielddelimiter,TypeInformation>[] fieldTypes) {
- this.fieldDelimiter = fielddelimiter;
- this.fieldTypes = fieldTypes;
- this.fieldSerializers = (TypeSerializer[])createSerializer(new ExecutionConfig());
- }
-
- public TypeSerializer>[] createSerializer(ExecutionConfig config) {
- int len = fieldTypes.length;
- TypeSerializer>[] fieldSerializers = new TypeSerializer[len];
- for (int i = 0; i < len; i++) {
- fieldSerializers[i] = fieldTypes[i].createSerializer(config);
- }
- return fieldSerializers;
- }
-
- @Override
- public boolean isImmutableType() {
- return true;
- }
-
- @Override
- public Row createInstance() {
- return EMPTY;
- }
-
- @Override
- public Row copy(Row from) {
- return null;
- }
-
- @Override
- public Row copy(Row from, Row reuse) {
- return null;
- }
-
- @Override
- public int getLength() {
- return -1;
- }
-
- @Override
- public void serialize(Row record, DataOutputView target) throws IOException {
- int len = fieldSerializers.length;
-
- if (record.getArity() != len) {
- throw new RuntimeException("Row arity of from does not match serializers.");
- }
-
- // write a null mask
- writeNullMask(len, record, target);
-
- // serialize non-null fields
- StringBuffer stringBuffer = new StringBuffer();
- for (int i = 0; i < len; i++) {
- Object o = record.getField(i);
- if (o != null) {
- //fieldSerializers[i].serialize(o, target);
- stringBuffer.append(o);
- }
- if(i != len-1){
- stringBuffer.append(StringEscapeUtils.unescapeJava(fieldDelimiter));
- //fieldSerializers[i].serialize(fieldDelimiter, target);
- }
- }
- StringValue.writeString(stringBuffer.toString(), target);
- }
-
- @Override
- public Row deserialize(DataInputView source) throws IOException {
- return null;
- }
-
- @Override
- public Row deserialize(Row reuse, DataInputView source) throws IOException {
- return null;
- }
-
- @Override
- public void copy(DataInputView source, DataOutputView target) throws IOException {
- StringValue.copyString(source, target);
- }
-
- @Override
- public TypeSerializerSnapshot snapshotConfiguration() {
- return null;
- }
-}
diff --git a/kafka08/kafka08-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/KafkaSink.java b/kafka08/kafka08-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/KafkaSink.java
deleted file mode 100644
index fe3c00f03..000000000
--- a/kafka08/kafka08-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/KafkaSink.java
+++ /dev/null
@@ -1,138 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package com.dtstack.flink.sql.sink.kafka;
-
-import com.dtstack.flink.sql.sink.IStreamSinkGener;
-import com.dtstack.flink.sql.sink.kafka.table.KafkaSinkTableInfo;
-import com.dtstack.flink.sql.table.TargetTableInfo;
-import org.apache.flink.api.common.serialization.SerializationSchema;
-import org.apache.flink.api.common.serialization.TypeInformationSerializationSchema;
-import org.apache.flink.api.common.typeinfo.TypeInformation;
-import org.apache.flink.api.java.typeutils.RowTypeInfo;
-import org.apache.flink.formats.json.JsonRowSerializationSchema;
-import org.apache.flink.streaming.api.datastream.DataStream;
-import org.apache.flink.streaming.connectors.kafka.Kafka08TableSink;
-import org.apache.flink.streaming.connectors.kafka.KafkaTableSinkBase;
-import org.apache.flink.streaming.connectors.kafka.partitioner.FlinkFixedPartitioner;
-import org.apache.flink.streaming.connectors.kafka.partitioner.FlinkKafkaPartitioner;
-import org.apache.flink.table.api.TableSchema;
-import org.apache.flink.table.sinks.AppendStreamTableSink;
-import org.apache.flink.table.sinks.TableSink;
-import org.apache.flink.types.Row;
-
-import java.util.Optional;
-import java.util.Properties;
-/**
- * Date: 2018/12/18
- * Company: www.dtstack.com
- * @author DocLi
- *
- * @modifyer maqi
- */
-public class KafkaSink implements AppendStreamTableSink, IStreamSinkGener {
-
- protected String[] fieldNames;
-
- protected TypeInformation>[] fieldTypes;
-
- /** The schema of the table. */
- private TableSchema schema;
-
- /** The Kafka topic to write to. */
- protected String topic;
-
- /** Properties for the Kafka producer. */
- protected Properties properties;
-
- /** Serialization schema for encoding records to Kafka. */
- protected SerializationSchema serializationSchema;
-
- /** Partitioner to select Kafka partition for each item. */
- protected Optional> partitioner;
-
- @Override
- public KafkaSink genStreamSink(TargetTableInfo targetTableInfo) {
- KafkaSinkTableInfo kafka08SinkTableInfo = (KafkaSinkTableInfo) targetTableInfo;
- this.topic = kafka08SinkTableInfo.getKafkaParam("topic");
-
- Properties props = new Properties();
- for (String key:kafka08SinkTableInfo.getKafkaParamKeys()) {
- props.setProperty(key, kafka08SinkTableInfo.getKafkaParam(key));
- }
- this.properties = props;
- this.partitioner = Optional.of(new FlinkFixedPartitioner<>());
- this.fieldNames = kafka08SinkTableInfo.getFields();
- TypeInformation[] types = new TypeInformation[kafka08SinkTableInfo.getFields().length];
- for(int i = 0; i< kafka08SinkTableInfo.getFieldClasses().length; i++){
- types[i] = TypeInformation.of(kafka08SinkTableInfo.getFieldClasses()[i]);
- }
- this.fieldTypes = types;
-
- TableSchema.Builder schemaBuilder = TableSchema.builder();
- for (int i=0;i dataStream) {
- KafkaTableSinkBase kafkaTableSink = new Kafka08TableSink(
- schema,
- topic,
- properties,
- partitioner,
- serializationSchema
- );
-
- kafkaTableSink.emitDataStream(dataStream);
- }
-
- @Override
- public TypeInformation getOutputType() {
- return new RowTypeInfo(fieldTypes, fieldNames);
- }
-
- @Override
- public String[] getFieldNames() {
- return fieldNames;
- }
-
- @Override
- public TypeInformation>[] getFieldTypes() {
- return fieldTypes;
- }
-
- @Override
- public TableSink configure(String[] fieldNames, TypeInformation>[] fieldTypes) {
- this.fieldNames = fieldNames;
- this.fieldTypes = fieldTypes;
- return this;
- }
-
-}
diff --git a/kafka08/kafka08-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/table/KafkaSinkParser.java b/kafka08/kafka08-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/table/KafkaSinkParser.java
deleted file mode 100644
index 2b6c50512..000000000
--- a/kafka08/kafka08-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/table/KafkaSinkParser.java
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package com.dtstack.flink.sql.sink.kafka.table;
-
-import com.dtstack.flink.sql.table.AbsTableParser;
-import com.dtstack.flink.sql.table.TableInfo;
-import com.dtstack.flink.sql.util.MathUtil;
-
-import java.util.Map;
-
-/**
- *
- * Date: 2018/12/18
- * Company: www.dtstack.com
- *
- * @author DocLi
- *
- * @modifyer maqi
- *
- */
-public class KafkaSinkParser extends AbsTableParser {
- @Override
- public TableInfo getTableInfo(String tableName, String fieldsInfo, Map props) {
- KafkaSinkTableInfo kafka08SinkTableInfo = new KafkaSinkTableInfo();
- kafka08SinkTableInfo.setName(tableName);
- parseFieldsInfo(fieldsInfo, kafka08SinkTableInfo);
- kafka08SinkTableInfo.setParallelism(MathUtil.getIntegerVal(props.get(KafkaSinkTableInfo.PARALLELISM_KEY.toLowerCase())));
- if (props.get(KafkaSinkTableInfo.SINK_DATA_TYPE) != null) {
- kafka08SinkTableInfo.setSinkDataType(props.get(KafkaSinkTableInfo.SINK_DATA_TYPE).toString());
- }
- if (props.get(KafkaSinkTableInfo.FIELD_DELINITER) != null) {
- kafka08SinkTableInfo.setFieldDelimiter(props.get(KafkaSinkTableInfo.FIELD_DELINITER).toString());
- }
-
- for (String key:props.keySet()) {
- if (!key.isEmpty() && key.startsWith("kafka.")) {
- kafka08SinkTableInfo.addKafkaParam(key.substring(6), props.get(key).toString());
- }
- }
- return kafka08SinkTableInfo;
- }
-}
diff --git a/kafka08/kafka08-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/table/KafkaSinkTableInfo.java b/kafka08/kafka08-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/table/KafkaSinkTableInfo.java
deleted file mode 100644
index 61acfa2d6..000000000
--- a/kafka08/kafka08-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/table/KafkaSinkTableInfo.java
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package com.dtstack.flink.sql.sink.kafka.table;
-
-import com.dtstack.flink.sql.table.TargetTableInfo;
-import com.google.common.base.Preconditions;
-
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Set;
-
-/**
- *
- * Date: 2018/12/18
- * Company: www.dtstack.com
- * @author DocLi
- *
- * @modifyer maqi
- *
- */
-public class KafkaSinkTableInfo extends TargetTableInfo {
- //version
- private static final String CURR_TYPE = "kafka08";
-
- public KafkaSinkTableInfo(){
- super.setType(CURR_TYPE);
- }
-
- public Map kafkaParam = new HashMap();
-
- public void addKafkaParam(String key,String value){
- kafkaParam.put(key,value);
- }
-
- public String getKafkaParam(String key){
- return kafkaParam.get(key);
- }
-
- public Set getKafkaParamKeys(){
- return kafkaParam.keySet();
- }
-
- @Override
- public boolean check() {
- Preconditions.checkNotNull(kafkaParam.get("bootstrap.servers"), "kafka of bootstrapServers is required");
- Preconditions.checkNotNull(kafkaParam.get("topic"), "kafka of topic is required");
- // Preconditions.checkNotNull(kafkaParam.get("groupId"), "kafka of groupId is required");
- return false;
- }
-
- @Override
- public String getType() {
- return super.getType();
- }
-}
diff --git a/kafka08/kafka08-source/pom.xml b/kafka08/kafka08-source/pom.xml
deleted file mode 100644
index 64264fd46..000000000
--- a/kafka08/kafka08-source/pom.xml
+++ /dev/null
@@ -1,91 +0,0 @@
-
-
-
- sql.kafka08
- com.dtstack.flink
- 1.0-SNAPSHOT
- ../pom.xml
-
- 4.0.0
-
- sql.source.kafka08
- jar
-
- kafka08-source
- http://maven.apache.org
-
-
-
- org.apache.flink
- flink-connector-kafka-0.8_2.11
- ${flink.version}
-
-
-
-
-
-
-
- org.apache.maven.plugins
- maven-shade-plugin
- 1.4
-
-
- package
-
- shade
-
-
-
-
- org.slf4j
-
-
-
-
- *:*
-
- META-INF/*.SF
- META-INF/*.DSA
- META-INF/*.RSA
-
-
-
-
-
-
-
-
-
- maven-antrun-plugin
- 1.2
-
-
- copy-resources
-
- package
-
- run
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
\ No newline at end of file
diff --git a/kafka08/kafka08-source/src/main/java/com/dtstack/flink/sql/source/kafka/KafkaSource.java b/kafka08/kafka08-source/src/main/java/com/dtstack/flink/sql/source/kafka/KafkaSource.java
deleted file mode 100644
index bfbffdf14..000000000
--- a/kafka08/kafka08-source/src/main/java/com/dtstack/flink/sql/source/kafka/KafkaSource.java
+++ /dev/null
@@ -1,135 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-
-package com.dtstack.flink.sql.source.kafka;
-
-import com.dtstack.flink.sql.source.IStreamSourceGener;
-import com.dtstack.flink.sql.source.kafka.consumer.CustomerCommonConsumer;
-import com.dtstack.flink.sql.source.kafka.consumer.CustomerCsvConsumer;
-import com.dtstack.flink.sql.source.kafka.consumer.CustomerJsonConsumer;
-import com.dtstack.flink.sql.source.kafka.deserialization.CustomerCommonDeserialization;
-import com.dtstack.flink.sql.source.kafka.deserialization.CustomerCsvDeserialization;
-import com.dtstack.flink.sql.source.kafka.deserialization.CustomerJsonDeserialization;
-import com.dtstack.flink.sql.source.kafka.table.KafkaSourceTableInfo;
-import com.dtstack.flink.sql.table.SourceTableInfo;
-import com.dtstack.flink.sql.util.DtStringUtil;
-import com.dtstack.flink.sql.util.PluginUtil;
-import org.apache.commons.lang3.StringUtils;
-import org.apache.flink.api.common.typeinfo.TypeInformation;
-import org.apache.flink.api.java.typeutils.RowTypeInfo;
-import org.apache.flink.streaming.api.datastream.DataStreamSource;
-import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
-import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer08;
-import org.apache.flink.streaming.connectors.kafka.internals.KafkaTopicPartition;
-import org.apache.flink.table.api.Table;
-import org.apache.flink.table.api.java.StreamTableEnvironment;
-import org.apache.flink.types.Row;
-
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Properties;
-import java.util.regex.Pattern;
-
-public class KafkaSource implements IStreamSourceGener {
-
- private static final String SOURCE_OPERATOR_NAME_TPL = "${topic}_${table}";
-
- /**
- * Get kafka data source, you need to provide the data field names, data types
- * If you do not specify auto.offset.reset, the default use groupoffset
- *
- * @param sourceTableInfo
- * @return
- */
- @SuppressWarnings("rawtypes")
- @Override
- public Table genStreamSource(SourceTableInfo sourceTableInfo, StreamExecutionEnvironment env, StreamTableEnvironment tableEnv) {
- KafkaSourceTableInfo kafka08SourceTableInfo = (KafkaSourceTableInfo) sourceTableInfo;
- String topicName = kafka08SourceTableInfo.getKafkaParam("topic");
- String offsetReset = kafka08SourceTableInfo.getKafkaParam("auto.offset.reset");
- Boolean topicIsPattern = kafka08SourceTableInfo.getPatternTopic();
-
- Properties props = new Properties();
- for (String key : kafka08SourceTableInfo.getKafkaParamKeys()) {
- props.setProperty(key, kafka08SourceTableInfo.getKafkaParam(key));
- }
-
- TypeInformation[] types = new TypeInformation[kafka08SourceTableInfo.getFields().length];
- for (int i = 0; i < kafka08SourceTableInfo.getFieldClasses().length; i++) {
- types[i] = TypeInformation.of(kafka08SourceTableInfo.getFieldClasses()[i]);
- }
-
- TypeInformation typeInformation = new RowTypeInfo(types, kafka08SourceTableInfo.getFields());
-
- FlinkKafkaConsumer08 kafkaSrc;
- String fields = StringUtils.join(kafka08SourceTableInfo.getFields(), ",");
-
- if ("json".equalsIgnoreCase(kafka08SourceTableInfo.getSourceDataType())) {
- if (topicIsPattern) {
- kafkaSrc = new CustomerJsonConsumer(Pattern.compile(topicName),
- new com.dtstack.flink.sql.source.kafka.deserialization.CustomerJsonDeserialization(typeInformation), props);
- } else {
- kafkaSrc = new CustomerJsonConsumer(topicName,
- new CustomerJsonDeserialization(typeInformation), props);
- }
- } else if ("csv".equalsIgnoreCase(kafka08SourceTableInfo.getSourceDataType())) {
- if (topicIsPattern) {
- kafkaSrc = new CustomerCsvConsumer(Pattern.compile(topicName),
- new com.dtstack.flink.sql.source.kafka.deserialization.CustomerCsvDeserialization(typeInformation,
- kafka08SourceTableInfo.getFieldDelimiter(), kafka08SourceTableInfo.getLengthCheckPolicy()), props);
- } else {
- kafkaSrc = new CustomerCsvConsumer(topicName,
- new CustomerCsvDeserialization(typeInformation,
- kafka08SourceTableInfo.getFieldDelimiter(), kafka08SourceTableInfo.getLengthCheckPolicy()), props);
- }
- } else {
- if (topicIsPattern) {
- kafkaSrc = new CustomerCommonConsumer(Pattern.compile(topicName), new com.dtstack.flink.sql.source.kafka.deserialization.CustomerCommonDeserialization(), props);
- } else {
- kafkaSrc = new CustomerCommonConsumer(topicName, new CustomerCommonDeserialization(), props);
- }
- }
-
- //earliest,latest
- if ("earliest".equalsIgnoreCase(offsetReset)) {
- kafkaSrc.setStartFromEarliest();
- } else if (DtStringUtil.isJosn(offsetReset)) {// {"0":12312,"1":12321,"2":12312}
- try {
- Properties properties = PluginUtil.jsonStrToObject(offsetReset, Properties.class);
- Map offsetMap = PluginUtil.ObjectToMap(properties);
- Map specificStartupOffsets = new HashMap<>();
- for (Map.Entry entry : offsetMap.entrySet()) {
- specificStartupOffsets.put(new KafkaTopicPartition(topicName, Integer.valueOf(entry.getKey())), Long.valueOf(entry.getValue().toString()));
- }
- kafkaSrc.setStartFromSpecificOffsets(specificStartupOffsets);
- } catch (Exception e) {
- throw new RuntimeException("not support offsetReset type:" + offsetReset);
- }
- } else {
- kafkaSrc.setStartFromLatest();
- }
- String sourceOperatorName = SOURCE_OPERATOR_NAME_TPL.replace("${topic}", topicName).replace("${table}", sourceTableInfo.getName());
- DataStreamSource kafkaSource = env.addSource(kafkaSrc, sourceOperatorName, typeInformation);
- Integer parallelism = kafka08SourceTableInfo.getParallelism();
- if (parallelism != null) {
- kafkaSource.setParallelism(parallelism);
- }
- return tableEnv.fromDataStream(kafkaSource, fields);
- }
-}
diff --git a/kafka08/kafka08-source/src/main/java/com/dtstack/flink/sql/source/kafka/consumer/CustomerCommonConsumer.java b/kafka08/kafka08-source/src/main/java/com/dtstack/flink/sql/source/kafka/consumer/CustomerCommonConsumer.java
deleted file mode 100644
index 34b349e2c..000000000
--- a/kafka08/kafka08-source/src/main/java/com/dtstack/flink/sql/source/kafka/consumer/CustomerCommonConsumer.java
+++ /dev/null
@@ -1,60 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package com.dtstack.flink.sql.source.kafka.consumer;
-
-import com.dtstack.flink.sql.source.kafka.deserialization.CustomerCommonDeserialization;
-import org.apache.flink.streaming.api.functions.source.SourceFunction;
-import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer08;
-import org.apache.flink.streaming.util.serialization.KeyedDeserializationSchema;
-import org.apache.flink.types.Row;
-
-import java.util.Properties;
-import java.util.regex.Pattern;
-
-/**
- *
- * Date: 2018/12/18
- * Company: www.dtstack.com
- * @author DocLi
- *
- * @modifyer maqi
- */
-public class CustomerCommonConsumer extends FlinkKafkaConsumer08 {
-
- private CustomerCommonDeserialization customerCommonDeserialization;
-
-
- public CustomerCommonConsumer(String topic, KeyedDeserializationSchema deserializer, Properties props) {
- super(topic, deserializer, props);
- this.customerCommonDeserialization= (CustomerCommonDeserialization) deserializer;
- }
-
- public CustomerCommonConsumer(Pattern subscriptionPattern, KeyedDeserializationSchema deserializer, Properties props) {
- super(subscriptionPattern, deserializer, props);
- this.customerCommonDeserialization= (CustomerCommonDeserialization) deserializer;
- }
-
-
- @Override
- public void run(SourceFunction.SourceContext sourceContext) throws Exception {
- customerCommonDeserialization.setRuntimeContext(getRuntimeContext());
- customerCommonDeserialization.initMetric();
- super.run(sourceContext);
- }
-
-}
diff --git a/kafka08/kafka08-source/src/main/java/com/dtstack/flink/sql/source/kafka/consumer/CustomerCsvConsumer.java b/kafka08/kafka08-source/src/main/java/com/dtstack/flink/sql/source/kafka/consumer/CustomerCsvConsumer.java
deleted file mode 100644
index 7dc95450e..000000000
--- a/kafka08/kafka08-source/src/main/java/com/dtstack/flink/sql/source/kafka/consumer/CustomerCsvConsumer.java
+++ /dev/null
@@ -1,65 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package com.dtstack.flink.sql.source.kafka.consumer;
-
-import com.dtstack.flink.sql.source.AbsDeserialization;
-import com.dtstack.flink.sql.source.kafka.deserialization.CustomerCsvDeserialization;
-import org.apache.flink.streaming.api.functions.source.SourceFunction;
-import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer08;
-import org.apache.flink.types.Row;
-
-import java.util.Arrays;
-import java.util.Properties;
-import java.util.regex.Pattern;
-
-/**
- * Date: 2018/12/18
- * Company: www.dtstack.com
- * @author DocLi
- *
- * @modifyer maqi
- */
-
-public class CustomerCsvConsumer extends FlinkKafkaConsumer08 {
-
- private static final long serialVersionUID = -2265366268827807739L;
-
- private CustomerCsvDeserialization customerCsvDeserialization;
-
- public CustomerCsvConsumer(String topic, AbsDeserialization valueDeserializer, Properties props) {
- super(Arrays.asList(topic.split(",")), valueDeserializer, props);
- this.customerCsvDeserialization = (CustomerCsvDeserialization) valueDeserializer;
- }
-
- public CustomerCsvConsumer(Pattern subscriptionPattern, AbsDeserialization valueDeserializer, Properties props) {
- super(subscriptionPattern, valueDeserializer, props);
- this.customerCsvDeserialization = (CustomerCsvDeserialization) valueDeserializer;
- }
-
-
-
-
- @Override
- public void run(SourceFunction.SourceContext sourceContext) throws Exception {
- customerCsvDeserialization.setRuntimeContext(getRuntimeContext());
- customerCsvDeserialization.initMetric();
- super.run(sourceContext);
- }
-
-}
diff --git a/kafka08/kafka08-source/src/main/java/com/dtstack/flink/sql/source/kafka/consumer/CustomerJsonConsumer.java b/kafka08/kafka08-source/src/main/java/com/dtstack/flink/sql/source/kafka/consumer/CustomerJsonConsumer.java
deleted file mode 100644
index b627d81c0..000000000
--- a/kafka08/kafka08-source/src/main/java/com/dtstack/flink/sql/source/kafka/consumer/CustomerJsonConsumer.java
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package com.dtstack.flink.sql.source.kafka.consumer;
-
-import com.dtstack.flink.sql.source.AbsDeserialization;
-import com.dtstack.flink.sql.source.kafka.deserialization.CustomerJsonDeserialization;
-import org.apache.flink.streaming.api.functions.source.SourceFunction;
-import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer08;
-import org.apache.flink.types.Row;
-
-import java.util.Arrays;
-import java.util.Properties;
-import java.util.regex.Pattern;
-
-/**
- * Reason:
- * Date: 2018/10/19
- * Company: www.dtstack.com
- *
- * @author xuchao
- */
-
-public class CustomerJsonConsumer extends FlinkKafkaConsumer08 {
-
- private static final long serialVersionUID = -2265366268827807739L;
-
- private CustomerJsonDeserialization customerJsonDeserialization;
-
- public CustomerJsonConsumer(String topic, AbsDeserialization valueDeserializer, Properties props) {
- super(Arrays.asList(topic.split(",")), valueDeserializer, props);
- this.customerJsonDeserialization = (CustomerJsonDeserialization) valueDeserializer;
- }
-
- public CustomerJsonConsumer(Pattern subscriptionPattern, AbsDeserialization valueDeserializer, Properties props) {
- super(subscriptionPattern, valueDeserializer, props);
- this.customerJsonDeserialization = (CustomerJsonDeserialization) valueDeserializer;
- }
-
-
- @Override
- public void run(SourceFunction.SourceContext sourceContext) throws Exception {
- customerJsonDeserialization.setRuntimeContext(getRuntimeContext());
- customerJsonDeserialization.initMetric();
- super.run(sourceContext);
- }
-
-
-}
diff --git a/kafka08/kafka08-source/src/main/java/com/dtstack/flink/sql/source/kafka/deserialization/CustomerCommonDeserialization.java b/kafka08/kafka08-source/src/main/java/com/dtstack/flink/sql/source/kafka/deserialization/CustomerCommonDeserialization.java
deleted file mode 100644
index c92ce4aa4..000000000
--- a/kafka08/kafka08-source/src/main/java/com/dtstack/flink/sql/source/kafka/deserialization/CustomerCommonDeserialization.java
+++ /dev/null
@@ -1,93 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package com.dtstack.flink.sql.source.kafka.deserialization;
-
-import com.dtstack.flink.sql.source.AbsDeserialization;
-import org.apache.flink.api.common.typeinfo.TypeInformation;
-import org.apache.flink.api.common.typeinfo.Types;
-import org.apache.flink.api.java.typeutils.RowTypeInfo;
-import org.apache.flink.api.java.typeutils.TypeExtractor;
-import org.apache.flink.streaming.util.serialization.KeyedDeserializationSchema;
-import org.apache.flink.types.Row;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import static java.nio.charset.StandardCharsets.UTF_8;
-
-/**
- * Date: 2018/12/18
- * Company: www.dtstack.com
- * @author DocLi
- *
- * @modifyer maqi
- */
-public class CustomerCommonDeserialization extends AbsDeserialization implements KeyedDeserializationSchema {
- private static final Logger LOG = LoggerFactory.getLogger(CustomerCommonDeserialization.class);
-
- public static final String[] KAFKA_COLUMNS = new String[]{"_TOPIC", "_MESSAGEKEY", "_MESSAGE", "_PARTITION", "_OFFSET"};
-
- private boolean firstMsg = true;
-
- @Override
- public Row deserialize(byte[] messageKey, byte[] message, String topic, int partition, long offset) {
-
- //numInRecord.inc();
- //numInBytes.inc(message.length);
- //numInBytes.inc(messageKey.length);
-
- try {
- Row row = Row.of(
- topic, //topic
- messageKey == null ? null : new String(messageKey, UTF_8), //key
- new String(message, UTF_8), //message
- partition,
- offset
- );
- return row;
- } catch (Throwable t) {
- LOG.error(t.getMessage());
- // dirtyDataCounter.inc();
- return null;
- }
- }
-
- @Override
- public Row deserialize(byte[] message) throws IOException {
- return null;
- }
-
-
- @Override
- public boolean isEndOfStream(Row nextElement) {
- return false;
- }
-
- public TypeInformation getProducedType() {
- TypeInformation>[] types = new TypeInformation>[]{
- TypeExtractor.createTypeInfo(String.class),
- TypeExtractor.createTypeInfo(String.class), //createTypeInformation[String]
- TypeExtractor.createTypeInfo(String.class),
- Types.INT,
- Types.LONG
- };
- return new RowTypeInfo(types, KAFKA_COLUMNS);
- }
-
-}
diff --git a/kafka08/kafka08-source/src/main/java/com/dtstack/flink/sql/source/kafka/deserialization/CustomerCsvDeserialization.java b/kafka08/kafka08-source/src/main/java/com/dtstack/flink/sql/source/kafka/deserialization/CustomerCsvDeserialization.java
deleted file mode 100644
index f9f4c897c..000000000
--- a/kafka08/kafka08-source/src/main/java/com/dtstack/flink/sql/source/kafka/deserialization/CustomerCsvDeserialization.java
+++ /dev/null
@@ -1,120 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-
-
-package com.dtstack.flink.sql.source.kafka.deserialization;
-
-
-import com.dtstack.flink.sql.source.AbsDeserialization;
-import com.dtstack.flink.sql.util.DtStringUtil;
-import org.apache.flink.api.common.typeinfo.TypeInformation;
-import org.apache.flink.api.java.typeutils.RowTypeInfo;
-import com.fasterxml.jackson.databind.ObjectMapper;
-import org.apache.flink.types.Row;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-
-/**
- * Date: 2018/12/18
- * Company: www.dtstack.com
- * @author DocLi
- *
- * @modifyer maqi
- */
-
-public class CustomerCsvDeserialization extends AbsDeserialization {
-
- private static final Logger LOG = LoggerFactory.getLogger(CustomerCsvDeserialization.class);
-
- private static final long serialVersionUID = -2706012724306826506L;
-
- private final ObjectMapper objectMapper = new ObjectMapper();
-
- /** Type information describing the result type. */
- private final TypeInformation typeInfo;
-
- /** Field names to parse. Indices match fieldTypes indices. */
- private final String[] fieldNames;
-
- /** Types to parse fields as. Indices match fieldNames indices. */
- private final TypeInformation>[] fieldTypes;
-
- /** Flag indicating whether to fail on a missing field. */
- private boolean failOnMissingField;
-
- private String fieldDelimiter;
-
- private String lengthCheckPolicy;
-
- public CustomerCsvDeserialization(TypeInformation typeInfo, String fieldDelimiter, String lengthCheckPolicy){
- this.typeInfo = typeInfo;
-
- this.fieldNames = ((RowTypeInfo) typeInfo).getFieldNames();
-
- this.fieldTypes = ((RowTypeInfo) typeInfo).getFieldTypes();
-
- this.fieldDelimiter = fieldDelimiter;
-
- this.lengthCheckPolicy = lengthCheckPolicy;
- }
-
- @Override
- public Row deserialize(byte[] message) throws IOException {
-
- try {
- //numInRecord.inc();
- //numInBytes.inc(message.length);
- String[] fieldsList = null;
- if (message != null && message.length > 0){
- fieldsList = new String(message).split(fieldDelimiter);
- }
- if (fieldsList == null || fieldsList.length != fieldNames.length){//exception condition
- if (lengthCheckPolicy.equalsIgnoreCase("SKIP")) {
- return null;
- }else if (lengthCheckPolicy.equalsIgnoreCase("EXCEPTION")) {
- throw new RuntimeException("lengthCheckPolicy Error,message have "+fieldsList.length+" fields,sql have "+fieldNames.length);
- }
- }
-
- Row row = new Row(fieldNames.length);
- for (int i = 0; i < fieldNames.length; i++) {
- if (i {
-
- private static final Logger LOG = LoggerFactory.getLogger(CustomerJsonDeserialization.class);
-
- private static final long serialVersionUID = 2385115520960444192L;
-
- private final ObjectMapper objectMapper = new ObjectMapper();
-
- /** Type information describing the result type. */
- private final TypeInformation typeInfo;
-
- /** Field names to parse. Indices match fieldTypes indices. */
- private final String[] fieldNames;
-
- /** Types to parse fields as. Indices match fieldNames indices. */
- private final TypeInformation>[] fieldTypes;
-
- /** Flag indicating whether to fail on a missing field. */
- private boolean failOnMissingField;
-
- private AbstractFetcher fetcher;
-
- public CustomerJsonDeserialization(TypeInformation typeInfo){
- this.typeInfo = typeInfo;
-
- this.fieldNames = ((RowTypeInfo) typeInfo).getFieldNames();
-
- this.fieldTypes = ((RowTypeInfo) typeInfo).getFieldTypes();
- }
-
- @Override
- public Row deserialize(byte[] message) throws IOException {
-
- try {
- // numInRecord.inc();
- // numInBytes.inc(message.length);
-
- JsonNode root = objectMapper.readTree(message);
- Row row = new Row(fieldNames.length);
- for (int i = 0; i < fieldNames.length; i++) {
- JsonNode node = getIgnoreCase(root, fieldNames[i]);
-
- if (node == null) {
- if (failOnMissingField) {
- throw new IllegalStateException("Failed to find field with name '"
- + fieldNames[i] + "'.");
- } else {
- row.setField(i, null);
- }
- } else {
- // Read the value as specified type
- Object value = objectMapper.treeToValue(node, fieldTypes[i].getTypeClass());
- row.setField(i, value);
- }
- }
-
- // numInResolveRecord.inc();
- return row;
- } catch (Throwable t) {
- //add metric of dirty data
- LOG.error(t.getMessage());
- // dirtyDataCounter.inc();
- return null;
- }
- }
-
- public void setFailOnMissingField(boolean failOnMissingField) {
- this.failOnMissingField = failOnMissingField;
- }
-
- public JsonNode getIgnoreCase(JsonNode jsonNode, String key) {
-
- Iterator iter = jsonNode.fieldNames();
- while (iter.hasNext()) {
- String key1 = iter.next();
- if (key1.equalsIgnoreCase(key)) {
- return jsonNode.get(key1);
- }
- }
-
- return null;
-
- }
-
- public void setFetcher(AbstractFetcher fetcher) {
- this.fetcher = fetcher;
- }
-
-
- private static String partitionLagMetricName(TopicPartition tp) {
- return tp + ".records-lag";
- }
-}
diff --git a/kafka08/kafka08-source/src/main/java/com/dtstack/flink/sql/source/kafka/table/KafkaSourceParser.java b/kafka08/kafka08-source/src/main/java/com/dtstack/flink/sql/source/kafka/table/KafkaSourceParser.java
deleted file mode 100644
index eb085ac78..000000000
--- a/kafka08/kafka08-source/src/main/java/com/dtstack/flink/sql/source/kafka/table/KafkaSourceParser.java
+++ /dev/null
@@ -1,67 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-
-
-package com.dtstack.flink.sql.source.kafka.table;
-
-import com.dtstack.flink.sql.table.AbsSourceParser;
-import com.dtstack.flink.sql.table.TableInfo;
-import com.dtstack.flink.sql.util.MathUtil;
-
-import java.util.Map;
-/**
- *
- * Date: 2018/12/18
- * Company: www.dtstack.com
- * @author xuchao
- *
- * @modifyer DocLi
- */
-
-public class KafkaSourceParser extends AbsSourceParser {
-
- @Override
- public TableInfo getTableInfo(String tableName, String fieldsInfo, Map props) {
-
- KafkaSourceTableInfo kafka08SourceTableInfo = new KafkaSourceTableInfo();
- kafka08SourceTableInfo.setName(tableName);
- parseFieldsInfo(fieldsInfo, kafka08SourceTableInfo);
- kafka08SourceTableInfo.setParallelism(MathUtil.getIntegerVal(props.get(KafkaSourceTableInfo.PARALLELISM_KEY.toLowerCase())));
-
- kafka08SourceTableInfo.setPatternTopic(MathUtil.getBoolean(props.get(KafkaSourceTableInfo.PATTERNTOPIC_KEY.toLowerCase())));
-
- kafka08SourceTableInfo.setTimeZone(MathUtil.getString(props.get(KafkaSourceTableInfo.TIME_ZONE_KEY.toLowerCase())));
-
- if (props.get(KafkaSourceTableInfo.SOURCE_DATA_TYPE) != null) {
- kafka08SourceTableInfo.setSourceDataType(props.get(KafkaSourceTableInfo.SOURCE_DATA_TYPE).toString());
- }
- if (props.get(KafkaSourceTableInfo.FIELD_DELINITER) != null) {
- kafka08SourceTableInfo.setFieldDelimiter(props.get(KafkaSourceTableInfo.FIELD_DELINITER).toString());
- }
- if (props.get(KafkaSourceTableInfo.LENGTH_CHECK_POLICY) != null) {
- kafka08SourceTableInfo.setLengthCheckPolicy(props.get(KafkaSourceTableInfo.LENGTH_CHECK_POLICY).toString());
- }
- for (String key:props.keySet()) {
- if (!key.isEmpty() && key.startsWith("kafka.")) {
- kafka08SourceTableInfo.addKafkaParam(key.substring(6), props.get(key).toString());
- }
- }
- return kafka08SourceTableInfo;
- }
-}
diff --git a/kafka08/kafka08-source/src/main/java/com/dtstack/flink/sql/source/kafka/table/KafkaSourceTableInfo.java b/kafka08/kafka08-source/src/main/java/com/dtstack/flink/sql/source/kafka/table/KafkaSourceTableInfo.java
deleted file mode 100644
index 93e9d52f7..000000000
--- a/kafka08/kafka08-source/src/main/java/com/dtstack/flink/sql/source/kafka/table/KafkaSourceTableInfo.java
+++ /dev/null
@@ -1,83 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-
-
-package com.dtstack.flink.sql.source.kafka.table;
-
-import com.dtstack.flink.sql.table.SourceTableInfo;
-import com.google.common.base.Preconditions;
-
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Set;
-
-
-public class KafkaSourceTableInfo extends SourceTableInfo {
-
- //version
- private static final String CURR_TYPE = "kafka08";
-
- public static final String PATTERNTOPIC_KEY = "patterntopic";
-
- private Boolean patternTopic=false;
-
- public Boolean getPatternTopic() {
- return patternTopic;
- }
-
- public void setPatternTopic(Boolean patternTopic) {
- if (patternTopic==null){
- return;
- }
- this.patternTopic = patternTopic;
- }
-
- public KafkaSourceTableInfo(){
- super.setType(CURR_TYPE);
- }
-
- public Map kafkaParam = new HashMap<>();
-
- public void addKafkaParam(String key,String value){
- kafkaParam.put(key,value);
- }
-
- public String getKafkaParam(String key){
- return kafkaParam.get(key);
- }
-
- public Set getKafkaParamKeys(){
- return kafkaParam.keySet();
- }
-
- @Override
- public boolean check() {
- Preconditions.checkNotNull(kafkaParam.get("bootstrap.servers"), "kafka of bootstrapServers is required");
- Preconditions.checkNotNull(kafkaParam.get("topic"), "kafka of topic is required");
- String offset = kafkaParam.get("auto.offset.reset");
- Preconditions.checkState(offset.equalsIgnoreCase("latest")
- || offset.equalsIgnoreCase("earliest"), "kafka of offsetReset set fail");
- return false;
- }
-
- @Override
- public String getType() {
- return super.getType();
- }
-}
diff --git a/kafka08/pom.xml b/kafka08/pom.xml
deleted file mode 100644
index ced039c1c..000000000
--- a/kafka08/pom.xml
+++ /dev/null
@@ -1,37 +0,0 @@
-
-
-
- flink.sql
- com.dtstack.flink
- 1.0-SNAPSHOT
-
- 4.0.0
-
- sql.kafka08
- pom
-
-
- kafka08-source
- kafka08-sink
-
-
-
-
-