package com.github.jcustenborder.kafka.connect.cdc.xstream;

import com.github.jcustenborder.kafka.connect.cdc.Change;
+import com.google.common.base.Strings;
import com.google.common.collect.ImmutableMap;
+import org.apache.kafka.connect.data.Decimal;
import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.data.SchemaBuilder;
+import org.apache.kafka.connect.data.Timestamp;
+import org.apache.kafka.connect.errors.DataException;
+import org.apache.kafka.connect.storage.OffsetStorageReader;
import org.junit.Test;
+import org.junit.Assert;
+
+import java.math.BigDecimal;
+import java.sql.*;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.LinkedHashMap;
+import java.util.Map;
+import java.util.regex.Pattern;
+
+import static com.github.jcustenborder.kafka.connect.cdc.xstream.Oracle12cTableMetadataProvider.matches;

public class GemdTest {
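+  // Lookup of Oracle data type names to their Connect schema types, plus
+  // patterns for the parameterized TIMESTAMP column types handled separately below.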
+  static final Map<String, Schema.Type> TYPE_LOOKUP;
+  final static Pattern TIMESTAMP_PATTERN = Pattern.compile("^TIMESTAMP\\(\\d\\)$");
+  final static Pattern TIMESTAMP_WITH_LOCAL_TIMEZONE = Pattern.compile("^TIMESTAMP\\(\\d\\) WITH LOCAL TIME ZONE$");
+  final static Pattern TIMESTAMP_WITH_TIMEZONE = Pattern.compile("^TIMESTAMP\\(\\d\\) WITH TIME ZONE$");
+
+  static {
+    Map<String, Schema.Type> map = new HashMap<>();
+    map.put("BINARY_DOUBLE", Schema.Type.FLOAT64);
+    map.put("BINARY_FLOAT", Schema.Type.FLOAT32);
+    map.put("BLOB", Schema.Type.BYTES);
+    map.put("CHAR", Schema.Type.STRING);
+    map.put("NCHAR", Schema.Type.STRING);
+    map.put("CLOB", Schema.Type.STRING);
+    map.put("NCLOB", Schema.Type.STRING);
+    map.put("NVARCHAR2", Schema.Type.STRING);
+    map.put("VARCHAR2", Schema.Type.STRING);
+    map.put("NVARCHAR", Schema.Type.STRING);
+    map.put("VARCHAR", Schema.Type.STRING);
+    TYPE_LOOKUP = ImmutableMap.copyOf(map);
+  }
  @Test
  public void printQuery() {
    System.out.println(Oracle12cTableMetadataProvider.PRIMARY_KEY_SQL);
@@ -25,4 +61,92 @@ public void testBuildSchema() {
        .build();
    System.out.println(schema.parameters().isEmpty());
  }
+
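+  // Sanity check: a NUMBER value read as BigDecimal can be narrowed to Float/Double.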
+  @Test
+  public void testBigDecimal() {
+    String number = "290.88252314814815";
+    String seq = "5125828185";
+    Object bd = BigDecimal.valueOf(Double.valueOf(number));
+    Object bd1 = BigDecimal.valueOf(Double.valueOf(seq));
+    Object converted = Float.valueOf(((BigDecimal) bd).floatValue());
+    Object double1 = Double.valueOf(((BigDecimal) bd).doubleValue());
+    Object double2 = Double.valueOf(((BigDecimal) bd1).doubleValue());
+    Assert.assertTrue(converted instanceof Float);
+  }
+
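+  // Integration check against a live Oracle instance: runs COLUMN_SQL for
+  // MDS_ADMIN.T_HISTORY_VIEW_WAFER and prints the schema derived for each column.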
+  @Test
+  public void testFHOPEPSMetadata() throws ClassNotFoundException, SQLException {
+    Connection conn = null;
+    Statement stmt = null;
+    Class.forName("oracle.jdbc.driver.OracleDriver");
+    conn = DriverManager
+        .getConnection("jdbc:oracle:thin:@//fc8racps1n4:1521/f8modsp1.gfoundries.com", "xstrmadmin", "xtra");
+
+    try (PreparedStatement columnStatement = conn.prepareStatement(Oracle12cTableMetadataProvider.COLUMN_SQL)) {
+      columnStatement.setString(1, "MDS_ADMIN");
+      columnStatement.setString(2, "T_HISTORY_VIEW_WAFER");
+
+      Map<String, Schema> columnSchemas = new LinkedHashMap<>();
+
+      try (ResultSet resultSet = columnStatement.executeQuery()) {
+        while (resultSet.next()) {
+          String columnName = resultSet.getString(1);
+
+          try {
+            Schema columnSchema = generateSchema(resultSet, columnName);
+            columnSchemas.put(columnName, columnSchema);
+          } catch (Exception ex) {
+            throw new DataException(String.format("Exception thrown while generating schema for column %s", columnName), ex);
+          }
+        }
+      }
+
+      columnSchemas.forEach((k, v) -> System.out.println(String.format("Key: %s; Value: %s", k, v)));
+    }
+  }
+
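+  // Derives a Connect Schema for one COLUMN_SQL row: data type, scale,
+  // nullability flag, and column comments.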
+  Schema generateSchema(ResultSet resultSet, final String columnName) throws SQLException {
+    SchemaBuilder builder = null;
+
+    String dataType = resultSet.getString(2);
+    String scaleString = resultSet.getString(3);
+    int scale = resultSet.getInt(3);
+    boolean nullable = "Y".equalsIgnoreCase(resultSet.getString(4));
+    String comments = resultSet.getString(5);
+
+    if (TYPE_LOOKUP.containsKey(dataType)) {
+      Schema.Type type = TYPE_LOOKUP.get(dataType);
+      builder = SchemaBuilder.type(type);
+    } else if ("NUMBER".equals(dataType)) {
+      builder = scaleString != null ? Decimal.builder(scale) : SchemaBuilder.float64();
+    } else if (matches(TIMESTAMP_PATTERN, dataType)) {
+      builder = org.apache.kafka.connect.data.Timestamp.builder();
+    } else if (matches(TIMESTAMP_WITH_LOCAL_TIMEZONE, dataType)) {
+      builder = org.apache.kafka.connect.data.Timestamp.builder();
+    } else if (matches(TIMESTAMP_WITH_TIMEZONE, dataType)) {
+      builder = org.apache.kafka.connect.data.Timestamp.builder();
+    } else if ("DATE".equals(dataType)) {
+      builder = Timestamp.builder();
+    } else {
+      String message = String.format("Could not determine schema type for column %s. dataType = %s", columnName, dataType);
+      throw new DataException(message);
+    }
+
+    if (nullable) {
+      builder.optional();
+    }
+
+    if (!Strings.isNullOrEmpty(comments)) {
+      builder.doc(comments);
+    }
+
+    builder.parameters(
+        ImmutableMap.of(Change.ColumnValue.COLUMN_NAME, columnName)
+    );
+
+    return builder.build();
+  }
}