Skip to content

Commit c29f700

Browse files
committed
HADOOP-4955. Make DBOutputFormat use column names from setOutput(). Contributed by Kevin Peterson.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/core/trunk@738477 13f79535-47bb-0310-9956-ffa450edef68
1 parent 95f0382 commit c29f700

File tree

3 files changed

+51
-5
lines changed

3 files changed

+51
-5
lines changed

CHANGES.txt

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -68,7 +68,9 @@ Trunk (unreleased changes)
6868

6969
HADOOP-5079. HashFunction inadvertently destroys some randomness
7070
(Jonathan Ellis via stack)
71-
71+
72+
HADOOP-4955. Make DBOutputFormat use column names from setOutput().
73+
(Kevin Peterson via enis)
7274

7375
Release 0.20.0 - Unreleased
7476

src/mapred/org/apache/hadoop/mapred/lib/db/DBOutputFormat.java

Lines changed: 29 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -99,10 +99,31 @@ public void write(K key, V value) throws IOException {
9999

100100
/**
101101
* Constructs the query used as the prepared statement to insert data.
102+
*
103+
* @param table
104+
* the table to insert into
105+
* @param fieldNames
106+
* the fields to insert into. If field names are unknown, supply an
107+
* array of nulls.
102108
*/
103109
protected String constructQuery(String table, String[] fieldNames) {
110+
if(fieldNames == null) {
111+
throw new IllegalArgumentException("Field names may not be null");
112+
}
113+
104114
StringBuilder query = new StringBuilder();
105-
query.append("INSERT INTO ").append(table);
115+
query.append("INSERT INTO ").append(table);
116+
117+
if (fieldNames.length > 0 && fieldNames[0] != null) {
118+
query.append(" (");
119+
for (int i = 0; i < fieldNames.length; i++) {
120+
query.append(fieldNames[i]);
121+
if (i != fieldNames.length - 1) {
122+
query.append(",");
123+
}
124+
}
125+
query.append(")");
126+
}
106127
query.append(" VALUES (");
107128

108129
for (int i = 0; i < fieldNames.length; i++) {
@@ -145,9 +166,13 @@ public RecordWriter<K, V> getRecordWriter(FileSystem filesystem,
145166
/**
146167
* Initializes the reduce-part of the job with the appropriate output settings
147168
*
148-
* @param job The job
149-
* @param tableName The table to insert data into
150-
* @param fieldNames The field names in the table
169+
* @param job
170+
* The job
171+
* @param tableName
172+
* The table to insert data into
173+
* @param fieldNames
174+
* The field names in the table. If unknown, supply the appropriate
175+
* number of nulls.
151176
*/
152177
public static void setOutput(JobConf job, String tableName, String... fieldNames) {
153178
job.setOutputFormat(DBOutputFormat.class);
Lines changed: 19 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,19 @@
1+
package org.apache.hadoop.mapred.lib.db;
2+
3+
import org.apache.hadoop.io.NullWritable;
4+
5+
import junit.framework.TestCase;
6+
7+
public class TestConstructQuery extends TestCase {
8+
public void testConstructQuery() {
9+
DBOutputFormat<DBWritable, NullWritable> format = new DBOutputFormat<DBWritable, NullWritable>();
10+
String expected = "INSERT INTO hadoop_output (id,name,value) VALUES (?,?,?);";
11+
String[] fieldNames = new String[] { "id", "name", "value" };
12+
String actual = format.constructQuery("hadoop_output", fieldNames);
13+
assertEquals(expected, actual);
14+
expected = "INSERT INTO hadoop_output VALUES (?,?,?);";
15+
fieldNames = new String[] { null, null, null };
16+
actual = format.constructQuery("hadoop_output", fieldNames);
17+
assertEquals(expected, actual);
18+
}
19+
}

0 commit comments

Comments
 (0)