/** This is a HACK: a modified copy of the original class
* org.apache.avro.mapreduce.AvroMultipleOutputs
* from Avro 1.7.4 library.
*
* The places which were hacked are marked with a "hacked" comment.
*
* The file has two sets of changes introduced.
*
* First set of changes
* ====================
* A change was introduced in the `setSchema`
* method, which now handles map-only jobs differently than the
* original code did. In the original code, the OutputKeySchema was not set for
* map-only jobs, but here we set the schema regardless of whether the job
* is map-only or not. Without this schema being set, during the execution of a
* map-only job using multiple outputs, the AvroKeyOutputFormat
* class complained that the schema was not set;
* namely, the following exception was thrown:
*
* java.io.IOException: AvroKeyOutputFormat requires an output schema. Use AvroJob.setOutputKeySchema().
* at org.apache.avro.mapreduce.AvroKeyOutputFormat.getRecordWriter(AvroKeyOutputFormat.java:87)
* at org.apache.avro.mapreduce.AvroMultipleOutputs.getRecordWriter(AvroMultipleOutputs.java:459)
* at org.apache.avro.mapreduce.AvroMultipleOutputs.write(AvroMultipleOutputs.java:400)
* at eu.dnetlib.iis.common.javamapreduce.MultipleOutputs.write(MultipleOutputs.java:31)
* at eu.dnetlib.iis.core.examples.javamapreduce.MultipleOutputPersonClonerMapper.map(MultipleOutputPersonClonerMapper.java:45)
* at eu.dnetlib.iis.core.examples.javamapreduce.MultipleOutputPersonClonerMapper.map(MultipleOutputPersonClonerMapper.java:21)
* at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:140)
* at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:672)
* at org.apache.hadoop.mapred.MapTask.run(MapTask.java:330)
* at org.apache.hadoop.mapred.Child$4.run(Child.java:268)
* at java.security.AccessController.doPrivileged(Native Method)
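*
* A driver sketch that exercises this code path (the configuration and the
* keySchema variable are hypothetical):
*
* Job job = new Job(conf);
* job.setNumReduceTasks(0); // map-only job
* AvroMultipleOutputs.addNamedOutput(job, "avro1",
*     AvroKeyOutputFormat.class, keySchema);
* // with the original setSchema, a mapper calling write("avro1", datum)
* // failed as shown above; with this change the output schema is set and
* // the write succeeds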
*
* WARNING: this change was made without my full comprehension of why
* it works or of the reasoning behind the original code
* (is there a bug in the original code?).
* Thus, this hack might be very risky and might break something in the
* future.
*
* Second set of changes
* =====================
* Making some of the methods of the class public. This was needed to implement
* the functionality described in the comment on the
* {@link MultipleOutputs#createOutputFiles} method.
*/
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package eu.dnetlib.iis.common.javamapreduce.hack;
import java.io.IOException;
import java.lang.reflect.Constructor;
import java.util.Map;
import java.util.StringTokenizer;
import java.util.List;
import java.util.Set;
import java.util.HashMap;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.Collections;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.ReflectionUtils;
import eu.dnetlib.iis.common.javamapreduce.MultipleOutputs;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.RecordWriter;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.OutputFormat;
import org.apache.hadoop.mapreduce.TaskAttemptID;
import org.apache.hadoop.mapreduce.TaskInputOutputContext;
import org.apache.hadoop.io.NullWritable;
import org.apache.avro.Schema;
import org.apache.avro.mapreduce.AvroJob;
/**
* The AvroMultipleOutputs class simplifies writing Avro output data
* to multiple outputs.
*
* <p>
* Case one: writing to additional outputs other than the job default output.
*
* Each additional output, or named output, may be configured with its own
* <code>Schema</code> and <code>OutputFormat</code>.
* </p>
* <p>
* Case two: writing data to different files provided by the user.
* </p>
*
* <p>
* AvroMultipleOutputs supports counters, but by default they are disabled. The
* counters group is the {@link AvroMultipleOutputs} class name. The names of the
* counters are the same as the output names. These count the number of records
* written to each output name.
* </p>
*
* Usage pattern for job submission:
* <pre>
*
* Job job = new Job();
*
* FileInputFormat.setInputPath(job, inDir);
* FileOutputFormat.setOutputPath(job, outDir);
*
* job.setMapperClass(MyAvroMapper.class);
* job.setReducerClass(MyAvroReducer.class);
* ...
*
* Schema schema;
* ...
* // Defines additional single output 'avro1' for the job
* AvroMultipleOutputs.addNamedOutput(job, "avro1", AvroKeyValueOutputFormat.class,
* keySchema, valueSchema); // valueSchema can be set to null if only the key
* // is to be written to the file by the RecordWriter
*
* // Defines additional output 'avro2' with different schema for the job
* AvroMultipleOutputs.addNamedOutput(job, "avro2",
* AvroKeyOutputFormat.class,
* schema,null);
* ...
*
* job.waitForCompletion(true);
* ...
* </pre>
* <p>
* Usage in Reducer:
* <pre>
*
* public class MyAvroReducer extends
* Reducer<K, V, T, NullWritable> {
* private AvroMultipleOutputs amos;
*
*
* public void setup(Context context) {
* ...
* amos = new AvroMultipleOutputs(context);
* }
*
* public void reduce(K key, Iterable<V> values, Context context)
* throws IOException {
* ...
* amos.write("avro1",datum,NullWritable.get());
* amos.write("avro2",datum,NullWritable.get());
* amos.write("avro3",datum); // here the value is taken as NullWritable
* ...
* }
*
* public void cleanup(Context context) throws IOException {
* amos.close();
* ...
* }
*
* }
* </pre>
*/
public class AvroMultipleOutputs {
protected static final String MULTIPLE_OUTPUTS = "avro.mapreduce.multipleoutputs";
protected static final String MO_PREFIX = "avro.mapreduce.multipleoutputs.namedOutput.";
protected static final String FORMAT = ".format";
protected static final String COUNTERS_ENABLED = "avro.mapreduce.multipleoutputs.counters";
/**
* Counters group used by the counters of MultipleOutputs.
*/
protected static final String COUNTERS_GROUP = AvroMultipleOutputs.class.getName();
/**
* Cache for the taskContexts
*/
private Map<String, TaskAttemptContext> taskContexts = new HashMap<String, TaskAttemptContext>();
/**
* Checks if a named output name is valid token.
*
* @param namedOutput named output Name
* @throws IllegalArgumentException if the output name is not valid.
*/
private static void checkTokenName(String namedOutput) {
if (namedOutput == null || namedOutput.length() == 0) {
throw new IllegalArgumentException(
"Name cannot be NULL or empty");
}
for (char ch : namedOutput.toCharArray()) {
if ((ch >= 'A') && (ch <= 'Z')) {
continue;
}
if ((ch >= 'a') && (ch <= 'z')) {
continue;
}
if ((ch >= '0') && (ch <= '9')) {
continue;
}
throw new IllegalArgumentException(
"Name cannot have a '" + ch + "' char");
}
}
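// For example, "avro1" passes validation, while names like "avro_1" or
// "avro-1" throw IllegalArgumentException, since only ASCII letters and
// digits are allowed.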
/**
* Checks if an output name is valid.
*
* The name cannot be 'part', the name used for the default output.
* @param outputPath base output name
* @throws IllegalArgumentException if the output name is not valid.
*/
private static void checkBaseOutputPath(String outputPath) {
if (outputPath.equals("part")) {
throw new IllegalArgumentException("output name cannot be 'part'");
}
}
/**
* Checks if a named output name is valid.
*
* @param namedOutput named output Name
* @throws IllegalArgumentException if the output name is not valid.
*/
private static void checkNamedOutputName(JobContext job,
String namedOutput, boolean alreadyDefined) {
checkTokenName(namedOutput);
checkBaseOutputPath(namedOutput);
List<String> definedChannels = getNamedOutputsList(job);
if (alreadyDefined && definedChannels.contains(namedOutput)) {
throw new IllegalArgumentException("Named output '" + namedOutput +
"' already alreadyDefined");
} else if (!alreadyDefined && !definedChannels.contains(namedOutput)) {
throw new IllegalArgumentException("Named output '" + namedOutput +
"' not defined");
}
}
// Returns list of channel names.
private static List<String> getNamedOutputsList(JobContext job) {
List<String> names = new ArrayList<String>();
StringTokenizer st = new StringTokenizer(
job.getConfiguration().get(MULTIPLE_OUTPUTS, ""), " ");
while (st.hasMoreTokens()) {
names.add(st.nextToken());
}
return names;
}
// Returns the named output OutputFormat.
@SuppressWarnings("unchecked")
private static Class<? extends OutputFormat<?, ?>> getNamedOutputFormatClass(
JobContext job, String namedOutput) {
return (Class<? extends OutputFormat<?, ?>>)
job.getConfiguration().getClass(MO_PREFIX + namedOutput + FORMAT, null,
OutputFormat.class);
}
/**
* Adds a named output for the job.
* <p/>
*
* @param job job to add the named output
* @param namedOutput named output name, it has to be a word, letters
* and numbers only, cannot be the word 'part' as
* that is reserved for the default output.
* @param outputFormatClass OutputFormat class.
* @param keySchema Schema for the Key
*/
@SuppressWarnings("rawtypes")
public static void addNamedOutput(Job job, String namedOutput,
Class<? extends OutputFormat> outputFormatClass,
Schema keySchema) {
addNamedOutput(job,namedOutput,outputFormatClass,keySchema,null);
}
/**
* Adds a named output for the job.
* <p/>
*
* @param job job to add the named output
* @param namedOutput named output name, it has to be a word, letters
* and numbers only, cannot be the word 'part' as
* that is reserved for the default output.
* @param outputFormatClass OutputFormat class.
* @param keySchema Schema for the Key
* @param valueSchema Schema for the Value (used in case of AvroKeyValueOutputFormat or null)
*/
@SuppressWarnings("rawtypes")
public static void addNamedOutput(Job job, String namedOutput,
Class<? extends OutputFormat> outputFormatClass,
Schema keySchema, Schema valueSchema) {
checkNamedOutputName(job, namedOutput, true);
Configuration conf = job.getConfiguration();
conf.set(MULTIPLE_OUTPUTS,
conf.get(MULTIPLE_OUTPUTS, "") + " " + namedOutput);
conf.setClass(MO_PREFIX + namedOutput + FORMAT, outputFormatClass,
OutputFormat.class);
conf.set(MO_PREFIX+namedOutput+".keyschema", keySchema.toString());
if(valueSchema!=null){
conf.set(MO_PREFIX+namedOutput+".valueschema",valueSchema.toString());
}
}
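// For illustration, addNamedOutput(job, "avro1", AvroKeyOutputFormat.class,
// keySchema, null) leaves roughly these entries in the job configuration:
//   avro.mapreduce.multipleoutputs                              = " avro1"
//   avro.mapreduce.multipleoutputs.namedOutput.avro1.format     = AvroKeyOutputFormat
//   avro.mapreduce.multipleoutputs.namedOutput.avro1.keyschema  = <keySchema JSON>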
/**
* Enables or disables counters for the named outputs.
*
* The counters group is the {@link AvroMultipleOutputs} class name.
* The names of the counters are the same as the named outputs. These
* counters count the number of records written to each output name.
* By default these counters are disabled.
*
* @param job job to enable counters
* @param enabled indicates if the counters will be enabled or not.
*/
public static void setCountersEnabled(Job job, boolean enabled) {
job.getConfiguration().setBoolean(COUNTERS_ENABLED, enabled);
}
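// Usage sketch: AvroMultipleOutputs.setCountersEnabled(job, true) makes every
// write() increment a counter named after the output, grouped under
// "eu.dnetlib.iis.common.javamapreduce.hack.AvroMultipleOutputs"
// (the value of COUNTERS_GROUP).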
/**
* Returns whether the counters for the named outputs are enabled.
* By default these counters are disabled.
*
* @param job the job
* @return TRUE if the counters are enabled, FALSE if they are disabled.
*/
public static boolean getCountersEnabled(JobContext job) {
return job.getConfiguration().getBoolean(COUNTERS_ENABLED, false);
}
/**
* Wraps RecordWriter to increment counters.
*/
@SuppressWarnings({ "unchecked", "rawtypes" })
private static class RecordWriterWithCounter extends RecordWriter {
private RecordWriter writer;
private String counterName;
private TaskInputOutputContext context;
public RecordWriterWithCounter(RecordWriter writer, String counterName,
TaskInputOutputContext context) {
this.writer = writer;
this.counterName = counterName;
this.context = context;
}
public void write(Object key, Object value)
throws IOException, InterruptedException {
context.getCounter(COUNTERS_GROUP, counterName).increment(1);
writer.write(key, value);
}
public void close(TaskAttemptContext context)
throws IOException, InterruptedException {
writer.close(context);
}
}
// instance code, to be used from Mapper/Reducer code
private TaskInputOutputContext<?, ?, ?, ?> context;
private Set<String> namedOutputs;
private Map<String, RecordWriter<?, ?>> recordWriters;
private boolean countersEnabled;
/**
* Creates and initializes multiple outputs support,
* it should be instantiated in the Mapper/Reducer setup method.
*
* @param context the TaskInputOutputContext object
*/
public AvroMultipleOutputs(
TaskInputOutputContext<?, ?, ?, ?> context) {
this.context = context;
namedOutputs = Collections.unmodifiableSet(
new HashSet<String>(AvroMultipleOutputs.getNamedOutputsList(context)));
recordWriters = new HashMap<String, RecordWriter<?, ?>>();
countersEnabled = getCountersEnabled(context);
}
/** Hacked: added getter for namedOutputs */
public Set<String> getNamedOutputs(){
return Collections.unmodifiableSet(this.namedOutputs);
}
/**
* Writes the key to the namedOutput; the value is NullWritable.
*
* Output path is a unique file generated for the namedOutput.
* For example, {namedOutput}-(m|r)-{part-number}
*
* @param namedOutput the named output name
* @param key the key; the value written is NullWritable
*/
public void write(String namedOutput, Object key)
throws IOException, InterruptedException {
write(namedOutput, key, NullWritable.get(), namedOutput);
}
/**
* Write key and value to the namedOutput.
*
* Output path is a unique file generated for the namedOutput.
* For example, {namedOutput}-(m|r)-{part-number}
*
* @param namedOutput the named output name
* @param key the key
* @param value the value
*/
public void write(String namedOutput, Object key, Object value)
throws IOException, InterruptedException {
write(namedOutput, key, value, namedOutput);
}
/**
* Write key and value to baseOutputPath using the namedOutput.
*
* @param namedOutput the named output name
* @param key the key
* @param value the value
* @param baseOutputPath base-output path to write the record to.
* Note: Framework will generate unique filename for the baseOutputPath
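*
* For example, amos.write("avro1", datum, NullWritable.get(), "avro1")
* writes to files named like avro1-m-00000 (map task) or avro1-r-00000
* (reduce task); "avro1" here is a hypothetical named output.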
*/
@SuppressWarnings("unchecked")
public void write(String namedOutput, Object key, Object value,
String baseOutputPath) throws IOException, InterruptedException {
checkNamedOutputName(context, namedOutput, false);
checkBaseOutputPath(baseOutputPath);
if (!namedOutputs.contains(namedOutput)) {
throw new IllegalArgumentException("Undefined named output '" +
namedOutput + "'");
}
TaskAttemptContext taskContext = getContext(namedOutput);
getRecordWriter(taskContext, baseOutputPath).write(key, value);
}
/**
* Write key and value to an output file name.
*
* Gets the record writer from job's output format.
* Job's output format should be a FileOutputFormat.
*
* @param key the key
* @param value the value
* @param baseOutputPath base-output path to write the record to.
* Note: Framework will generate unique filename for the baseOutputPath
*/
public void write(Object key, Object value, String baseOutputPath)
throws IOException, InterruptedException {
write(key, value, null, null, baseOutputPath);
}
/**
* Write key and value to an output file name.
*
* Gets the record writer from job's output format. Job's output format should
* be a FileOutputFormat.
*
* @param key the key
* @param value the value
* @param keySchema keySchema to use
* @param valSchema ValueSchema to use
* @param baseOutputPath base-output path to write the record to. Note: Framework will
* generate unique filename for the baseOutputPath
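*
* A usage sketch (datum and adHocSchema are hypothetical variables):
* <pre>
* amos.write(datum, NullWritable.get(), adHocSchema, null, "extra/part");
* </pre>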
*/
@SuppressWarnings({ "unchecked", "deprecation" })
public void write(Object key, Object value, Schema keySchema,
Schema valSchema, String baseOutputPath) throws IOException,
InterruptedException {
checkBaseOutputPath(baseOutputPath);
Job job = new Job(context.getConfiguration());
setSchema(job, keySchema, valSchema);
TaskAttemptContext taskContext = createTaskAttemptContext(job.getConfiguration(), context.getTaskAttemptID());
getRecordWriter(taskContext, baseOutputPath).write(key, value);
}
/** Hacked: made public (was private). */
// By being synchronized, this method allows AvroMultipleOutputs to be used
// with a MultithreadedMapper.
@SuppressWarnings("rawtypes")
public synchronized RecordWriter getRecordWriter(
TaskAttemptContext taskContext, String baseFileName)
throws IOException, InterruptedException {
// look for record-writer in the cache
RecordWriter writer = recordWriters.get(baseFileName);
// If not in cache, create a new one
if (writer == null) {
// get the record writer from context output format
//FileOutputFormat.setOutputName(taskContext, baseFileName);
taskContext.getConfiguration().set("avro.mo.config.namedOutput",baseFileName);
try {
writer = ((OutputFormat) ReflectionUtils.newInstance(
taskContext.getOutputFormatClass(), taskContext.getConfiguration()))
.getRecordWriter(taskContext);
} catch (ClassNotFoundException e) {
throw new IOException(e);
}
// if counters are enabled, initialize starting counter value and
// wrap the writer with context to increment counters
if (countersEnabled) {
context.getCounter(COUNTERS_GROUP, baseFileName).setValue(0);
writer = new RecordWriterWithCounter(writer, baseFileName, context);
}
// add the record-writer to the cache
recordWriters.put(baseFileName, writer);
}
return writer;
}
/** Hacked method; see the "First set of changes" in the header comment. */
private void setSchema(Job job, Schema keySchema, Schema valSchema) {
boolean isMaponly = job.getNumReduceTasks() == 0;
if (keySchema != null) {
if (isMaponly){
AvroJob.setMapOutputKeySchema(job, keySchema);
}
AvroJob.setOutputKeySchema(job, keySchema);
}
if (valSchema != null) {
if (isMaponly){
AvroJob.setMapOutputValueSchema(job, valSchema);
}
AvroJob.setOutputValueSchema(job, valSchema);
}
}
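// Per the header comment, the original Avro 1.7.4 code set only the map-side
// schema for map-only jobs, roughly:
//   if (isMaponly)
//     AvroJob.setMapOutputKeySchema(job, keySchema);
//   else
//     AvroJob.setOutputKeySchema(job, keySchema);
// The version above additionally sets the final output schema in the map-only
// case, so AvroKeyOutputFormat finds it even when numReduceTasks == 0.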
/** Hacked: made public (was private). */
// Create a taskAttemptContext for the named output with
// output format and output key/value types put in the context
@SuppressWarnings("deprecation")
public TaskAttemptContext getContext(String nameOutput) throws IOException {
TaskAttemptContext taskContext = taskContexts.get(nameOutput);
if (taskContext != null) {
return taskContext;
}
// The following trick leverages the instantiation of a record writer via
// the job, thus supporting arbitrary output formats.
Job job = new Job(context.getConfiguration());
job.setOutputFormatClass(getNamedOutputFormatClass(context, nameOutput));
Schema keySchema=null,valSchema=null;
if (job.getConfiguration().get(MO_PREFIX + nameOutput + ".keyschema",null) != null)
keySchema = Schema.parse(job.getConfiguration().get(
MO_PREFIX + nameOutput + ".keyschema"));
if (job.getConfiguration().get(MO_PREFIX + nameOutput + ".valueschema",
null) != null)
valSchema = Schema.parse(job.getConfiguration().get(
MO_PREFIX + nameOutput + ".valueschema"));
setSchema(job, keySchema, valSchema);
taskContext = createTaskAttemptContext(
job.getConfiguration(), context.getTaskAttemptID());
taskContexts.put(nameOutput, taskContext);
return taskContext;
}
private TaskAttemptContext createTaskAttemptContext(Configuration conf,
TaskAttemptID taskId) {
// Use reflection since the context types changed incompatibly between 1.0
// and 2.0.
try {
Class<?> c = getTaskAttemptContextClass();
Constructor<?> cons = c.getConstructor(Configuration.class,
TaskAttemptID.class);
return (TaskAttemptContext) cons.newInstance(conf, taskId);
} catch (Exception e) {
throw new IllegalStateException(e);
}
}
private Class<?> getTaskAttemptContextClass() {
try {
return Class.forName(
"org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl");
} catch (Exception e) {
try {
return Class.forName(
"org.apache.hadoop.mapreduce.TaskAttemptContext");
} catch (Exception ex) {
throw new IllegalStateException(ex);
}
}
}
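// Note (assumption based on the Hadoop APIs): on Hadoop 2.x the first lookup
// resolves, since TaskAttemptContext became an interface implemented by
// TaskAttemptContextImpl; on Hadoop 1.x TaskAttemptContext is itself a
// concrete class, so the fallback Class.forName succeeds there.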
/**
* Closes all the opened outputs.
*
* This should be called from the cleanup method of a map/reduce task.
* If overridden, subclasses must invoke <code>super.close()</code> at the
* end of their <code>close()</code>.
*
*/
public void close() throws IOException, InterruptedException {
for (@SuppressWarnings("rawtypes") RecordWriter writer : recordWriters.values()) {
writer.close(context);
}
}
}