/**
* Copyright 2010 The Apache Software Foundation
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.asp.tranlog;
import java.io.IOException;
import java.util.ArrayList;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.HFileOutputFormat;
import org.apache.hadoop.hbase.mapreduce.PutSortReducer;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.hbase.util.Base64;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.GenericOptionsParser;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.base.Preconditions;
import com.google.common.base.Splitter;
import com.google.common.collect.Lists;
import com.hadoop.mapreduce.LzoTextInputFormat;
/**
* Tool to import data from a TSV file.
*
* This tool is rather simplistic - it doesn't do any quoting or escaping, but
* is useful for many data loads.
*
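* A hypothetical invocation (table name, column specs and paths are
* illustrative only; see the usage text for all options):
*
* <pre>
* importtsv -Dimporttsv.columns=cf:c1,cf:c2:int \
*     -Dimporttsv.key.columns=0:8,1 mytable /input/tsv
* </pre>
*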
* @see ImportTsv#usage(String)
*/
public class ImportTsv {
final static Logger LOG = LoggerFactory.getLogger(ImportTsv.class);
final static String PRE = ">>>>>>>>>>>>>>>";
final static String NAME = "importtsv";
final static String MAPPER_CONF_KEY = "importtsv.mapper.class";
final static String SKIP_LINES_CONF_KEY = "importtsv.skip.bad.lines";
final static String BULK_OUTPUT_CONF_KEY = "importtsv.bulk.output";
final static String COLUMNS_CONF_KEY = "importtsv.columns";
final static String KEYCOLUMNS_CONF_KEY = "importtsv.key.columns";
final static String SEPARATOR_CONF_KEY = "importtsv.separator";
final static String TIMESTAMP_CONF_KEY = "importtsv.timestamp";
final static String INPUT_LZO_KEY = "importtsv.input.codec";
final static String DEFAULT_SEPARATOR = "\t";
final static String SEPARATOR_CONF_ROWKEY = "importtsv.separator.rowkey";
final static Class<?> DEFAULT_MAPPER = TsvImporterMapper.class;
final static String CHARSET_CONF_KEY = "importtsv.charset.rowkey";
public final static byte COL_TYPE_STRING = 0;
public final static byte COL_TYPE_INT = 1;
public final static byte COL_TYPE_LONG = 2;
public final static byte COL_TYPE_DOUBLE = 3;
public final static byte COL_TYPE_DATETIME = 4; // datetime source values are parsed into a long
static class TsvParser {
/**
* Column families and qualifiers mapped to the TSV columns
*/
private final byte[][] families;
private final byte[][] qualifiers;
private final byte[] colType; // 0 = string, 1 = int, 2 = long, 3 = double, 4 = datetime
private int[] keyColIndex = null; // indexes of the columns that make up the row key
private int[] keyColLen = null; // length of every column that will compose the row key
private final byte separatorByte;
public static String ROWKEY_COLUMN_SPEC = "HBASE_ROW_KEY";
/**
* @param columnsSpecification
* the list of columns to parse out, comma separated. Each entry is a
* family, a family:qualifier pair, or a family:qualifier:type triple;
* the row key is composed from the columns named in keyColumns.
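* @param keyColumns
* definitions of the columns that compose the row key, each of the form
* "index" or "index:length" (an omitted length is recorded as 0,
* meaning no length was specified). As an illustrative example,
* {"0:8", "2"} builds the key from column 0 (declared length 8)
* followed by column 2.
* @param separatorStr
* the single-byte field separator, e.g. "\t"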
*/
public TsvParser(String columnsSpecification, String[] keyColumns,
String separatorStr) {
// Configure separator
byte[] separator = Bytes.toBytes(separatorStr);
Preconditions.checkArgument(separator.length == 1,
"TsvParser only supports single-byte separators");
separatorByte = separator[0];
// Configure columns
ArrayList<String> columnStrings = Lists.newArrayList(Splitter
.on(',').trimResults().split(columnsSpecification));
families = new byte[columnStrings.size()][];
qualifiers = new byte[columnStrings.size()][];
colType = new byte[columnStrings.size()];
for (int i = 0; i < columnStrings.size(); i++) {
String str = columnStrings.get(i);
String[] parts = str.split(":", 3);
if (parts.length == 1) {
families[i] = str.getBytes();
qualifiers[i] = HConstants.EMPTY_BYTE_ARRAY;
colType[i] = COL_TYPE_STRING;
} else {
families[i] = parts[0].getBytes();
qualifiers[i] = parts[1].getBytes();
if (parts.length > 2) {
colType[i] = parseColType(parts[2]);
} else {
colType[i] = COL_TYPE_STRING;
}
}
}
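// Key column definitions arrive as "index" or "index:length" strings;
// each index refers to a position in the parsed column list above.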
if (keyColumns != null) {
keyColIndex = new int[keyColumns.length];
keyColLen = new int[keyColumns.length];
for (int i = 0; i < keyColumns.length; i++) {
String[] strKdef = keyColumns[i].split(":", 2);
keyColIndex[i] = Integer.parseInt(strKdef[0]);
if (keyColIndex[i] >= qualifiers.length) {
// Out-of-range key column indexes fall back to column 0.
keyColIndex[i] = 0;
}
if (strKdef.length > 1) {
keyColLen[i] = Integer.parseInt(strKdef[1]);
} else {
keyColLen[i] = 0; // 0 means the length is not specified
}
}
}
}
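/**
* Maps a type name ("string", "int", "long", "double" or "datetime",
* case-insensitive) to its COL_TYPE_* constant; unknown names fall
* back to COL_TYPE_STRING.
*/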
public byte parseColType(String strCT) {
if (strCT.equalsIgnoreCase("int"))
return COL_TYPE_INT;
else if (strCT.equalsIgnoreCase("string"))
return COL_TYPE_STRING;
else if (strCT.equalsIgnoreCase("long"))
return COL_TYPE_LONG;
else if (strCT.equalsIgnoreCase("datetime"))
return COL_TYPE_DATETIME;
else if (strCT.equalsIgnoreCase("double"))
return COL_TYPE_DOUBLE;
else
return COL_TYPE_STRING;
}
public int[] getRowKeyColumnIndex() {
return keyColIndex;
}
public int[] getRowKeyColumnLen() {
return keyColLen;
}
public byte[] getFamily(int idx) {
return families[idx];
}
public byte[] getQualifier(int idx) {
return qualifiers[idx];
}
public byte[] getColType() {
return colType;
}
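/**
* Splits a line on the configured separator byte. Throws
* BadTsvLineException if no separator is present or the line contains
* more columns than were declared in the column specification.
*/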
public ParsedLine parse(byte[] lineBytes, int length)
throws BadTsvLineException {
// Enumerate separator offsets
ArrayList<Integer> tabOffsets = new ArrayList<Integer>(
families.length);
for (int i = 0; i < length; i++) {
if (lineBytes[i] == separatorByte) {
tabOffsets.add(i);
}
}
if (tabOffsets.isEmpty()) {
throw new BadTsvLineException("No delimiter");
}
tabOffsets.add(length);
if (tabOffsets.size() > families.length) {
throw new BadTsvLineException("Excessive columns");
}
return new ParsedLine(tabOffsets, lineBytes);
}
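/**
* A parsed line: the raw line bytes plus the offsets of the
* separators, so column idx spans from just past the (idx - 1)-th
* separator up to the idx-th separator.
*/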
class ParsedLine {
private final ArrayList<Integer> tabOffsets;
private final byte[] lineBytes;
ParsedLine(ArrayList<Integer> tabOffsets, byte[] lineBytes) {
this.tabOffsets = tabOffsets;
this.lineBytes = lineBytes;
}
public int getColumnOffset(int idx) {
if (idx > 0)
return tabOffsets.get(idx - 1) + 1;
else
return 0;
}
public int getColumnLength(int idx) {
return tabOffsets.get(idx) - getColumnOffset(idx);
}
public int getColumnCount() {
return tabOffsets.size();
}
public byte[] getLineBytes() {
return lineBytes;
}
}
public static class BadTsvLineException extends Exception {
public BadTsvLineException(String err) {
super(err);
}
private static final long serialVersionUID = 1L;
}
}
/**
* Sets up the actual job.
*
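* Expects args[0] to be the table name and args[1] the input directory.
* When importtsv.bulk.output is set, the job writes HFiles for bulk
* loading via HFileOutputFormat; otherwise it writes Puts directly to
* the table through TableOutputFormat.
*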
* @param conf
* The current configuration.
* @param args
* The command line parameters.
* @return The newly created job.
* @throws IOException
* When setting up the job fails.
*/
public static Job createSubmittableJob(Configuration conf, String[] args)
throws IOException, ClassNotFoundException {
// Support non-XML supported characters
// by re-encoding the passed separator as a Base64 string.
String actualSeparator = conf.get(SEPARATOR_CONF_KEY);
if (actualSeparator != null) {
conf.set(SEPARATOR_CONF_KEY,
Base64.encodeBytes(actualSeparator.getBytes()));
}
// See if a non-default Mapper was set
String mapperClassName = conf.get(MAPPER_CONF_KEY);
Class<?> mapperClass = mapperClassName != null ? Class
.forName(mapperClassName) : DEFAULT_MAPPER;
String tableName = args[0];
Path inputDir = new Path(args[1]);
Job job = new Job(conf, NAME + "_" + tableName);
job.setJarByClass(mapperClass);
FileInputFormat.setInputPaths(job, inputDir);
String inputCodec = conf.get(INPUT_LZO_KEY);
if (inputCodec == null) {
FileInputFormat.setMaxInputSplitSize(job, 67108864L); // max split size = 64 MB
job.setInputFormatClass(TextInputFormat.class);
} else {
if (inputCodec.equalsIgnoreCase("lzo")) {
job.setInputFormatClass(LzoTextInputFormat.class);
} else {
usage("Unsupported compression codec: " + inputCodec);
System.exit(-1);
}
}
job.setMapperClass(mapperClass);
String hfileOutPath = conf.get(BULK_OUTPUT_CONF_KEY);
if (hfileOutPath != null) {
HTable table = new HTable(conf, tableName);
job.setReducerClass(PutSortReducer.class);
Path outputDir = new Path(hfileOutPath);
FileOutputFormat.setOutputPath(job, outputDir);
job.setMapOutputKeyClass(ImmutableBytesWritable.class);
job.setMapOutputValueClass(Put.class);
HFileOutputFormat.configureIncrementalLoad(job, table);
} else {
// No reducers. Just write straight to the table. Call
// initTableReducerJob to set up the TableOutputFormat.
TableMapReduceUtil.initTableReducerJob(tableName, null, job);
job.setNumReduceTasks(0);
}
TableMapReduceUtil.addDependencyJars(job);
TableMapReduceUtil.addDependencyJars(job.getConfiguration(),
com.google.common.base.Function.class); // Guava, used by TsvParser
return job;
}
/**
* @param errorMsg Error message. Can be null.
*/
private static void usage(final String errorMsg) {
if (errorMsg != null && errorMsg.length() > 0) {
System.err.println("ERROR: " + errorMsg);
}
String usage = "Usage: "
+ NAME
+ " -Dimporttsv.columns=a,b,c -Dimporttsv.key.columns=0,3 <tablename> <inputdir>\n"
+ "\n"
+ "Imports the given input directory of TSV data into the specified table.\n"
+ "\n"
+ "The column names of the TSV data must be specified using the -Dimporttsv.columns\n"
+ "option. This option takes the form of comma-separated column names, where each\n"
+ "column name is either a simple column family, or a columnfamily:qualifier. The special\n"
+ "column name HBASE_ROW_KEY is used to designate that this column should be used\n"
+ "as the row key for each imported record. You must specify exactly one column\n"
+ "to be the row key, and you must specify a column name for every column that exists in the\n"
+ "input data.\n"
+ "\n"
+ "By default importtsv will load data directly into HBase. To instead generate\n"
+ "HFiles of data to prepare for a bulk data load, pass the option:\n"
+ " -D"
+ BULK_OUTPUT_CONF_KEY
+ "=/path/for/output\n"
+ " Note: if you do not use this option, then the target table must already exist in HBase\n"
+ "\n"
+ "Other options that may be specified with -D include:\n"
+ " -D"
+ SEPARATOR_CONF_ROWKEY
+ "= - default:,hbase rowkey separator"
+ " -D"
+ SKIP_LINES_CONF_KEY
+ "=false - fail if encountering an invalid line\n"
+ " '-D"
+ SEPARATOR_CONF_KEY
+ "=|' - eg separate on pipes instead of tabs\n"
+ " -D"
+ TIMESTAMP_CONF_KEY
+ "=currentTimeAsLong - use the specified timestamp for the import\n"
+ " -D" + MAPPER_CONF_KEY
+ "=my.Mapper - A user-defined Mapper to use instead of "
+ DEFAULT_MAPPER.getName() + "\n" + " -D" + CHARSET_CONF_KEY
+ "=charset of rowkey, gb18030 " + "\n";
System.err.println(usage);
}
/**
* Main entry point.
*
* @param args
* The command line parameters.
* @throws Exception
* When running the job fails.
*/
public static void main(String[] args) throws Exception {
System.out.println("==============================================");
Configuration conf = HBaseConfiguration.create();
LOG.error(PRE + "conf.toString() == " + conf.toString());
String[] otherArgs = new GenericOptionsParser(conf, args)
.getRemainingArgs();
if (otherArgs.length < 2) {
usage("Wrong number of arguments: " + otherArgs.length);
System.exit(-1);
}
String columns[] = conf.getStrings(COLUMNS_CONF_KEY);
if (columns == null) {
usage("No columns specified. Please specify with -D"
+ COLUMNS_CONF_KEY + "=...");
System.exit(-1);
}
// Make sure one or more columns in addition to the row key are specified
if (columns.length < 2) {
usage("One or more columns in addition to the row key are required");
System.exit(-1);
}
String[] keyColumns = conf.getStrings(KEYCOLUMNS_CONF_KEY);
if (keyColumns == null) {
usage("One or more key columns are required. Please specify with -D"
+ KEYCOLUMNS_CONF_KEY + "=...");
System.exit(-1);
}
Job job = createSubmittableJob(conf, otherArgs);
System.exit(job.waitForCompletion(true) ? 0 : 1);
}
}