一、 Official HBase MapReduce jobs
View the dependencies required to run HBase MapReduce jobs:
bin/hbase mapredcp
Export the required environment variables:
export HBASE_HOME=/opt/module/hbase-1.3.1
export HADOOP_CLASSPATH=$(${HBASE_HOME}/bin/hbase mapredcp)
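These two exports only last for the current shell session. If HBase jobs are launched regularly, a common alternative is to make the classpath addition permanent in hadoop-env.sh on the node that submits jobs; the Hadoop path below is an assumption, not taken from this setup:
# $HADOOP_HOME/etc/hadoop/hadoop-env.sh  -- exact Hadoop install path is an assumption
export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:/opt/module/hbase-1.3.1/lib/*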
-
Count how many rows a table contains (the table here is zhangsan):
yarn jar ../hbase-1.3.1/lib/hbase-server-1.3.1.jar rowcounter zhangsan
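For a quick sanity check on small tables, the HBase shell's built-in count command reports the same row count without launching a MapReduce job:
# HBase shell
count 'zhangsan'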
-
Use MapReduce to import local data into HBase
Upload the abc.tsv file to HDFS:
[root@bigdata111 input]# hdfs dfs -mkdir /hbase_input
[root@bigdata111 input]# hdfs dfs -put abc.tsv /hbase_input/
yarn jar ../hbase-1.3.1/lib/hbase-server-1.3.1.jar importtsv -Dimporttsv.columns=HBASE_ROW_KEY,cf:name,cf:age test hdfs://bigdata111:9000/hbase_input
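The import writes into column family cf of table test, so creating the table up front in the HBase shell avoids relying on importtsv's table-creation behaviour. The contents of abc.tsv are not shown above; the rows below are made-up examples that simply match the column mapping (row key, then cf:name and cf:age, separated by a tab character):
# HBase shell (run before the import)
create 'test', 'cf'

# abc.tsv -- example rows only, tab-separated
1001    zhangsan    18
1002    lisi        20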
二、 Writing your own HBase-to-HBase job
-
Mapper
package top.gujm.hbase_h2h;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.TableMapper;
import org.apache.hadoop.hbase.util.Bytes;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.util.List;
/**
 * Only the cf:age column is needed.
 */
public class Mapper extends TableMapper<ImmutableBytesWritable, Put> {
Logger logger = LoggerFactory.getLogger(Mapper.class);
@Override
protected void map(ImmutableBytesWritable key, Result value, Context context) throws IOException, InterruptedException {
List<Cell> columnCells = value.getColumnCells(Bytes.toBytes("cf"), Bytes.toBytes("age"));
logger.info(Bytes.toString(key.get()) + " cf:age cell count: " + columnCells.size());
if(columnCells.size() > 0){
Put put = new Put(key.get());
put.add(columnCells.get(0));
context.write(key, put);
}
}
}
-
Reducer
package top.gujm.hbase_h2h;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.TableReducer;
import org.apache.hadoop.io.NullWritable;
import java.io.IOException;
public class Reducer extends TableReducer<ImmutableBytesWritable, Put, NullWritable> {
@Override
protected void reduce(ImmutableBytesWritable key, Iterable<Put> values, Context context) throws IOException, InterruptedException {
for (Put put : values){
context.write(NullWritable.get(), put);
}
}
}
-
Driver
package top.gujm.hbase_h2h;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
public class Driver extends Configured implements Tool {
public int run(String[] strings) throws Exception {
Job job = Job.getInstance(getConf(), Driver.class.getSimpleName());
job.setJarByClass(Driver.class);
TableMapReduceUtil.initTableMapperJob(
"test",
new Scan(),
Mapper.class,
ImmutableBytesWritable.class,
Put.class,
job
);
TableMapReduceUtil.initTableReducerJob("test2", Reducer.class, job);
job.setNumReduceTasks(1);
boolean flag = job.waitForCompletion(true);
return flag ? 0 : 1;
}
public static void main(String[] args) throws Exception {
System.exit(ToolRunner.run(HBaseConfiguration.create(), new Driver(), args));
}
}
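Since the mapper only keeps cf:age, the Scan handed to initTableMapperJob can be narrowed so the region servers ship less data. A minimal sketch of what could replace the plain new Scan() above (the caching value is an arbitrary assumption, and it also needs an import of org.apache.hadoop.hbase.util.Bytes):
Scan scan = new Scan();
scan.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("age")); // only fetch the cf:age column
scan.setCaching(500);        // batch more rows per RPC for a scan-heavy MR job
scan.setCacheBlocks(false);  // a full scan should not churn the block cache
TableMapReduceUtil.initTableMapperJob("test", scan, Mapper.class,
        ImmutableBytesWritable.class, Put.class, job);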
-
Package the project, upload the jar to Linux, then run it with the command below:
yarn jar FlumeCustom-1.0-SNAPSHOT.jar top.gujm.hbase_h2h.Driver
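The reducer writes into test2, so that table is assumed to exist before the job runs, with a cf family to hold the copied cells; if it does not, it can be created in the HBase shell:
# HBase shell -- family name matches the cells being copied
create 'test2', 'cf'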
-
Result
三、 Writing your own HDFS-to-HBase job
-
Mapper
package top.gujm.hbase_hdfs2hbase;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import java.io.IOException;
public class Mapper extends org.apache.hadoop.mapreduce.Mapper<LongWritable, Text, ImmutableBytesWritable, Put> {
@Override
protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
//column 1: row key, column 2: name, column 3: age
String[] fields = value.toString().split("\t");
byte[] rowkey = Bytes.toBytes(fields[0]);
Put put = new Put(rowkey);
byte[] family = Bytes.toBytes("cf");
byte[] columnName = Bytes.toBytes("name");
byte[] columnAge = Bytes.toBytes("age");
put.addColumn(family, columnName, Bytes.toBytes(fields[1]));
put.addColumn(family, columnAge, Bytes.toBytes(fields[2]));
context.write(new ImmutableBytesWritable(rowkey), put);
}
}
-
Reducer
package top.gujm.hbase_hdfs2hbase;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.TableReducer;
import org.apache.hadoop.io.NullWritable;
import java.io.IOException;
public class Reducer extends TableReducer<ImmutableBytesWritable, Put, NullWritable> {
@Override
protected void reduce(ImmutableBytesWritable key, Iterable<Put> values, Context context) throws IOException, InterruptedException {
for (Put put : values){
context.write(NullWritable.get(), put);
}
}
}
-
Driver
package top.gujm.hbase_hdfs2hbase;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
public class Driver extends Configured implements Tool {
public int run(String[] strings) throws Exception {
Job job = Job.getInstance(getConf());
job.setJarByClass(Driver.class);
job.setMapperClass(Mapper.class);
job.setMapOutputKeyClass(ImmutableBytesWritable.class);
job.setMapOutputValueClass(Put.class);
TableMapReduceUtil.initTableReducerJob("test", Reducer.class, job);
FileInputFormat.setInputPaths(job, "/hbase_input/abc.tsv");
job.setNumReduceTasks(1);
boolean f = job.waitForCompletion(true);
return f ? 0 : 1;
}
public static void main(String[] args) throws Exception {
System.exit(ToolRunner.run(new Configuration(), new Driver(), args));
}
}
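Because the reducer here is a pure pass-through, the same load could run as a map-only job: initTableReducerJob still wires up TableOutputFormat when given a null reducer, and zero reduce tasks lets the mapper's Puts go straight to the table. A minimal sketch of the two lines that would change in run():
TableMapReduceUtil.initTableReducerJob("test", null, job); // null reducer: only sets up TableOutputFormat
job.setNumReduceTasks(0);                                  // map-only: Puts are written directly by the map tasks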
-
Package, upload, and run:
yarn jar FlumeCustom-1.0-SNAPSHOT.jar top.gujm.hbase_hdfs2hbase.Driver
-
Result
四、 Writing your own HBase-to-HDFS job
-
Mapper
package top.gujm.hbase_hbase2hdfs;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.TableMapper;
import java.io.IOException;
public class Mapper extends TableMapper<ImmutableBytesWritable, Result> {
@Override
protected void map(ImmutableBytesWritable key, Result value, Context context) throws IOException, InterruptedException {
context.write(key, value);
}
}
-
Reducer
package top.gujm.hbase_hbase2hdfs;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.Text;
import java.io.IOException;
public class Reducer extends org.apache.hadoop.mapreduce.Reducer<ImmutableBytesWritable, Result, Text, Text> {
@Override
protected void reduce(ImmutableBytesWritable key, Iterable<Result> values, Context context) throws IOException, InterruptedException {
for (Result result : values){
StringBuilder sb = new StringBuilder();
int i = 0;
Cell[] cells = result.rawCells();
for (Cell cell : cells) {
sb.append(i++ == 0 ? "" : ",");
sb.append(
Bytes.toString(CellUtil.cloneFamily(cell)) + ":" +
Bytes.toString(CellUtil.cloneQualifier(cell)) + "=" +
Bytes.toString(CellUtil.cloneValue(cell))
);
}
context.write(new Text(Bytes.toString(key.get())), new Text(sb.toString()));
}
}
}
-
Driver
package top.gujm.hbase_hbase2hdfs;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
public class Driver extends Configured implements Tool {
public int run(String[] strings) throws Exception {
Job job = Job.getInstance(getConf());
job.setJarByClass(Driver.class);
TableMapReduceUtil.initTableMapperJob(
"test",
new Scan(),
Mapper.class,
ImmutableBytesWritable.class,
Result.class,
job
);
job.setReducerClass(Reducer.class);
job.setOutputKeyClass(Text.class);
job.setOutputValueClass(Text.class);
job.setNumReduceTasks(1);
FileOutputFormat.setOutputPath(job, new Path("/out"));
return job.waitForCompletion(true) ? 0 : 1;
}
public static void main(String[] args) throws Exception {
System.exit(ToolRunner.run(HBaseConfiguration.create(), new Driver(), args));
}
}
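With a single reducer and the default TextOutputFormat, the job writes one part file under /out (which must not already exist before the run); the exported rows can be checked directly from HDFS:
hdfs dfs -ls /out
hdfs dfs -cat /out/part-r-00000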
-
Test data
-
Result