3. 使用MRUnit进行MapReduce的单元测试

1. pom.xml增加依赖

<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
  xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
  <modelVersion>4.0.0</modelVersion>

  <groupId>com.yinbodotcc</groupId>
  <artifactId>countwords</artifactId>
  <version>0.0.1-SNAPSHOT</version>
  <packaging>jar</packaging>

  <name>countwords</name>
  <url>http://maven.apache.org</url>

  <properties>
    <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
    <!-- Single source of truth for the Hadoop version: every Hadoop artifact
         below references this property so the client jars can never drift
         apart (mixing 2.x and 3.x Hadoop jars causes NoSuchMethodError and
         classloading failures at runtime). -->
    <hadoop.version>3.0.3</hadoop.version>
    <hive.version>0.13.1</hive.version>
    <hbase.version>0.98.6-hadoop2</hbase.version>
  </properties>

  <dependencies>
    <!--dependency>
      <groupId>junit</groupId>
      <artifactId>junit</artifactId>
      <version>3.8.1</version>
      <scope>test</scope>
    </dependency-->

    <!-- MRUnit for MapReduce unit testing; the hadoop2 classifier selects the
         build compatible with the new (mapreduce) API used by the tests. -->
    <dependency>
        <groupId>org.apache.mrunit</groupId>
        <artifactId>mrunit</artifactId>
        <version>1.1.0</version>
        <classifier>hadoop2</classifier>
        <scope>test</scope>
    </dependency>

    <!-- https://mvnrepository.com/artifact/org.apache.hadoop/hadoop-hdfs -->
    <dependency>
        <groupId>org.apache.hadoop</groupId>
        <artifactId>hadoop-hdfs</artifactId>
        <version>${hadoop.version}</version>
    </dependency>

    <dependency>
        <groupId>org.apache.hadoop</groupId>
        <artifactId>hadoop-client</artifactId>
        <version>${hadoop.version}</version>
    </dependency>

    <dependency>
        <groupId>org.apache.hadoop</groupId>
        <artifactId>hadoop-common</artifactId>
        <version>${hadoop.version}</version>
    </dependency>

    <!-- hive client -->
    <!--dependency>
      <groupId>org.apache.hive</groupId>
      <artifactId>hive-jdbc</artifactId>
      <version>${hive.version}</version>
    </dependency>
    <dependency>
      <groupId>org.apache.hive</groupId>
      <artifactId>hive-exec</artifactId>
      <version>${hive.version}</version>
    </dependency-->

    <!-- hbase client -->
    <!--dependency>
      <groupId>org.apache.hbase</groupId>
      <artifactId>hbase-server</artifactId>
      <version>${hbase.version}</version>
    </dependency>
    <dependency>
      <groupId>org.apache.hbase</groupId>
      <artifactId>hbase-client</artifactId>
      <version>${hbase.version}</version>
    </dependency-->

  </dependencies>
</project>
图片.png

2. Java文件

2.1 测试用例

图片.png
图片.png
package chapter3;

import java.io.IOException;
import java.util.ArrayList;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mrunit.mapreduce.MapDriver;
import org.apache.hadoop.mrunit.mapreduce.MapReduceDriver;
import org.apache.hadoop.mrunit.mapreduce.ReduceDriver;
import org.apache.hadoop.mrunit.types.Pair;
import org.junit.Before;
import org.junit.Test;

/**
 * MRUnit tests for {@link WordCountWithTools}: the mapper, the reducer, and
 * the combined map-reduce pipeline are each exercised in isolation, without a
 * running Hadoop cluster.
 */
public class WordCountWithToolsTest {

    MapDriver<Object, Text, Text, IntWritable> mapDriver;
    ReduceDriver<Text, IntWritable, Text, IntWritable> reduceDriver;
    MapReduceDriver<Object, Text, Text, IntWritable, Text, IntWritable> mapReduceDriver;

    /** Builds one driver per test around fresh mapper/reducer instances. */
    @Before
    public void setUp() {
        WordCountWithTools.TokenizerMapper tokenizer = new WordCountWithTools.TokenizerMapper();
        WordCountWithTools.IntSumReducer summer = new WordCountWithTools.IntSumReducer();
        mapDriver = MapDriver.newMapDriver(tokenizer);
        reduceDriver = ReduceDriver.newReduceDriver(summer);
        mapReduceDriver = MapReduceDriver.newMapReduceDriver(tokenizer, summer);
    }

    /** Two identical lines: each word must be emitted once per occurrence. */
    @Test
    public void testWordCountMapper() throws IOException {
        IntWritable lineKey = new IntWritable(0);
        mapDriver
                .withInput(lineKey, new Text("Test Quick"))
                .withInput(lineKey, new Text("Test Quick"))
                .withOutput(new Text("Test"), new IntWritable(1))
                .withOutput(new Text("Quick"), new IntWritable(1))
                .withOutput(new Text("Test"), new IntWritable(1))
                .withOutput(new Text("Quick"), new IntWritable(1))
                .runTest();
    }

    /** Each key receives counts [1, 2]; the reducer must emit their sum, 3. */
    @Test
    public void testWordCountReduce() throws IOException {
        ArrayList<IntWritable> counts = new ArrayList<IntWritable>();
        counts.add(new IntWritable(1));
        counts.add(new IntWritable(2));

        ArrayList<Pair<Text, IntWritable>> expected =
                new ArrayList<Pair<Text, IntWritable>>();
        expected.add(new Pair<Text, IntWritable>(new Text("Quick"), new IntWritable(3)));
        expected.add(new Pair<Text, IntWritable>(new Text("Test"), new IntWritable(3)));

        reduceDriver
                .withInput(new Text("Quick"), counts)
                .withInput(new Text("Test"), counts)
                .withAllOutput(expected)
                .runTest();
    }

    /** End-to-end: two identical lines yield a count of 2 per distinct word. */
    @Test
    public void testWordCountMapReduce() throws IOException {
        IntWritable lineKey = new IntWritable(0);

        ArrayList<Pair<Text, IntWritable>> expected =
                new ArrayList<Pair<Text, IntWritable>>();
        expected.add(new Pair<Text, IntWritable>(new Text("Quick"), new IntWritable(2)));
        expected.add(new Pair<Text, IntWritable>(new Text("Test"), new IntWritable(2)));

        mapReduceDriver
                .withInput(lineKey, new Text("Test Quick"))
                .withInput(lineKey, new Text("Test Quick"))
                .withAllOutput(expected)
                .runTest();
    }
}

2.2 要测试的MapReduce类

package chapter3;

import java.io.IOException;
import java.util.StringTokenizer;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

public class WordCountWithTools extends Configured implements Tool {
      /**
       * <p>
       * The mapper extends from the org.apache.hadoop.mapreduce.Mapper interface. When Hadoop runs, 
       * it receives each new line in the input files as an input to the mapper. The "map" function 
       * tokenize the line, and for each token (word) emits (word,1) as the output.  </p>
       */
      public static class TokenizerMapper 
           extends Mapper<Object, Text, Text, IntWritable>{
        
        private final static IntWritable one = new IntWritable(1);
        private Text word = new Text();
          
        public void map(Object key, Text value, Context context
                        ) throws IOException, InterruptedException {
          StringTokenizer itr = new StringTokenizer(value.toString());
          while (itr.hasMoreTokens()) {
            word.set(itr.nextToken());
            context.write(word, one);
          }
        }
      }
      
      /**
       * <p>Reduce function receives all the values that has the same key as the input, and it output the key 
       * and the number of occurrences of the key as the output.</p>  
       */
      public static class IntSumReducer 
           extends Reducer<Text,IntWritable,Text,IntWritable> {
        private IntWritable result = new IntWritable();

        public void reduce(Text key, Iterable<IntWritable> values, 
                           Context context
                           ) throws IOException, InterruptedException {
          int sum = 0;
          for (IntWritable val : values) {
            sum += val.get();
          }
          result.set(sum);
          context.write(key, result);
        }
      }
      
      
    public int run(String[] args) throws Exception {
        if (args.length < 2) {
            System.out.println("chapter3.WordCountWithTools <inDir> <outDir>");
            ToolRunner.printGenericCommandUsage(System.out);
            System.out.println("");
            return -1;
        }
        String inputPath = args[0];
        String outPath = args[1];

        Job job = prepareJob(inputPath, outPath, getConf());
        job.waitForCompletion(true);

        return 0;
    }

    public Job prepareJob(String inputPath, String outPath,Configuration conf)
            throws IOException {
        Job job = Job.getInstance(conf, "word count");
        job.setJarByClass(WordCountWithTools.class);
        job.setMapperClass(TokenizerMapper.class);
        // Uncomment this to
        // job.setCombinerClass(IntSumReducer.class);
        job.setReducerClass(IntSumReducer.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);
        FileInputFormat.addInputPath(job, new Path(inputPath));
        FileOutputFormat.setOutputPath(job, new Path(outPath));
        return job;
    }

    public static void main(String[] args) throws Exception {
        int res = ToolRunner.run(new Configuration(), new WordCountWithTools(), args);
        System.exit(res);
    }
}

3. 运行测试用例(以Eclipse为例)

在测试类上右键选择 Run As → JUnit Test


图片.png

另外一个例子

http://www.cnblogs.com/zimo-jing/p/8647113.html
https://www.cnblogs.com/zimo-jing/p/8650588.html

最后编辑于
©著作权归作者所有,转载或内容合作请联系作者
  • 序言:七十年代末,一起剥皮案震惊了整个滨河市,随后出现的几起案子,更是在滨河造成了极大的恐慌,老刑警刘岩,带你破解...
    沈念sama阅读 204,189评论 6 478
  • 序言:滨河连续发生了三起死亡事件,死亡现场离奇诡异,居然都是意外死亡,警方通过查阅死者的电脑和手机,发现死者居然都...
    沈念sama阅读 85,577评论 2 381
  • 文/潘晓璐 我一进店门,熙熙楼的掌柜王于贵愁眉苦脸地迎上来,“玉大人,你说我怎么就摊上这事。” “怎么了?”我有些...
    开封第一讲书人阅读 150,857评论 0 337
  • 文/不坏的土叔 我叫张陵,是天一观的道长。 经常有香客问我,道长,这世上最难降的妖魔是什么? 我笑而不...
    开封第一讲书人阅读 54,703评论 1 276
  • 正文 为了忘掉前任,我火速办了婚礼,结果婚礼上,老公的妹妹穿的比我还像新娘。我一直安慰自己,他们只是感情好,可当我...
    茶点故事阅读 63,705评论 5 366
  • 文/花漫 我一把揭开白布。 她就那样静静地躺着,像睡着了一般。 火红的嫁衣衬着肌肤如雪。 梳的纹丝不乱的头发上,一...
    开封第一讲书人阅读 48,620评论 1 281
  • 那天,我揣着相机与录音,去河边找鬼。 笑死,一个胖子当着我的面吹牛,可吹牛的内容都是我干的。 我是一名探鬼主播,决...
    沈念sama阅读 37,995评论 3 396
  • 文/苍兰香墨 我猛地睁开眼,长吁一口气:“原来是场噩梦啊……” “哼!你这毒妇竟也来了?” 一声冷哼从身侧响起,我...
    开封第一讲书人阅读 36,656评论 0 258
  • 序言:老挝万荣一对情侣失踪,失踪者是张志新(化名)和其女友刘颖,没想到半个月后,有当地人在树林里发现了一具尸体,经...
    沈念sama阅读 40,898评论 1 298
  • 正文 独居荒郊野岭守林人离奇死亡,尸身上长有42处带血的脓包…… 初始之章·张勋 以下内容为张勋视角 年9月15日...
    茶点故事阅读 35,639评论 2 321
  • 正文 我和宋清朗相恋三年,在试婚纱的时候发现自己被绿了。 大学时的朋友给我发了我未婚夫和他白月光在一起吃饭的照片。...
    茶点故事阅读 37,720评论 1 330
  • 序言:一个原本活蹦乱跳的男人离奇死亡,死状恐怖,灵堂内的尸体忽然破棺而出,到底是诈尸还是另有隐情,我是刑警宁泽,带...
    沈念sama阅读 33,395评论 4 319
  • 正文 年R本政府宣布,位于F岛的核电站,受9级特大地震影响,放射性物质发生泄漏。R本人自食恶果不足惜,却给世界环境...
    茶点故事阅读 38,982评论 3 307
  • 文/蒙蒙 一、第九天 我趴在偏房一处隐蔽的房顶上张望。 院中可真热闹,春花似锦、人声如沸。这庄子的主人今日做“春日...
    开封第一讲书人阅读 29,953评论 0 19
  • 文/苍兰香墨 我抬头看了看天上的太阳。三九已至,却和暖如春,着一层夹袄步出监牢的瞬间,已是汗流浃背。 一阵脚步声响...
    开封第一讲书人阅读 31,195评论 1 260
  • 我被黑心中介骗来泰国打工, 没想到刚下飞机就差点儿被人妖公主榨干…… 1. 我叫王不留,地道东北人。 一个月前我还...
    沈念sama阅读 44,907评论 2 349
  • 正文 我出身青楼,却偏偏与公主长得像,于是被迫代替她去往敌国和亲。 传闻我的和亲对象是个残疾皇子,可洞房花烛夜当晚...
    茶点故事阅读 42,472评论 2 342