package day18test;
import org.apache.log4j.Level;
import org.apache.log4j.Logger;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.FlatMapFunction;
import org.apache.spark.api.java.function.Function2;
import org.apache.spark.api.java.function.PairFunction;
import org.apache.spark.api.java.function.VoidFunction;
import scala.Tuple2;
import java.util.Arrays;
import java.util.Iterator;
public class WordCountJavaApp01 {
    public static void main(String[] args) {
        // Locally lower the log level for the Spark application
        Logger.getLogger("org.apache.spark").setLevel(Level.WARN);
        Logger.getLogger("org.apache.hadoop").setLevel(Level.WARN);
        Logger.getLogger("org.spark_project").setLevel(Level.WARN);

        SparkConf conf = new SparkConf();
        conf.setMaster("local[*]");
        conf.setAppName(WordCountJavaApp01.class.getSimpleName());
        JavaSparkContext jsc = new JavaSparkContext(conf);

        // Load external data
        JavaRDD<String> lines = jsc.textFile("file:///D:/test.txt");
        // Print the number of partitions
        System.out.println("partitions: " + lines.getNumPartitions());

        // flatMap: one-to-many mapping (one line -> many words)
        JavaRDD<String> wordRDD = lines.flatMap(new FlatMapFunction<String, String>() {
            @Override
            public Iterator<String> call(String line) throws Exception {
                // Split the line on whitespace and return an iterator over the words
                return Arrays.asList(line.split("\\s+")).iterator();
            }
        });

        // Map each word to a (word, 1) pair
        JavaPairRDD<String, Integer> pairsRDD = wordRDD.mapToPair(new PairFunction<String, String, Integer>() {
            @Override
            public Tuple2<String, Integer> call(String word) throws Exception {
                return new Tuple2<>(word, 1);
            }
        });

        // Aggregate the counts for each word
        JavaPairRDD<String, Integer> retRDD = pairsRDD.reduceByKey(new Function2<Integer, Integer, Integer>() {
            @Override
            public Integer call(Integer v1, Integer v2) throws Exception {
                return v1 + v2;
            }
        });

        // An action triggers the computation
        retRDD.foreach(new VoidFunction<Tuple2<String, Integer>>() {
            @Override
            public void call(Tuple2<String, Integer> t) throws Exception {
                System.out.println(t._1 + "---->" + t._2);
            }
        });

        // Release resources
        jsc.stop();
    }
}
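One caveat worth noting: foreach runs on the executors, so on a real cluster the println output lands in the executor logs rather than the driver console (in local[*] mode everything shares one JVM, which is why it appears to work). A minimal driver-side alternative, assuming the retRDD built above; note that collect() pulls the entire result into driver memory, so it only suits small outputs:

// Bring the (word, count) pairs back to the driver and print them there.
// collect() materializes the whole RDD in driver memory, so use it only
// when the result set is known to be small.
for (Tuple2<String, Integer> t : retRDD.collect()) {
    System.out.println(t._1 + "---->" + t._2);
}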
Advanced: the same word count rewritten with Java 8 lambdas
package day18test;
import org.apache.log4j.Level;
import org.apache.log4j.Logger;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import scala.Tuple2;
import java.util.Arrays;
public class WordCountJavaApp2 {
    public static void main(String[] args) {
        // Locally lower the log level for the Spark application
        Logger.getLogger("org.apache.spark").setLevel(Level.WARN);
        Logger.getLogger("org.apache.hadoop").setLevel(Level.WARN);
        Logger.getLogger("org.spark_project").setLevel(Level.WARN);

        // Step 1: build the configuration
        SparkConf conf = new SparkConf();
        conf.setAppName(WordCountJavaApp2.class.getSimpleName());
        // Set the master: run locally, using all available cores
        conf.setMaster("local[*]");
        // Create the context (connection) object
        JavaSparkContext jsc = new JavaSparkContext(conf);

        // Load external data
        JavaRDD<String> lines = jsc.textFile("file:///D:/test.txt");
        // Split each line into words
        JavaRDD<String> words = lines.flatMap(line -> Arrays.asList(line.split("\\s+")).iterator());
        // Map each word to a (word, 1) pair
        JavaPairRDD<String, Integer> pairRDD = words.mapToPair(word -> new Tuple2<>(word, 1));
        // Aggregate to count the occurrences of each word
        JavaPairRDD<String, Integer> ret = pairRDD.reduceByKey((v1, v2) -> v1 + v2);
        // An action triggers the computation
        ret.foreach(t -> System.out.println(t._1 + "===" + t._2));
        jsc.close();
    }
}
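A common extension is to order the result by frequency. JavaPairRDD has no sortByValue, so one standard pattern is to swap each (word, count) pair, sort by the numeric key, and read the pairs back. A minimal sketch against the ret RDD above; the variable names swapped and sortedByCount are mine:

// Swap (word, count) -> (count, word) so the count becomes the key.
JavaPairRDD<Integer, String> swapped = ret.mapToPair(t -> new Tuple2<>(t._2, t._1));
// Sort descending by count; Integer is Comparable, so sortByKey applies.
JavaPairRDD<Integer, String> sortedByCount = swapped.sortByKey(false);
// take(10) is an action: it ships the first ten pairs to the driver.
for (Tuple2<Integer, String> t : sortedByCount.take(10)) {
    System.out.println(t._2 + "===" + t._1);
}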