This post introduces a getting-started example for Spark SQL.
Add the dependency in Maven:
<dependency>
    <groupId>org.apache.spark</groupId>
    <artifactId>spark-sql_2.11</artifactId>
    <version>2.3.1</version>
</dependency>
Create a data file named people.json in the project root directory:
{"name":"Andy", "age":30, "sex":"女"}
{"name":"Justin", "age":19, "sex":"男"}
{"name":"Michael", "age":20, "sex":"男"}
Code example:
package com.yzy.spark;

import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.function.VoidFunction;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SparkSession;

public class demo4 {
    private static String appName = "spark.sql.demo";
    private static String master = "local[*]";

    public static void main(String[] args) {
        // Initialize the SparkSession
        SparkSession spark = SparkSession
                .builder()
                .appName(appName)
                .master(master)
                .getOrCreate();
        // Read the source data file
        Dataset<Row> df = spark.read().json("people.json");
        // Convert the DataFrame to a JavaRDD
        JavaRDD<Row> rdd = df.toJavaRDD();
        // Iterate over the rows and print each one
        rdd.foreach(new VoidFunction<Row>() {
            public void call(Row row) throws Exception {
                System.out.println(row.toString());
            }
        });
        spark.stop();
    }
}
Output:
[30,Andy,女]
[19,Justin,男]
[20,Michael,男]
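Converting to an RDD is not required just to inspect the data; as a quick sketch using the same df as above, the DataFrame API can print the inferred schema and the rows directly:

// Print the schema inferred from the JSON file
df.printSchema();
// Print the rows as a formatted table (first 20 rows by default)
df.show();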
The example above simply prints the source data, but Spark SQL can do far more than that: it can filter and transform data much like native SQL statements. Below are some other common Spark SQL operations. You can also refer to the official demos by downloading Spark and looking at the examples folder.
Note: the col() function used in the examples below requires a static import:
import static org.apache.spark.sql.functions.col;
Select specific columns
df = df.select("name","age");
----------------------------------
Output:
[Andy,30]
[Justin,19]
[Michael,20]
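select also accepts Column objects; assuming the static col import shown above, this equivalent form is the one the following examples build on:

df = df.select(col("name"), col("age"));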
Add 1 to the age column
df = df.select(col("name"), col("age").plus(1));
----------------------------------
Output:
[Andy,31]
[Justin,20]
[Michael,21]
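If you prefer to keep all original columns and attach the computed value as an extra column instead of replacing the projection, a small sketch using withColumn (the column name age_plus_1 is an arbitrary choice, not part of the original example):

// Keep all columns and add a derived column
df = df.withColumn("age_plus_1", col("age").plus(1));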
Filter records where age is greater than 19
df = df.filter(col("age").gt(19));
----------------------------------
Output:
[30,Andy,女]
[20,Michael,男]
Count by age
df = df.groupBy("age").count();
----------------------------------
Output:
[30,1]
[19,1]
[20,1]
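groupBy can also be combined with other aggregate functions; a sketch, assuming the additional static imports avg and max from org.apache.spark.sql.functions:

import static org.apache.spark.sql.functions.avg;
import static org.apache.spark.sql.functions.max;

// Average and maximum age per sex, ordered by sex
df = df.groupBy("sex").agg(avg("age"), max("age")).orderBy(col("sex"));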
In addition, we can use native SQL to perform the operations above. First, register a people view:
df.createOrReplaceTempView("people");
Then the data can be queried like this:
Dataset<Row> sqlDF = spark.sql("SELECT * FROM people");
JavaRDD<Row> rdd = sqlDF.toJavaRDD();
//......
Note: df.createOrReplaceTempView("people") creates a temporary view that is session-scoped. If you want a temporary view that is shared across all sessions and kept alive until the Spark application terminates, create a global temporary view instead. Global temporary views are tied to the system-preserved database global_temp, so they must be referenced by the qualified name, e.g. SELECT * FROM global_temp.view1.
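With the people view registered, the DataFrame operations shown earlier can be written as plain SQL; a sketch of the equivalents (the variable names are arbitrary):

// Select specific columns
Dataset<Row> names = spark.sql("SELECT name, age FROM people");
// Add 1 to the age column
Dataset<Row> older = spark.sql("SELECT name, age + 1 FROM people");
// Filter records where age is greater than 19
Dataset<Row> adults = spark.sql("SELECT * FROM people WHERE age > 19");
// Count by age
Dataset<Row> counts = spark.sql("SELECT age, COUNT(*) FROM people GROUP BY age");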
Global temporary view example:
df.createGlobalTempView("people");
Dataset<Row> sqlDF = spark.sql("SELECT * FROM global_temp.people");
JavaRDD<Row> rdd = sqlDF.toJavaRDD();
//......
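Because a global temporary view is shared across sessions of the same application, it is also visible from a new session; a minimal sketch:

// The global view is still visible from a fresh session of the same application
Dataset<Row> sqlDF2 = spark.newSession().sql("SELECT * FROM global_temp.people");
sqlDF2.show();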
Converting a JavaRDD to a Dataset<Row>
Contents of people.txt:
Michael, 29
Andy, 30
Justin, 19
package com.yzy.spark;

import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.function.Function;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SparkSession;

import java.io.Serializable;

public class demo6 {
    private static String appName = "spark.sql.demo";
    private static String master = "local[*]";

    public static void main(String[] args) {
        SparkSession spark = SparkSession
                .builder()
                .appName(appName)
                .master(master)
                .getOrCreate();
        // Read the text file and map each line to a Person bean
        JavaRDD<Person> peopleRDD = spark.read()
                .textFile("people.txt")
                .javaRDD()
                .map(new Function<String, Person>() {
                    public Person call(String s) throws Exception {
                        String[] parts = s.split(",");
                        Person person = new Person();
                        person.setName(parts[0]);
                        person.setAge(Integer.parseInt(parts[1].trim()));
                        return person;
                    }
                });
        // Apply the Person schema to the RDD via reflection
        Dataset<Row> peopleDF = spark.createDataFrame(peopleRDD, Person.class);
        peopleDF.show();
        spark.stop();
    }

    // JavaBean used by Spark to infer the schema through reflection
    public static class Person implements Serializable {
        private String name;
        private int age;

        public String getName() {
            return name;
        }

        public void setName(String name) {
            this.name = name;
        }

        public int getAge() {
            return age;
        }

        public void setAge(int age) {
            this.age = age;
        }
    }
}
Console output:
+---+-------+
|age| name|
+---+-------+
| 29|Michael|
| 30| Andy|
| 19| Justin|
+---+-------+
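When a JavaBean cannot be defined ahead of time, the schema can also be specified programmatically with StructType and RowFactory. This is a sketch of that alternative for the same people.txt, not part of the original example; it assumes the same SparkSession variable spark as above:

import org.apache.spark.api.java.function.Function;
import org.apache.spark.sql.RowFactory;
import org.apache.spark.sql.types.DataTypes;
import org.apache.spark.sql.types.StructType;
import java.util.Arrays;

// Define the schema explicitly: name (string) and age (integer)
StructType schema = DataTypes.createStructType(Arrays.asList(
        DataTypes.createStructField("name", DataTypes.StringType, true),
        DataTypes.createStructField("age", DataTypes.IntegerType, true)));

// Map each text line to a generic Row instead of a Person bean
JavaRDD<Row> rowRDD = spark.read()
        .textFile("people.txt")
        .javaRDD()
        .map(new Function<String, Row>() {
            public Row call(String s) throws Exception {
                String[] parts = s.split(",");
                return RowFactory.create(parts[0], Integer.parseInt(parts[1].trim()));
            }
        });

// Build the DataFrame from the RDD plus the explicit schema
Dataset<Row> peopleDF = spark.createDataFrame(rowRDD, schema);
peopleDF.show();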