This article walks through how to develop and configure a Spark SQL program in IDEA. It is meant as a practical reference; hopefully you will get something useful out of it.
First, add the dependency to your IDEA project's pom.xml:
<!-- Spark SQL dependency; mind the version number -->
<dependency>
    <groupId>org.apache.spark</groupId>
    <artifactId>spark-sql_2.12</artifactId>
    <version>3.0.0</version>
</dependency>
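
The program below reads datas/user.json. That file is not shown in the original; spark.read.json expects one JSON object per line (JSON Lines), so a minimal sample consistent with the username and age fields used in the code might look like this (contents are illustrative):

{"username": "zhangsan", "age": 20}
{"username": "lisi", "age": 30}
{"username": "wangwu", "age": 40}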
package com.zf.bigdata.spark.sql

import org.apache.spark.SparkConf
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, Dataset, Row, SparkSession}

object Spark01_SparkSql_Basic {

  def main(args: Array[String]): Unit = {
    // Create the configuration object for the context environment
    val sparkConf = new SparkConf().setMaster("local[*]").setAppName("sparkSql")

    // Create the SparkSession object
    val spark = SparkSession.builder().config(sparkConf).getOrCreate()

    // DataFrame
    val df: DataFrame = spark.read.json("datas/user.json")
    //df.show()

    // DataFrame => SQL
    //df.createOrReplaceTempView("user")
    //spark.sql("select * from user").show()
    //spark.sql("select age from user").show()
    //spark.sql("select avg(age) from user").show()

    // DataFrame => DSL
    // Transformations such as extracting a column value with $ require the
    // implicit conversion rules to be imported first, or they will not compile.
    // Note: "spark" here is not a package name but the SparkSession object above.
    import spark.implicits._
    //df.select("age", "username").show()
    //df.select($"age" + 1).show()
    //df.select('age + 1).show()

    // DataSet
    //val seq = Seq(1, 2, 3, 4)
    //val ds: Dataset[Int] = seq.toDS()
    //ds.show()

    // RDD <=> DataFrame
    val rdd = spark.sparkContext.makeRDD(List((1, "zhangsan", 10), (2, "lisi", 20)))
    val df1: DataFrame = rdd.toDF("id", "name", "age")
    val rdd1: RDD[Row] = df1.rdd

    // DataFrame <=> DataSet
    val ds: Dataset[User] = df1.as[User]
    val df2: DataFrame = ds.toDF()

    // RDD <=> DataSet
    val ds1: Dataset[User] = rdd.map {
      case (id, name, age) => User(id, name = name, age = age)
    }.toDS()
    val rdd2: RDD[User] = ds1.rdd

    spark.stop()
  }

  case class User(id: Int, name: String, age: Int)
}
PS: Next, let's take a fuller look at developing Spark SQL programs in IDEA.
Packaging and running the program in IDEA work much the same way as for Spark Core; you just need to add a new entry to the Maven dependencies (note that these demos were written against an older Scala/Spark version than the one above):
<dependency>
    <groupId>org.apache.spark</groupId>
    <artifactId>spark-sql_2.11</artifactId>
    <version>2.1.1</version>
</dependency>
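
All three demos below read D:\tmp_files\student.txt and split each line on a tab into (id, name, age). The file is not shown in the original; a minimal tab-separated sample consistent with the parsing code might look like this (contents are illustrative):

1	Tom	23
2	Mary	20
3	Mike	18
4	Lily	25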
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.types.{IntegerType, StringType, StructField, StructType}
import org.apache.spark.sql.Row

object Demo1 {
  def main(args: Array[String]): Unit = {
    // Create a SparkSession
    val spark = SparkSession.builder().master("local").appName("UnderstandSparkSession").getOrCreate()

    // Create an RDD from the given path
    val personRDD = spark.sparkContext.textFile("D:\\tmp_files\\student.txt").map(_.split("\t"))

    // Declare the schema with StructType
    val schema = StructType(
      List(
        StructField("id", IntegerType),
        StructField("name", StringType),
        StructField("age", IntegerType)))

    // Map the RDD to an RDD[Row]
    val rowRDD = personRDD.map(p => Row(p(0).toInt, p(1), p(2).toInt))
    val personDF = spark.createDataFrame(rowRDD, schema)

    // Register a temporary view
    personDF.createOrReplaceTempView("t_person")

    // Run SQL
    val df = spark.sql("select * from t_person order by age desc limit 4")
    df.show()
    spark.stop()
  }
}
import org.apache.spark.sql.SparkSession

// Using a case class to define the schema
object Demo2 {
  def main(args: Array[String]): Unit = {
    // Create a SparkSession
    val spark = SparkSession.builder().master("local").appName("CaseClassDemo").getOrCreate()

    // Read the file and build the corresponding RDD
    val lineRDD = spark.sparkContext.textFile("D:\\tmp_files\\student.txt").map(_.split("\t"))

    // Associate the RDD with the case class
    val studentRDD = lineRDD.map(x => Student(x(0).toInt, x(1), x(2).toInt))

    // Generate a DataFrame from the RDD; this requires the implicit conversions
    import spark.implicits._
    val studentDF = studentRDD.toDF

    // Register a temporary view
    studentDF.createOrReplaceTempView("student")

    // Run SQL
    spark.sql("select * from student").show()
    spark.stop()
  }
}
// The case class must be defined outside the method (at the top level of the file);
// otherwise Spark cannot derive the implicit Encoder that toDF/toDS need.
case class Student(stuID: Int, stuName: String, stuAge: Int)
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.types.{IntegerType, StringType, StructField, StructType}
import org.apache.spark.sql.Row
import java.util.Properties

object Demo3 {
  def main(args: Array[String]): Unit = {
    // Create a SparkSession
    val spark = SparkSession.builder().master("local").appName("UnderstandSparkSession").getOrCreate()

    // Create an RDD from the given path
    val personRDD = spark.sparkContext.textFile("D:\\tmp_files\\student.txt").map(_.split("\t"))

    // Declare the schema with StructType
    val schema = StructType(
      List(
        StructField("id", IntegerType),
        StructField("name", StringType),
        StructField("age", IntegerType)))

    // Map the RDD to an RDD[Row]
    val rowRDD = personRDD.map(p => Row(p(0).toInt, p(1), p(2).toInt))
    val personDF = spark.createDataFrame(rowRDD, schema)

    // Register a temporary view
    personDF.createOrReplaceTempView("person")

    // Run SQL
    val df = spark.sql("select * from person")

    // Inspect the result
    //df.show()

    // Save the result to MySQL via JDBC
    val props = new Properties()
    props.setProperty("user", "root")
    props.setProperty("password", "123456")
    props.setProperty("driver", "com.mysql.jdbc.Driver")
    df.write.mode("overwrite").jdbc("jdbc:mysql://localhost:3306/company?serverTimezone=UTC&characterEncoding=utf-8", "student", props)
    spark.close()
  }
}
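
For the JDBC write in Demo3 to work, the MySQL driver class named in the properties (com.mysql.jdbc.Driver) must also be on the classpath. The original does not list that dependency; a sketch, assuming the classic mysql-connector-java artifact (the version shown is illustrative, pick one matching your MySQL server):

<!-- Assumed MySQL JDBC driver; the version shown is illustrative -->
<dependency>
    <groupId>mysql</groupId>
    <artifactId>mysql-connector-java</artifactId>
    <version>5.1.47</version>
</dependency>

You can then verify the write by reading the table back with the same URL and properties:

// Read the "student" table back from MySQL (same url/props as the write above)
val fromMysql = spark.read.jdbc("jdbc:mysql://localhost:3306/company?serverTimezone=UTC&characterEncoding=utf-8", "student", props)
fromMysql.show()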
That wraps up "How to develop and configure Spark SQL in IDEA". Hopefully the content above was helpful and you learned something from it. If you found this article useful, please share it so more people can see it.