Installing Hive and MySQL on Linux
For the installation itself, refer to an existing tutorial; the steps below assume Hive and MySQL are already set up. First create a directory in HDFS and upload Spark's bundled sample data files:
hdfs dfs -mkdir /user/root/sparksql
hdfs dfs -put /home/xwk/software/spark/examples/src/main/resources/users.parquet /user/root/sparksql
hdfs dfs -put /home/xwk/software/spark/examples/src/main/resources/people.json /user/root/sparksql
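The REPL session below runs inside spark-shell; a typical launch, assuming the install path used above and a local master (both are assumptions, adjust to your deployment):
/home/xwk/software/spark/bin/spark-shell --master local[*]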
scala> import org.apache.spark.sql.SQLContext
import org.apache.spark.sql.SQLContext
scala> val sqlContext=new SQLContext(sc)
sqlContext: org.apache.spark.sql.SQLContext = org.apache.spark.sql.SQLContext@438e1537
scala> val dfUsers=sqlContext.read.load("hdfs://master/user/root/sparksql/users.parquet")
dfUsers: org.apache.spark.sql.DataFrame = [name: string, favorite_color: string, favorite_numbers: array<int>]
scala> val dfPeople=sqlContext.read.json("hdfs://master/user/root/sparksql/people.json")
dfPeople: org.apache.spark.sql.DataFrame = [age: bigint, name: string]
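With both files loaded, a few basic DataFrame operations confirm the contents (a minimal sketch of the standard Spark 1.x DataFrame API; output omitted):
dfPeople.show()                              // print the rows as a table
dfPeople.select("name").show()               // project a single column
dfPeople.filter(dfPeople("age") > 20).show() // rows with age over 20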
Reading a MySQL table over JDBC; this assumes the database and table already exist.
scala> val url="jdbc:mysql://192.168.10.20:3306/hive"
url: String = jdbc:mysql://192.168.10.20:3306/hive
scala> val jdbcDF=sqlContext.read.format("jdbc").options(
| Map("url"->url,
| "user"->"root",
| "password"->"123456",
| "dbtable"->"DBS")).load()
jdbcDF: org.apache.spark.sql.DataFrame = [DB_ID: bigint, DESC: string, DB_LOCATION_URI: string, NAME: string, OWNER_NAME: string, OWNER_TYPE: string]
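Note that the MySQL JDBC driver (mysql-connector-java) must be on the classpath, for example passed to spark-shell via --jars. Writing back to MySQL uses the same connection details; a minimal sketch with the Spark 1.4+ write API (the target table name DBS_copy is hypothetical):
import java.util.Properties
val props = new Properties()
props.setProperty("user", "root")
props.setProperty("password", "123456")
// Save jdbcDF as a new table in the same database.
jdbcDF.write.jdbc(url, "DBS_copy", props)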
To call toDF() on an RDD, the SQLContext implicit conversions must be imported first:
scala> import sqlContext.implicits._
import sqlContext.implicits._
scala> case class Person(name:String,age:Int)
defined class Person
scala> val data=sc.textFile("/user/root/sparksql/user.txt").map(_.split(","))
data: org.apache.spark.rdd.RDD[Array[String]] = MapPartitionsRDD[8] at map at <console>:28
scala> val people=data.map(p=>Person(p(0),p(1).trim.toInt)).toDF()
people: org.apache.spark.sql.DataFrame = [name: string, age: int]
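Once converted, the DataFrame can also be queried with SQL by registering it as a temporary table (a minimal sketch using the Spark 1.x registerTempTable API; the table name "people" is an arbitrary choice):
people.registerTempTable("people")
sqlContext.sql("select name, age from people where age > 20").show()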
scala> import org.apache.spark.sql.hive.HiveContext
import org.apache.spark.sql.hive.HiveContext
scala> val hiveContext=new HiveContext(sc)
hiveContext: org.apache.spark.sql.hive.HiveContext = org.apache.spark.sql.hive.HiveContext@27f22d1a
scala> hiveContext.sql("use test")
res4: org.apache.spark.sql.DataFrame = [result: string]
scala> val people=hiveContext.sql("select * from students")
people: org.apache.spark.sql.DataFrame = [id: int, name: string, score: double, classes: string]
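Any HiveQL statement runs the same way; for example, an aggregate over the columns shown in the schema above (a sketch, output omitted):
// Average score per class in the students table.
hiveContext.sql("select classes, avg(score) as avg_score from students group by classes").show()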
Use SparkContext to read the raw data file (movies.dat) and convert it to a DataFrame:
scala> case class Movie(movieId:Int,title:String,Genres:String)
defined class Movie
scala> val data=sc.textFile("hdfs://master/user/root/sparksql/movies.dat").map(_.split("::"))
data: org.apache.spark.rdd.RDD[Array[String]] = MapPartitionsRDD[20] at map at <console>:34
scala> val movies=data.map(m=>Movie(m(0).trim.toInt,m(1),m(2))).toDF()
movies: org.apache.spark.sql.DataFrame = [movieId: int, title: string, Genres: string]
scala> movies.printSchema
root
|-- movieId: integer (nullable = false)
|-- title: string (nullable = true)
|-- Genres: string (nullable = true)
Note: the parameter shown below controls the number of characters displayed per field, not the number of rows; with truncation disabled, all characters are shown in full.
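A sketch of the kind of call this refers to, assuming Spark 1.5+ where show accepts a truncate flag (the row count of 5 is arbitrary):
movies.show(5, false) // print 5 rows without truncating long columns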
scala> movies.first // get the first row
res14: org.apache.spark.sql.Row = [1,Toy Story (1995),Animation|Children's|Comedy]
scala> movies.head(2) // get the first 2 rows
res15: Array[org.apache.spark.sql.Row] = Array([1,Toy Story (1995),Animation|Children's|Comedy], [2,Jumanji (1995),Adventure|Children's|Fantasy])
scala> movies.take(2) // get the first 2 rows
res16: Array[org.apache.spark.sql.Row] = Array([1,Toy Story (1995),Animation|Children's|Comedy], [2,Jumanji (1995),Adventure|Children's|Fantasy])
scala> movies.takeAsList(2) // get the first 2 rows as a java.util.List
res17: java.util.List[org.apache.spark.sql.Row] = [[1,Toy Story (1995),Animation|Children's|Comedy], [2,Jumanji (1995),Adventure|Children's|Fantasy]]
Both calls below return the entire table to the driver, so they are only safe for small datasets:
movies.collect()       // all rows as an Array[Row]
movies.collectAsList() // all rows as a java.util.List[Row]
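When the table may be large, counting or limiting first is safer than collecting everything (a minimal sketch):
movies.count()          // row count, computed on the executors
movies.limit(10).show() // bring back only a bounded sample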