You need to add the jieba dependency to your pom file:
<dependency>
    <groupId>com.huaban</groupId>
    <artifactId>jieba-analysis</artifactId>
    <version>1.0.2</version>
</dependency>
The YARN cluster does not ship with the package above, so the job cannot run unless the dependency is bundled into the application jar (see the assembly configuration below and step 1).
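For mvn clean assembly:assembly (step 1 below) to produce such a bundled jar, the pom also needs the maven-assembly-plugin with the jar-with-dependencies descriptor. A minimal sketch of that configuration (plugin version left to your build):
<build>
    <plugins>
        <plugin>
            <groupId>org.apache.maven.plugins</groupId>
            <artifactId>maven-assembly-plugin</artifactId>
            <configuration>
                <descriptorRefs>
                    <!-- build an extra jar that contains all pom dependencies -->
                    <descriptorRef>jar-with-dependencies</descriptorRef>
                </descriptorRefs>
            </configuration>
        </plugin>
    </plugins>
</build>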
import com.huaban.analysis.jieba.{JiebaSegmenter, SegToken}
import com.huaban.analysis.jieba.JiebaSegmenter.SegMode
import org.apache.spark.SparkConf
import org.apache.spark.sql.{DataFrame, SparkSession}
import org.apache.spark.sql.functions._

object JiebaKry {
  def main(args: Array[String]): Unit = {
    // Register the jieba segmenter class with Kryo so the instance can be serialized for broadcast
    val conf = new SparkConf()
      .registerKryoClasses(Array(classOf[JiebaSegmenter]))
      .set("spark.rpc.message.maxSize", "800")
    // Build the SparkSession with the conf defined above
    val spark = SparkSession
      .builder()
      .appName("Jieba UDF")
      .enableHiveSupport()
      .config(conf)
      .getOrCreate()

    // Segmentation helper: takes a DataFrame and a column name, and returns the same
    // DataFrame with one extra column "seg" holding the segmented text
    def jieba_seg(df: DataFrame, colname: String): DataFrame = {
      val segmenter = new JiebaSegmenter()
      val seg = spark.sparkContext.broadcast(segmenter)
      val jieba_udf = udf { (sentence: String) =>
        val segV = seg.value
        segV.process(sentence, SegMode.INDEX)
          .toArray().map(_.asInstanceOf[SegToken].word)
          .filter(_.length > 1).mkString("/")
      }
      df.withColumn("seg", jieba_udf(col(colname)))
    }

    val df = spark.sql("select sentence,label from table_name limit 300")
    val df_seg = jieba_seg(df, "sentence")
    df_seg.show()
  }
}
Note:
broadcast creates a broadcast variable carrying the segmenter instance; after broadcasting, every executor holds its own read-only copy, so the instance is shipped once per executor instead of once per task.
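Before packaging, you can sanity-check the segmenter in a plain Scala program without Spark. A minimal sketch (the object name and sample sentence are just illustrative):
import com.huaban.analysis.jieba.JiebaSegmenter
import com.huaban.analysis.jieba.JiebaSegmenter.SegMode
import scala.collection.JavaConverters._

object JiebaLocalCheck {
  def main(args: Array[String]): Unit = {
    val segmenter = new JiebaSegmenter()
    // Same processing as the UDF above: segment, keep tokens longer than 1 character, join with "/"
    val words = segmenter.process("这是一个伸手不见五指的黑夜。", SegMode.INDEX)
      .asScala.map(_.word).filter(_.length > 1)
    println(words.mkString("/"))
  }
}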
After writing the code above, build and submit it as follows:
1. Build the jar: mvn clean assembly:assembly
2. The jars are written to the target directory. Two jars are produced:
jieba-1.0-SNAPSHOT.jar (without the pom dependencies)
jieba-1.0-SNAPSHOT-jar-with-dependencies.jar (with the pom dependencies bundled)
Use the jar with dependencies.
3. Upload the jar to the target directory on the server (installing Git is enough to get scp for the command below):
scp -rp jieba-1.0-SNAPSHOT-jar-with-dependencies.jar root@192.168.174.134:/home/Document/Code/spark/sub/jieba
4. vim run.sh (the script that launches the job):
cd /usr/local/src/spark-2.0.2-bin-hadoop2.6
./bin/spark-submit \
--class JiebaKry \
--master yarn \
--deploy-mode cluster \
--files $HIVE_HOME/conf/hive-site.xml \
/home/Document/Code/spark/sub/jieba/jieba-1.0-SNAPSHOT-jar-with-dependencies.jar
5. Run it: sh run.sh
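Note that in cluster deploy mode the output of df_seg.show() appears in the driver's container log on YARN rather than in your local shell; after the application finishes it can be retrieved with yarn logs -applicationId <appId>.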