1. Without further ado, straight to the code.
import java.util.Properties
import org.apache.spark.SparkConf
import org.apache.spark.sql.SparkSession

object KuduToSqlserver {
  val kuduMasters = "cdh-5:xx,cdh-6:xx"
  // TODO 1: the Kudu source table (Impala-managed naming convention)
  val kuduTableName = "impala::dw_etl.zxjk_day_etl_flow_rg_kudu"

  def main(args: Array[String]): Unit = {
    val conf = new SparkConf()
    conf.setAppName("SparkKudu")
    conf.setMaster("local[*]")

    val kuduOptions = Map("kudu.master" -> kuduMasters, "kudu.table" -> kuduTableName)

    val sparkSession = SparkSession.builder()
      .config(conf)
      .config("hive.metastore.uris", "thrift://cdh-2:9083")
      // NameNode port 8020 is an assumption here; the original URI was garbled
      .config("hive.metastore.warehouse.dir", "hdfs://cdh-3:8020/user/hive/warehouse")
      .getOrCreate()

    // TODO: read the Kudu table and register it as a temp view
    sparkSession.read.format("org.apache.kudu.spark.kudu")
      .options(kuduOptions)
      .load()
      .createOrReplaceTempView("tmp_kudu_table")

    // Map pollutant codes to display names (颗粒物 = particulate matter,
    // 二氧化硫 = sulfur dioxide, 氮氧化物 = nitrogen oxides)
    val result = sparkSession.sql(
      """
        |select pscode, outputcode, pollutant_code, region_code, province, city, psname, outputname,
        |       case pollutant_code
        |         when '001' then '颗粒物'
        |         when '002' then '二氧化硫'
        |         when '003' then '氮氧化物'
        |       end pollutantname,
        |       focusindustrytype_new, monitortime, reviseflow, updateflow, qx_flow, reason
        |from tmp_kudu_table
        |""".stripMargin)
    println("Kudu read succeeded")

    val prop = new Properties()
    prop.setProperty("driver", "com.microsoft.sqlserver.jdbc.SQLServerDriver")
    prop.setProperty("user", "your-login-name")
    prop.setProperty("password", "your-password")

    result.write.mode("append")
      .jdbc("jdbc:sqlserver://ip:1433;DatabaseName=AMDB_DataCleanDB", "[dbo].[tb_day11]", prop)
    println("SQL Server write succeeded")

    sparkSession.close()
  }
}
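One note on dependencies: the snippet assumes the Kudu Spark connector and Microsoft's SQL Server JDBC driver are on the classpath. A minimal build.sbt sketch is below; the version numbers are assumptions and should be matched to your Spark, Scala, and Kudu versions.

// build.sbt -- a minimal sketch; all versions below are assumptions
libraryDependencies ++= Seq(
  "org.apache.spark" %% "spark-sql" % "2.4.8" % Provided,
  "org.apache.kudu" %% "kudu-spark2" % "1.10.0", // Kudu connector for Spark 2.x
  "com.microsoft.sqlserver" % "mssql-jdbc" % "7.4.1.jre8" // SQL Server JDBC driver
)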
To sum up: this job pushed roughly 700,000+ rows in about 8 minutes total. If anyone has a faster or better approach, let's discuss and share it. Thanks, everyone.
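On that 8-minute figure: Spark's JDBC sink opens one connection per DataFrame partition and batches inserts according to the batchsize option (default 1000), so repartitioning the DataFrame and raising the batch size are the usual first knobs. Below is a sketch of the write step rewritten this way, reusing result and prop from the code above; the partition count and batch size are assumptions to tune against your SQL Server instance.

// Same write with more parallelism and larger JDBC batches.
// 8 partitions and batchsize 10000 are assumptions -- tune for your target.
prop.setProperty("batchsize", "10000") // rows per JDBC round trip (Spark JDBC write option)
result.repartition(8)                  // one JDBC connection per partition
  .write.mode("append")
  .jdbc("jdbc:sqlserver://ip:1433;DatabaseName=AMDB_DataCleanDB", "[dbo].[tb_day11]", prop)

Note that too many partitions can overwhelm the database with concurrent connections, so increase parallelism gradually.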