Code first; I found it online and tidied it up myself.
The training data has 7 columns: 6 features, with the label in the last column. I won't go into what each feature means.
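For reference, each row of the raw file looks like the line below: the first six fields are the qualitative ratings and the last field is the class (this sample row is illustrative, not copied from the file):

P,P,A,A,A,P,NB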
import org.apache.log4j.{Level, Logger}
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.mllib.classification.LogisticRegressionWithLBFGS
import org.apache.spark.mllib.linalg.Vectors
import org.apache.spark.mllib.regression.LabeledPoint
import org.apache.spark.rdd.RDD

def main(args: Array[String]): Unit = {
  val sc = init()
  val path = "path\\Qualitative_Bankruptcy.data.txt"
  val data = readText(sc, path)
  println("Total records: " + data.count())
  /* Convert each text line into numeric scores:
   * P, A, N are the ratings for each feature,
   * B / NB encode the bankruptcy label;
   * both are mapped to doubles.
   */
  val pardata = data.map { x =>
    x.split(",").map {
      case "P"  => 3.0
      case "A"  => 2.0
      case "N"  => 1.0
      case "NB" => 1.0
      case "B"  => 0.0
    }
  }
  printArray(pardata.take(4), 4) // take(4) avoids collecting the whole RDD just to print 4 rows
  // Column 6 is the label; columns 0-5 are the features.
  val pdata = pardata.map { line =>
    LabeledPoint(line(6), Vectors.dense(line.slice(0, 6)))
  }
  // 60/40 train/test split with a fixed seed for reproducibility.
  val splitData = pdata.randomSplit(Array(0.6, 0.4), seed = 11L)
  val trainingData = splitData(0)
  val testData = splitData(1)
  println(trainingData.count() + " " + testData.count())
  // Train a binary logistic regression model with L-BFGS.
  val model = new LogisticRegressionWithLBFGS().setNumClasses(2).run(trainingData)
  // Pair each test label with the model's prediction.
  val res = testData.map { point =>
    (point.label, model.predict(point.features))
  }
  // Misclassification rate on the test set.
  println(res.filter(x => x._1 != x._2).count() / res.count().toDouble)
}
/**
 * Print the first num rows of a 2-D array, one row per line.
 */
def printArray(arr: Array[Array[Double]], num: Int): Unit = {
  arr.take(num).foreach { row =>
    row.foreach(v => print(v + " "))
    println()
  }
}
/**
 * Read a text file into an RDD of lines.
 */
def readText(sc: SparkContext, path: String): RDD[String] = {
  sc.textFile(path)
}
/**
 * Initialize the SparkContext and silence the noisier loggers.
 */
def init(): SparkContext = {
  Logger.getLogger("org.apache.spark").setLevel(Level.WARN)
  Logger.getLogger("org.eclipse.jetty.server").setLevel(Level.OFF)
  val conf = new SparkConf().setMaster("local[4]").setAppName("Logistic")
  new SparkContext(conf)
}
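Once res (the RDD of (label, prediction) pairs) is available, spark.mllib's MulticlassMetrics gives a fuller picture than the single error-rate number. A minimal sketch, reusing the res value built in main above; note that MulticlassMetrics expects its tuples in (prediction, label) order, so the pairs are swapped first:

import org.apache.spark.mllib.evaluation.MulticlassMetrics

// res holds (label, prediction); MulticlassMetrics wants (prediction, label).
val metrics = new MulticlassMetrics(res.map { case (label, pred) => (pred, label) })
println("Confusion matrix:\n" + metrics.confusionMatrix)
println("Precision for class 1.0 (NB): " + metrics.precision(1.0))
println("Recall for class 1.0 (NB): " + metrics.recall(1.0))

The confusion matrix shows whether the mistakes concentrate in one class, which a single error rate hides. Relatedly, if you want probabilities rather than hard 0/1 predictions, clearing the model's decision threshold makes predict return the raw logistic score (again a sketch, using the model and testData from above):

val probModel = model.clearThreshold() // predict now returns P(label = 1.0)
testData.take(3).foreach { p =>
  println("label=" + p.label + ", score=" + probModel.predict(p.features))
}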
Training data: the Qualitative_Bankruptcy dataset from the UCI Machine Learning Repository.