
Building a Logistic Regression Model with Spark: Logistic Regression and the Confusion Matrix
from __future__ import print_function
from pyspark.sql import Row
from pyspark.sql import SQLContext
from pyspark import SparkContext
from pyspark.ml.classification import LogisticRegression
from pyspark.ml import Pipeline
from pyspark.ml.evaluation import BinaryClassificationEvaluator
from pyspark.ml.feature import HashingTF, Tokenizer
from pyspark.ml.tuning import CrossValidator, ParamGridBuilder
from pyspark.ml.feature import PCA
from pyspark.mllib.linalg import Vectors
from pyspark.ml.feature import StandardScaler




print ('666')


sc=SparkContext()
good=sc.textFile("hdfs://master:9000/usr/bad/test")
print ('888')
df=good.map(lambda x:x.split(','))
i=df.take(2)
print(i)


sqlContext=SQLContext(sc)
data=sqlContext.createDataFrame(df,["id","b","c","d","e","f","g","h","i","j","k"])
data.show(6,False)
data.printSchema()      # printSchema() prints directly and returns None, so there is nothing to capture


# Build a label -> integer index mapping from the distinct values of the last column.
# (On Spark 2.x and later, DataFrames no longer expose .map directly; use data.rdd.map instead.)
label_set = data.map(lambda x: x[10]).distinct().collect()
label_dict = dict()                                # start from an empty dictionary
i = 0
for key in label_set:
    if key not in label_dict:                      # if this label has not been seen yet, assign it the next index i
        label_dict[key] = i
        i = i + 1
print(label_dict)
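As a side note, Spark ML's built-in StringIndexer can produce an equivalent label-to-index mapping as a pipeline stage (it assigns indices by label frequency rather than order of appearance). A minimal sketch, assuming the raw label sits in column 'k' of the DataFrame built above:

from pyspark.ml.feature import StringIndexer

# Hypothetical alternative to building label_dict by hand.
indexer = StringIndexer(inputCol='k', outputCol='label_index')
indexed = indexer.fit(data).transform(data)
indexed.select('k', 'label_index').show(5, False)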
print('999')
# The first map splits each row into the ten feature columns (x) and the label index (y);
# the second map casts the features to numeric types and leaves the label untouched.
# (The tuple-unpacking lambda below is Python 2 syntax, consistent with the rest of this script.)
data1 = data.map(lambda x: ([x[i] for i in range(10)], label_dict[x[10]])) \
            .map(lambda (x, y): [float(x[0]), float(x[1]), float(x[2]), float(x[3]), float(x[4]), float(x[5]),
                                 float(x[6]), float(x[7]), int(float(x[8])), int(float(x[9])), y])


print('1010')
data2 = sqlContext.createDataFrame(data1,['a',"b","c","d","e","f","g","h","i","j","id"])
print('111111')
data2.show(5, False)
data2.printSchema()


print('8595')
data3 = data2.map(lambda x: (Vectors.dense([x[i] for i in range(0, 10)]), float(x[10])))      # combine the ten feature columns into one dense vector, the input format the ML algorithms expect
print(data3)
feature_data = sqlContext.createDataFrame(data3, ['features', 'id'])
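The same feature assembly can also be done with VectorAssembler, which operates on DataFrame columns directly and can be placed inside the pipeline. A minimal sketch, assuming data2 with its numeric columns 'a' through 'j' and the label column 'id':

from pyspark.ml.feature import VectorAssembler

# Hypothetical alternative to the manual Vectors.dense(...) map above.
assembler = VectorAssembler(inputCols=['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j'], outputCol='features')
feature_data_alt = assembler.transform(data2).select('features', 'id')
feature_data_alt.show(5, False)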


print('7878')
train_data, test_data = feature_data.randomSplit([0.7, 0.3], 6)        # split into training and test sets (seed = 6)


scaler = StandardScaler(inputCol='features', outputCol='scaledFeatures',withStd=True, withMean=False)   
pca = PCA(k=2, inputCol="scaledFeatures", outputCol="pcaFeatures")
lr = LogisticRegression(maxIter=10, featuresCol='pcaFeatures', labelCol='id')


print('6565')
pipeline = Pipeline(stages=[scaler, pca, lr])
Model = pipeline.fit(train_data)
results = Model.transform(test_data)


results.show()
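The BinaryClassificationEvaluator and CrossValidator imported at the top are never used above. If the label is binary, the evaluator can score the transformed test set by area under the ROC curve, and it can also drive a cross-validated grid search over the pipeline. A minimal sketch, assuming the label column 'id' and the default rawPrediction column produced by LogisticRegression:

# Hypothetical evaluation step using the evaluator that was imported but not used.
evaluator = BinaryClassificationEvaluator(labelCol='id', rawPredictionCol='rawPrediction', metricName='areaUnderROC')
print('Test AUC: %s' % evaluator.evaluate(results))

# Hypothetical grid search over the regularization parameter, reusing the same pipeline.
paramGrid = ParamGridBuilder().addGrid(lr.regParam, [0.01, 0.1, 1.0]).build()
cv = CrossValidator(estimator=pipeline, estimatorParamMaps=paramGrid, evaluator=evaluator, numFolds=3)
cvModel = cv.fit(train_data)
cvResults = cvModel.transform(test_data)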





Finally, evaluate the model and build the confusion matrix:
from pyspark.mllib.evaluation import MulticlassMetrics
# MulticlassMetrics expects an RDD of (prediction, label) pairs, so pair each prediction with its true label (column 'id').
predictionAndLabels = results.select('prediction', 'id').map(lambda x: (x[0], x[1]))
metrics = MulticlassMetrics(predictionAndLabels)
table=metrics.confusionMatrix().toArray()

print(table)
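Beyond the raw confusion matrix, MulticlassMetrics exposes aggregate scores computed from the same prediction/label pairs; a minimal sketch (the weighted variants are available from Spark 1.4 onwards):

# Aggregate metrics derived from predictionAndLabels.
print('Weighted precision: %s' % metrics.weightedPrecision)
print('Weighted recall: %s' % metrics.weightedRecall)
print('Weighted F1: %s' % metrics.weightedFMeasure())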



Note: the code is adapted from another author's post; only the data was replaced with my own.
