[Algorithm Competitions] Code Templates for Mainstream Machine Learning / Deep Learning Models

Abstract

Research life has turned hectic again. Now that Double Eleven is over and my wallet is empty, shopping is off the agenda and competitions are back on it. To start, here are two very, very good repos I only discovered last night; I also asked a PhD student for some advice. Prepare step by step and a restless mind will settle down.

Up-to-date and comprehensive competition listings: https://github.com/iphysresearch/DataSciComp
Top solutions for various competitions: https://github.com/Smilexuhc/Data-Competition-TopSolution

Be sure to talk to other people as much as you can; algorithm roles are not as hard as you might imagine!
The templates below work like essay templates: tweak them a little and they are ready to use.

Preprocess

```python
# generic preprocessing template
import pandas as pd
import numpy as np
import scipy as sp

# file reading
def read_csv_file(f, logging=False):
    print("========== reading data ==========")
    data = pd.read_csv(f)
    if logging:
        print(data.head(5))
        print(f, "contains the following columns:")
        print(data.columns.values)
        print(data.describe())
        print(data.info())
    return data
```
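
For example, with a hypothetical train.csv in the working directory (the file name is only an illustration), the helper can be used like this:

```python
# "train.csv" is a placeholder path; point it at your own data
df_train = read_csv_file("train.csv", logging=True)
```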

Logistic Regression

```python
# generic LogisticRegression template
import pandas as pd
import numpy as np
from scipy import sparse
from sklearn.preprocessing import OneHotEncoder, StandardScaler
from sklearn.linear_model import LogisticRegression

# 1. load data (placeholders: fill in your own data)
df_train = pd.DataFrame()
df_test = pd.DataFrame()
y_train = df_train['label'].values

# 2. process data
ss = StandardScaler()

# 3. feature engineering/encoding
# 3.1 categorical (labeled) features
enc = OneHotEncoder(handle_unknown='ignore')  # ignore categories unseen in the training set
feats = ["creativeID", "adID", "campaignID"]
for i, feat in enumerate(feats):
    x_train = enc.fit_transform(df_train[feat].values.reshape(-1, 1))
    x_test = enc.transform(df_test[feat].values.reshape(-1, 1))  # fit on train only, then transform test
    if i == 0:
        X_train, X_test = x_train, x_test
    else:
        X_train, X_test = sparse.hstack((X_train, x_train)), sparse.hstack((X_test, x_test))

# 3.2 numerical features
# StandardScaler expects 2-D input; otherwise reshape(-1, len(feats)) is required
feats = ["price", "age"]
x_train = ss.fit_transform(df_train[feats].values)
x_test = ss.transform(df_test[feats].values)  # reuse the scaler fitted on the training set
X_train, X_test = sparse.hstack((X_train, x_train)), sparse.hstack((X_test, x_test))

# 4. model training
lr = LogisticRegression()
lr.fit(X_train, y_train)
proba_test = lr.predict_proba(X_test)[:, 1]
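
Fitting each encoder in a hand-written loop and stacking the sparse blocks manually works, but it is easy to leak test statistics or mismatch columns. As a minimal alternative sketch (assuming the same df_train / df_test columns as above), the preprocessing and the classifier can be wrapped into a single scikit-learn Pipeline:

```python
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import OneHotEncoder, StandardScaler
from sklearn.linear_model import LogisticRegression

cat_feats = ["creativeID", "adID", "campaignID"]   # categorical columns, as in the template above
num_feats = ["price", "age"]                       # numerical columns, as in the template above

preprocess = ColumnTransformer([
    ("onehot", OneHotEncoder(handle_unknown="ignore"), cat_feats),
    ("scale", StandardScaler(), num_feats),
])
clf = Pipeline([("preprocess", preprocess), ("lr", LogisticRegression())])

# all encoders and the scaler are fitted on the training data only
clf.fit(df_train[cat_feats + num_feats], y_train)
proba_test = clf.predict_proba(df_test[cat_feats + num_feats])[:, 1]
```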

LightGBM

1. Binary classification

```python
import lightgbm as lgb
import pandas as pd
import numpy as np
import pickle
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import train_test_split

print("Loading Data ... ")
# load data
train_x, train_y, test_x = load_data()

# split the training data into a training set and a validation set with train_test_split;
# adjust test_size to whatever ratio you need
X, val_X, y, val_y = train_test_split(
    train_x,
    train_y,
    test_size=0.05,
    random_state=1,
    stratify=train_y  # keep the class distribution of y identical in both splits
)
X_train = X
y_train = y
X_test = val_X
y_test = val_y

# create dataset for lightgbm
lgb_train = lgb.Dataset(X_train, y_train)
lgb_eval = lgb.Dataset(X_test, y_test, reference=lgb_train)

# specify your configurations as a dict
params = {
    'boosting_type': 'gbdt',
    'objective': 'binary',
    'metric': {'binary_logloss', 'auc'},
    'num_leaves': 5,
    'max_depth': 6,
    'min_data_in_leaf': 450,
    'learning_rate': 0.1,
    'feature_fraction': 0.9,
    'bagging_fraction': 0.95,
    'bagging_freq': 5,
    'lambda_l1': 1,
    'lambda_l2': 0.001,  # larger values mean stronger L2 regularization
    'min_gain_to_split': 0.2,
    'verbose': 5,
    'is_unbalance': True
}

# train
print('Start training...')
gbm = lgb.train(params,
                lgb_train,
                num_boost_round=10000,
                valid_sets=lgb_eval,
                early_stopping_rounds=500)

print('Start predicting...')
preds = gbm.predict(test_x, num_iteration=gbm.best_iteration)  # predicted probabilities

# export results: convert probabilities to 0/1 labels
threshold = 0.5
results = [1 if pred > threshold else 0 for pred in preds]

# export feature importance
importance = gbm.feature_importance()
names = gbm.feature_name()
with open('./feature_importance.txt', 'w+') as file:
    for index, im in enumerate(importance):
        string = names[index] + ', ' + str(im) + '\n'
        file.write(string)
```
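
If the trained booster needs to be reused later without retraining, it can be saved and reloaded. A minimal sketch (the file name is arbitrary):

```python
# persist the booster at its best iteration (file name is just an example)
gbm.save_model('lgb_model.txt', num_iteration=gbm.best_iteration)

# later: reload the booster and predict again
bst = lgb.Booster(model_file='lgb_model.txt')
preds = bst.predict(test_x)
```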

2. Multi-class classification

```python
import lightgbm as lgb
import pandas as pd
import numpy as np
import pickle
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import train_test_split

print("Loading Data ... ")
# load data
train_x, train_y, test_x = load_data()

# split the training data into a training set and a validation set;
# adjust test_size to whatever ratio you need
X, val_X, y, val_y = train_test_split(
    train_x,
    train_y,
    test_size=0.05,
    random_state=1,
    stratify=train_y  # keep the class distribution of y identical in both splits
)
X_train = X
y_train = y
X_test = val_X
y_test = val_y

# create dataset for lightgbm
lgb_train = lgb.Dataset(X_train, y_train)
lgb_eval = lgb.Dataset(X_test, y_test, reference=lgb_train)

# specify your configurations as a dict
params = {
    'boosting_type': 'gbdt',
    'objective': 'multiclass',
    'num_class': 9,
    'metric': 'multi_error',
    'num_leaves': 300,
    'min_data_in_leaf': 100,
    'learning_rate': 0.01,
    'feature_fraction': 0.8,
    'bagging_fraction': 0.8,
    'bagging_freq': 5,
    'lambda_l1': 0.4,
    'lambda_l2': 0.5,
    'min_gain_to_split': 0.2,
    'verbose': 5,
    'is_unbalance': True
}

# train
print('Start training...')
gbm = lgb.train(params,
                lgb_train,
                num_boost_round=10000,
                valid_sets=lgb_eval,
                early_stopping_rounds=500)

print('Start predicting...')
preds = gbm.predict(test_x, num_iteration=gbm.best_iteration)  # one probability per class for each sample

# export results: take the most probable class for each sample
results = [int(np.argmax(pred)) for pred in preds]

# export feature importance
importance = gbm.feature_importance()
names = gbm.feature_name()
with open('./feature_importance.txt', 'w+') as file:
    for index, im in enumerate(importance):
        string = names[index] + ', ' + str(im) + '\n'
        file.write(string)
```

XGBoost

1. Binary classification

```python
import numpy as np
import pandas as pd
import xgboost as xgb
import time
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import train_test_split

# load data and build features
train_x, train_y, test_x = load_data()

# split the training data into a training set and a validation set;
# adjust test_size to whatever ratio you need
X, val_X, y, val_y = train_test_split(
    train_x,
    train_y,
    test_size=0.01,
    random_state=1,
    stratify=train_y
)

# build the DMatrix objects used by xgboost
xgb_val = xgb.DMatrix(val_X, label=val_y)
xgb_train = xgb.DMatrix(X, label=y)
xgb_test = xgb.DMatrix(test_x)

# xgboost parameters
params = {
    'booster': 'gbtree',
    # 'objective': 'multi:softmax',   # multi-class labels
    # 'objective': 'multi:softprob',  # multi-class probabilities
    'objective': 'binary:logistic',
    'eval_metric': 'logloss',
    # 'num_class': 9,            # number of classes, used together with the multi:* objectives
    'gamma': 0.1,                # minimum loss reduction required to split (controls pruning); larger is more conservative, typically 0.1-0.2
    'max_depth': 8,              # tree depth; deeper trees overfit more easily
    'alpha': 0,                  # L1 regularization term
    'lambda': 10,                # L2 regularization term on weights; larger values make the model less prone to overfitting
    'subsample': 0.7,            # row subsampling for each tree
    'colsample_bytree': 0.5,     # column subsampling for each tree
    'min_child_weight': 3,
    # min_child_weight (default 1) is the minimum sum of instance hessians in a leaf. For imbalanced 0-1
    # classification, if the hessian is around 0.01, a value of 1 means a leaf needs roughly 100 samples.
    # It strongly affects the result: smaller values make overfitting easier.
    'silent': 0,                 # 1 suppresses the training log; 0 is usually preferable
    'eta': 0.03,                 # shrinkage, similar to a learning rate
    'seed': 1000,
    'nthread': -1,               # number of CPU threads
    'missing': 1,
    'scale_pos_weight': (np.sum(y == 0) / np.sum(y == 1))  # handles class imbalance; usually sum(negative cases) / sum(positive cases)
    # 'eval_metric': 'auc'
}
plst = list(params.items())
num_rounds = 2000  # number of boosting rounds
watchlist = [(xgb_train, 'train'), (xgb_val, 'val')]

# cross validation
result = xgb.cv(plst, xgb_train, num_boost_round=200, nfold=4, early_stopping_rounds=200,
                verbose_eval=True, folds=StratifiedKFold(n_splits=4).split(X, y))

# train and save the model
# when num_rounds is large, early_stopping_rounds stops training once the validation
# metric has not improved for that many rounds
model = xgb.train(plst, xgb_train, num_rounds, watchlist, early_stopping_rounds=200)
model.save_model('../data/model/xgb.model')  # persist the trained model
preds = model.predict(xgb_test)

# export results: convert probabilities to 0/1 labels
threshold = 0.5
results = [1 if pred > threshold else 0 for pred in preds]
```
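
The model file saved above can be reloaded later for prediction without retraining; a minimal sketch:

```python
# reload the saved booster and predict on the test DMatrix again
bst = xgb.Booster(model_file='../data/model/xgb.model')
preds = bst.predict(xgb.DMatrix(test_x))
```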

Keras

1. Binary classification

```python
# coding=utf-8
import numpy as np
import pandas as pd
import time
from sklearn.model_selection import train_test_split
from matplotlib import pyplot as plt
from keras.models import Sequential
from keras.layers import Dropout
from keras.layers import Dense, Activation
from keras.utils.np_utils import to_categorical
from model.util import load_data as load_data_1
from model.util_combine_train_test import load_data as load_data_2
from sklearn.preprocessing import StandardScaler  # feature standardization
from sklearn.preprocessing import Imputer         # missing-value imputation (replaced by SimpleImputer in newer scikit-learn)

print("Loading Data ... ")
# load data
train_x, train_y, test_x = load_data()

# build features
X_train = train_x.values
X_test = test_x.values
y = train_y

# fill missing values with the column mean
imp = Imputer(missing_values='NaN', strategy='mean', axis=0)
X_train = imp.fit_transform(X_train)
X_test = imp.transform(X_test)  # apply the same imputation to the test set

# standardize with statistics fitted on the training set
sc = StandardScaler()
sc.fit(X_train)
X_train = sc.transform(X_train)
X_test = sc.transform(X_test)

model = Sequential()
model.add(Dense(256, input_shape=(X_train.shape[1],)))
model.add(Activation('tanh'))
model.add(Dropout(0.3))
model.add(Dense(512))
model.add(Activation('relu'))
model.add(Dropout(0.3))
model.add(Dense(512))
model.add(Activation('tanh'))
model.add(Dropout(0.3))
model.add(Dense(256))
model.add(Activation('linear'))
model.add(Dense(1))  # output dimension must match the target (one unit for binary classification)
model.add(Activation('sigmoid'))

# binary classification problem
model.compile(loss='binary_crossentropy',
              optimizer='rmsprop',
              metrics=['accuracy'])

epochs = 100
model.fit(X_train, y, epochs=epochs, batch_size=2000, validation_split=0.1, shuffle=True)

# export results: predict each test case and threshold the probability
threshold = 0.5
for index, case in enumerate(X_test):
    case = np.array([case])
    prediction_prob = model.predict(case)
    prediction = 1 if prediction_prob[0][0] > threshold else 0
```
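
Predicting the test cases one by one is slow. The same labels can be produced in a single vectorized call (a sketch using the model and threshold defined above):

```python
# predict all test samples in one batch and threshold the probabilities
prediction_prob = model.predict(X_test, batch_size=2000)
predictions = (prediction_prob.ravel() > threshold).astype(int)
```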

2. Multi-class classification

```python
# coding=utf-8
import numpy as np
import pandas as pd
import time
from sklearn.model_selection import train_test_split
from matplotlib import pyplot as plt
from keras.models import Sequential
from keras.layers import Dropout
from keras.layers import Dense, Activation
from keras.utils.np_utils import to_categorical
from model.util import load_data as load_data_1
from model.util_combine_train_test import load_data as load_data_2
from sklearn.preprocessing import StandardScaler  # feature standardization
from sklearn.preprocessing import Imputer

print("Loading Data ... ")
# load data
train_x, train_y, test_x = load_data()

# build features
X_train = train_x.values
X_test = test_x.values
y = train_y

# feature processing: standardize with statistics fitted on the training set
sc = StandardScaler()
sc.fit(X_train)
X_train = sc.transform(X_train)
X_test = sc.transform(X_test)
y = to_categorical(y)  # important: one-hot encode the multi-class labels

model = Sequential()
model.add(Dense(256, input_shape=(X_train.shape[1],)))
model.add(Activation('tanh'))
model.add(Dropout(0.3))
model.add(Dense(512))
model.add(Activation('relu'))
model.add(Dropout(0.3))
model.add(Dense(512))
model.add(Activation('tanh'))
model.add(Dropout(0.3))
model.add(Dense(256))
model.add(Activation('linear'))
model.add(Dense(9))  # output dimension must match the number of classes
model.add(Activation('softmax'))

# multi-class classification problem
model.compile(optimizer='rmsprop',
              loss='categorical_crossentropy',
              metrics=['accuracy'])

epochs = 200
model.fit(X_train, y, epochs=epochs, batch_size=200, validation_split=0.1, shuffle=True)

# export results: predict each test case and take the most probable class
for index, case in enumerate(X_test):
    case = np.array([case])
    prediction_prob = model.predict(case)
    prediction = np.argmax(prediction_prob)
```

Handling imbalanced positive and negative samples

In some cases the numbers of positive and negative samples differ enormously and the data are severely unbalanced. Here are a few ways to deal with it.

```python
# compute the ratio of positive to negative samples
positive_num = df_train[df_train['label'] == 1].values.shape[0]
negative_num = df_train[df_train['label'] == 0].values.shape[0]
print(float(positive_num) / float(negative_num))
```

Main ideas

1. Manually adjust the ratio of positive to negative samples.

2. Over-sampling: synthesize new samples for the minority class to ease the class imbalance, e.g. with the SMOTE algorithm (see the sketch after this list).

3. Under-sampling: reduce the number of majority-class samples so the class sizes are closer (also covered in the sketch below).

4. Split the samples into several balanced subsets, train a weak classifier on each, and combine them into an ensemble (a sketch follows the framework recommendation below).
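
A minimal sketch of ideas 2 and 3, using the imbalanced-learn package recommended below and assuming train_x / train_y from the templates above:

```python
from imblearn.over_sampling import SMOTE
from imblearn.under_sampling import RandomUnderSampler

# idea 2: over-sample the minority class by synthesizing new samples with SMOTE
X_res, y_res = SMOTE(random_state=1).fit_resample(train_x, train_y)

# idea 3: randomly under-sample the majority class instead
X_res, y_res = RandomUnderSampler(random_state=1).fit_resample(train_x, train_y)
```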

Recommended framework

There is a well-maintained framework on GitHub dedicated to exactly this kind of problem (a sketch of idea 4 with it follows):
https://github.com/scikit-learn-contrib/imbalanced-learn
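
Idea 4, training several classifiers on balanced subsamples and ensembling them, is also covered by this package. A minimal sketch, assuming train_x / train_y / test_x as above:

```python
from imblearn.ensemble import BalancedBaggingClassifier

# each base estimator is trained on a randomly re-balanced bootstrap sample,
# and the individual predictions are aggregated into one ensemble
clf = BalancedBaggingClassifier(n_estimators=10, random_state=1)
clf.fit(train_x, train_y)
proba_test = clf.predict_proba(test_x)[:, 1]
```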
