# -*- coding: utf-8 -*-
"""
Created on Tue Aug 14 22:21:25 2018

@author: wzy
"""
"""
# =============Neural network for classification=============
import numpy as np
import matplotlib.pyplot as plt
from sklearn.neural_network import MLPClassifier
from sklearn.preprocessing import StandardScaler

# Toy dataset: two input features per row, third column is the binary label.
data = [
    [-0.017612, 14.053064, 0], [-1.395634, 4.662541, 1], [-0.752157, 6.53862, 0], [-1.322371, 7.152853, 0], [0.423363, 11.054677, 0],
    [0.406704, 7.067335, 1], [0.667394, 12.741452, 0], [-2.46015, 6.866805, 1], [0.569411, 9.548755, 0], [-0.026632, 10.427743, 0],
    [0.850433, 6.920334, 1], [1.347183, 13.1755, 0], [1.176813, 3.16702, 1], [-1.781871, 9.097953, 0], [-0.566606, 5.749003, 1],
    [0.931635, 1.589505, 1], [-0.024205, 6.151823, 1], [-0.036453, 2.690988, 1], [-0.196949, 0.444165, 1], [1.014459, 5.754399, 1],
    [1.985298, 3.230619, 1], [-1.693453, -0.55754, 1], [-0.576525, 11.778922, 0], [-0.346811, -1.67873, 1], [-2.124484, 2.672471, 1],
    [1.217916, 9.597015, 0], [-0.733928, 9.098687, 0], [1.416614, 9.619232, 0], [1.38861, 9.341997, 0], [0.317029, 14.739025, 0]
]
dataMat = np.array(data)
X = dataMat[:, 0:2]
y = dataMat[:, 2]

# Neural networks are sensitive to feature scale, so standardize (zero mean,
# unit variance) before training; normalizing or scaling to [-1, 1] also works.
scaler = StandardScaler()   # standardization transformer
scaler.fit(X)               # fit the scaler on the training features
X = scaler.transform(X)     # transform the training set

# solver='lbfgs': L-BFGS performs well on small datasets; 'adam' is robust;
# 'sgd' can be best with careful tuning (learning rate / iterations).
# alpha: L2 regularization strength (MLP regularizes with L2 by default).
# hidden_layer_sizes=(5, 2): two hidden layers with 5 and 2 neurons, i.e. a
# 3-layer network: 2 inputs -> 5 -> 2 -> binary output.
clf = MLPClassifier(solver='lbfgs', alpha=1e-5, hidden_layer_sizes=(5, 2), random_state=1)
clf.fit(X, y)
print('每层网络层系数矩阵维度:\n', [coef.shape for coef in clf.coefs_])

# BUG FIX: the query point is in the RAW feature space, but the model was
# trained on standardized features — transform it with the same scaler first.
sample = scaler.transform([[0.317029, 14.739025]])
y_pred = clf.predict(sample)
print('预测结果:', y_pred)
y_pred_pro = clf.predict_proba(sample)
print('预测结果概率:\n', y_pred_pro)

# Dump every layer's weight matrix (enumerate from 1 = first layer).
for cengindex, wi in enumerate(clf.coefs_, start=1):
    print('第%d层网络层:' % cengindex)
    print('权重矩阵维度:', wi.shape)
    print('系数矩阵:\n', wi)

# Plot the decision regions over the (standardized) feature range.
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1  # range of feature 0
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1  # range of feature 1
# Predict every grid point with step 0.01 across the feature range.
xx1, xx2 = np.meshgrid(np.arange(x_min, x_max, 0.01), np.arange(y_min, y_max, 0.01))
Z = clf.predict(np.c_[xx1.ravel(), xx2.ravel()])  # stack grid into sample rows, then predict
Z = Z.reshape(xx1.shape)  # back to grid shape for plotting

# Decision-region mesh plus the training samples colored by label.
plt.pcolormesh(xx1, xx2, Z, cmap=plt.cm.Paired)
plt.scatter(X[:, 0], X[:, 1], c=y)
plt.show()
"""
# =============Neural network for regression=============

import numpy as np
from sklearn.neural_network import MLPRegressor  # multi-layer perceptron for regression
from sklearn.preprocessing import StandardScaler

# Toy dataset: two input features per row, third column is the regression target.
data = [
    [-0.017612, 14.053064, 14.035452], [-1.395634, 4.662541, 3.266907], [-0.752157, 6.53862, 5.786463], [-1.322371, 7.152853, 5.830482],
    [0.423363, 11.054677, 11.47804], [0.406704, 7.067335, 7.474039], [0.667394, 12.741452, 13.408846], [-2.46015, 6.866805, 4.406655],
    [0.569411, 9.548755, 10.118166], [-0.026632, 10.427743, 10.401111], [0.850433, 6.920334, 7.770767], [1.347183, 13.1755, 14.522683],
    [1.176813, 3.16702, 4.343833], [-1.781871, 9.097953, 7.316082], [-0.566606, 5.749003, 5.182397], [0.931635, 1.589505, 2.52114],
    [-0.024205, 6.151823, 6.127618], [-0.036453, 2.690988, 2.654535], [-0.196949, 0.444165, 0.247216], [1.014459, 5.754399, 6.768858],
    [1.985298, 3.230619, 5.215917], [-1.693453, -0.55754, -2.250993], [-0.576525, 11.778922, 11.202397], [-0.346811, -1.67873, -2.025541],
    [-2.124484, 2.672471, 0.547987], [1.217916, 9.597015, 10.814931], [-0.733928, 9.098687, 8.364759], [1.416614, 9.619232, 11.035846],
    [1.38861, 9.341997, 10.730607], [0.317029, 14.739025, 15.056054]
]

dataMat = np.array(data)
X = dataMat[:, 0:2]
y = dataMat[:, 2]

# Neural networks are sensitive to feature scale — standardize before training.
scaler = StandardScaler()   # standardization transformer
scaler.fit(X)               # fit the scaler on the training features
X = scaler.transform(X)     # transform the training set

# solver='lbfgs': L-BFGS performs well on small datasets; 'adam' is robust;
# 'sgd' can be best with careful tuning (learning rate / iterations).
# alpha: L2 regularization strength (MLP regularizes with L2 by default).
# hidden_layer_sizes=(5, 2): two hidden layers with 5 and 2 neurons, i.e. a
# 3-layer network.
clf = MLPRegressor(solver='lbfgs', alpha=1e-5, hidden_layer_sizes=(5, 2), random_state=1)
clf.fit(X, y)

# BUG FIX: the query point is in the RAW feature space, but the model was
# trained on standardized features — transform it with the same scaler first.
print('预测结果:', clf.predict(scaler.transform([[0.317029, 14.739025]])))

# Dump every layer's weight-matrix shape (enumerate from 1 = first layer).
for cengindex, wi in enumerate(clf.coefs_, start=1):
    print('第%d层网络层:' % cengindex)
    print('权重矩阵维度:', wi.shape)