Use the naive Bayes method to classify the Wisconsin (USA) breast cancer diagnostic dataset and build a classifier for breast cancer detection that determines whether a patient's tumor is benign or malignant.
1. Import the dataset that ships with sklearn: the Wisconsin breast tumor dataset (load_breast_cancer).
2. Print the dataset keys (keys) to see what information the dataset contains.
3. Print the labeled tumor classes (target_names) and the tumor feature names (feature_names).
4. Split the dataset into a training set and a test set, and print the shape of each.
5. Configure a Gaussian naive Bayes model.
6. Train the model.
7. Evaluate the model: print the model scores (print the training-set score and the test-set score separately).
8. Model prediction: pick a sample and predict its class. (You may repeat this with several different samples.)
Suggested approach: print the class predicted by the model alongside the true class and compare them. If they agree, the model has classified this sample's tumor (benign or malignant) correctly; otherwise it has not.
You may also use other methods to make predictions.
9. Extension (optional): plot the learning curve of Gaussian naive Bayes on the Wisconsin breast tumor dataset.
This is a classic dataset for medical diagnosis analysis. It contains 569 case samples, each described by 30 features.
The samples fall into two classes: malignant and benign.
The features are computed from digitized images of fine-needle aspirates (FNA) of breast masses; they describe characteristics of the cell nuclei visible in the images.
There are many features, and they require some medical background. (See the lab manual for this session for the specific features and their meanings.)
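As a quick sanity check before modeling, a minimal sketch (purely illustrative, not part of the required steps) that confirms the 569 x 30 shape and the class balance described above:
from sklearn.datasets import load_breast_cancer
import numpy as np

cancers = load_breast_cancer()
print(cancers.data.shape)                        # (569, 30): 569 cases, 30 features
print(dict(zip(cancers.target_names,
               np.bincount(cancers.target))))    # 212 malignant, 357 benign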
from sklearn.datasets import load_breast_cancer
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import numpy as np
from sklearn.model_selection import cross_val_score
from sklearn.naive_bayes import GaussianNB  # import naive Bayes with Gaussian class-conditional distributions
from jupyterthemes import jtplot
jtplot.style(theme='monokai')  # choose a plotting theme (optional; requires the jupyterthemes package)
cancers = load_breast_cancer()
cancers.data
array([[1.799e+01, 1.038e+01, 1.228e+02, ..., 2.654e-01, 4.601e-01,
1.189e-01],
[2.057e+01, 1.777e+01, 1.329e+02, ..., 1.860e-01, 2.750e-01,
8.902e-02],
[1.969e+01, 2.125e+01, 1.300e+02, ..., 2.430e-01, 3.613e-01,
8.758e-02],
...,
[1.660e+01, 2.808e+01, 1.083e+02, ..., 1.418e-01, 2.218e-01,
7.820e-02],
[2.060e+01, 2.933e+01, 1.401e+02, ..., 2.650e-01, 4.087e-01,
1.240e-01],
[7.760e+00, 2.454e+01, 4.792e+01, ..., 0.000e+00, 2.871e-01,
7.039e-02]])
print(cancers.keys())
dict_keys(['data', 'target', 'frame', 'target_names', 'DESCR', 'feature_names', 'filename', 'data_module'])
print("target_names:", cancers.target_names)
print("feature_names:", cancers.feature_names)
target_names: ['malignant' 'benign']
feature_names: ['mean radius' 'mean texture' 'mean perimeter' 'mean area'
'mean smoothness' 'mean compactness' 'mean concavity'
'mean concave points' 'mean symmetry' 'mean fractal dimension'
'radius error' 'texture error' 'perimeter error' 'area error'
'smoothness error' 'compactness error' 'concavity error'
'concave points error' 'symmetry error' 'fractal dimension error'
'worst radius' 'worst texture' 'worst perimeter' 'worst area'
'worst smoothness' 'worst compactness' 'worst concavity'
'worst concave points' 'worst symmetry' 'worst fractal dimension']
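Optionally (this assumes pandas is installed; it is not required by the rest of the exercise), the features are easier to browse as a DataFrame with named columns:
import pandas as pd  # assumption: pandas is available

df = pd.DataFrame(cancers.data, columns=cancers.feature_names)
df['target'] = cancers.target            # 0 = malignant, 1 = benign
print(df.head())
print(df['target'].value_counts())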
x_train, x_test, y_train, y_test = train_test_split(
cancers.data, cancers.target, test_size=0.30)
print("y_train.shape:", y_train.shape)
print("y_train.shape:", y_train.shape)
print("x_test.shape:", x_test.shape)
print("y_test.shape:", y_test.shape)
x_train.shape: (398, 30)
y_train.shape: (398,)
x_test.shape: (171, 30)
y_test.shape: (171,)
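Note that train_test_split shuffles the data randomly, so the shapes match but the exact scores below will vary from run to run; the outputs shown here come from the plain split above. For a repeatable, class-balanced split you could pass extra arguments (the seed value is arbitrary):
x_train, x_test, y_train, y_test = train_test_split(
    cancers.data, cancers.target,
    test_size=0.30,
    random_state=42,              # arbitrary fixed seed for reproducibility
    stratify=cancers.target)      # keep the malignant/benign ratio in both subsets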
model = GaussianNB()  # Gaussian naive Bayes
model.fit(x_train, y_train)  # train the Gaussian naive Bayes model
GaussianNB()
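After fitting, GaussianNB has estimated one mean and one variance per class for every feature; these parameters define the Gaussian likelihoods it uses at prediction time. A quick look, purely for inspection (attribute names as in recent scikit-learn versions; older releases expose the variances as sigma_ instead of var_):
print(model.classes_)        # [0 1] -> malignant, benign
print(model.class_prior_)    # estimated prior probability of each class
print(model.theta_.shape)    # (2, 30): per-class mean of each feature
print(model.var_.shape)      # (2, 30): per-class variance of each feature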
scores = cross_val_score(model, x_train, y_train, cv=10,
                         scoring='accuracy')  # 10-fold cross-validated accuracy on the training set
print('Training-set score:', model.score(x_train, y_train))
print('Test-set score:', model.score(x_test, y_test))
print("Mean cross-validated accuracy of the Gaussian naive Bayes model:", scores.mean())
Training-set score: 0.9296482412060302
Test-set score: 0.9649122807017544
Mean cross-validated accuracy of the Gaussian naive Bayes model: 0.9296153846153846
print('Predicted class: {}'.format(model.predict([cancers.data[150]])))
print('True class of the sample:', cancers.target[150])
Predicted class: [1]
True class of the sample: 1
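To repeat the check on several different samples, and to show the predicted labels as class names rather than 0/1, a minimal sketch (the sample indices are arbitrary; predict_proba gives class probabilities instead of hard labels):
for i in [5, 42, 150, 300, 568]:                      # arbitrary sample indices
    pred = model.predict(cancers.data[i].reshape(1, -1))[0]
    true = cancers.target[i]
    print(f"sample {i}: predicted={cancers.target_names[pred]}, "
          f"true={cancers.target_names[true]}, match={pred == true}")

print(model.predict_proba(cancers.data[150].reshape(1, -1)))   # [P(malignant), P(benign)]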
from sklearn.metrics import classification_report  # import the classification report
# classification_report in sklearn produces a text report of the main classification
# metrics, showing precision, recall, F1-score, etc. for each class.
y_predict = model.predict(x_test)  # predictions on the test set
print(classification_report(y_test, y_predict))
              precision    recall  f1-score   support

           0       0.98      0.92      0.95        65
           1       0.95      0.99      0.97       106

    accuracy                           0.96       171
   macro avg       0.97      0.96      0.96       171
weighted avg       0.97      0.96      0.96       171
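The per-class precision and recall above are derived from the confusion matrix; printing it directly can make the report easier to read (confusion_matrix is a standard sklearn function, not part of the original task):
from sklearn.metrics import confusion_matrix

# Rows are true classes (0 = malignant, 1 = benign), columns are predicted classes
print(confusion_matrix(y_test, y_predict))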
from sklearn.model_selection import learning_curve
# Import the random-split cross-validation tool
from sklearn.model_selection import ShuffleSplit


def plot_learning_curve(estimator, title, X, y, ylim=None, cv=None, n_jobs=1,
                        train_sizes=np.linspace(.05, 1., 20)):
    plt.figure()
    plt.title(title)
    if ylim is not None:
        plt.ylim(*ylim)
    # Set the x-axis label
    plt.xlabel("Training examples")
    # Set the y-axis label
    plt.ylabel("Score")
    train_sizes, train_scores, test_scores = learning_curve(
        estimator, X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes)
    train_scores_mean = np.mean(train_scores, axis=1)
    test_scores_mean = np.mean(test_scores, axis=1)
    plt.grid()
    plt.plot(train_sizes, train_scores_mean, 'o-',
             color="r", label="Training score")
    plt.plot(train_sizes, test_scores_mean, 'o-',
             color="g", label="Cross-validation score")
    plt.legend(loc="lower right")
    return plt
# Set the figure title
title = "Learning Curves (Naive Bayes)"
# Set up the cross-validation splitter
cv = ShuffleSplit(n_splits=100, test_size=0.2, random_state=0)
# Use a Gaussian naive Bayes model
estimator = GaussianNB()
# Call the function defined above
plot_learning_curve(estimator, title, cancers.data, cancers.target)
# Show the figure
plt.show()
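As a small extension of the helper above (a sketch, not part of the original exercise), you can call learning_curve directly and shade one standard deviation around each curve with fill_between to see how stable the scores are across the 100 shuffles:
# Sketch: the same learning curve, with +/- 1 std bands across the ShuffleSplit runs
train_sizes, train_scores, test_scores = learning_curve(
    GaussianNB(), cancers.data, cancers.target,
    cv=ShuffleSplit(n_splits=100, test_size=0.2, random_state=0),
    train_sizes=np.linspace(.05, 1., 20))

train_mean, train_std = train_scores.mean(axis=1), train_scores.std(axis=1)
test_mean, test_std = test_scores.mean(axis=1), test_scores.std(axis=1)

plt.figure()
plt.title("Learning Curves (Naive Bayes)")
plt.xlabel("Training examples")
plt.ylabel("Score")
plt.grid()
plt.fill_between(train_sizes, train_mean - train_std, train_mean + train_std, alpha=0.1, color="r")
plt.fill_between(train_sizes, test_mean - test_std, test_mean + test_std, alpha=0.1, color="g")
plt.plot(train_sizes, train_mean, 'o-', color="r", label="Training score")
plt.plot(train_sizes, test_mean, 'o-', color="g", label="Cross-validation score")
plt.legend(loc="lower right")
plt.show()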