Decision trees (DTs) are a non-parametric supervised learning method used for classification and regression. The goal is to create a model that predicts the value of a target variable by learning simple decision rules inferred from the data features. A tree can be seen as a piecewise constant approximation.
Some advantages of decision trees:
- Simple to understand and to interpret; trees can be visualized.
- Require little data preparation (no need for normalization or feature scaling).
- The cost of making a prediction is logarithmic in the number of training samples.
- A white-box model: a prediction can be explained by a short chain of boolean conditions.
Some disadvantages of decision trees:
- Prone to overfitting: an unconstrained tree can grow overly complex and generalize poorly (mitigated by pruning or by limiting depth and leaf size; see the sketch after this list).
- Unstable: small variations in the data can produce a completely different tree.
- Predictions are piecewise constant, so trees cannot produce smooth outputs and are poor at extrapolation.
- Learning an optimal tree is NP-complete, so practical algorithms are greedy heuristics that do not guarantee a globally optimal tree.
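The overfitting issue above can be addressed with the complexity controls that scikit-learn's tree estimators expose. Below is a minimal sketch (my own illustration, not part of the original notes; the values max_depth=3, min_samples_leaf=5 and ccp_alpha=0.01 are arbitrary assumptions, not tuned):
- # Illustrative sketch: curb overfitting with depth / leaf-size limits and
- # cost-complexity pruning (ccp_alpha); the chosen values are assumptions.
- from sklearn.datasets import load_iris
- from sklearn.model_selection import train_test_split
- from sklearn.tree import DecisionTreeClassifier
-
- X, y = load_iris(return_X_y=True)
- X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
-
- # Unconstrained tree: typically fits the training set perfectly.
- full = DecisionTreeClassifier(random_state=0).fit(X_train, y_train)
- # Constrained tree: shallower, with larger leaves and pruned branches.
- pruned = DecisionTreeClassifier(max_depth=3, min_samples_leaf=5, ccp_alpha=0.01, random_state=0).fit(X_train, y_train)
-
- print("full   train/test accuracy:", full.score(X_train, y_train), full.score(X_test, y_test))
- print("pruned train/test accuracy:", pruned.score(X_train, y_train), pruned.score(X_test, y_test))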
- ### 1. Classification
- from sklearn.datasets import load_iris
- from sklearn import tree
- from sklearn.tree import export_text
- from sklearn.model_selection import train_test_split
-
- iris = load_iris()
- X, y = iris.data, iris.target
- X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4, random_state=0)
- clf = tree.DecisionTreeClassifier(random_state=0, max_depth=2)
- clf = clf.fit(X_train, y_train)
- r = export_text(clf, feature_names=iris['feature_names'])  # textual summary of the learned rules
- print(r)
- print(clf.predict(X_test))           # predicted class labels for the test set
- # print(clf.predict_proba(X_test))   # probability of each class
- print(clf.score(X_test, y_test))     # mean accuracy on the test set
-
- # plot
- import matplotlib.pyplot as plt
- from sklearn.tree import plot_tree
- plt.figure()
- clf = tree.DecisionTreeClassifier().fit(iris.data, iris.target)  # refit an unconstrained tree on the full dataset for visualization
- plot_tree(clf, filled=True)
- plt.title("Decision tree trained on all the iris features")
- plt.show()
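The score above comes from a single 60/40 split. As a complement, here is a short sketch (my addition; 5-fold cross-validation is an assumed choice, not from the original example) that estimates the same classifier's accuracy more robustly:
- # Sketch: evaluate the depth-2 classifier with 5-fold cross-validation.
- from sklearn.datasets import load_iris
- from sklearn.model_selection import cross_val_score
- from sklearn.tree import DecisionTreeClassifier
-
- X, y = load_iris(return_X_y=True)
- scores = cross_val_score(DecisionTreeClassifier(random_state=0, max_depth=2), X, y, cv=5)
- print(scores)         # accuracy of each fold
- print(scores.mean())  # average accuracy across the folds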
- ### 2. Regression
- # Import the necessary modules and libraries
- import numpy as np
- from sklearn.tree import DecisionTreeRegressor
- import matplotlib.pyplot as plt
-
- # Create a random dataset
- rng = np.random.RandomState(1)
- X = np.sort(5 * rng.rand(80, 1), axis=0)  # 80 samples in [0, 5)
- y = np.sin(X).ravel()
- y[::5] += 3 * (0.5 - rng.rand(16))  # add noise to every 5th target value
-
- # Fit regression model
- regr_1 = DecisionTreeRegressor(max_depth=2)
- regr_2 = DecisionTreeRegressor(max_depth=5)
- regr_1.fit(X, y)
- regr_2.fit(X, y)
-
- # Predict
- X_test = np.arange(0.0, 5.0, 0.01)[:, np.newaxis]
- y_1 = regr_1.predict(X_test)
- y_2 = regr_2.predict(X_test)
-
- # Plot the results
- plt.figure()
- plt.scatter(X, y, s=20, edgecolor="black", c="darkorange", label="data")
- plt.plot(X_test, y_1, color="cornflowerblue", label="max_depth=2", linewidth=2)
- plt.plot(X_test, y_2, color="yellowgreen", label="max_depth=5", linewidth=2)
- plt.xlabel("data")
- plt.ylabel("target")
- plt.title("Decision Tree Regression")
- plt.legend()
- plt.show()
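To tie the plot back to the "piecewise constant approximation" remark in the introduction, a small check (my addition, reusing the same synthetic data) shows that the max_depth=2 model outputs one constant per leaf, i.e. at most 2**2 = 4 distinct values:
- # Sketch: a depth-2 regression tree predicts at most 4 distinct values, one per leaf.
- import numpy as np
- from sklearn.tree import DecisionTreeRegressor
-
- rng = np.random.RandomState(1)
- X = np.sort(5 * rng.rand(80, 1), axis=0)
- y = np.sin(X).ravel()
- y[::5] += 3 * (0.5 - rng.rand(16))
-
- regr = DecisionTreeRegressor(max_depth=2).fit(X, y)
- X_grid = np.arange(0.0, 5.0, 0.01)[:, np.newaxis]
- print(np.unique(regr.predict(X_grid)))  # the constant value predicted in each leaf
- print(regr.get_n_leaves())              # number of leaves, at most 2**max_depth = 4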
References: