import numpy as np
from urllib.request import urlopen

# URL of the Pima Indians Diabetes dataset
url = "http://archive.ics.uci.edu/ml/machine-learning-databases/pima-indians-diabetes/pima-indians-diabetes.data"
# download the file
raw_data = urlopen(url)
# load the CSV file as a numpy matrix
dataset = np.loadtxt(raw_data, delimiter=",")
# separate the data from the target attribute: the first 8 columns are features, the 9th is the class label
X = dataset[:, 0:8]
y = dataset[:, 8]
from sklearn.ensemble import ExtraTreesClassifier

# fit an Extra Trees model to the data
model = ExtraTreesClassifier()
model.fit(X, y)
# display the relative importance of each attribute
print(model.feature_importances_)
from sklearn.feature_selection import RFE
from sklearn.linear_model import LogisticRegression

model = LogisticRegression()
# create the RFE model and select 3 attributes
rfe = RFE(estimator=model, n_features_to_select=3)
rfe = rfe.fit(X, y)
# summarize the selection of the attributes
print(rfe.support_)
print(rfe.ranking_)
from sklearn import metrics
from sklearn.linear_model import LogisticRegression

# fit a logistic regression model to the data
model = LogisticRegression()
model.fit(X, y)
print(model)
# make predictions
expected = y
predicted = model.predict(X)
# summarize the fit of the model
print(metrics.classification_report(expected, predicted))
print(metrics.confusion_matrix(expected, predicted))
from sklearn import metrics
from sklearn.naive_bayes import GaussianNB

# fit a naive Bayes model to the data
model = GaussianNB()
model.fit(X, y)
print(model)
# make predictions
expected = y
predicted = model.predict(X)
# summarize the fit of the model
print(metrics.classification_report(expected, predicted))
print(metrics.confusion_matrix(expected, predicted))
from sklearn import metrics
from sklearn.neighbors import KNeighborsClassifier

# fit a k-nearest neighbor model to the data
model = KNeighborsClassifier()
model.fit(X, y)
print(model)
# make predictions
expected = y
predicted = model.predict(X)
# summarize the fit of the model
print(metrics.classification_report(expected, predicted))
print(metrics.confusion_matrix(expected, predicted))
from sklearn import metrics
from sklearn.tree import DecisionTreeClassifier

# fit a CART model to the data
model = DecisionTreeClassifier()
model.fit(X, y)
print(model)
# make predictions
expected = y
predicted = model.predict(X)
# summarize the fit of the model
print(metrics.classification_report(expected, predicted))
print(metrics.confusion_matrix(expected, predicted))
from sklearn import metrics
from sklearn.svm import SVC

# fit an SVM model to the data
model = SVC()
model.fit(X, y)
print(model)
# make predictions
expected = y
predicted = model.predict(X)
# summarize the fit of the model
print(metrics.classification_report(expected, predicted))
print(metrics.confusion_matrix(expected, predicted))
import numpy as np
from sklearn.linear_model import Ridge
from sklearn.model_selection import GridSearchCV

# prepare a range of alpha values to test
alphas = np.array([1, 0.1, 0.01, 0.001, 0.0001, 0])
# create and fit a ridge regression model, testing each alpha
model = Ridge()
grid = GridSearchCV(estimator=model, param_grid=dict(alpha=alphas))
grid.fit(X, y)
print(grid)
# summarize the results of the grid search
print(grid.best_score_)
print(grid.best_estimator_.alpha)
import numpy as np
from scipy.stats import uniform as sp_rand
from sklearn.linear_model import Ridge
from sklearn.model_selection import RandomizedSearchCV

# prepare a uniform distribution to sample for the alpha parameter
param_grid = {'alpha': sp_rand()}
# create and fit a ridge regression model, testing random alpha values
model = Ridge()
rsearch = RandomizedSearchCV(estimator=model, param_distributions=param_grid, n_iter=100)
rsearch.fit(X, y)
print(rsearch)
# summarize the results of the random parameter search
print(rsearch.best_score_)
print(rsearch.best_estimator_.alpha)
At this point we have walked through the whole process of working with the Scikit-Learn library, except for writing the results back out to a file. That step is left to you as an exercise; compared with R, one of Python's great advantages is its excellent documentation.
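If you want a starting point for that exercise, here is a minimal sketch that writes the expected and predicted labels from the last model to a CSV file (the file name predictions.csv is just an illustrative choice):

import numpy as np

# put the true labels and the model's predictions side by side, one row per sample
results = np.column_stack((expected, predicted))
# write them as a plain-text CSV with a header row; the labels are 0/1, so format them as integers
np.savetxt("predictions.csv", results, delimiter=",", header="expected,predicted", comments="", fmt="%d")

If you prefer working with DataFrames, pandas' to_csv method achieves the same result.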
Summary
That concludes this detailed exploration of machine learning with Python and Scikit-Learn.